Total coverage: 352498 (18%) of 1988581
/* mii.c: MII interface library
 *
 * Maintained by Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2001,2002 Jeff Garzik
 *
 * Various code came from myson803.c and other files by Donald Becker.
 * Copyright:
 *
 *	Written 1998-2002 by Donald Becker.
 *
 *	This software may be used and distributed according to the terms of
 *	the GNU General Public License (GPL), incorporated herein by
 *	reference.  Drivers based on or derived from this code fall under
 *	the GPL and must retain the authorship, copyright and license
 *	notice.  This file is not a complete program and may only be used
 *	when the entire operating system is licensed under the GPL.
 *
The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> static u32 mii_get_an(struct mii_if_info *mii, u16 addr) { int advert; advert = mii->mdio_read(mii->dev, mii->phy_id, addr); return mii_lpa_to_ethtool_lpa_t(advert); } /** * mii_ethtool_gset - get settings that are specified in @ecmd * @mii: MII interface * @ecmd: requested ethtool_cmd * * The @ecmd parameter is expected to have been cleared before calling * mii_ethtool_gset(). */ void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; u32 nego; ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); if (mii->supports_gmii) ecmd->supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; /* only supports twisted-pair */ ecmd->port = PORT_MII; /* only supports internal transceiver */ ecmd->transceiver = XCVR_INTERNAL; /* this isn't fully supported at higher layers */ ecmd->phy_address = mii->phy_id; ecmd->mdio_support = ETH_MDIO_SUPPORTS_C22; ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); if (mii->supports_gmii) { ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); if (mii->supports_gmii) ecmd->advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmcr & BMCR_ANENABLE) { ecmd->advertising |= ADVERTISED_Autoneg; ecmd->autoneg = AUTONEG_ENABLE; if (bmsr & BMSR_ANEGCOMPLETE) { ecmd->lp_advertising = mii_get_an(mii, MII_LPA); ecmd->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(stat1000); } else { ecmd->lp_advertising = 0; } nego = ecmd->advertising & ecmd->lp_advertising; if (nego & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half)) { ethtool_cmd_speed_set(ecmd, SPEED_1000); ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full); } else if (nego & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { ethtool_cmd_speed_set(ecmd, SPEED_100); ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full); } else { ethtool_cmd_speed_set(ecmd, SPEED_10); ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full); } } else { ecmd->autoneg = AUTONEG_DISABLE; ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED1000 && (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10))); ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } mii->full_duplex = ecmd->duplex; /* ignore maxtxpkt, maxrxpkt for now */ } /** * mii_ethtool_get_link_ksettings - get settings that are specified in @cmd * @mii: MII interface * @cmd: requested ethtool_link_ksettings * * The @cmd parameter is expected to have been cleared before calling * mii_ethtool_get_link_ksettings(). 
*/ void mii_ethtool_get_link_ksettings(struct mii_if_info *mii, struct ethtool_link_ksettings *cmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; u32 nego, supported, advertising, lp_advertising; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); if (mii->supports_gmii) supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; /* only supports twisted-pair */ cmd->base.port = PORT_MII; /* this isn't fully supported at higher layers */ cmd->base.phy_address = mii->phy_id; cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; advertising = ADVERTISED_TP | ADVERTISED_MII; bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); if (mii->supports_gmii) { ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } advertising |= mii_get_an(mii, MII_ADVERTISE); if (mii->supports_gmii) advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmcr & BMCR_ANENABLE) { advertising |= ADVERTISED_Autoneg; cmd->base.autoneg = AUTONEG_ENABLE; if (bmsr & BMSR_ANEGCOMPLETE) { lp_advertising = mii_get_an(mii, MII_LPA); lp_advertising |= mii_stat1000_to_ethtool_lpa_t(stat1000); } else { lp_advertising = 0; } nego = advertising & lp_advertising; if (nego & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half)) { cmd->base.speed = SPEED_1000; cmd->base.duplex = !!(nego & ADVERTISED_1000baseT_Full); } else if (nego & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { cmd->base.speed = SPEED_100; cmd->base.duplex = !!(nego & ADVERTISED_100baseT_Full); } else { cmd->base.speed = SPEED_10; cmd->base.duplex = !!(nego & ADVERTISED_10baseT_Full); } } else { cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.speed = ((bmcr & BMCR_SPEED1000 && (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10)); cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; lp_advertising = 0; } mii->full_duplex = cmd->base.duplex; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, lp_advertising); /* ignore maxtxpkt, maxrxpkt for now */ } /** * mii_ethtool_sset - set settings that are specified in @ecmd * @mii: MII interface * @ecmd: requested ethtool_cmd * * Returns 0 for success, negative on error. 
*/ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) { struct net_device *dev = mii->dev; u32 speed = ethtool_cmd_speed(ecmd); if (speed != SPEED_10 && speed != SPEED_100 && speed != SPEED_1000) return -EINVAL; if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) return -EINVAL; if (ecmd->port != PORT_MII) return -EINVAL; if (ecmd->transceiver != XCVR_INTERNAL) return -EINVAL; if (ecmd->phy_address != mii->phy_id) return -EINVAL; if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) return -EINVAL; if ((speed == SPEED_1000) && (!mii->supports_gmii)) return -EINVAL; /* ignore supported, maxtxpkt, maxrxpkt */ if (ecmd->autoneg == AUTONEG_ENABLE) { u32 bmcr, advert, tmp; u32 advert2 = 0, tmp2 = 0; if ((ecmd->advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) == 0) return -EINVAL; /* advertise only what has been requested */ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (mii->supports_gmii) { advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising); if (mii->supports_gmii) tmp2 |= ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising); if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; } if ((mii->supports_gmii) && (advert2 != tmp2)) mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); /* turn on autonegotiation, and force a renegotiate */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); mii->force_media = 0; } else { u32 bmcr, tmp; /* turn off auto negotiation, set speed and duplexity */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_FULLDPLX); if (speed == SPEED_1000) tmp |= BMCR_SPEED1000; else if (speed == SPEED_100) tmp |= BMCR_SPEED100; if (ecmd->duplex == DUPLEX_FULL) { tmp |= BMCR_FULLDPLX; mii->full_duplex = 1; } else mii->full_duplex = 0; if (bmcr != tmp) mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); mii->force_media = 1; } return 0; } /** * mii_ethtool_set_link_ksettings - set settings that are specified in @cmd * @mii: MII interfaces * @cmd: requested ethtool_link_ksettings * * Returns 0 for success, negative on error. 
*/ int mii_ethtool_set_link_ksettings(struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd) { struct net_device *dev = mii->dev; u32 speed = cmd->base.speed; if (speed != SPEED_10 && speed != SPEED_100 && speed != SPEED_1000) return -EINVAL; if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL) return -EINVAL; if (cmd->base.port != PORT_MII) return -EINVAL; if (cmd->base.phy_address != mii->phy_id) return -EINVAL; if (cmd->base.autoneg != AUTONEG_DISABLE && cmd->base.autoneg != AUTONEG_ENABLE) return -EINVAL; if ((speed == SPEED_1000) && (!mii->supports_gmii)) return -EINVAL; /* ignore supported, maxtxpkt, maxrxpkt */ if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 bmcr, advert, tmp; u32 advert2 = 0, tmp2 = 0; u32 advertising; ethtool_convert_link_mode_to_legacy_u32( &advertising, cmd->link_modes.advertising); if ((advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) == 0) return -EINVAL; /* advertise only what has been requested */ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (mii->supports_gmii) { advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } tmp |= ethtool_adv_to_mii_adv_t(advertising); if (mii->supports_gmii) tmp2 |= ethtool_adv_to_mii_ctrl1000_t(advertising); if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; } if ((mii->supports_gmii) && (advert2 != tmp2)) mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); /* turn on autonegotiation, and force a renegotiate */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); mii->force_media = 0; } else { u32 bmcr, tmp; /* turn off auto negotiation, set speed and duplexity */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_FULLDPLX); if (speed == SPEED_1000) tmp |= BMCR_SPEED1000; else if (speed == SPEED_100) tmp |= BMCR_SPEED100; if (cmd->base.duplex == DUPLEX_FULL) { tmp |= BMCR_FULLDPLX; mii->full_duplex = 1; } else { mii->full_duplex = 0; } if (bmcr != tmp) mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); mii->force_media = 1; } return 0; } /** * mii_check_gmii_support - check if the MII supports Gb interfaces * @mii: the MII interface */ int mii_check_gmii_support(struct mii_if_info *mii) { int reg; reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); if (reg & BMSR_ESTATEN) { reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS); if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) return 1; } return 0; } /** * mii_link_ok - is link status up/ok * @mii: the MII interface * * Returns 1 if the MII reports link status up/ok, 0 otherwise. */ int mii_link_ok (struct mii_if_info *mii) { /* first, a dummy read, needed to latch some MII phys */ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) return 1; return 0; } /** * mii_nway_restart - restart NWay (autonegotiation) for this interface * @mii: the MII interface * * Returns 0 on success, negative on error. 
*/ int mii_nway_restart (struct mii_if_info *mii) { int bmcr; int r = -EINVAL; /* if autoneg is off, it's an error */ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); if (bmcr & BMCR_ANENABLE) { bmcr |= BMCR_ANRESTART; mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); r = 0; } return r; } /** * mii_check_link - check MII link status * @mii: MII interface * * If the link status changed (previous != current), call * netif_carrier_on() if current link status is Up or call * netif_carrier_off() if current link status is Down. */ void mii_check_link (struct mii_if_info *mii) { int cur_link = mii_link_ok(mii); int prev_link = netif_carrier_ok(mii->dev); if (cur_link && !prev_link) netif_carrier_on(mii->dev); else if (prev_link && !cur_link) netif_carrier_off(mii->dev); } /** * mii_check_media - check the MII interface for a carrier/speed/duplex change * @mii: the MII interface * @ok_to_print: OK to print link up/down messages * @init_media: OK to save duplex mode in @mii * * Returns 1 if the duplex mode changed, 0 if not. * If the media type is forced, always returns 0. */ unsigned int mii_check_media (struct mii_if_info *mii, unsigned int ok_to_print, unsigned int init_media) { unsigned int old_carrier, new_carrier; int advertise, lpa, media, duplex; int lpa2 = 0; /* check current and old link status */ old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; new_carrier = (unsigned int) mii_link_ok(mii); /* if carrier state did not change, this is a "bounce", * just exit as everything is already set correctly */ if ((!init_media) && (old_carrier == new_carrier)) return 0; /* duplex did not change */ /* no carrier, nothing much to do */ if (!new_carrier) { netif_carrier_off(mii->dev); if (ok_to_print) netdev_info(mii->dev, "link down\n"); return 0; /* duplex did not change */ } /* * we have carrier, see who's on the other end */ netif_carrier_on(mii->dev); if (mii->force_media) { if (ok_to_print) netdev_info(mii->dev, "link up\n"); return 0; /* duplex did not change */ } /* get MII advertise and LPA values */ if ((!init_media) && (mii->advertising)) advertise = mii->advertising; else { advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); mii->advertising = advertise; } lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); if (mii->supports_gmii) lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000); /* figure out media and duplex from advertise and LPA values */ media = mii_nway_result(lpa & advertise); duplex = (media & ADVERTISE_FULL) ? 1 : 0; if (lpa2 & LPA_1000FULL) duplex = 1; if (ok_to_print) netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n", lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 : media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10, duplex ? "full" : "half", lpa); if ((init_media) || (mii->full_duplex != duplex)) { mii->full_duplex = duplex; return 1; /* duplex changed */ } return 0; /* duplex did not change */ } /** * generic_mii_ioctl - main MII ioctl interface * @mii_if: the MII interface * @mii_data: MII ioctl data structure * @cmd: MII ioctl command * @duplex_chg_out: pointer to @duplex_changed status if there was no * ioctl error * * Returns 0 on success, negative on error. 
*/ int generic_mii_ioctl(struct mii_if_info *mii_if, struct mii_ioctl_data *mii_data, int cmd, unsigned int *duplex_chg_out) { int rc = 0; unsigned int duplex_changed = 0; if (duplex_chg_out) *duplex_chg_out = 0; mii_data->phy_id &= mii_if->phy_id_mask; mii_data->reg_num &= mii_if->reg_num_mask; switch(cmd) { case SIOCGMIIPHY: mii_data->phy_id = mii_if->phy_id; fallthrough; case SIOCGMIIREG: mii_data->val_out = mii_if->mdio_read(mii_if->dev, mii_data->phy_id, mii_data->reg_num); break; case SIOCSMIIREG: { u16 val = mii_data->val_in; if (mii_data->phy_id == mii_if->phy_id) { switch(mii_data->reg_num) { case MII_BMCR: { unsigned int new_duplex = 0; if (val & (BMCR_RESET|BMCR_ANENABLE)) mii_if->force_media = 0; else mii_if->force_media = 1; if (mii_if->force_media && (val & BMCR_FULLDPLX)) new_duplex = 1; if (mii_if->full_duplex != new_duplex) { duplex_changed = 1; mii_if->full_duplex = new_duplex; } break; } case MII_ADVERTISE: mii_if->advertising = val; break; default: /* do nothing */ break; } } mii_if->mdio_write(mii_if->dev, mii_data->phy_id, mii_data->reg_num, val); break; } default: rc = -EOPNOTSUPP; break; } if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) *duplex_chg_out = 1; return rc; } MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>"); MODULE_DESCRIPTION ("MII hardware support library"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(mii_link_ok); EXPORT_SYMBOL(mii_nway_restart); EXPORT_SYMBOL(mii_ethtool_gset); EXPORT_SYMBOL(mii_ethtool_get_link_ksettings); EXPORT_SYMBOL(mii_ethtool_sset); EXPORT_SYMBOL(mii_ethtool_set_link_ksettings); EXPORT_SYMBOL(mii_check_link); EXPORT_SYMBOL(mii_check_media); EXPORT_SYMBOL(mii_check_gmii_support); EXPORT_SYMBOL(generic_mii_ioctl);
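/*
 * A minimal usage sketch (not part of mii.c): how an Ethernet driver might
 * wire up struct mii_if_info and reuse the helpers exported above. The
 * myeth_* names, the fixed PHY address and the placeholder register value
 * are hypothetical stand-ins for a real driver's MDIO accessors and probe
 * results.
 */
#include <linux/mii.h>
#include <linux/netdevice.h>

struct myeth_priv {
	struct mii_if_info mii;
};

static int myeth_mdio_read(struct net_device *dev, int phy_id, int location)
{
	/* would read the PHY register over the device's MDIO bus */
	return 0xffff;	/* placeholder value */
}

static void myeth_mdio_write(struct net_device *dev, int phy_id, int location,
			     int val)
{
	/* would write the PHY register over the device's MDIO bus */
}

static void myeth_mii_setup(struct net_device *dev)
{
	struct myeth_priv *priv = netdev_priv(dev);

	priv->mii.dev = dev;
	priv->mii.phy_id = 1;		/* PHY address found by probing */
	priv->mii.phy_id_mask = 0x1f;
	priv->mii.reg_num_mask = 0x1f;
	priv->mii.mdio_read = myeth_mdio_read;
	priv->mii.mdio_write = myeth_mdio_write;
	priv->mii.supports_gmii = mii_check_gmii_support(&priv->mii);

	/* record the initial carrier/duplex state and print a link message */
	mii_check_media(&priv->mii, 1, 1);
}

/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG can be forwarded almost verbatim */
static int myeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct myeth_priv *priv = netdev_priv(dev);
	unsigned int duplex_changed;

	return generic_mii_ioctl(&priv->mii, if_mii(rq), cmd, &duplex_changed);
}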
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/module.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tproxy.h>
#include <net/inet_sock.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif

struct nft_tproxy {
	u8	sreg_addr;
	u8	sreg_port;
	u8	family;
};

static void nft_tproxy_eval_v4(const struct nft_expr *expr,
			       struct nft_regs *regs,
			       const struct nft_pktinfo *pkt)
{
	const struct nft_tproxy *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr _hdr, *hp;
	__be32 taddr = 0;
	__be16 tport = 0;
	struct sock *sk;

	if (pkt->tprot != IPPROTO_TCP &&
	    pkt->tprot != IPPROTO_UDP) {
		regs->verdict.code = NFT_BREAK;
		return;
	}

	hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
	if (!hp) {
		regs->verdict.code = NFT_BREAK;
		return;
	}

	/* check if there's an ongoing connection on the packet addresses, this
	 * happens if the redirect already happened and the current packet
	 * belongs to an already established connection
	 */
	sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
				   iph->saddr, iph->daddr,
				   hp->source, hp->dest,
				   skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);

	if (priv->sreg_addr)
		taddr = nft_reg_load_be32(&regs->data[priv->sreg_addr]);
	taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);

	if (priv->sreg_port)
		tport = nft_reg_load_be16(&regs->data[priv->sreg_port]);
	if (!tport)
		tport = hp->dest;

	/* UDP has no TCP_TIME_WAIT state, so we never enter here */
	if (sk && sk->sk_state == TCP_TIME_WAIT) {
		/* reopening a TIME_WAIT connection needs special handling */
		sk = nf_tproxy_handle_time_wait4(nft_net(pkt), skb, taddr, tport, sk);
	} else if (!sk) {
		/* no, there's no established connection, check if
		 * there's a listener on the redirected addr/port
		 */
		sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
					   iph->saddr, taddr,
					   hp->source, tport,
					   skb->dev, NF_TPROXY_LOOKUP_LISTENER);
	}

	if (sk && nf_tproxy_sk_is_transparent(sk))
		nf_tproxy_assign_sock(skb, sk);
else regs->verdict.code = NFT_BREAK; } #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) static void nft_tproxy_eval_v6(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_tproxy *priv = nft_expr_priv(expr); struct sk_buff *skb = pkt->skb; const struct ipv6hdr *iph = ipv6_hdr(skb); int thoff = nft_thoff(pkt); struct udphdr _hdr, *hp; struct in6_addr taddr; __be16 tport = 0; struct sock *sk; int l4proto; memset(&taddr, 0, sizeof(taddr)); if (pkt->tprot != IPPROTO_TCP && pkt->tprot != IPPROTO_UDP) { regs->verdict.code = NFT_BREAK; return; } l4proto = pkt->tprot; hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) { regs->verdict.code = NFT_BREAK; return; } /* check if there's an ongoing connection on the packet addresses, this * happens if the redirect already happened and the current packet * belongs to an already established connection */ sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff, l4proto, &iph->saddr, &iph->daddr, hp->source, hp->dest, nft_in(pkt), NF_TPROXY_LOOKUP_ESTABLISHED); if (priv->sreg_addr) memcpy(&taddr, &regs->data[priv->sreg_addr], sizeof(taddr)); taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr); if (priv->sreg_port) tport = nft_reg_load_be16(&regs->data[priv->sreg_port]); if (!tport) tport = hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) { /* reopening a TIME_WAIT connection needs special handling */ sk = nf_tproxy_handle_time_wait6(skb, l4proto, thoff, nft_net(pkt), &taddr, tport, sk); } else if (!sk) { /* no there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff, l4proto, &iph->saddr, &taddr, hp->source, tport, nft_in(pkt), NF_TPROXY_LOOKUP_LISTENER); } /* NOTE: assign_sock consumes our sk reference */ if (sk && nf_tproxy_sk_is_transparent(sk)) nf_tproxy_assign_sock(skb, sk); else regs->verdict.code = NFT_BREAK; } #endif static void nft_tproxy_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_tproxy *priv = nft_expr_priv(expr); switch (nft_pf(pkt)) { case NFPROTO_IPV4: switch (priv->family) { case NFPROTO_IPV4: case NFPROTO_UNSPEC: nft_tproxy_eval_v4(expr, regs, pkt); return; } break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: switch (priv->family) { case NFPROTO_IPV6: case NFPROTO_UNSPEC: nft_tproxy_eval_v6(expr, regs, pkt); return; } #endif } regs->verdict.code = NFT_BREAK; } static const struct nla_policy nft_tproxy_policy[NFTA_TPROXY_MAX + 1] = { [NFTA_TPROXY_FAMILY] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_TPROXY_REG_ADDR] = { .type = NLA_U32 }, [NFTA_TPROXY_REG_PORT] = { .type = NLA_U32 }, }; static int nft_tproxy_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_tproxy *priv = nft_expr_priv(expr); unsigned int alen = 0; int err; if (!tb[NFTA_TPROXY_FAMILY] || (!tb[NFTA_TPROXY_REG_ADDR] && !tb[NFTA_TPROXY_REG_PORT])) return -EINVAL; priv->family = ntohl(nla_get_be32(tb[NFTA_TPROXY_FAMILY])); switch (ctx->family) { case NFPROTO_IPV4: if (priv->family != NFPROTO_IPV4) return -EINVAL; break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: if (priv->family != NFPROTO_IPV6) return -EINVAL; break; #endif case NFPROTO_INET: break; default: return -EOPNOTSUPP; } /* Address is specified but the rule family is not set accordingly */ if (priv->family == NFPROTO_UNSPEC && tb[NFTA_TPROXY_REG_ADDR]) return -EINVAL; switch 
(priv->family) { case NFPROTO_IPV4: alen = sizeof_field(union nf_inet_addr, in); err = nf_defrag_ipv4_enable(ctx->net); if (err) return err; break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: alen = sizeof_field(union nf_inet_addr, in6); err = nf_defrag_ipv6_enable(ctx->net); if (err) return err; break; #endif case NFPROTO_UNSPEC: /* No address is specified here */ err = nf_defrag_ipv4_enable(ctx->net); if (err) return err; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) err = nf_defrag_ipv6_enable(ctx->net); if (err) return err; #endif break; default: return -EOPNOTSUPP; } if (tb[NFTA_TPROXY_REG_ADDR]) { err = nft_parse_register_load(ctx, tb[NFTA_TPROXY_REG_ADDR], &priv->sreg_addr, alen); if (err < 0) return err; } if (tb[NFTA_TPROXY_REG_PORT]) { err = nft_parse_register_load(ctx, tb[NFTA_TPROXY_REG_PORT], &priv->sreg_port, sizeof(u16)); if (err < 0) return err; } return 0; } static void nft_tproxy_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { const struct nft_tproxy *priv = nft_expr_priv(expr); switch (priv->family) { case NFPROTO_IPV4: nf_defrag_ipv4_disable(ctx->net); break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: nf_defrag_ipv6_disable(ctx->net); break; #endif case NFPROTO_UNSPEC: nf_defrag_ipv4_disable(ctx->net); #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) nf_defrag_ipv6_disable(ctx->net); #endif break; } } static int nft_tproxy_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_tproxy *priv = nft_expr_priv(expr); if (nla_put_be32(skb, NFTA_TPROXY_FAMILY, htonl(priv->family))) return -1; if (priv->sreg_addr && nft_dump_register(skb, NFTA_TPROXY_REG_ADDR, priv->sreg_addr)) return -1; if (priv->sreg_port && nft_dump_register(skb, NFTA_TPROXY_REG_PORT, priv->sreg_port)) return -1; return 0; } static int nft_tproxy_validate(const struct nft_ctx *ctx, const struct nft_expr *expr) { if (ctx->family != NFPROTO_IPV4 && ctx->family != NFPROTO_IPV6 && ctx->family != NFPROTO_INET) return -EOPNOTSUPP; return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING); } static struct nft_expr_type nft_tproxy_type; static const struct nft_expr_ops nft_tproxy_ops = { .type = &nft_tproxy_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_tproxy)), .eval = nft_tproxy_eval, .init = nft_tproxy_init, .destroy = nft_tproxy_destroy, .dump = nft_tproxy_dump, .reduce = NFT_REDUCE_READONLY, .validate = nft_tproxy_validate, }; static struct nft_expr_type nft_tproxy_type __read_mostly = { .name = "tproxy", .ops = &nft_tproxy_ops, .policy = nft_tproxy_policy, .maxattr = NFTA_TPROXY_MAX, .owner = THIS_MODULE, }; static int __init nft_tproxy_module_init(void) { return nft_register_expr(&nft_tproxy_type); } static void __exit nft_tproxy_module_exit(void) { nft_unregister_expr(&nft_tproxy_type); } module_init(nft_tproxy_module_init); module_exit(nft_tproxy_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Máté Eckl"); MODULE_DESCRIPTION("nf_tables tproxy support module"); MODULE_ALIAS_NFT_EXPR("tproxy");
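/*
 * A sketch of the userspace side this expression diverts traffic to (not
 * part of nft_tproxy.c): nf_tproxy_sk_is_transparent() only hands packets
 * to sockets marked IP_TRANSPARENT, so a proxy bound to the port named in a
 * rule along the lines of "tproxy to :3128" in a prerouting chain needs
 * roughly the setup below. The port number and the standalone program are
 * illustrative; error handling is reduced to the essentials.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	/* mark the socket transparent; requires CAP_NET_ADMIN */
	if (setsockopt(fd, IPPROTO_IP, IP_TRANSPARENT, &one, sizeof(one)) < 0) {
		perror("IP_TRANSPARENT");
		close(fd);
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(3128);	/* must match the port in the rule */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 16) < 0) {
		perror("bind/listen");
		close(fd);
		return 1;
	}

	/* accept() loop handling diverted connections would follow here */
	close(fd);
	return 0;
}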
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x64 SIMD accelerated ChaCha and XChaCha stream ciphers,
 * including ChaCha20 (RFC7539)
 *
 * Copyright (C) 2015 Martin Willi
 */

#include <crypto/algapi.h>
#include <crypto/internal/chacha.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/simd.h>

asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
					unsigned int len, int nrounds);
asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds);

asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);

asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);

static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl);

static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
{
	len = min(len, maxblocks * CHACHA_BLOCK_SIZE);
	return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
}

static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
			  unsigned int bytes, int nrounds)
{
	if (IS_ENABLED(CONFIG_AS_AVX512) &&
	    static_branch_likely(&chacha_use_avx512vl)) {
		while (bytes >= CHACHA_BLOCK_SIZE * 8) {
			chacha_8block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			bytes -= CHACHA_BLOCK_SIZE * 8;
			src += CHACHA_BLOCK_SIZE * 8;
			dst += CHACHA_BLOCK_SIZE * 8;
			state[12] += 8;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 4) {
			chacha_8block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 8);
			return;
		}
		if (bytes >
CHACHA_BLOCK_SIZE * 2) { chacha_4block_xor_avx512vl(state, dst, src, bytes, nrounds); state[12] += chacha_advance(bytes, 4); return; } if (bytes) { chacha_2block_xor_avx512vl(state, dst, src, bytes, nrounds); state[12] += chacha_advance(bytes, 2); return; } } if (static_branch_likely(&chacha_use_avx2)) { while (bytes >= CHACHA_BLOCK_SIZE * 8) { chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); bytes -= CHACHA_BLOCK_SIZE * 8; src += CHACHA_BLOCK_SIZE * 8; dst += CHACHA_BLOCK_SIZE * 8; state[12] += 8; } if (bytes > CHACHA_BLOCK_SIZE * 4) { chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); state[12] += chacha_advance(bytes, 8); return; } if (bytes > CHACHA_BLOCK_SIZE * 2) { chacha_4block_xor_avx2(state, dst, src, bytes, nrounds); state[12] += chacha_advance(bytes, 4); return; } if (bytes > CHACHA_BLOCK_SIZE) { chacha_2block_xor_avx2(state, dst, src, bytes, nrounds); state[12] += chacha_advance(bytes, 2); return; } } while (bytes >= CHACHA_BLOCK_SIZE * 4) { chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds); bytes -= CHACHA_BLOCK_SIZE * 4; src += CHACHA_BLOCK_SIZE * 4; dst += CHACHA_BLOCK_SIZE * 4; state[12] += 4; } if (bytes > CHACHA_BLOCK_SIZE) { chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds); state[12] += chacha_advance(bytes, 4); return; } if (bytes) { chacha_block_xor_ssse3(state, dst, src, bytes, nrounds); state[12]++; } } void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) { if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) { hchacha_block_generic(state, stream, nrounds); } else { kernel_fpu_begin(); hchacha_block_ssse3(state, stream, nrounds); kernel_fpu_end(); } } EXPORT_SYMBOL(hchacha_block_arch); void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) { chacha_init_generic(state, key, iv); } EXPORT_SYMBOL(chacha_init_arch); void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() || bytes <= CHACHA_BLOCK_SIZE) return chacha_crypt_generic(state, dst, src, bytes, nrounds); do { unsigned int todo = min_t(unsigned int, bytes, SZ_4K); kernel_fpu_begin(); chacha_dosimd(state, dst, src, todo, nrounds); kernel_fpu_end(); bytes -= todo; src += todo; dst += todo; } while (bytes); } EXPORT_SYMBOL(chacha_crypt_arch); static int chacha_simd_stream_xor(struct skcipher_request *req, const struct chacha_ctx *ctx, const u8 *iv) { u32 state[CHACHA_STATE_WORDS] __aligned(8); struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); chacha_init_generic(state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; if (nbytes < walk.total) nbytes = round_down(nbytes, walk.stride); if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) { chacha_crypt_generic(state, walk.dst.virt.addr, walk.src.virt.addr, nbytes, ctx->nrounds); } else { kernel_fpu_begin(); chacha_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, nbytes, ctx->nrounds); kernel_fpu_end(); } err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; } static int chacha_simd(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); return chacha_simd_stream_xor(req, ctx, req->iv); } static int xchacha_simd(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); u32 state[CHACHA_STATE_WORDS] __aligned(8); struct chacha_ctx subctx; 
u8 real_iv[16]; chacha_init_generic(state, ctx->key, req->iv); if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) { kernel_fpu_begin(); hchacha_block_ssse3(state, subctx.key, ctx->nrounds); kernel_fpu_end(); } else { hchacha_block_generic(state, subctx.key, ctx->nrounds); } subctx.nrounds = ctx->nrounds; memcpy(&real_iv[0], req->iv + 24, 8); memcpy(&real_iv[8], req->iv + 16, 8); return chacha_simd_stream_xor(req, &subctx, real_iv); } static struct skcipher_alg algs[] = { { .base.cra_name = "chacha20", .base.cra_driver_name = "chacha20-simd", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = CHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .setkey = chacha20_setkey, .encrypt = chacha_simd, .decrypt = chacha_simd, }, { .base.cra_name = "xchacha20", .base.cra_driver_name = "xchacha20-simd", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = XCHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .setkey = chacha20_setkey, .encrypt = xchacha_simd, .decrypt = xchacha_simd, }, { .base.cra_name = "xchacha12", .base.cra_driver_name = "xchacha12-simd", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = XCHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .setkey = chacha12_setkey, .encrypt = xchacha_simd, .decrypt = xchacha_simd, }, }; static int __init chacha_simd_mod_init(void) { if (!boot_cpu_has(X86_FEATURE_SSSE3)) return 0; static_branch_enable(&chacha_use_simd); if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { static_branch_enable(&chacha_use_avx2); if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX512VL) && boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */ static_branch_enable(&chacha_use_avx512vl); } return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ? crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0; } static void __exit chacha_simd_mod_fini(void) { if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && boot_cpu_has(X86_FEATURE_SSSE3)) crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); } module_init(chacha_simd_mod_init); module_exit(chacha_simd_mod_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)"); MODULE_ALIAS_CRYPTO("chacha20"); MODULE_ALIAS_CRYPTO("chacha20-simd"); MODULE_ALIAS_CRYPTO("xchacha20"); MODULE_ALIAS_CRYPTO("xchacha20-simd"); MODULE_ALIAS_CRYPTO("xchacha12"); MODULE_ALIAS_CRYPTO("xchacha12-simd");
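/*
 * A minimal caller sketch (not part of this file): how in-kernel code can
 * drive the exported library entry points above. encrypt_buf() and its
 * parameters are illustrative; real users typically go through the generic
 * chacha_crypt() wrapper in <crypto/chacha.h>, which resolves to the _arch
 * variants when this glue is built in.
 */
#include <crypto/chacha.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>

static void encrypt_buf(u8 *dst, const u8 *src, unsigned int len,
			const u8 key[CHACHA_KEY_SIZE],
			const u8 iv[CHACHA_IV_SIZE])
{
	u32 state[CHACHA_STATE_WORDS];
	u32 key_words[CHACHA_KEY_SIZE / sizeof(u32)];
	unsigned int i;

	/* the state is keyed with eight little-endian 32-bit words */
	for (i = 0; i < ARRAY_SIZE(key_words); i++)
		key_words[i] = get_unaligned_le32(key + i * sizeof(u32));

	chacha_init_arch(state, key_words, iv);

	/* 20 rounds = ChaCha20; buffers up to one block fall back to the
	 * generic code, longer ones use the SSSE3/AVX2/AVX-512VL paths
	 * selected at module init */
	chacha_crypt_arch(state, dst, src, len, 20);
}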
// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/memcontrol.h>
#include <linux/statfs.h>
#include <linux/exportfs.h>

#include <asm/ioctls.h>

#include "../fsnotify.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_OLD_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_GROUPS	128
#define FANOTIFY_DEFAULT_FEE_POOL_SIZE	32

/*
 * Legacy fanotify marks limits (8192) is per group and we introduced a tunable
 * limit of marks per user, similar to inotify.
Effectively, the legacy limit * of fanotify marks per user is <max marks per group> * <max groups per user>. * This default limit (1M) also happens to match the increased limit of inotify * max_user_watches since v5.10. */ #define FANOTIFY_DEFAULT_MAX_USER_MARKS \ (FANOTIFY_OLD_DEFAULT_MAX_MARKS * FANOTIFY_DEFAULT_MAX_GROUPS) /* * Most of the memory cost of adding an inode mark is pinning the marked inode. * The size of the filesystem inode struct is not uniform across filesystems, * so double the size of a VFS inode is used as a conservative approximation. */ #define INODE_MARK_COST (2 * sizeof(struct inode)) /* configurable via /proc/sys/fs/fanotify/ */ static int fanotify_max_queued_events __read_mostly; #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> static long ft_zero = 0; static long ft_int_max = INT_MAX; static struct ctl_table fanotify_table[] = { { .procname = "max_user_groups", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS], .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &ft_zero, .extra2 = &ft_int_max, }, { .procname = "max_user_marks", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS], .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &ft_zero, .extra2 = &ft_int_max, }, { .procname = "max_queued_events", .data = &fanotify_max_queued_events, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO }, }; static void __init fanotify_sysctls_init(void) { register_sysctl("fs/fanotify", fanotify_table); } #else #define fanotify_sysctls_init() do { } while (0) #endif /* CONFIG_SYSCTL */ /* * All flags that may be specified in parameter event_f_flags of fanotify_init. * * Internal and external open flags are stored together in field f_flags of * struct file. Only external open flags shall be allowed in event_f_flags. * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be * excluded. 
*/ #define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \ O_ACCMODE | O_APPEND | O_NONBLOCK | \ __O_SYNC | O_DSYNC | O_CLOEXEC | \ O_LARGEFILE | O_NOATIME ) extern const struct fsnotify_ops fanotify_fsnotify_ops; struct kmem_cache *fanotify_mark_cache __ro_after_init; struct kmem_cache *fanotify_fid_event_cachep __ro_after_init; struct kmem_cache *fanotify_path_event_cachep __ro_after_init; struct kmem_cache *fanotify_perm_event_cachep __ro_after_init; #define FANOTIFY_EVENT_ALIGN 4 #define FANOTIFY_FID_INFO_HDR_LEN \ (sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle)) #define FANOTIFY_PIDFD_INFO_HDR_LEN \ sizeof(struct fanotify_event_info_pidfd) #define FANOTIFY_ERROR_INFO_LEN \ (sizeof(struct fanotify_event_info_error)) static int fanotify_fid_info_len(int fh_len, int name_len) { int info_len = fh_len; if (name_len) info_len += name_len + 1; return roundup(FANOTIFY_FID_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN); } /* FAN_RENAME may have one or two dir+name info records */ static int fanotify_dir_name_info_len(struct fanotify_event *event) { struct fanotify_info *info = fanotify_event_info(event); int dir_fh_len = fanotify_event_dir_fh_len(event); int dir2_fh_len = fanotify_event_dir2_fh_len(event); int info_len = 0; if (dir_fh_len) info_len += fanotify_fid_info_len(dir_fh_len, info->name_len); if (dir2_fh_len) info_len += fanotify_fid_info_len(dir2_fh_len, info->name2_len); return info_len; } static size_t fanotify_event_len(unsigned int info_mode, struct fanotify_event *event) { size_t event_len = FAN_EVENT_METADATA_LEN; int fh_len; int dot_len = 0; if (!info_mode) return event_len; if (fanotify_is_error_event(event->mask)) event_len += FANOTIFY_ERROR_INFO_LEN; if (fanotify_event_has_any_dir_fh(event)) { event_len += fanotify_dir_name_info_len(event); } else if ((info_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) { /* * With group flag FAN_REPORT_NAME, if name was not recorded in * event on a directory, we will report the name ".". */ dot_len = 1; } if (info_mode & FAN_REPORT_PIDFD) event_len += FANOTIFY_PIDFD_INFO_HDR_LEN; if (fanotify_event_has_object_fh(event)) { fh_len = fanotify_event_object_fh_len(event); event_len += fanotify_fid_info_len(fh_len, dot_len); } return event_len; } /* * Remove an hashed event from merge hash table. */ static void fanotify_unhash_event(struct fsnotify_group *group, struct fanotify_event *event) { assert_spin_locked(&group->notification_lock); pr_debug("%s: group=%p event=%p bucket=%u\n", __func__, group, event, fanotify_event_hash_bucket(group, event)); if (WARN_ON_ONCE(hlist_unhashed(&event->merge_list))) return; hlist_del_init(&event->merge_list); } /* * Get an fanotify notification event if one exists and is small * enough to fit in "count". Return an error pointer if the count * is not large enough. When permission event is dequeued, its state is * updated accordingly. */ static struct fanotify_event *get_one_event(struct fsnotify_group *group, size_t count) { size_t event_size; struct fanotify_event *event = NULL; struct fsnotify_event *fsn_event; unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES); pr_debug("%s: group=%p count=%zd\n", __func__, group, count); spin_lock(&group->notification_lock); fsn_event = fsnotify_peek_first_event(group); if (!fsn_event) goto out; event = FANOTIFY_E(fsn_event); event_size = fanotify_event_len(info_mode, event); if (event_size > count) { event = ERR_PTR(-EINVAL); goto out; } /* * Held the notification_lock the whole time, so this is the * same event we peeked above. 
*/ fsnotify_remove_first_event(group); if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->state = FAN_EVENT_REPORTED; if (fanotify_is_hashed_event(event->mask)) fanotify_unhash_event(group, event); out: spin_unlock(&group->notification_lock); return event; } static int create_fd(struct fsnotify_group *group, const struct path *path, struct file **file) { int client_fd; struct file *new_file; client_fd = get_unused_fd_flags(group->fanotify_data.f_flags); if (client_fd < 0) return client_fd; /* * we need a new file handle for the userspace program so it can read even if it was * originally opened O_WRONLY. */ new_file = dentry_open(path, group->fanotify_data.f_flags | __FMODE_NONOTIFY, current_cred()); if (IS_ERR(new_file)) { put_unused_fd(client_fd); client_fd = PTR_ERR(new_file); } else { *file = new_file; } return client_fd; } static int process_access_response_info(const char __user *info, size_t info_len, struct fanotify_response_info_audit_rule *friar) { if (info_len != sizeof(*friar)) return -EINVAL; if (copy_from_user(friar, info, sizeof(*friar))) return -EFAULT; if (friar->hdr.type != FAN_RESPONSE_INFO_AUDIT_RULE) return -EINVAL; if (friar->hdr.pad != 0) return -EINVAL; if (friar->hdr.len != sizeof(*friar)) return -EINVAL; return info_len; } /* * Finish processing of permission event by setting it to ANSWERED state and * drop group->notification_lock. */ static void finish_permission_event(struct fsnotify_group *group, struct fanotify_perm_event *event, u32 response, struct fanotify_response_info_audit_rule *friar) __releases(&group->notification_lock) { bool destroy = false; assert_spin_locked(&group->notification_lock); event->response = response & ~FAN_INFO; if (response & FAN_INFO) memcpy(&event->audit_rule, friar, sizeof(*friar)); if (event->state == FAN_EVENT_CANCELED) destroy = true; else event->state = FAN_EVENT_ANSWERED; spin_unlock(&group->notification_lock); if (destroy) fsnotify_destroy_event(group, &event->fae.fse); } static int process_access_response(struct fsnotify_group *group, struct fanotify_response *response_struct, const char __user *info, size_t info_len) { struct fanotify_perm_event *event; int fd = response_struct->fd; u32 response = response_struct->response; int ret = info_len; struct fanotify_response_info_audit_rule friar; pr_debug("%s: group=%p fd=%d response=%u buf=%p size=%zu\n", __func__, group, fd, response, info, info_len); /* * make sure the response is valid, if invalid we do nothing and either * userspace can send a valid response or we will clean it up after the * timeout */ if (response & ~FANOTIFY_RESPONSE_VALID_MASK) return -EINVAL; switch (response & FANOTIFY_RESPONSE_ACCESS) { case FAN_ALLOW: case FAN_DENY: break; default: return -EINVAL; } if ((response & FAN_AUDIT) && !FAN_GROUP_FLAG(group, FAN_ENABLE_AUDIT)) return -EINVAL; if (response & FAN_INFO) { ret = process_access_response_info(info, info_len, &friar); if (ret < 0) return ret; if (fd == FAN_NOFD) return ret; } else { ret = 0; } if (fd < 0) return -EINVAL; spin_lock(&group->notification_lock); list_for_each_entry(event, &group->fanotify_data.access_list, fae.fse.list) { if (event->fd != fd) continue; list_del_init(&event->fae.fse.list); finish_permission_event(group, event, response, &friar); wake_up(&group->fanotify_data.access_waitq); return ret; } spin_unlock(&group->notification_lock); return -ENOENT; } static size_t copy_error_info_to_user(struct fanotify_event *event, char __user *buf, int count) { struct fanotify_event_info_error info = { }; struct 
fanotify_error_event *fee = FANOTIFY_EE(event); info.hdr.info_type = FAN_EVENT_INFO_TYPE_ERROR; info.hdr.len = FANOTIFY_ERROR_INFO_LEN; if (WARN_ON(count < info.hdr.len)) return -EFAULT; info.error = fee->error; info.error_count = fee->err_count; if (copy_to_user(buf, &info, sizeof(info))) return -EFAULT; return info.hdr.len; } static int copy_fid_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, int info_type, const char *name, size_t name_len, char __user *buf, size_t count) { struct fanotify_event_info_fid info = { }; struct file_handle handle = { }; unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh_buf; size_t fh_len = fh ? fh->len : 0; size_t info_len = fanotify_fid_info_len(fh_len, name_len); size_t len = info_len; pr_debug("%s: fh_len=%zu name_len=%zu, info_len=%zu, count=%zu\n", __func__, fh_len, name_len, info_len, count); if (WARN_ON_ONCE(len < sizeof(info) || len > count)) return -EFAULT; /* * Copy event info fid header followed by variable sized file handle * and optionally followed by variable sized filename. */ switch (info_type) { case FAN_EVENT_INFO_TYPE_FID: case FAN_EVENT_INFO_TYPE_DFID: if (WARN_ON_ONCE(name_len)) return -EFAULT; break; case FAN_EVENT_INFO_TYPE_DFID_NAME: case FAN_EVENT_INFO_TYPE_OLD_DFID_NAME: case FAN_EVENT_INFO_TYPE_NEW_DFID_NAME: if (WARN_ON_ONCE(!name || !name_len)) return -EFAULT; break; default: return -EFAULT; } info.hdr.info_type = info_type; info.hdr.len = len; info.fsid = *fsid; if (copy_to_user(buf, &info, sizeof(info))) return -EFAULT; buf += sizeof(info); len -= sizeof(info); if (WARN_ON_ONCE(len < sizeof(handle))) return -EFAULT; handle.handle_type = fh->type; handle.handle_bytes = fh_len; /* Mangle handle_type for bad file_handle */ if (!fh_len) handle.handle_type = FILEID_INVALID; if (copy_to_user(buf, &handle, sizeof(handle))) return -EFAULT; buf += sizeof(handle); len -= sizeof(handle); if (WARN_ON_ONCE(len < fh_len)) return -EFAULT; /* * For an inline fh and inline file name, copy through stack to exclude * the copy from usercopy hardening protections. */ fh_buf = fanotify_fh_buf(fh); if (fh_len <= FANOTIFY_INLINE_FH_LEN) { memcpy(bounce, fh_buf, fh_len); fh_buf = bounce; } if (copy_to_user(buf, fh_buf, fh_len)) return -EFAULT; buf += fh_len; len -= fh_len; if (name_len) { /* Copy the filename with terminating null */ name_len++; if (WARN_ON_ONCE(len < name_len)) return -EFAULT; if (copy_to_user(buf, name, name_len)) return -EFAULT; buf += name_len; len -= name_len; } /* Pad with 0's */ WARN_ON_ONCE(len < 0 || len >= FANOTIFY_EVENT_ALIGN); if (len > 0 && clear_user(buf, len)) return -EFAULT; return info_len; } static int copy_pidfd_info_to_user(int pidfd, char __user *buf, size_t count) { struct fanotify_event_info_pidfd info = { }; size_t info_len = FANOTIFY_PIDFD_INFO_HDR_LEN; if (WARN_ON_ONCE(info_len > count)) return -EFAULT; info.hdr.info_type = FAN_EVENT_INFO_TYPE_PIDFD; info.hdr.len = info_len; info.pidfd = pidfd; if (copy_to_user(buf, &info, info_len)) return -EFAULT; return info_len; } static int copy_info_records_to_user(struct fanotify_event *event, struct fanotify_info *info, unsigned int info_mode, int pidfd, char __user *buf, size_t count) { int ret, total_bytes = 0, info_type = 0; unsigned int fid_mode = info_mode & FANOTIFY_FID_BITS; unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD; /* * Event info records order is as follows: * 1. dir fid + name * 2. (optional) new dir fid + new name * 3. (optional) child fid */ if (fanotify_event_has_dir_fh(event)) { info_type = info->name_len ? 
FAN_EVENT_INFO_TYPE_DFID_NAME : FAN_EVENT_INFO_TYPE_DFID; /* FAN_RENAME uses special info types */ if (event->mask & FAN_RENAME) info_type = FAN_EVENT_INFO_TYPE_OLD_DFID_NAME; ret = copy_fid_info_to_user(fanotify_event_fsid(event), fanotify_info_dir_fh(info), info_type, fanotify_info_name(info), info->name_len, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } /* New dir fid+name may be reported in addition to old dir fid+name */ if (fanotify_event_has_dir2_fh(event)) { info_type = FAN_EVENT_INFO_TYPE_NEW_DFID_NAME; ret = copy_fid_info_to_user(fanotify_event_fsid(event), fanotify_info_dir2_fh(info), info_type, fanotify_info_name2(info), info->name2_len, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (fanotify_event_has_object_fh(event)) { const char *dot = NULL; int dot_len = 0; if (fid_mode == FAN_REPORT_FID || info_type) { /* * With only group flag FAN_REPORT_FID only type FID is * reported. Second info record type is always FID. */ info_type = FAN_EVENT_INFO_TYPE_FID; } else if ((fid_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) { /* * With group flag FAN_REPORT_NAME, if name was not * recorded in an event on a directory, report the name * "." with info type DFID_NAME. */ info_type = FAN_EVENT_INFO_TYPE_DFID_NAME; dot = "."; dot_len = 1; } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) || (event->mask & FAN_ONDIR)) { /* * With group flag FAN_REPORT_DIR_FID, a single info * record has type DFID for directory entry modification * event and for event on a directory. */ info_type = FAN_EVENT_INFO_TYPE_DFID; } else { /* * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID, * a single info record has type FID for event on a * non-directory, when there is no directory to report. * For example, on FAN_DELETE_SELF event. */ info_type = FAN_EVENT_INFO_TYPE_FID; } ret = copy_fid_info_to_user(fanotify_event_fsid(event), fanotify_event_object_fh(event), info_type, dot, dot_len, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (pidfd_mode) { ret = copy_pidfd_info_to_user(pidfd, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (fanotify_is_error_event(event->mask)) { ret = copy_error_info_to_user(event, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } return total_bytes; } static ssize_t copy_event_to_user(struct fsnotify_group *group, struct fanotify_event *event, char __user *buf, size_t count) { struct fanotify_event_metadata metadata; const struct path *path = fanotify_event_path(event); struct fanotify_info *info = fanotify_event_info(event); unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES); unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD; struct file *f = NULL, *pidfd_file = NULL; int ret, pidfd = -ESRCH, fd = -EBADF; pr_debug("%s: group=%p event=%p\n", __func__, group, event); metadata.event_len = fanotify_event_len(info_mode, event); metadata.metadata_len = FAN_EVENT_METADATA_LEN; metadata.vers = FANOTIFY_METADATA_VERSION; metadata.reserved = 0; metadata.mask = event->mask & FANOTIFY_OUTGOING_EVENTS; metadata.pid = pid_vnr(event->pid); /* * For an unprivileged listener, event->pid can be used to identify the * events generated by the listener process itself, without disclosing * the pids of other processes. 
*/ if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && task_tgid(current) != event->pid) metadata.pid = 0; /* * For now, fid mode is required for an unprivileged listener and * fid mode does not report fd in events. Keep this check anyway * for safety in case fid mode requirement is relaxed in the future * to allow unprivileged listener to get events with no fd and no fid. */ if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && path && path->mnt && path->dentry) { fd = create_fd(group, path, &f); /* * Opening an fd from dentry can fail for several reasons. * For example, when tasks are gone and we try to open their * /proc files or we try to open a WRONLY file like in sysfs * or when trying to open a file that was deleted on the * remote network server. * * For a group with FAN_REPORT_FD_ERROR, we will send the * event with the error instead of the open fd, otherwise * Userspace may not get the error at all. * In any case, userspace will not know which file failed to * open, so add a debug print for further investigation. */ if (fd < 0) { pr_debug("fanotify: create_fd(%pd2) failed err=%d\n", path->dentry, fd); if (!FAN_GROUP_FLAG(group, FAN_REPORT_FD_ERROR)) { /* * Historically, we've handled EOPENSTALE in a * special way and silently dropped such * events. Now we have to keep it to maintain * backward compatibility... */ if (fd == -EOPENSTALE) fd = 0; return fd; } } } if (FAN_GROUP_FLAG(group, FAN_REPORT_FD_ERROR)) metadata.fd = fd; else metadata.fd = fd >= 0 ? fd : FAN_NOFD; if (pidfd_mode) { /* * Complain if the FAN_REPORT_PIDFD and FAN_REPORT_TID mutual * exclusion is ever lifted. At the time of incoporating pidfd * support within fanotify, the pidfd API only supported the * creation of pidfds for thread-group leaders. */ WARN_ON_ONCE(FAN_GROUP_FLAG(group, FAN_REPORT_TID)); /* * The PIDTYPE_TGID check for an event->pid is performed * preemptively in an attempt to catch out cases where the event * listener reads events after the event generating process has * already terminated. Depending on flag FAN_REPORT_FD_ERROR, * report either -ESRCH or FAN_NOPIDFD to the event listener in * those cases with all other pidfd creation errors reported as * the error code itself or as FAN_EPIDFD. */ if (metadata.pid && pid_has_task(event->pid, PIDTYPE_TGID)) pidfd = pidfd_prepare(event->pid, 0, &pidfd_file); if (!FAN_GROUP_FLAG(group, FAN_REPORT_FD_ERROR) && pidfd < 0) pidfd = pidfd == -ESRCH ? FAN_NOPIDFD : FAN_EPIDFD; } ret = -EFAULT; /* * Sanity check copy size in case get_one_event() and * event_len sizes ever get out of sync. 
*/ if (WARN_ON_ONCE(metadata.event_len > count)) goto out_close_fd; if (copy_to_user(buf, &metadata, FAN_EVENT_METADATA_LEN)) goto out_close_fd; buf += FAN_EVENT_METADATA_LEN; count -= FAN_EVENT_METADATA_LEN; if (info_mode) { ret = copy_info_records_to_user(event, info, info_mode, pidfd, buf, count); if (ret < 0) goto out_close_fd; } if (f) fd_install(fd, f); if (pidfd_file) fd_install(pidfd, pidfd_file); if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->fd = fd; return metadata.event_len; out_close_fd: if (f) { put_unused_fd(fd); fput(f); } if (pidfd_file) { put_unused_fd(pidfd); fput(pidfd_file); } return ret; } /* intofiy userspace file descriptor functions */ static __poll_t fanotify_poll(struct file *file, poll_table *wait) { struct fsnotify_group *group = file->private_data; __poll_t ret = 0; poll_wait(file, &group->notification_waitq, wait); spin_lock(&group->notification_lock); if (!fsnotify_notify_queue_is_empty(group)) ret = EPOLLIN | EPOLLRDNORM; spin_unlock(&group->notification_lock); return ret; } static ssize_t fanotify_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct fsnotify_group *group; struct fanotify_event *event; char __user *start; int ret; DEFINE_WAIT_FUNC(wait, woken_wake_function); start = buf; group = file->private_data; pr_debug("%s: group=%p\n", __func__, group); add_wait_queue(&group->notification_waitq, &wait); while (1) { /* * User can supply arbitrarily large buffer. Avoid softlockups * in case there are lots of available events. */ cond_resched(); event = get_one_event(group, count); if (IS_ERR(event)) { ret = PTR_ERR(event); break; } if (!event) { ret = -EAGAIN; if (file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if (signal_pending(current)) break; if (start != buf) break; wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); continue; } ret = copy_event_to_user(group, event, buf, count); /* * Permission events get queued to wait for response. Other * events can be destroyed now. 
*/ if (!fanotify_is_perm_event(event->mask)) { fsnotify_destroy_event(group, &event->fse); } else { if (ret <= 0 || FANOTIFY_PERM(event)->fd < 0) { spin_lock(&group->notification_lock); finish_permission_event(group, FANOTIFY_PERM(event), FAN_DENY, NULL); wake_up(&group->fanotify_data.access_waitq); } else { spin_lock(&group->notification_lock); list_add_tail(&event->fse.list, &group->fanotify_data.access_list); spin_unlock(&group->notification_lock); } } if (ret < 0) break; buf += ret; count -= ret; } remove_wait_queue(&group->notification_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; return ret; } static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct fanotify_response response; struct fsnotify_group *group; int ret; const char __user *info_buf = buf + sizeof(struct fanotify_response); size_t info_len; if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) return -EINVAL; group = file->private_data; pr_debug("%s: group=%p count=%zu\n", __func__, group, count); if (count < sizeof(response)) return -EINVAL; if (copy_from_user(&response, buf, sizeof(response))) return -EFAULT; info_len = count - sizeof(response); ret = process_access_response(group, &response, info_buf, info_len); if (ret < 0) count = ret; else count = sizeof(response) + ret; return count; } static int fanotify_release(struct inode *ignored, struct file *file) { struct fsnotify_group *group = file->private_data; struct fsnotify_event *fsn_event; /* * Stop new events from arriving in the notification queue. since * userspace cannot use fanotify fd anymore, no event can enter or * leave access_list by now either. */ fsnotify_group_stop_queueing(group); /* * Process all permission events on access_list and notification queue * and simulate reply from userspace. */ spin_lock(&group->notification_lock); while (!list_empty(&group->fanotify_data.access_list)) { struct fanotify_perm_event *event; event = list_first_entry(&group->fanotify_data.access_list, struct fanotify_perm_event, fae.fse.list); list_del_init(&event->fae.fse.list); finish_permission_event(group, event, FAN_ALLOW, NULL); spin_lock(&group->notification_lock); } /* * Destroy all non-permission events. For permission events just * dequeue them and set the response. They will be freed once the * response is consumed and fanotify_get_response() returns. 
*/ while ((fsn_event = fsnotify_remove_first_event(group))) { struct fanotify_event *event = FANOTIFY_E(fsn_event); if (!(event->mask & FANOTIFY_PERM_EVENTS)) { spin_unlock(&group->notification_lock); fsnotify_destroy_event(group, fsn_event); } else { finish_permission_event(group, FANOTIFY_PERM(event), FAN_ALLOW, NULL); } spin_lock(&group->notification_lock); } spin_unlock(&group->notification_lock); /* Response for all permission events it set, wakeup waiters */ wake_up(&group->fanotify_data.access_waitq); /* matches the fanotify_init->fsnotify_alloc_group */ fsnotify_destroy_group(group); return 0; } static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fsnotify_group *group; struct fsnotify_event *fsn_event; void __user *p; int ret = -ENOTTY; size_t send_len = 0; group = file->private_data; p = (void __user *) arg; switch (cmd) { case FIONREAD: spin_lock(&group->notification_lock); list_for_each_entry(fsn_event, &group->notification_list, list) send_len += FAN_EVENT_METADATA_LEN; spin_unlock(&group->notification_lock); ret = put_user(send_len, (int __user *) p); break; } return ret; } static const struct file_operations fanotify_fops = { .show_fdinfo = fanotify_show_fdinfo, .poll = fanotify_poll, .read = fanotify_read, .write = fanotify_write, .fasync = NULL, .release = fanotify_release, .unlocked_ioctl = fanotify_ioctl, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; static int fanotify_find_path(int dfd, const char __user *filename, struct path *path, unsigned int flags, __u64 mask, unsigned int obj_type) { int ret; pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__, dfd, filename, flags); if (filename == NULL) { CLASS(fd, f)(dfd); if (fd_empty(f)) return -EBADF; if ((flags & FAN_MARK_ONLYDIR) && !(S_ISDIR(file_inode(fd_file(f))->i_mode))) return -ENOTDIR; *path = fd_file(f)->f_path; path_get(path); } else { unsigned int lookup_flags = 0; if (!(flags & FAN_MARK_DONT_FOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (flags & FAN_MARK_ONLYDIR) lookup_flags |= LOOKUP_DIRECTORY; ret = user_path_at(dfd, filename, lookup_flags, path); if (ret) goto out; } /* you can only watch an inode if you have read permissions on it */ ret = path_permission(path, MAY_READ); if (ret) { path_put(path); goto out; } ret = security_path_notify(path, mask, obj_type); if (ret) path_put(path); out: return ret; } static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int flags, __u32 umask, int *destroy) { __u32 oldmask, newmask; /* umask bits cannot be removed by user */ mask &= ~umask; spin_lock(&fsn_mark->lock); oldmask = fsnotify_calc_mask(fsn_mark); if (!(flags & FANOTIFY_MARK_IGNORE_BITS)) { fsn_mark->mask &= ~mask; } else { fsn_mark->ignore_mask &= ~mask; } newmask = fsnotify_calc_mask(fsn_mark); /* * We need to keep the mark around even if remaining mask cannot * result in any events (e.g. mask == FAN_ONDIR) to support incremenal * changes to the mask. * Destroy mark when only umask bits remain. 
*/ *destroy = !((fsn_mark->mask | fsn_mark->ignore_mask) & ~umask); spin_unlock(&fsn_mark->lock); return oldmask & ~newmask; } static int fanotify_remove_mark(struct fsnotify_group *group, void *obj, unsigned int obj_type, __u32 mask, unsigned int flags, __u32 umask) { struct fsnotify_mark *fsn_mark = NULL; __u32 removed; int destroy_mark; fsnotify_group_lock(group); fsn_mark = fsnotify_find_mark(obj, obj_type, group); if (!fsn_mark) { fsnotify_group_unlock(group); return -ENOENT; } removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, umask, &destroy_mark); if (removed & fsnotify_conn_mask(fsn_mark->connector)) fsnotify_recalc_mask(fsn_mark->connector); if (destroy_mark) fsnotify_detach_mark(fsn_mark); fsnotify_group_unlock(group); if (destroy_mark) fsnotify_free_mark(fsn_mark); /* matches the fsnotify_find_mark() */ fsnotify_put_mark(fsn_mark); return 0; } static bool fanotify_mark_update_flags(struct fsnotify_mark *fsn_mark, unsigned int fan_flags) { bool want_iref = !(fan_flags & FAN_MARK_EVICTABLE); unsigned int ignore = fan_flags & FANOTIFY_MARK_IGNORE_BITS; bool recalc = false; /* * When using FAN_MARK_IGNORE for the first time, mark starts using * independent event flags in ignore mask. After that, trying to * update the ignore mask with the old FAN_MARK_IGNORED_MASK API * will result in EEXIST error. */ if (ignore == FAN_MARK_IGNORE) fsn_mark->flags |= FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS; /* * Setting FAN_MARK_IGNORED_SURV_MODIFY for the first time may lead to * the removal of the FS_MODIFY bit in calculated mask if it was set * because of an ignore mask that is now going to survive FS_MODIFY. */ if (ignore && (fan_flags & FAN_MARK_IGNORED_SURV_MODIFY) && !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) { fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; if (!(fsn_mark->mask & FS_MODIFY)) recalc = true; } if (fsn_mark->connector->type != FSNOTIFY_OBJ_TYPE_INODE || want_iref == !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) return recalc; /* * NO_IREF may be removed from a mark, but not added. * When removed, fsnotify_recalc_mask() will take the inode ref. */ WARN_ON_ONCE(!want_iref); fsn_mark->flags &= ~FSNOTIFY_MARK_FLAG_NO_IREF; return true; } static bool fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int fan_flags) { bool recalc; spin_lock(&fsn_mark->lock); if (!(fan_flags & FANOTIFY_MARK_IGNORE_BITS)) fsn_mark->mask |= mask; else fsn_mark->ignore_mask |= mask; recalc = fsnotify_calc_mask(fsn_mark) & ~fsnotify_conn_mask(fsn_mark->connector); recalc |= fanotify_mark_update_flags(fsn_mark, fan_flags); spin_unlock(&fsn_mark->lock); return recalc; } struct fan_fsid { struct super_block *sb; __kernel_fsid_t id; bool weak; }; static int fanotify_set_mark_fsid(struct fsnotify_group *group, struct fsnotify_mark *mark, struct fan_fsid *fsid) { struct fsnotify_mark_connector *conn; struct fsnotify_mark *old; struct super_block *old_sb = NULL; FANOTIFY_MARK(mark)->fsid = fsid->id; mark->flags |= FSNOTIFY_MARK_FLAG_HAS_FSID; if (fsid->weak) mark->flags |= FSNOTIFY_MARK_FLAG_WEAK_FSID; /* First mark added will determine if group is single or multi fsid */ if (list_empty(&group->marks_list)) return 0; /* Find sb of an existing mark */ list_for_each_entry(old, &group->marks_list, g_list) { conn = READ_ONCE(old->connector); if (!conn) continue; old_sb = fsnotify_connector_sb(conn); if (old_sb) break; } /* Only detached marks left? 
*/ if (!old_sb) return 0; /* Do not allow mixing of marks with weak and strong fsid */ if ((mark->flags ^ old->flags) & FSNOTIFY_MARK_FLAG_WEAK_FSID) return -EXDEV; /* Allow mixing of marks with strong fsid from different fs */ if (!fsid->weak) return 0; /* Do not allow mixing marks with weak fsid from different fs */ if (old_sb != fsid->sb) return -EXDEV; /* Do not allow mixing marks from different btrfs sub-volumes */ if (!fanotify_fsid_equal(&FANOTIFY_MARK(old)->fsid, &FANOTIFY_MARK(mark)->fsid)) return -EXDEV; return 0; } static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, void *obj, unsigned int obj_type, unsigned int fan_flags, struct fan_fsid *fsid) { struct ucounts *ucounts = group->fanotify_data.ucounts; struct fanotify_mark *fan_mark; struct fsnotify_mark *mark; int ret; /* * Enforce per user marks limits per user in all containing user ns. * A group with FAN_UNLIMITED_MARKS does not contribute to mark count * in the limited groups account. */ if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS) && !inc_ucount(ucounts->ns, ucounts->uid, UCOUNT_FANOTIFY_MARKS)) return ERR_PTR(-ENOSPC); fan_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); if (!fan_mark) { ret = -ENOMEM; goto out_dec_ucounts; } mark = &fan_mark->fsn_mark; fsnotify_init_mark(mark, group); if (fan_flags & FAN_MARK_EVICTABLE) mark->flags |= FSNOTIFY_MARK_FLAG_NO_IREF; /* Cache fsid of filesystem containing the marked object */ if (fsid) { ret = fanotify_set_mark_fsid(group, mark, fsid); if (ret) goto out_put_mark; } else { fan_mark->fsid.val[0] = fan_mark->fsid.val[1] = 0; } ret = fsnotify_add_mark_locked(mark, obj, obj_type, 0); if (ret) goto out_put_mark; return mark; out_put_mark: fsnotify_put_mark(mark); out_dec_ucounts: if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS)) dec_ucount(ucounts, UCOUNT_FANOTIFY_MARKS); return ERR_PTR(ret); } static int fanotify_group_init_error_pool(struct fsnotify_group *group) { if (mempool_initialized(&group->fanotify_data.error_events_pool)) return 0; return mempool_init_kmalloc_pool(&group->fanotify_data.error_events_pool, FANOTIFY_DEFAULT_FEE_POOL_SIZE, sizeof(struct fanotify_error_event)); } static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark, unsigned int fan_flags) { /* * Non evictable mark cannot be downgraded to evictable mark. */ if (fan_flags & FAN_MARK_EVICTABLE && !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) return -EEXIST; /* * New ignore mask semantics cannot be downgraded to old semantics. */ if (fan_flags & FAN_MARK_IGNORED_MASK && fsn_mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS) return -EEXIST; /* * An ignore mask that survives modify could never be downgraded to not * survive modify. With new FAN_MARK_IGNORE semantics we make that rule * explicit and return an error when trying to update the ignore mask * without the original FAN_MARK_IGNORED_SURV_MODIFY value. 
*/ if (fan_flags & FAN_MARK_IGNORE && !(fan_flags & FAN_MARK_IGNORED_SURV_MODIFY) && fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY) return -EEXIST; return 0; } static int fanotify_add_mark(struct fsnotify_group *group, void *obj, unsigned int obj_type, __u32 mask, unsigned int fan_flags, struct fan_fsid *fsid) { struct fsnotify_mark *fsn_mark; bool recalc; int ret = 0; fsnotify_group_lock(group); fsn_mark = fsnotify_find_mark(obj, obj_type, group); if (!fsn_mark) { fsn_mark = fanotify_add_new_mark(group, obj, obj_type, fan_flags, fsid); if (IS_ERR(fsn_mark)) { fsnotify_group_unlock(group); return PTR_ERR(fsn_mark); } } /* * Check if requested mark flags conflict with an existing mark flags. */ ret = fanotify_may_update_existing_mark(fsn_mark, fan_flags); if (ret) goto out; /* * Error events are pre-allocated per group, only if strictly * needed (i.e. FAN_FS_ERROR was requested). */ if (!(fan_flags & FANOTIFY_MARK_IGNORE_BITS) && (mask & FAN_FS_ERROR)) { ret = fanotify_group_init_error_pool(group); if (ret) goto out; } recalc = fanotify_mark_add_to_mask(fsn_mark, mask, fan_flags); if (recalc) fsnotify_recalc_mask(fsn_mark->connector); out: fsnotify_group_unlock(group); fsnotify_put_mark(fsn_mark); return ret; } static struct fsnotify_event *fanotify_alloc_overflow_event(void) { struct fanotify_event *oevent; oevent = kmalloc(sizeof(*oevent), GFP_KERNEL_ACCOUNT); if (!oevent) return NULL; fanotify_init_event(oevent, 0, FS_Q_OVERFLOW); oevent->type = FANOTIFY_EVENT_TYPE_OVERFLOW; return &oevent->fse; } static struct hlist_head *fanotify_alloc_merge_hash(void) { struct hlist_head *hash; hash = kmalloc(sizeof(struct hlist_head) << FANOTIFY_HTABLE_BITS, GFP_KERNEL_ACCOUNT); if (!hash) return NULL; __hash_init(hash, FANOTIFY_HTABLE_SIZE); return hash; } /* fanotify syscalls */ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) { struct fsnotify_group *group; int f_flags, fd; unsigned int fid_mode = flags & FANOTIFY_FID_BITS; unsigned int class = flags & FANOTIFY_CLASS_BITS; unsigned int internal_flags = 0; pr_debug("%s: flags=%x event_f_flags=%x\n", __func__, flags, event_f_flags); if (!capable(CAP_SYS_ADMIN)) { /* * An unprivileged user can setup an fanotify group with * limited functionality - an unprivileged group is limited to * notification events with file handles and it cannot use * unlimited queue/marks. */ if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode) return -EPERM; /* * Setting the internal flag FANOTIFY_UNPRIV on the group * prevents setting mount/filesystem marks on this group and * prevents reporting pid and open fd in events. */ internal_flags |= FANOTIFY_UNPRIV; } #ifdef CONFIG_AUDITSYSCALL if (flags & ~(FANOTIFY_INIT_FLAGS | FAN_ENABLE_AUDIT)) #else if (flags & ~FANOTIFY_INIT_FLAGS) #endif return -EINVAL; /* * A pidfd can only be returned for a thread-group leader; thus * FAN_REPORT_PIDFD and FAN_REPORT_TID need to remain mutually * exclusive. */ if ((flags & FAN_REPORT_PIDFD) && (flags & FAN_REPORT_TID)) return -EINVAL; if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS) return -EINVAL; switch (event_f_flags & O_ACCMODE) { case O_RDONLY: case O_RDWR: case O_WRONLY: break; default: return -EINVAL; } if (fid_mode && class != FAN_CLASS_NOTIF) return -EINVAL; /* * Child name is reported with parent fid so requires dir fid. * We can report both child fid and dir fid with or without name. 
*/ if ((fid_mode & FAN_REPORT_NAME) && !(fid_mode & FAN_REPORT_DIR_FID)) return -EINVAL; /* * FAN_REPORT_TARGET_FID requires FAN_REPORT_NAME and FAN_REPORT_FID * and is used as an indication to report both dir and child fid on all * dirent events. */ if ((fid_mode & FAN_REPORT_TARGET_FID) && (!(fid_mode & FAN_REPORT_NAME) || !(fid_mode & FAN_REPORT_FID))) return -EINVAL; f_flags = O_RDWR | __FMODE_NONOTIFY; if (flags & FAN_CLOEXEC) f_flags |= O_CLOEXEC; if (flags & FAN_NONBLOCK) f_flags |= O_NONBLOCK; /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */ group = fsnotify_alloc_group(&fanotify_fsnotify_ops, FSNOTIFY_GROUP_USER); if (IS_ERR(group)) { return PTR_ERR(group); } /* Enforce groups limits per user in all containing user ns */ group->fanotify_data.ucounts = inc_ucount(current_user_ns(), current_euid(), UCOUNT_FANOTIFY_GROUPS); if (!group->fanotify_data.ucounts) { fd = -EMFILE; goto out_destroy_group; } group->fanotify_data.flags = flags | internal_flags; group->memcg = get_mem_cgroup_from_mm(current->mm); group->fanotify_data.merge_hash = fanotify_alloc_merge_hash(); if (!group->fanotify_data.merge_hash) { fd = -ENOMEM; goto out_destroy_group; } group->overflow_event = fanotify_alloc_overflow_event(); if (unlikely(!group->overflow_event)) { fd = -ENOMEM; goto out_destroy_group; } if (force_o_largefile()) event_f_flags |= O_LARGEFILE; group->fanotify_data.f_flags = event_f_flags; init_waitqueue_head(&group->fanotify_data.access_waitq); INIT_LIST_HEAD(&group->fanotify_data.access_list); switch (class) { case FAN_CLASS_NOTIF: group->priority = FSNOTIFY_PRIO_NORMAL; break; case FAN_CLASS_CONTENT: group->priority = FSNOTIFY_PRIO_CONTENT; break; case FAN_CLASS_PRE_CONTENT: group->priority = FSNOTIFY_PRIO_PRE_CONTENT; break; default: fd = -EINVAL; goto out_destroy_group; } if (flags & FAN_UNLIMITED_QUEUE) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_destroy_group; group->max_events = UINT_MAX; } else { group->max_events = fanotify_max_queued_events; } if (flags & FAN_UNLIMITED_MARKS) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_destroy_group; } if (flags & FAN_ENABLE_AUDIT) { fd = -EPERM; if (!capable(CAP_AUDIT_WRITE)) goto out_destroy_group; } fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags); if (fd < 0) goto out_destroy_group; return fd; out_destroy_group: fsnotify_destroy_group(group); return fd; } static int fanotify_test_fsid(struct dentry *dentry, unsigned int flags, struct fan_fsid *fsid) { unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; __kernel_fsid_t root_fsid; int err; /* * Make sure dentry is not of a filesystem with zero fsid (e.g. fuse). */ err = vfs_get_fsid(dentry, &fsid->id); if (err) return err; fsid->sb = dentry->d_sb; if (!fsid->id.val[0] && !fsid->id.val[1]) { err = -ENODEV; goto weak; } /* * Make sure dentry is not of a filesystem subvolume (e.g. btrfs) * which uses a different fsid than sb root. */ err = vfs_get_fsid(dentry->d_sb->s_root, &root_fsid); if (err) return err; if (!fanotify_fsid_equal(&root_fsid, &fsid->id)) { err = -EXDEV; goto weak; } fsid->weak = false; return 0; weak: /* Allow weak fsid when marking inodes */ fsid->weak = true; return (mark_type == FAN_MARK_INODE) ? 
0 : err; } /* Check if filesystem can encode a unique fid */ static int fanotify_test_fid(struct dentry *dentry, unsigned int flags) { unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; const struct export_operations *nop = dentry->d_sb->s_export_op; /* * We need to make sure that the filesystem supports encoding of * file handles so user can use name_to_handle_at() to compare fids * reported with events to the file handle of watched objects. */ if (!exportfs_can_encode_fid(nop)) return -EOPNOTSUPP; /* * For sb/mount mark, we also need to make sure that the filesystem * supports decoding file handles, so user has a way to map back the * reported fids to filesystem objects. */ if (mark_type != FAN_MARK_INODE && !exportfs_can_decode_fh(nop)) return -EOPNOTSUPP; return 0; } static int fanotify_events_supported(struct fsnotify_group *group, const struct path *path, __u64 mask, unsigned int flags) { unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; /* Strict validation of events in non-dir inode mask with v5.17+ APIs */ bool strict_dir_events = FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID) || (mask & FAN_RENAME) || (flags & FAN_MARK_IGNORE); /* * Some filesystems such as 'proc' acquire unusual locks when opening * files. For them fanotify permission events have high chances of * deadlocking the system - open done when reporting fanotify event * blocks on this "unusual" lock while another process holding the lock * waits for fanotify permission event to be answered. Just disallow * permission events for such filesystems. */ if (mask & FANOTIFY_PERM_EVENTS && path->mnt->mnt_sb->s_type->fs_flags & FS_DISALLOW_NOTIFY_PERM) return -EINVAL; /* * mount and sb marks are not allowed on kernel internal pseudo fs, * like pipe_mnt, because that would subscribe to events on all the * anonynous pipes in the system. * * SB_NOUSER covers all of the internal pseudo fs whose objects are not * exposed to user's mount namespace, but there are other SB_KERNMOUNT * fs, like nsfs, debugfs, for which the value of allowing sb and mount * mark is questionable. For now we leave them alone. */ if (mark_type != FAN_MARK_INODE && path->mnt->mnt_sb->s_flags & SB_NOUSER) return -EINVAL; /* * We shouldn't have allowed setting dirent events and the directory * flags FAN_ONDIR and FAN_EVENT_ON_CHILD in mask of non-dir inode, * but because we always allowed it, error only when using new APIs. */ if (strict_dir_events && mark_type == FAN_MARK_INODE && !d_is_dir(path->dentry) && (mask & FANOTIFY_DIRONLY_EVENT_BITS)) return -ENOTDIR; return 0; } static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, int dfd, const char __user *pathname) { struct inode *inode = NULL; struct vfsmount *mnt = NULL; struct fsnotify_group *group; struct path path; struct fan_fsid __fsid, *fsid = NULL; u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS; unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; unsigned int mark_cmd = flags & FANOTIFY_MARK_CMD_BITS; unsigned int ignore = flags & FANOTIFY_MARK_IGNORE_BITS; unsigned int obj_type, fid_mode; void *obj; u32 umask = 0; int ret; pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", __func__, fanotify_fd, flags, dfd, pathname, mask); /* we only use the lower 32 bits as of right now. 
*/ if (upper_32_bits(mask)) return -EINVAL; if (flags & ~FANOTIFY_MARK_FLAGS) return -EINVAL; switch (mark_type) { case FAN_MARK_INODE: obj_type = FSNOTIFY_OBJ_TYPE_INODE; break; case FAN_MARK_MOUNT: obj_type = FSNOTIFY_OBJ_TYPE_VFSMOUNT; break; case FAN_MARK_FILESYSTEM: obj_type = FSNOTIFY_OBJ_TYPE_SB; break; default: return -EINVAL; } switch (mark_cmd) { case FAN_MARK_ADD: case FAN_MARK_REMOVE: if (!mask) return -EINVAL; break; case FAN_MARK_FLUSH: if (flags & ~(FANOTIFY_MARK_TYPE_BITS | FAN_MARK_FLUSH)) return -EINVAL; break; default: return -EINVAL; } if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) valid_mask |= FANOTIFY_PERM_EVENTS; if (mask & ~valid_mask) return -EINVAL; /* We don't allow FAN_MARK_IGNORE & FAN_MARK_IGNORED_MASK together */ if (ignore == (FAN_MARK_IGNORE | FAN_MARK_IGNORED_MASK)) return -EINVAL; /* * Event flags (FAN_ONDIR, FAN_EVENT_ON_CHILD) have no effect with * FAN_MARK_IGNORED_MASK. */ if (ignore == FAN_MARK_IGNORED_MASK) { mask &= ~FANOTIFY_EVENT_FLAGS; umask = FANOTIFY_EVENT_FLAGS; } CLASS(fd, f)(fanotify_fd); if (fd_empty(f)) return -EBADF; /* verify that this is indeed an fanotify instance */ if (unlikely(fd_file(f)->f_op != &fanotify_fops)) return -EINVAL; group = fd_file(f)->private_data; /* * An unprivileged user is not allowed to setup mount nor filesystem * marks. This also includes setting up such marks by a group that * was initialized by an unprivileged user. */ if ((!capable(CAP_SYS_ADMIN) || FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) && mark_type != FAN_MARK_INODE) return -EPERM; /* * Permission events require minimum priority FAN_CLASS_CONTENT. */ if (mask & FANOTIFY_PERM_EVENTS && group->priority < FSNOTIFY_PRIO_CONTENT) return -EINVAL; if (mask & FAN_FS_ERROR && mark_type != FAN_MARK_FILESYSTEM) return -EINVAL; /* * Evictable is only relevant for inode marks, because only inode object * can be evicted on memory pressure. */ if (flags & FAN_MARK_EVICTABLE && mark_type != FAN_MARK_INODE) return -EINVAL; /* * Events that do not carry enough information to report * event->fd require a group that supports reporting fid. Those * events are not supported on a mount mark, because they do not * carry enough information (i.e. path) to be filtered by mount * point. */ fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); if (mask & ~(FANOTIFY_FD_EVENTS|FANOTIFY_EVENT_FLAGS) && (!fid_mode || mark_type == FAN_MARK_MOUNT)) return -EINVAL; /* * FAN_RENAME uses special info type records to report the old and * new parent+name. Reporting only old and new parent id is less * useful and was not implemented. 
*/ if (mask & FAN_RENAME && !(fid_mode & FAN_REPORT_NAME)) return -EINVAL; if (mark_cmd == FAN_MARK_FLUSH) { if (mark_type == FAN_MARK_MOUNT) fsnotify_clear_vfsmount_marks_by_group(group); else if (mark_type == FAN_MARK_FILESYSTEM) fsnotify_clear_sb_marks_by_group(group); else fsnotify_clear_inode_marks_by_group(group); return 0; } ret = fanotify_find_path(dfd, pathname, &path, flags, (mask & ALL_FSNOTIFY_EVENTS), obj_type); if (ret) return ret; if (mark_cmd == FAN_MARK_ADD) { ret = fanotify_events_supported(group, &path, mask, flags); if (ret) goto path_put_and_out; } if (fid_mode) { ret = fanotify_test_fsid(path.dentry, flags, &__fsid); if (ret) goto path_put_and_out; ret = fanotify_test_fid(path.dentry, flags); if (ret) goto path_put_and_out; fsid = &__fsid; } /* inode held in place by reference to path; group by fget on fd */ if (mark_type == FAN_MARK_INODE) { inode = path.dentry->d_inode; obj = inode; } else { mnt = path.mnt; if (mark_type == FAN_MARK_MOUNT) obj = mnt; else obj = mnt->mnt_sb; } /* * If some other task has this inode open for write we should not add * an ignore mask, unless that ignore mask is supposed to survive * modification changes anyway. */ if (mark_cmd == FAN_MARK_ADD && (flags & FANOTIFY_MARK_IGNORE_BITS) && !(flags & FAN_MARK_IGNORED_SURV_MODIFY)) { ret = mnt ? -EINVAL : -EISDIR; /* FAN_MARK_IGNORE requires SURV_MODIFY for sb/mount/dir marks */ if (ignore == FAN_MARK_IGNORE && (mnt || S_ISDIR(inode->i_mode))) goto path_put_and_out; ret = 0; if (inode && inode_is_open_for_write(inode)) goto path_put_and_out; } /* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */ if (mnt || !S_ISDIR(inode->i_mode)) { mask &= ~FAN_EVENT_ON_CHILD; umask = FAN_EVENT_ON_CHILD; /* * If group needs to report parent fid, register for getting * events with parent/name info for non-directory. */ if ((fid_mode & FAN_REPORT_DIR_FID) && (flags & FAN_MARK_ADD) && !ignore) mask |= FAN_EVENT_ON_CHILD; } /* create/update an inode mark */ switch (mark_cmd) { case FAN_MARK_ADD: ret = fanotify_add_mark(group, obj, obj_type, mask, flags, fsid); break; case FAN_MARK_REMOVE: ret = fanotify_remove_mark(group, obj, obj_type, mask, flags, umask); break; default: ret = -EINVAL; } path_put_and_out: path_put(&path); return ret; } #ifndef CONFIG_ARCH_SPLIT_ARG64 SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags, __u64, mask, int, dfd, const char __user *, pathname) { return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname); } #endif #if defined(CONFIG_ARCH_SPLIT_ARG64) || defined(CONFIG_COMPAT) SYSCALL32_DEFINE6(fanotify_mark, int, fanotify_fd, unsigned int, flags, SC_ARG64(mask), int, dfd, const char __user *, pathname) { return do_fanotify_mark(fanotify_fd, flags, SC_VAL64(__u64, mask), dfd, pathname); } #endif /* * fanotify_user_setup - Our initialization function. Note that we cannot return * error because we have compiled-in VFS hooks. So an (unlikely) failure here * must result in panic(). */ static int __init fanotify_user_setup(void) { struct sysinfo si; int max_marks; si_meminfo(&si); /* * Allow up to 1% of addressable memory to be accounted for per user * marks limited to the range [8192, 1048576]. mount and sb marks are * a lot cheaper than inode marks, but there is no reason for a user * to have many of those, so calculate by the cost of inode marks. 
*/ max_marks = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) / INODE_MARK_COST; max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS, FANOTIFY_DEFAULT_MAX_USER_MARKS); BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 13); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 11); fanotify_mark_cache = KMEM_CACHE(fanotify_mark, SLAB_PANIC|SLAB_ACCOUNT); fanotify_fid_event_cachep = KMEM_CACHE(fanotify_fid_event, SLAB_PANIC); fanotify_path_event_cachep = KMEM_CACHE(fanotify_path_event, SLAB_PANIC); if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) { fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event, SLAB_PANIC); } fanotify_max_queued_events = FANOTIFY_DEFAULT_MAX_EVENTS; init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS] = FANOTIFY_DEFAULT_MAX_GROUPS; init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS] = max_marks; fanotify_sysctls_init(); return 0; } device_initcall(fanotify_user_setup);
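The file above implements the fanotify_init(2)/fanotify_mark(2) syscalls and the read/write/poll interface on the resulting group fd. As a minimal usage sketch (illustrative only; the program structure and the FAN_OPEN|FAN_CLOSE mask are arbitrary choices, and a mount mark requires CAP_SYS_ADMIN), a notification-class listener might look like this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(int argc, char **argv)
{
	struct fanotify_event_metadata buf[16], *ev;
	ssize_t len;
	int fd;

	/* Notification-only group; event fds are opened read-only. */
	fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* Watch every object on the mount containing argv[1] (or "/"). */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
			  FAN_OPEN | FAN_CLOSE, AT_FDCWD,
			  argc > 1 ? argv[1] : "/") < 0) {
		perror("fanotify_mark");
		return 1;
	}

	while ((len = read(fd, buf, sizeof(buf))) > 0) {
		for (ev = buf; FAN_EVENT_OK(ev, len);
		     ev = FAN_EVENT_NEXT(ev, len)) {
			if (ev->vers != FANOTIFY_METADATA_VERSION)
				return 1;
			printf("pid=%d mask=0x%llx fd=%d\n", (int)ev->pid,
			       (unsigned long long)ev->mask, ev->fd);
			if (ev->fd >= 0)
				close(ev->fd);	/* fd produced by create_fd() above */
		}
	}
	return 0;
}

Permission events (FAN_OPEN_PERM and friends) additionally require FAN_CLASS_CONTENT or FAN_CLASS_PRE_CONTENT at init time and a write() of a struct fanotify_response, which is what process_access_response() above consumes.
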
// SPDX-License-Identifier: GPL-2.0-only /* * fs/anon_inodes.c * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * * Thanks to Arnd Bergmann for code review and suggestions. * More changes for Thomas Gleixner suggestions. * */ #include <linux/cred.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/magic.h> #include <linux/anon_inodes.h> #include <linux/pseudo_fs.h> #include <linux/uaccess.h> static struct vfsmount *anon_inode_mnt __ro_after_init; static struct inode *anon_inode_inode __ro_after_init; /* * anon_inodefs_dname() is called from d_path().
*/ static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(buffer, buflen, "anon_inode:%s", dentry->d_name.name); } static const struct dentry_operations anon_inodefs_dentry_operations = { .d_dname = anon_inodefs_dname, }; static int anon_inodefs_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx = init_pseudo(fc, ANON_INODE_FS_MAGIC); if (!ctx) return -ENOMEM; ctx->dops = &anon_inodefs_dentry_operations; return 0; } static struct file_system_type anon_inode_fs_type = { .name = "anon_inodefs", .init_fs_context = anon_inodefs_init_fs_context, .kill_sb = kill_anon_super, }; static struct inode *anon_inode_make_secure_inode( const char *name, const struct inode *context_inode) { struct inode *inode; const struct qstr qname = QSTR_INIT(name, strlen(name)); int error; inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); if (IS_ERR(inode)) return inode; inode->i_flags &= ~S_PRIVATE; error = security_inode_init_security_anon(inode, &qname, context_inode); if (error) { iput(inode); return ERR_PTR(error); } return inode; } static struct file *__anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode, bool make_inode) { struct inode *inode; struct file *file; if (fops->owner && !try_module_get(fops->owner)) return ERR_PTR(-ENOENT); if (make_inode) { inode = anon_inode_make_secure_inode(name, context_inode); if (IS_ERR(inode)) { file = ERR_CAST(inode); goto err; } } else { inode = anon_inode_inode; if (IS_ERR(inode)) { file = ERR_PTR(-ENODEV); goto err; } /* * We know the anon_inode inode count is always * greater than zero, so ihold() is safe. */ ihold(inode); } file = alloc_file_pseudo(inode, anon_inode_mnt, name, flags & (O_ACCMODE | O_NONBLOCK), fops); if (IS_ERR(file)) goto err_iput; file->f_mapping = inode->i_mapping; file->private_data = priv; return file; err_iput: iput(inode); err: module_put(fops->owner); return file; } /** * anon_inode_getfile - creates a new file instance by hooking it up to an * anonymous inode, and a dentry that describe the "class" * of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * * Creates a new file by hooking it on a single inode. This is useful for files * that do not need to have a full-fledged inode in order to operate correctly. * All the files created with anon_inode_getfile() will share a single inode, * hence saving memory and avoiding code duplication for the file/inode/dentry * setup. Returns the newly created file* or an error pointer. */ struct file *anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags) { return __anon_inode_getfile(name, fops, priv, flags, NULL, false); } EXPORT_SYMBOL_GPL(anon_inode_getfile); /** * anon_inode_getfile_fmode - creates a new file instance by hooking it up to an * anonymous inode, and a dentry that describe the "class" * of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * @f_mode: [in] fmode * * Creates a new file by hooking it on a single inode. This is useful for files * that do not need to have a full-fledged inode in order to operate correctly. 
* All the files created with anon_inode_getfile() will share a single inode, * hence saving memory and avoiding code duplication for the file/inode/dentry * setup. Allows setting the fmode. Returns the newly created file* or an error * pointer. */ struct file *anon_inode_getfile_fmode(const char *name, const struct file_operations *fops, void *priv, int flags, fmode_t f_mode) { struct file *file; file = __anon_inode_getfile(name, fops, priv, flags, NULL, false); if (!IS_ERR(file)) file->f_mode |= f_mode; return file; } EXPORT_SYMBOL_GPL(anon_inode_getfile_fmode); /** * anon_inode_create_getfile - Like anon_inode_getfile(), but creates a new * !S_PRIVATE anon inode rather than reuse the * singleton anon inode and calls the * inode_init_security_anon() LSM hook. * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * @context_inode: * [in] the logical relationship with the new inode (optional) * * Create a new anonymous inode and file pair. This can be done for two * reasons: * * - for the inode to have its own security context, so that LSMs can enforce * policy on the inode's creation; * * - if the caller needs a unique inode, for example in order to customize * the size returned by fstat() * * The LSM may use @context_inode in inode_init_security_anon(), but a * reference to it is not held. * * Returns the newly created file* or an error pointer. */ struct file *anon_inode_create_getfile(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode) { return __anon_inode_getfile(name, fops, priv, flags, context_inode, true); } EXPORT_SYMBOL_GPL(anon_inode_create_getfile); static int __anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode, bool make_inode) { int error, fd; struct file *file; error = get_unused_fd_flags(flags); if (error < 0) return error; fd = error; file = __anon_inode_getfile(name, fops, priv, flags, context_inode, make_inode); if (IS_ERR(file)) { error = PTR_ERR(file); goto err_put_unused_fd; } fd_install(fd, file); return fd; err_put_unused_fd: put_unused_fd(fd); return error; } /** * anon_inode_getfd - creates a new file instance by hooking it up to * an anonymous inode and a dentry that describe * the "class" of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * * Creates a new file by hooking it on a single inode. This is * useful for files that do not need to have a full-fledged inode in * order to operate correctly. All the files created with * anon_inode_getfd() will use the same singleton inode, reducing * memory use and avoiding code duplication for the file/inode/dentry * setup. Returns a newly created file descriptor or an error code. */ int anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags) { return __anon_inode_getfd(name, fops, priv, flags, NULL, false); } EXPORT_SYMBOL_GPL(anon_inode_getfd); /** * anon_inode_create_getfd - Like anon_inode_getfd(), but creates a new * !S_PRIVATE anon inode rather than reuse the singleton anon inode, and calls * the inode_init_security_anon() LSM hook. 
* * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * @context_inode: * [in] the logical relationship with the new inode (optional) * * Create a new anonymous inode and file pair. This can be done for two * reasons: * * - for the inode to have its own security context, so that LSMs can enforce * policy on the inode's creation; * * - if the caller needs a unique inode, for example in order to customize * the size returned by fstat() * * The LSM may use @context_inode in inode_init_security_anon(), but a * reference to it is not held. * * Returns a newly created file descriptor or an error code. */ int anon_inode_create_getfd(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode) { return __anon_inode_getfd(name, fops, priv, flags, context_inode, true); } static int __init anon_inode_init(void) { anon_inode_mnt = kern_mount(&anon_inode_fs_type); if (IS_ERR(anon_inode_mnt)) panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt)); anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); if (IS_ERR(anon_inode_inode)) panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode)); return 0; } fs_initcall(anon_inode_init);
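For context, a typical in-kernel caller of the API above looks roughly like the following sketch (hypothetical demo_* names, not taken from fs/anon_inodes.c): a driver wraps its per-object state in file->private_data and hands userspace an fd backed by the shared anonymous inode.

#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_ctx {
	int value;			/* hypothetical per-fd state */
};

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct demo_ctx *ctx = file->private_data;

	(void)ctx;			/* real code would copy state out to @buf */
	return 0;
}

static int demo_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.read		= demo_read,
	.release	= demo_release,
	.llseek		= noop_llseek,
};

/* Typically called from an ioctl or a syscall that creates the object. */
static int demo_create_fd(void)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	int fd;

	if (!ctx)
		return -ENOMEM;

	/* Shares the singleton anon inode; "[demo]" shows up in /proc/<pid>/fd. */
	fd = anon_inode_getfd("[demo]", &demo_fops, ctx, O_RDWR | O_CLOEXEC);
	if (fd < 0)
		kfree(ctx);
	return fd;
}

anon_inode_create_getfd() would be used instead when the object needs its own inode, for example for LSM labeling, as the kernel-doc above explains.
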
// SPDX-License-Identifier: GPL-2.0-or-later /* xfrm4_protocol.c - Generic xfrm protocol multiplexer. * * Copyright (C) 2013 secunet Security Networks AG * * Author: * Steffen Klassert <steffen.klassert@secunet.com> * * Based on: * net/ipv4/tunnel4.c */ #include <linux/init.h> #include <linux/mutex.h> #include <linux/skbuff.h> #include <net/icmp.h> #include <net/ip.h> #include <net/protocol.h> #include <net/xfrm.h> static struct xfrm4_protocol __rcu *esp4_handlers __read_mostly; static struct xfrm4_protocol __rcu *ah4_handlers __read_mostly; static struct xfrm4_protocol __rcu *ipcomp4_handlers __read_mostly; static DEFINE_MUTEX(xfrm4_protocol_mutex); static inline struct xfrm4_protocol __rcu **proto_handlers(u8 protocol) { switch (protocol) { case IPPROTO_ESP: return &esp4_handlers; case IPPROTO_AH: return &ah4_handlers; case IPPROTO_COMP: return &ipcomp4_handlers; } return NULL; } #define for_each_protocol_rcu(head, handler) \ for (handler = rcu_dereference(head); \ handler != NULL; \ handler = rcu_dereference(handler->next)) \ static int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err) { int ret; struct xfrm4_protocol *handler; struct xfrm4_protocol __rcu **head = proto_handlers(protocol); if (!head) return 0; for_each_protocol_rcu(*head, handler) if ((ret = handler->cb_handler(skb, err)) <= 0) return ret; return 0; } int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) { int ret; struct xfrm4_protocol *handler; struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr); XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; XFRM_SPI_SKB_CB(skb)->family = AF_INET; XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); if (!head) goto out; if (!skb_dst(skb)) { const struct iphdr *iph = ip_hdr(skb); if (ip_route_input_noref(skb, iph->daddr, iph->saddr, ip4h_dscp(iph), skb->dev)) goto drop; } for_each_protocol_rcu(*head, handler) if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL) return ret; out: icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); drop: kfree_skb(skb); return 0; } EXPORT_SYMBOL(xfrm4_rcv_encap); static int xfrm4_esp_rcv(struct sk_buff *skb) { int ret; struct xfrm4_protocol *handler; XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; for_each_protocol_rcu(esp4_handlers, handler) if ((ret = handler->handler(skb)) != -EINVAL) return ret; icmp_send(skb,
ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); kfree_skb(skb); return 0; } static int xfrm4_esp_err(struct sk_buff *skb, u32 info) { struct xfrm4_protocol *handler; for_each_protocol_rcu(esp4_handlers, handler) if (!handler->err_handler(skb, info)) return 0; return -ENOENT; } static int xfrm4_ah_rcv(struct sk_buff *skb) { int ret; struct xfrm4_protocol *handler; XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; for_each_protocol_rcu(ah4_handlers, handler) if ((ret = handler->handler(skb)) != -EINVAL) return ret; icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); kfree_skb(skb); return 0; } static int xfrm4_ah_err(struct sk_buff *skb, u32 info) { struct xfrm4_protocol *handler; for_each_protocol_rcu(ah4_handlers, handler) if (!handler->err_handler(skb, info)) return 0; return -ENOENT; } static int xfrm4_ipcomp_rcv(struct sk_buff *skb) { int ret; struct xfrm4_protocol *handler; XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; for_each_protocol_rcu(ipcomp4_handlers, handler) if ((ret = handler->handler(skb)) != -EINVAL) return ret; icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); kfree_skb(skb); return 0; } static int xfrm4_ipcomp_err(struct sk_buff *skb, u32 info) { struct xfrm4_protocol *handler; for_each_protocol_rcu(ipcomp4_handlers, handler) if (!handler->err_handler(skb, info)) return 0; return -ENOENT; } static const struct net_protocol esp4_protocol = { .handler = xfrm4_esp_rcv, .err_handler = xfrm4_esp_err, .no_policy = 1, }; static const struct net_protocol ah4_protocol = { .handler = xfrm4_ah_rcv, .err_handler = xfrm4_ah_err, .no_policy = 1, }; static const struct net_protocol ipcomp4_protocol = { .handler = xfrm4_ipcomp_rcv, .err_handler = xfrm4_ipcomp_err, .no_policy = 1, }; static const struct xfrm_input_afinfo xfrm4_input_afinfo = { .family = AF_INET, .callback = xfrm4_rcv_cb, }; static inline const struct net_protocol *netproto(unsigned char protocol) { switch (protocol) { case IPPROTO_ESP: return &esp4_protocol; case IPPROTO_AH: return &ah4_protocol; case IPPROTO_COMP: return &ipcomp4_protocol; } return NULL; } int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol) { struct xfrm4_protocol __rcu **pprev; struct xfrm4_protocol *t; bool add_netproto = false; int ret = -EEXIST; int priority = handler->priority; if (!proto_handlers(protocol) || !netproto(protocol)) return -EINVAL; mutex_lock(&xfrm4_protocol_mutex); if (!rcu_dereference_protected(*proto_handlers(protocol), lockdep_is_held(&xfrm4_protocol_mutex))) add_netproto = true; for (pprev = proto_handlers(protocol); (t = rcu_dereference_protected(*pprev, lockdep_is_held(&xfrm4_protocol_mutex))) != NULL; pprev = &t->next) { if (t->priority < priority) break; if (t->priority == priority) goto err; } handler->next = *pprev; rcu_assign_pointer(*pprev, handler); ret = 0; err: mutex_unlock(&xfrm4_protocol_mutex); if (add_netproto) { if (inet_add_protocol(netproto(protocol), protocol)) { pr_err("%s: can't add protocol\n", __func__); ret = -EAGAIN; } } return ret; } EXPORT_SYMBOL(xfrm4_protocol_register); int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol) { struct xfrm4_protocol __rcu **pprev; struct xfrm4_protocol *t; int ret = -ENOENT; if (!proto_handlers(protocol) || !netproto(protocol)) return -EINVAL; mutex_lock(&xfrm4_protocol_mutex); for (pprev = proto_handlers(protocol); (t = rcu_dereference_protected(*pprev, lockdep_is_held(&xfrm4_protocol_mutex))) != NULL; pprev = &t->next) { if (t == handler) { *pprev = handler->next; ret = 0; break; } } if 
(!rcu_dereference_protected(*proto_handlers(protocol), lockdep_is_held(&xfrm4_protocol_mutex))) { if (inet_del_protocol(netproto(protocol), protocol) < 0) { pr_err("%s: can't remove protocol\n", __func__); ret = -EAGAIN; } } mutex_unlock(&xfrm4_protocol_mutex); synchronize_net(); return ret; } EXPORT_SYMBOL(xfrm4_protocol_deregister); void __init xfrm4_protocol_init(void) { xfrm_input_register_afinfo(&xfrm4_input_afinfo); }
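The registration path above is easiest to see with a concrete user. A minimal sketch follows, modelled on the way the ESP implementation plugs into this multiplexer; the my_* names and stub bodies are hypothetical, while xfrm4_rcv(), xfrm_input() and xfrm4_protocol_register() are the real entry points used here.

#include <linux/init.h>
#include <linux/skbuff.h>
#include <net/xfrm.h>

/* Illustrative stubs; a real handler performs ESP processing here. */
static int my_esp_rcv(struct sk_buff *skb)
{
        return xfrm4_rcv(skb);          /* reached via xfrm4_esp_rcv() */
}

static int my_esp_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;                       /* reached via xfrm4_rcv_cb() */
}

static int my_esp_err(struct sk_buff *skb, u32 info)
{
        return 0;                       /* reached via xfrm4_esp_err() */
}

static struct xfrm4_protocol my_esp4_protocol = {
        .handler        = my_esp_rcv,
        .input_handler  = xfrm_input,   /* reached via xfrm4_rcv_encap() */
        .cb_handler     = my_esp_rcv_cb,
        .err_handler    = my_esp_err,
        .priority       = 0,            /* higher priority handlers are tried first */
};

static int __init my_esp4_init(void)
{
        /* The first handler registered for IPPROTO_ESP also hooks
         * esp4_protocol into inet via inet_add_protocol(). */
        return xfrm4_protocol_register(&my_esp4_protocol, IPPROTO_ESP);
}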
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM timestamp #if !defined(_TRACE_TIMESTAMP_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_TIMESTAMP_H #include <linux/tracepoint.h> #include <linux/fs.h> #define CTIME_QUERIED_FLAGS \ { I_CTIME_QUERIED, "Q" } DECLARE_EVENT_CLASS(ctime, TP_PROTO(struct inode *inode, struct timespec64 *ctime), TP_ARGS(inode, ctime), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(time64_t, ctime_s) __field(u32, ctime_ns) __field(u32, gen) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->gen = inode->i_generation; __entry->ctime_s = ctime->tv_sec; __entry->ctime_ns = ctime->tv_nsec; ), TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen, __entry->ctime_s, __entry->ctime_ns ) ); DEFINE_EVENT(ctime, inode_set_ctime_to_ts, TP_PROTO(struct inode *inode, struct timespec64 *ctime), TP_ARGS(inode, ctime)); DEFINE_EVENT(ctime, ctime_xchg_skip, TP_PROTO(struct inode *inode, struct timespec64 *ctime), TP_ARGS(inode, ctime)); TRACE_EVENT(ctime_ns_xchg, TP_PROTO(struct inode *inode, u32 old, u32 new, u32 cur), TP_ARGS(inode, old, new, cur), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(u32, gen) __field(u32, old) __field(u32, new) __field(u32, cur) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->gen = inode->i_generation; __entry->old = old; __entry->new = new; __entry->cur = cur; ), TP_printk("ino=%d:%d:%ld:%u old=%u:%s new=%u cur=%u:%s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen, __entry->old & ~I_CTIME_QUERIED, __print_flags(__entry->old & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS), __entry->new, __entry->cur & ~I_CTIME_QUERIED, __print_flags(__entry->cur & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS) ) ); TRACE_EVENT(fill_mg_cmtime, TP_PROTO(struct inode *inode, struct timespec64 *ctime, struct timespec64 *mtime), TP_ARGS(inode, ctime, mtime), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(time64_t, ctime_s) __field(time64_t, mtime_s) __field(u32, ctime_ns) __field(u32, mtime_ns) __field(u32, gen) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->gen = inode->i_generation; __entry->ctime_s = ctime->tv_sec; __entry->mtime_s = mtime->tv_sec; __entry->ctime_ns = ctime->tv_nsec; __entry->mtime_ns = mtime->tv_nsec; ), TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u mtime=%lld.%u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen, __entry->ctime_s, __entry->ctime_ns, __entry->mtime_s, __entry->mtime_ns ) ); #endif /* _TRACE_TIMESTAMP_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
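A minimal sketch of how these events are emitted (assuming the header is included from its usual <trace/events/timestamp.h> location; the helper name is hypothetical): exactly one compilation unit defines CREATE_TRACE_POINTS to instantiate the tracepoints, and callers then use the generated trace_<event>() functions.

#include <linux/fs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timestamp.h>

/* Hypothetical caller: record the ctime value that is about to be stored. */
static void example_note_ctime(struct inode *inode, struct timespec64 *ts)
{
        trace_inode_set_ctime_to_ts(inode, ts);
}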
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Media Bus API header * * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de> */ #ifndef V4L2_MEDIABUS_H #define V4L2_MEDIABUS_H #include <linux/v4l2-mediabus.h> #include <linux/bitops.h> /* * How to use the V4L2_MBUS_* flags: * Flags are defined for each of the possible states and values of a media * bus configuration parameter. One and only one bit of each group of flags * shall be set by the users of the v4l2_subdev_pad_ops.get_mbus_config * operation to ensure that no conflicting settings are specified when * reporting the media bus configuration. For example, it is invalid to set or * clear both the V4L2_MBUS_HSYNC_ACTIVE_HIGH and the * V4L2_MBUS_HSYNC_ACTIVE_LOW flag at the same time. Instead either flag * V4L2_MBUS_HSYNC_ACTIVE_HIGH or flag V4L2_MBUS_HSYNC_ACTIVE_LOW shall be set. * * TODO: replace the existing V4L2_MBUS_* flags with structures of fields * to avoid conflicting settings. * * For example: * #define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2) * #define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3) * will be replaced by a field whose value reports the intended active state of * the signal: * unsigned int v4l2_mbus_hsync_active : 1; */ /* Parallel flags */ /* * The client runs in master or in slave mode. By "Master mode" an operation * mode is meant in which the client (e.g., a camera sensor) is producing * horizontal and vertical synchronisation. In "Slave mode" the host is * providing these signals to the slave. */ #define V4L2_MBUS_MASTER BIT(0) #define V4L2_MBUS_SLAVE BIT(1) /* * Signal polarity flags * Note: in BT.656 mode HSYNC, FIELD, and VSYNC are unused * V4L2_MBUS_[HV]SYNC* flags should also be used for specifying * configuration of hardware that uses [HV]REF signals */ #define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2) #define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3) #define V4L2_MBUS_VSYNC_ACTIVE_HIGH BIT(4) #define V4L2_MBUS_VSYNC_ACTIVE_LOW BIT(5) #define V4L2_MBUS_PCLK_SAMPLE_RISING BIT(6) #define V4L2_MBUS_PCLK_SAMPLE_FALLING BIT(7) #define V4L2_MBUS_PCLK_SAMPLE_DUALEDGE BIT(8) #define V4L2_MBUS_DATA_ACTIVE_HIGH BIT(9) #define V4L2_MBUS_DATA_ACTIVE_LOW BIT(10) /* FIELD = 0/1 - Field1 (odd)/Field2 (even) */ #define V4L2_MBUS_FIELD_EVEN_HIGH BIT(11) /* FIELD = 1/0 - Field1 (odd)/Field2 (even) */ #define V4L2_MBUS_FIELD_EVEN_LOW BIT(12) /* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively.
*/ #define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(13) #define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(14) #define V4L2_MBUS_DATA_ENABLE_HIGH BIT(15) #define V4L2_MBUS_DATA_ENABLE_LOW BIT(16) /* Serial flags */ /* Clock non-continuous mode support. */ #define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(0) #define V4L2_MBUS_CSI2_MAX_DATA_LANES 8 /** * struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration * @flags: media bus (V4L2_MBUS_*) flags * @data_lanes: an array of physical data lane indexes * @clock_lane: physical lane index of the clock lane * @num_data_lanes: number of data lanes * @lane_polarities: polarity of the lanes. The order is the same of * the physical lanes. */ struct v4l2_mbus_config_mipi_csi2 { unsigned int flags; unsigned char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES]; unsigned char clock_lane; unsigned char num_data_lanes; bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES]; }; /** * struct v4l2_mbus_config_parallel - parallel data bus configuration * @flags: media bus (V4L2_MBUS_*) flags * @bus_width: bus width in bits * @data_shift: data shift in bits */ struct v4l2_mbus_config_parallel { unsigned int flags; unsigned char bus_width; unsigned char data_shift; }; /** * struct v4l2_mbus_config_mipi_csi1 - CSI-1/CCP2 data bus configuration * @clock_inv: polarity of clock/strobe signal * false - not inverted, true - inverted * @strobe: false - data/clock, true - data/strobe * @lane_polarity: the polarities of the clock (index 0) and data lanes * index (1) * @data_lane: the number of the data lane * @clock_lane: the number of the clock lane */ struct v4l2_mbus_config_mipi_csi1 { unsigned char clock_inv:1; unsigned char strobe:1; bool lane_polarity[2]; unsigned char data_lane; unsigned char clock_lane; }; /** * enum v4l2_mbus_type - media bus type * @V4L2_MBUS_UNKNOWN: unknown bus type, no V4L2 mediabus configuration * @V4L2_MBUS_PARALLEL: parallel interface with hsync and vsync * @V4L2_MBUS_BT656: parallel interface with embedded synchronisation, can * also be used for BT.1120 * @V4L2_MBUS_CSI1: MIPI CSI-1 serial interface * @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2) * @V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY * @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY * @V4L2_MBUS_DPI: MIPI VIDEO DPI interface * @V4L2_MBUS_INVALID: invalid bus type (keep as last) */ enum v4l2_mbus_type { V4L2_MBUS_UNKNOWN, V4L2_MBUS_PARALLEL, V4L2_MBUS_BT656, V4L2_MBUS_CSI1, V4L2_MBUS_CCP2, V4L2_MBUS_CSI2_DPHY, V4L2_MBUS_CSI2_CPHY, V4L2_MBUS_DPI, V4L2_MBUS_INVALID, }; /** * struct v4l2_mbus_config - media bus configuration * @type: interface type * @bus: bus configuration data structure * @bus.parallel: embedded &struct v4l2_mbus_config_parallel. * Used if the bus is parallel or BT.656. * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1. * Used if the bus is MIPI Alliance's Camera Serial * Interface version 1 (MIPI CSI1) or Standard * Mobile Imaging Architecture's Compact Camera Port 2 * (SMIA CCP2). * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2. * Used if the bus is MIPI Alliance's Camera Serial * Interface version 2 (MIPI CSI2). */ struct v4l2_mbus_config { enum v4l2_mbus_type type; union { struct v4l2_mbus_config_parallel parallel; struct v4l2_mbus_config_mipi_csi1 mipi_csi1; struct v4l2_mbus_config_mipi_csi2 mipi_csi2; } bus; }; /** * v4l2_fill_pix_format - Ancillary routine that fills a &struct * v4l2_pix_format fields from a &struct v4l2_mbus_framefmt. 
* * @pix_fmt: pointer to &struct v4l2_pix_format to be filled * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model */ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, const struct v4l2_mbus_framefmt *mbus_fmt) { pix_fmt->width = mbus_fmt->width; pix_fmt->height = mbus_fmt->height; pix_fmt->field = mbus_fmt->field; pix_fmt->colorspace = mbus_fmt->colorspace; pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; pix_fmt->quantization = mbus_fmt->quantization; pix_fmt->xfer_func = mbus_fmt->xfer_func; } /** * v4l2_fill_mbus_format - Ancillary routine that fills a &struct * v4l2_mbus_framefmt from a &struct v4l2_pix_format and a * data format code. * * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled * @pix_fmt: pointer to &struct v4l2_pix_format to be used as model * @code: data format code (from &enum v4l2_mbus_pixelcode) */ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, const struct v4l2_pix_format *pix_fmt, u32 code) { mbus_fmt->width = pix_fmt->width; mbus_fmt->height = pix_fmt->height; mbus_fmt->field = pix_fmt->field; mbus_fmt->colorspace = pix_fmt->colorspace; mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc; mbus_fmt->quantization = pix_fmt->quantization; mbus_fmt->xfer_func = pix_fmt->xfer_func; mbus_fmt->code = code; } /** * v4l2_fill_pix_format_mplane - Ancillary routine that fills a &struct * v4l2_pix_format_mplane fields from a media bus structure. * * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model */ static inline void v4l2_fill_pix_format_mplane(struct v4l2_pix_format_mplane *pix_mp_fmt, const struct v4l2_mbus_framefmt *mbus_fmt) { pix_mp_fmt->width = mbus_fmt->width; pix_mp_fmt->height = mbus_fmt->height; pix_mp_fmt->field = mbus_fmt->field; pix_mp_fmt->colorspace = mbus_fmt->colorspace; pix_mp_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; pix_mp_fmt->quantization = mbus_fmt->quantization; pix_mp_fmt->xfer_func = mbus_fmt->xfer_func; } /** * v4l2_fill_mbus_format_mplane - Ancillary routine that fills a &struct * v4l2_mbus_framefmt from a &struct v4l2_pix_format_mplane. * * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be used as model */ static inline void v4l2_fill_mbus_format_mplane(struct v4l2_mbus_framefmt *mbus_fmt, const struct v4l2_pix_format_mplane *pix_mp_fmt) { mbus_fmt->width = pix_mp_fmt->width; mbus_fmt->height = pix_mp_fmt->height; mbus_fmt->field = pix_mp_fmt->field; mbus_fmt->colorspace = pix_mp_fmt->colorspace; mbus_fmt->ycbcr_enc = pix_mp_fmt->ycbcr_enc; mbus_fmt->quantization = pix_mp_fmt->quantization; mbus_fmt->xfer_func = pix_mp_fmt->xfer_func; } #endif
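To make the flag discipline above concrete, here is a minimal sketch of how a sensor driver might describe its bus, once as a parallel bus using the V4L2_MBUS_* flags (one and only one flag per group) and once as a two-lane CSI-2 D-PHY bus; the bus width, lane routing and polarities are invented for the example.

#include <media/v4l2-mediabus.h>

static const struct v4l2_mbus_config example_parallel_cfg = {
        .type = V4L2_MBUS_PARALLEL,
        .bus.parallel = {
                .flags = V4L2_MBUS_MASTER |
                         V4L2_MBUS_HSYNC_ACTIVE_HIGH |
                         V4L2_MBUS_VSYNC_ACTIVE_HIGH |
                         V4L2_MBUS_PCLK_SAMPLE_RISING |
                         V4L2_MBUS_DATA_ACTIVE_HIGH,
                .bus_width = 10,
        },
};

static const struct v4l2_mbus_config example_csi2_cfg = {
        .type = V4L2_MBUS_CSI2_DPHY,
        .bus.mipi_csi2 = {
                .num_data_lanes = 2,
                .clock_lane = 0,
                .data_lanes = { 1, 2 },         /* physical lane indexes */
        },
};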
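And a short sketch of the fill helpers in use, as a bridge driver might propagate a negotiated media bus format into a userspace pixel format; the choice of V4L2_PIX_FMT_UYVY and the bytesperline arithmetic are illustrative, since neither the fourcc nor the line padding is carried by the media bus format.

#include <linux/videodev2.h>
#include <media/v4l2-mediabus.h>

static void example_mbus_to_pix(const struct v4l2_mbus_framefmt *mbus_fmt,
                                struct v4l2_pix_format *pix_fmt)
{
        /* Copies width, height, field and colorimetry from the mbus format. */
        v4l2_fill_pix_format(pix_fmt, mbus_fmt);

        /* The pixel format and padding are the bridge driver's choice. */
        pix_fmt->pixelformat = V4L2_PIX_FMT_UYVY;
        pix_fmt->bytesperline = pix_fmt->width * 2;
        pix_fmt->sizeimage = pix_fmt->bytesperline * pix_fmt->height;
}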
// SPDX-License-Identifier: GPL-2.0 /* * Alarmtimer interface * * This interface provides a timer which is similar to hrtimers, * but triggers an RTC alarm if the box is suspended. * * This interface is influenced by the Android RTC Alarm timer * interface. * * Copyright (C) 2010 IBM Corporation * * Author: John Stultz <john.stultz@linaro.org> */ #include <linux/time.h> #include <linux/hrtimer.h> #include <linux/timerqueue.h> #include <linux/rtc.h> #include <linux/sched/signal.h> #include <linux/sched/debug.h> #include <linux/alarmtimer.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/posix-timers.h> #include <linux/workqueue.h> #include <linux/freezer.h> #include <linux/compat.h> #include <linux/module.h> #include <linux/time_namespace.h> #include "posix-timers.h" #define CREATE_TRACE_POINTS #include <trace/events/alarmtimer.h> /** * struct alarm_base - Alarm timer bases * @lock: Lock for synchronized access to the base * @timerqueue: Timerqueue head managing the list of events * @get_ktime: Function to read the time correlating to the base * @get_timespec: Function to read the namespace time correlating to the base * @base_clockid: clockid for the base */ static struct alarm_base { spinlock_t lock; struct timerqueue_head timerqueue; ktime_t (*get_ktime)(void); void (*get_timespec)(struct timespec64 *tp); clockid_t base_clockid; } alarm_bases[ALARM_NUMTYPE]; #if defined(CONFIG_POSIX_TIMERS) || defined(CONFIG_RTC_CLASS) /* freezer information to handle clock_nanosleep triggered wakeups */ static enum alarmtimer_type freezer_alarmtype; static ktime_t freezer_expires; static ktime_t freezer_delta; static DEFINE_SPINLOCK(freezer_delta_lock); #endif #ifdef CONFIG_RTC_CLASS /* rtc timer and device for setting alarm wakeups at suspend */ static struct rtc_timer rtctimer; static struct rtc_device *rtcdev; static DEFINE_SPINLOCK(rtcdev_lock); /** * alarmtimer_get_rtcdev - Return selected rtcdevice * * This function returns the rtc device to use for wakealarms.
*/ struct rtc_device *alarmtimer_get_rtcdev(void) { unsigned long flags; struct rtc_device *ret; spin_lock_irqsave(&rtcdev_lock, flags); ret = rtcdev; spin_unlock_irqrestore(&rtcdev_lock, flags); return ret; } EXPORT_SYMBOL_GPL(alarmtimer_get_rtcdev); static int alarmtimer_rtc_add_device(struct device *dev) { unsigned long flags; struct rtc_device *rtc = to_rtc_device(dev); struct platform_device *pdev; int ret = 0; if (rtcdev) return -EBUSY; if (!test_bit(RTC_FEATURE_ALARM, rtc->features)) return -1; if (!device_may_wakeup(rtc->dev.parent)) return -1; pdev = platform_device_register_data(dev, "alarmtimer", PLATFORM_DEVID_AUTO, NULL, 0); if (!IS_ERR(pdev)) device_init_wakeup(&pdev->dev, true); spin_lock_irqsave(&rtcdev_lock, flags); if (!IS_ERR(pdev) && !rtcdev) { if (!try_module_get(rtc->owner)) { ret = -1; goto unlock; } rtcdev = rtc; /* hold a reference so it doesn't go away */ get_device(dev); pdev = NULL; } else { ret = -1; } unlock: spin_unlock_irqrestore(&rtcdev_lock, flags); platform_device_unregister(pdev); return ret; } static inline void alarmtimer_rtc_timer_init(void) { rtc_timer_init(&rtctimer, NULL, NULL); } static struct class_interface alarmtimer_rtc_interface = { .add_dev = &alarmtimer_rtc_add_device, }; static int alarmtimer_rtc_interface_setup(void) { alarmtimer_rtc_interface.class = &rtc_class; return class_interface_register(&alarmtimer_rtc_interface); } static void alarmtimer_rtc_interface_remove(void) { class_interface_unregister(&alarmtimer_rtc_interface); } #else static inline int alarmtimer_rtc_interface_setup(void) { return 0; } static inline void alarmtimer_rtc_interface_remove(void) { } static inline void alarmtimer_rtc_timer_init(void) { } #endif /** * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue * @base: pointer to the base where the timer is being run * @alarm: pointer to alarm being enqueued. * * Adds alarm to a alarm_base timerqueue * * Must hold base->lock when calling. */ static void alarmtimer_enqueue(struct alarm_base *base, struct alarm *alarm) { if (alarm->state & ALARMTIMER_STATE_ENQUEUED) timerqueue_del(&base->timerqueue, &alarm->node); timerqueue_add(&base->timerqueue, &alarm->node); alarm->state |= ALARMTIMER_STATE_ENQUEUED; } /** * alarmtimer_dequeue - Removes an alarm timer from an alarm_base timerqueue * @base: pointer to the base where the timer is running * @alarm: pointer to alarm being removed * * Removes alarm to a alarm_base timerqueue * * Must hold base->lock when calling. */ static void alarmtimer_dequeue(struct alarm_base *base, struct alarm *alarm) { if (!(alarm->state & ALARMTIMER_STATE_ENQUEUED)) return; timerqueue_del(&base->timerqueue, &alarm->node); alarm->state &= ~ALARMTIMER_STATE_ENQUEUED; } /** * alarmtimer_fired - Handles alarm hrtimer being fired. * @timer: pointer to hrtimer being run * * When a alarm timer fires, this runs through the timerqueue to * see which alarms expired, and runs those. If there are more alarm * timers queued for the future, we set the hrtimer to fire when * the next future alarm timer expires. 
*/ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer) { struct alarm *alarm = container_of(timer, struct alarm, timer); struct alarm_base *base = &alarm_bases[alarm->type]; scoped_guard (spinlock_irqsave, &base->lock) alarmtimer_dequeue(base, alarm); if (alarm->function) alarm->function(alarm, base->get_ktime()); trace_alarmtimer_fired(alarm, base->get_ktime()); return HRTIMER_NORESTART; } ktime_t alarm_expires_remaining(const struct alarm *alarm) { struct alarm_base *base = &alarm_bases[alarm->type]; return ktime_sub(alarm->node.expires, base->get_ktime()); } EXPORT_SYMBOL_GPL(alarm_expires_remaining); #ifdef CONFIG_RTC_CLASS /** * alarmtimer_suspend - Suspend time callback * @dev: unused * * When we are going into suspend, we look through the bases * to see which is the soonest timer to expire. We then * set an rtc timer to fire that far into the future, which * will wake us from suspend. */ static int alarmtimer_suspend(struct device *dev) { ktime_t min, now, expires; int i, ret, type; struct rtc_device *rtc; unsigned long flags; struct rtc_time tm; spin_lock_irqsave(&freezer_delta_lock, flags); min = freezer_delta; expires = freezer_expires; type = freezer_alarmtype; freezer_delta = 0; spin_unlock_irqrestore(&freezer_delta_lock, flags); rtc = alarmtimer_get_rtcdev(); /* If we have no rtcdev, just return */ if (!rtc) return 0; /* Find the soonest timer to expire*/ for (i = 0; i < ALARM_NUMTYPE; i++) { struct alarm_base *base = &alarm_bases[i]; struct timerqueue_node *next; ktime_t delta; spin_lock_irqsave(&base->lock, flags); next = timerqueue_getnext(&base->timerqueue); spin_unlock_irqrestore(&base->lock, flags); if (!next) continue; delta = ktime_sub(next->expires, base->get_ktime()); if (!min || (delta < min)) { expires = next->expires; min = delta; type = i; } } if (min == 0) return 0; if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) { pm_wakeup_event(dev, 2 * MSEC_PER_SEC); return -EBUSY; } trace_alarmtimer_suspend(expires, type); /* Setup an rtc timer to fire that far in the future */ rtc_timer_cancel(rtc, &rtctimer); rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); /* * If the RTC alarm timer only supports a limited time offset, set the * alarm time to the maximum supported value. * The system may wake up earlier (possibly much earlier) than expected * when the alarmtimer runs. This is the best the kernel can do if * the alarmtimer exceeds the time that the rtc device can be programmed * for. 
*/ min = rtc_bound_alarmtime(rtc, min); now = ktime_add(now, min); /* Set alarm, if in the past reject suspend briefly to handle */ ret = rtc_timer_start(rtc, &rtctimer, now, 0); if (ret < 0) pm_wakeup_event(dev, MSEC_PER_SEC); return ret; } static int alarmtimer_resume(struct device *dev) { struct rtc_device *rtc; rtc = alarmtimer_get_rtcdev(); if (rtc) rtc_timer_cancel(rtc, &rtctimer); return 0; } #else static int alarmtimer_suspend(struct device *dev) { return 0; } static int alarmtimer_resume(struct device *dev) { return 0; } #endif static void __alarm_init(struct alarm *alarm, enum alarmtimer_type type, void (*function)(struct alarm *, ktime_t)) { timerqueue_init(&alarm->node); alarm->function = function; alarm->type = type; alarm->state = ALARMTIMER_STATE_INACTIVE; } /** * alarm_init - Initialize an alarm structure * @alarm: ptr to alarm to be initialized * @type: the type of the alarm * @function: callback that is run when the alarm fires */ void alarm_init(struct alarm *alarm, enum alarmtimer_type type, void (*function)(struct alarm *, ktime_t)) { hrtimer_setup(&alarm->timer, alarmtimer_fired, alarm_bases[type].base_clockid, HRTIMER_MODE_ABS); __alarm_init(alarm, type, function); } EXPORT_SYMBOL_GPL(alarm_init); /** * alarm_start - Sets an absolute alarm to fire * @alarm: ptr to alarm to set * @start: time to run the alarm */ void alarm_start(struct alarm *alarm, ktime_t start) { struct alarm_base *base = &alarm_bases[alarm->type]; unsigned long flags; spin_lock_irqsave(&base->lock, flags); alarm->node.expires = start; alarmtimer_enqueue(base, alarm); hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS); spin_unlock_irqrestore(&base->lock, flags); trace_alarmtimer_start(alarm, base->get_ktime()); } EXPORT_SYMBOL_GPL(alarm_start); /** * alarm_start_relative - Sets a relative alarm to fire * @alarm: ptr to alarm to set * @start: time relative to now to run the alarm */ void alarm_start_relative(struct alarm *alarm, ktime_t start) { struct alarm_base *base = &alarm_bases[alarm->type]; start = ktime_add_safe(start, base->get_ktime()); alarm_start(alarm, start); } EXPORT_SYMBOL_GPL(alarm_start_relative); void alarm_restart(struct alarm *alarm) { struct alarm_base *base = &alarm_bases[alarm->type]; unsigned long flags; spin_lock_irqsave(&base->lock, flags); hrtimer_set_expires(&alarm->timer, alarm->node.expires); hrtimer_restart(&alarm->timer); alarmtimer_enqueue(base, alarm); spin_unlock_irqrestore(&base->lock, flags); } EXPORT_SYMBOL_GPL(alarm_restart); /** * alarm_try_to_cancel - Tries to cancel an alarm timer * @alarm: ptr to alarm to be canceled * * Returns 1 if the timer was canceled, 0 if it was not running, * and -1 if the callback was running */ int alarm_try_to_cancel(struct alarm *alarm) { struct alarm_base *base = &alarm_bases[alarm->type]; unsigned long flags; int ret; spin_lock_irqsave(&base->lock, flags); ret = hrtimer_try_to_cancel(&alarm->timer); if (ret >= 0) alarmtimer_dequeue(base, alarm); spin_unlock_irqrestore(&base->lock, flags); trace_alarmtimer_cancel(alarm, base->get_ktime()); return ret; } EXPORT_SYMBOL_GPL(alarm_try_to_cancel); /** * alarm_cancel - Spins trying to cancel an alarm timer until it is done * @alarm: ptr to alarm to be canceled * * Returns 1 if the timer was canceled, 0 if it was not active. 
*/ int alarm_cancel(struct alarm *alarm) { for (;;) { int ret = alarm_try_to_cancel(alarm); if (ret >= 0) return ret; hrtimer_cancel_wait_running(&alarm->timer); } } EXPORT_SYMBOL_GPL(alarm_cancel); u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) { u64 overrun = 1; ktime_t delta; delta = ktime_sub(now, alarm->node.expires); if (delta < 0) return 0; if (unlikely(delta >= interval)) { s64 incr = ktime_to_ns(interval); overrun = ktime_divns(delta, incr); alarm->node.expires = ktime_add_ns(alarm->node.expires, incr*overrun); if (alarm->node.expires > now) return overrun; /* * This (and the ktime_add() below) is the * correction for exact: */ overrun++; } alarm->node.expires = ktime_add_safe(alarm->node.expires, interval); return overrun; } EXPORT_SYMBOL_GPL(alarm_forward); u64 alarm_forward_now(struct alarm *alarm, ktime_t interval) { struct alarm_base *base = &alarm_bases[alarm->type]; return alarm_forward(alarm, base->get_ktime(), interval); } EXPORT_SYMBOL_GPL(alarm_forward_now); #ifdef CONFIG_POSIX_TIMERS static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type) { struct alarm_base *base; unsigned long flags; ktime_t delta; switch(type) { case ALARM_REALTIME: base = &alarm_bases[ALARM_REALTIME]; type = ALARM_REALTIME_FREEZER; break; case ALARM_BOOTTIME: base = &alarm_bases[ALARM_BOOTTIME]; type = ALARM_BOOTTIME_FREEZER; break; default: WARN_ONCE(1, "Invalid alarm type: %d\n", type); return; } delta = ktime_sub(absexp, base->get_ktime()); spin_lock_irqsave(&freezer_delta_lock, flags); if (!freezer_delta || (delta < freezer_delta)) { freezer_delta = delta; freezer_expires = absexp; freezer_alarmtype = type; } spin_unlock_irqrestore(&freezer_delta_lock, flags); } /** * clock2alarm - helper that converts from clockid to alarmtypes * @clockid: clockid. */ static enum alarmtimer_type clock2alarm(clockid_t clockid) { if (clockid == CLOCK_REALTIME_ALARM) return ALARM_REALTIME; if (clockid == CLOCK_BOOTTIME_ALARM) return ALARM_BOOTTIME; return -1; } /** * alarm_handle_timer - Callback for posix timers * @alarm: alarm that fired * @now: time at the timer expiration * * Posix timer callback for expired alarm timers. 
* * Return: whether the timer is to be restarted */ static void alarm_handle_timer(struct alarm *alarm, ktime_t now) { struct k_itimer *ptr = container_of(alarm, struct k_itimer, it.alarm.alarmtimer); guard(spinlock_irqsave)(&ptr->it_lock); posix_timer_queue_signal(ptr); } /** * alarm_timer_rearm - Posix timer callback for rearming timer * @timr: Pointer to the posixtimer data struct */ static void alarm_timer_rearm(struct k_itimer *timr) { struct alarm *alarm = &timr->it.alarm.alarmtimer; timr->it_overrun += alarm_forward_now(alarm, timr->it_interval); alarm_start(alarm, alarm->node.expires); } /** * alarm_timer_forward - Posix timer callback for forwarding timer * @timr: Pointer to the posixtimer data struct * @now: Current time to forward the timer against */ static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now) { struct alarm *alarm = &timr->it.alarm.alarmtimer; return alarm_forward(alarm, timr->it_interval, now); } /** * alarm_timer_remaining - Posix timer callback to retrieve remaining time * @timr: Pointer to the posixtimer data struct * @now: Current time to calculate against */ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now) { struct alarm *alarm = &timr->it.alarm.alarmtimer; return ktime_sub(alarm->node.expires, now); } /** * alarm_timer_try_to_cancel - Posix timer callback to cancel a timer * @timr: Pointer to the posixtimer data struct */ static int alarm_timer_try_to_cancel(struct k_itimer *timr) { return alarm_try_to_cancel(&timr->it.alarm.alarmtimer); } /** * alarm_timer_wait_running - Posix timer callback to wait for a timer * @timr: Pointer to the posixtimer data struct * * Called from the core code when timer cancel detected that the callback * is running. @timr is unlocked and rcu read lock is held to prevent it * from being freed. */ static void alarm_timer_wait_running(struct k_itimer *timr) { hrtimer_cancel_wait_running(&timr->it.alarm.alarmtimer.timer); } /** * alarm_timer_arm - Posix timer callback to arm a timer * @timr: Pointer to the posixtimer data struct * @expires: The new expiry time * @absolute: Expiry value is absolute time * @sigev_none: Posix timer does not deliver signals */ static void alarm_timer_arm(struct k_itimer *timr, ktime_t expires, bool absolute, bool sigev_none) { struct alarm *alarm = &timr->it.alarm.alarmtimer; struct alarm_base *base = &alarm_bases[alarm->type]; if (!absolute) expires = ktime_add_safe(expires, base->get_ktime()); if (sigev_none) alarm->node.expires = expires; else alarm_start(&timr->it.alarm.alarmtimer, expires); } /** * alarm_clock_getres - posix getres interface * @which_clock: clockid * @tp: timespec to fill * * Returns the granularity of underlying alarm base clock */ static int alarm_clock_getres(const clockid_t which_clock, struct timespec64 *tp) { if (!alarmtimer_get_rtcdev()) return -EINVAL; tp->tv_sec = 0; tp->tv_nsec = hrtimer_resolution; return 0; } /** * alarm_clock_get_timespec - posix clock_get_timespec interface * @which_clock: clockid * @tp: timespec to fill. * * Provides the underlying alarm base time in a tasks time namespace. */ static int alarm_clock_get_timespec(clockid_t which_clock, struct timespec64 *tp) { struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; if (!alarmtimer_get_rtcdev()) return -EINVAL; base->get_timespec(tp); return 0; } /** * alarm_clock_get_ktime - posix clock_get_ktime interface * @which_clock: clockid * * Provides the underlying alarm base time in the root namespace. 
*/ static ktime_t alarm_clock_get_ktime(clockid_t which_clock) { struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; if (!alarmtimer_get_rtcdev()) return -EINVAL; return base->get_ktime(); } /** * alarm_timer_create - posix timer_create interface * @new_timer: k_itimer pointer to manage * * Initializes the k_itimer structure. */ static int alarm_timer_create(struct k_itimer *new_timer) { enum alarmtimer_type type; if (!alarmtimer_get_rtcdev()) return -EOPNOTSUPP; if (!capable(CAP_WAKE_ALARM)) return -EPERM; type = clock2alarm(new_timer->it_clock); alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer); return 0; } /** * alarmtimer_nsleep_wakeup - Wakeup function for alarm_timer_nsleep * @alarm: ptr to alarm that fired * @now: time at the timer expiration * * Wakes up the task that set the alarmtimer */ static void alarmtimer_nsleep_wakeup(struct alarm *alarm, ktime_t now) { struct task_struct *task = alarm->data; alarm->data = NULL; if (task) wake_up_process(task); } /** * alarmtimer_do_nsleep - Internal alarmtimer nsleep implementation * @alarm: ptr to alarmtimer * @absexp: absolute expiration time * @type: alarm type (BOOTTIME/REALTIME). * * Sets the alarm timer and sleeps until it is fired or interrupted. */ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp, enum alarmtimer_type type) { struct restart_block *restart; alarm->data = (void *)current; do { set_current_state(TASK_INTERRUPTIBLE); alarm_start(alarm, absexp); if (likely(alarm->data)) schedule(); alarm_cancel(alarm); } while (alarm->data && !signal_pending(current)); __set_current_state(TASK_RUNNING); destroy_hrtimer_on_stack(&alarm->timer); if (!alarm->data) return 0; if (freezing(current)) alarmtimer_freezerset(absexp, type); restart = &current->restart_block; if (restart->nanosleep.type != TT_NONE) { struct timespec64 rmt; ktime_t rem; rem = ktime_sub(absexp, alarm_bases[type].get_ktime()); if (rem <= 0) return 0; rmt = ktime_to_timespec64(rem); return nanosleep_copyout(restart, &rmt); } return -ERESTART_RESTARTBLOCK; } static void alarm_init_on_stack(struct alarm *alarm, enum alarmtimer_type type, void (*function)(struct alarm *, ktime_t)) { hrtimer_setup_on_stack(&alarm->timer, alarmtimer_fired, alarm_bases[type].base_clockid, HRTIMER_MODE_ABS); __alarm_init(alarm, type, function); } /** * alarm_timer_nsleep_restart - restartblock alarmtimer nsleep * @restart: ptr to restart block * * Handles restarted clock_nanosleep calls */ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart) { enum alarmtimer_type type = restart->nanosleep.clockid; ktime_t exp = restart->nanosleep.expires; struct alarm alarm; alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup); return alarmtimer_do_nsleep(&alarm, exp, type); } /** * alarm_timer_nsleep - alarmtimer nanosleep * @which_clock: clockid * @flags: determines abstime or relative * @tsreq: requested sleep time (abs or rel) * * Handles clock_nanosleep calls against _ALARM clockids */ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, const struct timespec64 *tsreq) { enum alarmtimer_type type = clock2alarm(which_clock); struct restart_block *restart = &current->restart_block; struct alarm alarm; ktime_t exp; int ret; if (!alarmtimer_get_rtcdev()) return -EOPNOTSUPP; if (flags & ~TIMER_ABSTIME) return -EINVAL; if (!capable(CAP_WAKE_ALARM)) return -EPERM; alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup); exp = timespec64_to_ktime(*tsreq); /* Convert (if necessary) to absolute time */ if 
(flags != TIMER_ABSTIME) { ktime_t now = alarm_bases[type].get_ktime(); exp = ktime_add_safe(now, exp); } else { exp = timens_ktime_to_host(which_clock, exp); } ret = alarmtimer_do_nsleep(&alarm, exp, type); if (ret != -ERESTART_RESTARTBLOCK) return ret; /* abs timers don't set remaining time or restart */ if (flags == TIMER_ABSTIME) return -ERESTARTNOHAND; restart->nanosleep.clockid = type; restart->nanosleep.expires = exp; set_restart_fn(restart, alarm_timer_nsleep_restart); return ret; } const struct k_clock alarm_clock = { .clock_getres = alarm_clock_getres, .clock_get_ktime = alarm_clock_get_ktime, .clock_get_timespec = alarm_clock_get_timespec, .timer_create = alarm_timer_create, .timer_set = common_timer_set, .timer_del = common_timer_del, .timer_get = common_timer_get, .timer_arm = alarm_timer_arm, .timer_rearm = alarm_timer_rearm, .timer_forward = alarm_timer_forward, .timer_remaining = alarm_timer_remaining, .timer_try_to_cancel = alarm_timer_try_to_cancel, .timer_wait_running = alarm_timer_wait_running, .nsleep = alarm_timer_nsleep, }; #endif /* CONFIG_POSIX_TIMERS */ /* Suspend hook structures */ static const struct dev_pm_ops alarmtimer_pm_ops = { .suspend = alarmtimer_suspend, .resume = alarmtimer_resume, }; static struct platform_driver alarmtimer_driver = { .driver = { .name = "alarmtimer", .pm = &alarmtimer_pm_ops, } }; static void get_boottime_timespec(struct timespec64 *tp) { ktime_get_boottime_ts64(tp); timens_add_boottime(tp); } /** * alarmtimer_init - Initialize alarm timer code * * This function initializes the alarm bases and registers * the posix clock ids. */ static int __init alarmtimer_init(void) { int error; int i; alarmtimer_rtc_timer_init(); /* Initialize alarm bases */ alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME; alarm_bases[ALARM_REALTIME].get_ktime = &ktime_get_real; alarm_bases[ALARM_REALTIME].get_timespec = ktime_get_real_ts64; alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME; alarm_bases[ALARM_BOOTTIME].get_ktime = &ktime_get_boottime; alarm_bases[ALARM_BOOTTIME].get_timespec = get_boottime_timespec; for (i = 0; i < ALARM_NUMTYPE; i++) { timerqueue_init_head(&alarm_bases[i].timerqueue); spin_lock_init(&alarm_bases[i].lock); } error = alarmtimer_rtc_interface_setup(); if (error) return error; error = platform_driver_register(&alarmtimer_driver); if (error) goto out_if; return 0; out_if: alarmtimer_rtc_interface_remove(); return error; } device_initcall(alarmtimer_init);
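A minimal in-kernel usage sketch of the alarm API above (the names and the 30-second delay are illustrative): initialize an ALARM_BOOTTIME alarm and arm it relative to now; if the system suspends in the meantime, the RTC path above programs a wakeup so the callback still fires on time.

#include <linux/alarmtimer.h>
#include <linux/ktime.h>
#include <linux/printk.h>

static struct alarm example_alarm;

static void example_alarm_fn(struct alarm *alarm, ktime_t now)
{
        pr_info("example alarm fired\n");
}

static void example_arm_alarm(void)
{
        alarm_init(&example_alarm, ALARM_BOOTTIME, example_alarm_fn);
        alarm_start_relative(&example_alarm, ktime_set(30, 0));
}

static void example_disarm_alarm(void)
{
        /* Waits for a concurrently running callback before returning. */
        alarm_cancel(&example_alarm);
}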
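The same machinery is reachable from userspace through the alarm_clock k_clock registered above. A hypothetical test program might look like this, assuming a libc that exposes CLOCK_BOOTTIME_ALARM; the sleep requires CAP_WAKE_ALARM and otherwise fails with EPERM.

#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
        struct timespec ts;
        int err;

        /* CLOCK_BOOTTIME_ALARM reads go through alarm_clock_get_timespec(). */
        clock_gettime(CLOCK_BOOTTIME_ALARM, &ts);
        ts.tv_sec += 30;        /* ask to be woken 30 seconds from now */

        /* An absolute sleep that can wake the system from suspend;
         * handled by alarm_timer_nsleep() above. */
        err = clock_nanosleep(CLOCK_BOOTTIME_ALARM, TIMER_ABSTIME, &ts, NULL);
        if (err)
                fprintf(stderr, "clock_nanosleep: %s\n", strerror(err));
        return 0;
}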
// SPDX-License-Identifier: GPL-2.0+ /****************************************************************************** * cxacru.c - driver for USB ADSL modems based on * Conexant AccessRunner chipset * * Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan * Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru) * Copyright (C) 2007 Simon Arlott * Copyright (C) 2009 Simon Arlott ******************************************************************************/ /* * Credit is due to Josep Comas, who created the original patch to speedtch.c * to support the different padding used by the AccessRunner (now generalized * into usbatm), and the userspace firmware loading utility.
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/mutex.h> #include <linux/unaligned.h> #include "usbatm.h" #define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott" #define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver" static const char cxacru_driver_name[] = "cxacru"; #define CXACRU_EP_CMD 0x01 /* Bulk/interrupt in/out */ #define CXACRU_EP_DATA 0x02 /* Bulk in/out */ #define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */ #define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2) /* Addresses */ #define PLLFCLK_ADDR 0x00350068 #define PLLBCLK_ADDR 0x0035006c #define SDRAMEN_ADDR 0x00350010 #define FW_ADDR 0x00801000 #define BR_ADDR 0x00180600 #define SIG_ADDR 0x00180500 #define BR_STACK_ADDR 0x00187f10 /* Values */ #define SDRAM_ENA 0x1 #define CMD_TIMEOUT 2000 /* msecs */ #define POLL_INTERVAL 1 /* secs */ /* commands for interaction with the modem through the control channel before * firmware is loaded */ enum cxacru_fw_request { FW_CMD_ERR, FW_GET_VER, FW_READ_MEM, FW_WRITE_MEM, FW_RMW_MEM, FW_CHECKSUM_MEM, FW_GOTO_MEM, }; /* commands for interaction with the modem through the control channel once * firmware is loaded */ enum cxacru_cm_request { CM_REQUEST_UNDEFINED = 0x80, CM_REQUEST_TEST, CM_REQUEST_CHIP_GET_MAC_ADDRESS, CM_REQUEST_CHIP_GET_DP_VERSIONS, CM_REQUEST_CHIP_ADSL_LINE_START, CM_REQUEST_CHIP_ADSL_LINE_STOP, CM_REQUEST_CHIP_ADSL_LINE_GET_STATUS, CM_REQUEST_CHIP_ADSL_LINE_GET_SPEED, CM_REQUEST_CARD_INFO_GET, CM_REQUEST_CARD_DATA_GET, CM_REQUEST_CARD_DATA_SET, CM_REQUEST_COMMAND_HW_IO, CM_REQUEST_INTERFACE_HW_IO, CM_REQUEST_CARD_SERIAL_DATA_PATH_GET, CM_REQUEST_CARD_SERIAL_DATA_PATH_SET, CM_REQUEST_CARD_CONTROLLER_VERSION_GET, CM_REQUEST_CARD_GET_STATUS, CM_REQUEST_CARD_GET_MAC_ADDRESS, CM_REQUEST_CARD_GET_DATA_LINK_STATUS, CM_REQUEST_MAX, }; /* commands for interaction with the flash memory * * read: response is the contents of the first 60 bytes of flash memory * write: request contains the 60 bytes of data to write to flash memory * response is the contents of the first 60 bytes of flash memory * * layout: PP PP VV VV MM MM MM MM MM MM ?? ?? 
SS SS SS SS SS SS SS SS * SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * * P: le16 USB Product ID * V: le16 USB Vendor ID * M: be48 MAC Address * S: le16 ASCII Serial Number */ enum cxacru_cm_flash { CM_FLASH_READ = 0xa1, CM_FLASH_WRITE = 0xa2 }; /* reply codes to the commands above */ enum cxacru_cm_status { CM_STATUS_UNDEFINED, CM_STATUS_SUCCESS, CM_STATUS_ERROR, CM_STATUS_UNSUPPORTED, CM_STATUS_UNIMPLEMENTED, CM_STATUS_PARAMETER_ERROR, CM_STATUS_DBG_LOOPBACK, CM_STATUS_MAX, }; /* indices into CARD_INFO_GET return array */ enum cxacru_info_idx { CXINF_DOWNSTREAM_RATE, CXINF_UPSTREAM_RATE, CXINF_LINK_STATUS, CXINF_LINE_STATUS, CXINF_MAC_ADDRESS_HIGH, CXINF_MAC_ADDRESS_LOW, CXINF_UPSTREAM_SNR_MARGIN, CXINF_DOWNSTREAM_SNR_MARGIN, CXINF_UPSTREAM_ATTENUATION, CXINF_DOWNSTREAM_ATTENUATION, CXINF_TRANSMITTER_POWER, CXINF_UPSTREAM_BITS_PER_FRAME, CXINF_DOWNSTREAM_BITS_PER_FRAME, CXINF_STARTUP_ATTEMPTS, CXINF_UPSTREAM_CRC_ERRORS, CXINF_DOWNSTREAM_CRC_ERRORS, CXINF_UPSTREAM_FEC_ERRORS, CXINF_DOWNSTREAM_FEC_ERRORS, CXINF_UPSTREAM_HEC_ERRORS, CXINF_DOWNSTREAM_HEC_ERRORS, CXINF_LINE_STARTABLE, CXINF_MODULATION, CXINF_ADSL_HEADEND, CXINF_ADSL_HEADEND_ENVIRONMENT, CXINF_CONTROLLER_VERSION, /* dunno what the missing two mean */ CXINF_MAX = 0x1c, }; enum cxacru_poll_state { CXPOLL_STOPPING, CXPOLL_STOPPED, CXPOLL_POLLING, CXPOLL_SHUTDOWN }; struct cxacru_modem_type { u32 pll_f_clk; u32 pll_b_clk; int boot_rom_patch; }; struct cxacru_data { struct usbatm_data *usbatm; const struct cxacru_modem_type *modem_type; int line_status; struct mutex adsl_state_serialize; int adsl_status; struct delayed_work poll_work; u32 card_info[CXINF_MAX]; struct mutex poll_state_serialize; enum cxacru_poll_state poll_state; /* control handles */ struct mutex cm_serialize; u8 *rcv_buf; u8 *snd_buf; struct urb *rcv_urb; struct urb *snd_urb; struct completion rcv_done; struct completion snd_done; }; static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm, u8 *wdata, int wsize, u8 *rdata, int rsize); static void cxacru_poll_status(struct work_struct *work); /* Card info exported through sysfs */ #define CXACRU__ATTR_INIT(_name) \ static DEVICE_ATTR_RO(_name) #define CXACRU_CMD_INIT(_name) \ static DEVICE_ATTR_RW(_name) #define CXACRU_SET_INIT(_name) \ static DEVICE_ATTR_WO(_name) #define CXACRU_ATTR_INIT(_value, _type, _name) \ static ssize_t _name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct cxacru_data *instance = to_usbatm_driver_data(\ to_usb_interface(dev)); \ \ if (instance == NULL) \ return -ENODEV; \ \ return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \ } \ CXACRU__ATTR_INIT(_name) #define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name) #define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) #define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) #define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf) { return sprintf(buf, "%u\n", value); } static ssize_t cxacru_sysfs_showattr_s8(s8 value, char *buf) { return sprintf(buf, "%d\n", value); } static ssize_t cxacru_sysfs_showattr_dB(s16 
value, char *buf) { if (likely(value >= 0)) { return snprintf(buf, PAGE_SIZE, "%u.%02u\n", value / 100, value % 100); } else { value = -value; return snprintf(buf, PAGE_SIZE, "-%u.%02u\n", value / 100, value % 100); } } static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf) { static char *str[] = { "no", "yes" }; if (unlikely(value >= ARRAY_SIZE(str))) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf) { static char *str[] = { NULL, "not connected", "connected", "lost" }; if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL)) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf) { static char *str[] = { "down", "attempting to activate", "training", "channel analysis", "exchange", "up", "waiting", "initialising" }; if (unlikely(value >= ARRAY_SIZE(str))) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf) { static char *str[] = { "", "ANSI T1.413", "ITU-T G.992.1 (G.DMT)", "ITU-T G.992.2 (G.LITE)" }; if (unlikely(value >= ARRAY_SIZE(str))) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } /* * This could use MAC_ADDRESS_HIGH and MAC_ADDRESS_LOW, but since * this data is already in atm_dev there's no point. * * MAC_ADDRESS_HIGH = 0x????5544 * MAC_ADDRESS_LOW = 0x33221100 * Where 00-55 are bytes 0-5 of the MAC. */ static ssize_t mac_address_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxacru_data *instance = to_usbatm_driver_data( to_usb_interface(dev)); if (instance == NULL || instance->usbatm->atm_dev == NULL) return -ENODEV; return sprintf(buf, "%pM\n", instance->usbatm->atm_dev->esi); } static ssize_t adsl_state_show(struct device *dev, struct device_attribute *attr, char *buf) { static char *str[] = { "running", "stopped" }; struct cxacru_data *instance = to_usbatm_driver_data( to_usb_interface(dev)); u32 value; if (instance == NULL) return -ENODEV; value = instance->card_info[CXINF_LINE_STARTABLE]; if (unlikely(value >= ARRAY_SIZE(str))) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } static ssize_t adsl_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cxacru_data *instance = to_usbatm_driver_data( to_usb_interface(dev)); int ret; int poll = -1; char str_cmd[8]; int len = strlen(buf); if (!capable(CAP_NET_ADMIN)) return -EACCES; ret = sscanf(buf, "%7s", str_cmd); if (ret != 1) return -EINVAL; ret = 0; if (instance == NULL) return -ENODEV; if (mutex_lock_interruptible(&instance->adsl_state_serialize)) return -ERESTARTSYS; if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) { ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0); if (ret < 0) { atm_err(instance->usbatm, "change adsl state:" " CHIP_ADSL_LINE_STOP returned %d\n", ret); ret = -EIO; } else { ret = len; poll = CXPOLL_STOPPED; } } /* Line status is only updated every second * and the device appears to only react to * START/STOP every second too. Wait 1.5s to * be sure that restart will have an effect. 
*/ if (!strcmp(str_cmd, "restart")) msleep(1500); if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) { ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0); if (ret < 0) { atm_err(instance->usbatm, "change adsl state:" " CHIP_ADSL_LINE_START returned %d\n", ret); ret = -EIO; } else { ret = len; poll = CXPOLL_POLLING; } } if (!strcmp(str_cmd, "poll")) { ret = len; poll = CXPOLL_POLLING; } if (ret == 0) { ret = -EINVAL; poll = -1; } if (poll == CXPOLL_POLLING) { mutex_lock(&instance->poll_state_serialize); switch (instance->poll_state) { case CXPOLL_STOPPED: /* start polling */ instance->poll_state = CXPOLL_POLLING; break; case CXPOLL_STOPPING: /* abort stop request */ instance->poll_state = CXPOLL_POLLING; fallthrough; case CXPOLL_POLLING: case CXPOLL_SHUTDOWN: /* don't start polling */ poll = -1; } mutex_unlock(&instance->poll_state_serialize); } else if (poll == CXPOLL_STOPPED) { mutex_lock(&instance->poll_state_serialize); /* request stop */ if (instance->poll_state == CXPOLL_POLLING) instance->poll_state = CXPOLL_STOPPING; mutex_unlock(&instance->poll_state_serialize); } mutex_unlock(&instance->adsl_state_serialize); if (poll == CXPOLL_POLLING) cxacru_poll_status(&instance->poll_work.work); return ret; } /* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */ static ssize_t adsl_config_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cxacru_data *instance = to_usbatm_driver_data( to_usb_interface(dev)); int len = strlen(buf); int ret, pos, num; __le32 data[CMD_PACKET_SIZE / 4]; if (!capable(CAP_NET_ADMIN)) return -EACCES; if (instance == NULL) return -ENODEV; pos = 0; num = 0; while (pos < len) { int tmp; u32 index; u32 value; ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp); if (ret < 2) return -EINVAL; if (index > 0x7f) return -EINVAL; if (tmp < 0 || tmp > len - pos) return -EINVAL; pos += tmp; /* skip trailing newline */ if (buf[pos] == '\n' && pos == len-1) pos++; data[num * 2 + 1] = cpu_to_le32(index); data[num * 2 + 2] = cpu_to_le32(value); num++; /* send config values when data buffer is full * or no more data */ if (pos >= len || num >= CMD_MAX_CONFIG) { char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */ data[0] = cpu_to_le32(num); ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET, (u8 *) data, 4 + num * 8, NULL, 0); if (ret < 0) { atm_err(instance->usbatm, "set card data returned %d\n", ret); return -EIO; } for (tmp = 0; tmp < num; tmp++) snprintf(log + tmp*12, 13, " %02x=%08x", le32_to_cpu(data[tmp * 2 + 1]), le32_to_cpu(data[tmp * 2 + 2])); atm_info(instance->usbatm, "config%s\n", log); num = 0; } } return len; } /* * All device attributes are included in CXACRU_ALL_FILES * so that the same list can be used multiple times: * INIT (define the device attributes) * CREATE (create all the device files) * REMOVE (remove all the device files) * * With the last two being defined as needed in the functions * they are used in before calling CXACRU_ALL_FILES() */ #define CXACRU_ALL_FILES(_action) \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_RATE, u32, downstream_rate); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_RATE, u32, upstream_rate); \ CXACRU_ATTR_##_action(CXINF_LINK_STATUS, LINK, link_status); \ CXACRU_ATTR_##_action(CXINF_LINE_STATUS, LINE, line_status); \ CXACRU__ATTR_##_action( mac_address); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_SNR_MARGIN, dB, upstream_snr_margin); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_SNR_MARGIN, dB, downstream_snr_margin); \ 
CXACRU_ATTR_##_action(CXINF_UPSTREAM_ATTENUATION, dB, upstream_attenuation); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_ATTENUATION, dB, downstream_attenuation); \ CXACRU_ATTR_##_action(CXINF_TRANSMITTER_POWER, s8, transmitter_power); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_BITS_PER_FRAME, u32, upstream_bits_per_frame); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_BITS_PER_FRAME, u32, downstream_bits_per_frame); \ CXACRU_ATTR_##_action(CXINF_STARTUP_ATTEMPTS, u32, startup_attempts); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_CRC_ERRORS, u32, upstream_crc_errors); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_CRC_ERRORS, u32, downstream_crc_errors); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_FEC_ERRORS, u32, upstream_fec_errors); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_FEC_ERRORS, u32, downstream_fec_errors); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_HEC_ERRORS, u32, upstream_hec_errors); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_HEC_ERRORS, u32, downstream_hec_errors); \ CXACRU_ATTR_##_action(CXINF_LINE_STARTABLE, bool, line_startable); \ CXACRU_ATTR_##_action(CXINF_MODULATION, MODU, modulation); \ CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \ CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \ CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \ CXACRU_CMD_##_action( adsl_state); \ CXACRU_SET_##_action( adsl_config); CXACRU_ALL_FILES(INIT); static struct attribute *cxacru_attrs[] = { &dev_attr_adsl_config.attr, &dev_attr_adsl_state.attr, &dev_attr_adsl_controller_version.attr, &dev_attr_adsl_headend_environment.attr, &dev_attr_adsl_headend.attr, &dev_attr_modulation.attr, &dev_attr_line_startable.attr, &dev_attr_downstream_hec_errors.attr, &dev_attr_upstream_hec_errors.attr, &dev_attr_downstream_fec_errors.attr, &dev_attr_upstream_fec_errors.attr, &dev_attr_downstream_crc_errors.attr, &dev_attr_upstream_crc_errors.attr, &dev_attr_startup_attempts.attr, &dev_attr_downstream_bits_per_frame.attr, &dev_attr_upstream_bits_per_frame.attr, &dev_attr_transmitter_power.attr, &dev_attr_downstream_attenuation.attr, &dev_attr_upstream_attenuation.attr, &dev_attr_downstream_snr_margin.attr, &dev_attr_upstream_snr_margin.attr, &dev_attr_mac_address.attr, &dev_attr_line_status.attr, &dev_attr_link_status.attr, &dev_attr_upstream_rate.attr, &dev_attr_downstream_rate.attr, NULL, }; ATTRIBUTE_GROUPS(cxacru); /* the following three functions are stolen from drivers/usb/core/message.c */ static void cxacru_blocking_completion(struct urb *urb) { complete(urb->context); } struct cxacru_timer { struct timer_list timer; struct urb *urb; }; static void cxacru_timeout_kill(struct timer_list *t) { struct cxacru_timer *timer = from_timer(timer, t, timer); usb_unlink_urb(timer->urb); } static int cxacru_start_wait_urb(struct urb *urb, struct completion *done, int *actual_length) { struct cxacru_timer timer = { .urb = urb, }; timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0); mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT)); wait_for_completion(done); del_timer_sync(&timer.timer); destroy_timer_on_stack(&timer.timer); if (actual_length) *actual_length = urb->actual_length; return urb->status; /* must read status after completion */ } static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm, u8 *wdata, int wsize, u8 *rdata, int rsize) { int ret, actlen; int offb, offd; const int stride = CMD_PACKET_SIZE - 4; u8 *wbuf = instance->snd_buf; u8 *rbuf = instance->rcv_buf; int wbuflen = ((wsize - 1) / stride + 1) * 
CMD_PACKET_SIZE; int rbuflen = ((rsize - 1) / stride + 1) * CMD_PACKET_SIZE; if (wbuflen > PAGE_SIZE || rbuflen > PAGE_SIZE) { if (printk_ratelimit()) usb_err(instance->usbatm, "requested transfer size too large (%d, %d)\n", wbuflen, rbuflen); ret = -ENOMEM; goto err; } mutex_lock(&instance->cm_serialize); /* submit reading urb before the writing one */ init_completion(&instance->rcv_done); ret = usb_submit_urb(instance->rcv_urb, GFP_KERNEL); if (ret < 0) { if (printk_ratelimit()) usb_err(instance->usbatm, "submit of read urb for cm %#x failed (%d)\n", cm, ret); goto fail; } memset(wbuf, 0, wbuflen); /* handle wsize == 0 */ wbuf[0] = cm; for (offb = offd = 0; offd < wsize; offd += stride, offb += CMD_PACKET_SIZE) { wbuf[offb] = cm; memcpy(wbuf + offb + 4, wdata + offd, min_t(int, stride, wsize - offd)); } instance->snd_urb->transfer_buffer_length = wbuflen; init_completion(&instance->snd_done); ret = usb_submit_urb(instance->snd_urb, GFP_KERNEL); if (ret < 0) { if (printk_ratelimit()) usb_err(instance->usbatm, "submit of write urb for cm %#x failed (%d)\n", cm, ret); goto fail; } ret = cxacru_start_wait_urb(instance->snd_urb, &instance->snd_done, NULL); if (ret < 0) { if (printk_ratelimit()) usb_err(instance->usbatm, "send of cm %#x failed (%d)\n", cm, ret); goto fail; } ret = cxacru_start_wait_urb(instance->rcv_urb, &instance->rcv_done, &actlen); if (ret < 0) { if (printk_ratelimit()) usb_err(instance->usbatm, "receive of cm %#x failed (%d)\n", cm, ret); goto fail; } if (actlen % CMD_PACKET_SIZE || !actlen) { if (printk_ratelimit()) usb_err(instance->usbatm, "invalid response length to cm %#x: %d\n", cm, actlen); ret = -EIO; goto fail; } /* check the return status and copy the data to the output buffer, if needed */ for (offb = offd = 0; offd < rsize && offb < actlen; offb += CMD_PACKET_SIZE) { if (rbuf[offb] != cm) { if (printk_ratelimit()) usb_err(instance->usbatm, "wrong cm %#x in response to cm %#x\n", rbuf[offb], cm); ret = -EIO; goto fail; } if (rbuf[offb + 1] != CM_STATUS_SUCCESS) { if (printk_ratelimit()) usb_err(instance->usbatm, "response to cm %#x failed: %#x\n", cm, rbuf[offb + 1]); ret = -EIO; goto fail; } if (offd >= rsize) break; memcpy(rdata + offd, rbuf + offb + 4, min_t(int, stride, rsize - offd)); offd += stride; } ret = offd; usb_dbg(instance->usbatm, "cm %#x\n", cm); fail: mutex_unlock(&instance->cm_serialize); err: return ret; } static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_request cm, u32 *data, int size) { int ret, len; __le32 *buf; int offb; unsigned int offd; const int stride = CMD_PACKET_SIZE / (4 * 2) - 1; int buflen = ((size - 1) / stride + 1 + size * 2) * 4; buf = kmalloc(buflen, GFP_KERNEL); if (!buf) return -ENOMEM; ret = cxacru_cm(instance, cm, NULL, 0, (u8 *) buf, buflen); if (ret < 0) goto cleanup; /* len > 0 && len % 4 == 0 guaranteed by cxacru_cm() */ len = ret / 4; for (offb = 0; offb < len; ) { int l = le32_to_cpu(buf[offb++]); if (l < 0 || l > stride || l > (len - offb) / 2) { if (printk_ratelimit()) usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n", cm, l); ret = -EIO; goto cleanup; } while (l--) { offd = le32_to_cpu(buf[offb++]); if (offd >= size) { if (printk_ratelimit()) usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n", offd, cm); ret = -EIO; goto cleanup; } data[offd] = le32_to_cpu(buf[offb++]); } } ret = 0; cleanup: kfree(buf); return ret; } static int cxacru_card_status(struct cxacru_data *instance) { int ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0); 
if (ret < 0) { /* firmware not loaded */ usb_dbg(instance->usbatm, "cxacru_adsl_start: CARD_GET_STATUS returned %d\n", ret); return ret; } return 0; } static int cxacru_atm_start(struct usbatm_data *usbatm_instance, struct atm_dev *atm_dev) { struct cxacru_data *instance = usbatm_instance->driver_data; struct usb_interface *intf = usbatm_instance->usb_intf; int ret; int start_polling = 1; dev_dbg(&intf->dev, "%s\n", __func__); /* Read MAC address */ ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_MAC_ADDRESS, NULL, 0, atm_dev->esi, sizeof(atm_dev->esi)); if (ret < 0) { atm_err(usbatm_instance, "cxacru_atm_start: CARD_GET_MAC_ADDRESS returned %d\n", ret); return ret; } /* start ADSL */ mutex_lock(&instance->adsl_state_serialize); ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0); if (ret < 0) atm_err(usbatm_instance, "cxacru_atm_start: CHIP_ADSL_LINE_START returned %d\n", ret); /* Start status polling */ mutex_lock(&instance->poll_state_serialize); switch (instance->poll_state) { case CXPOLL_STOPPED: /* start polling */ instance->poll_state = CXPOLL_POLLING; break; case CXPOLL_STOPPING: /* abort stop request */ instance->poll_state = CXPOLL_POLLING; fallthrough; case CXPOLL_POLLING: case CXPOLL_SHUTDOWN: /* don't start polling */ start_polling = 0; } mutex_unlock(&instance->poll_state_serialize); mutex_unlock(&instance->adsl_state_serialize); if (start_polling) cxacru_poll_status(&instance->poll_work.work); return 0; } static void cxacru_poll_status(struct work_struct *work) { struct cxacru_data *instance = container_of(work, struct cxacru_data, poll_work.work); u32 buf[CXINF_MAX] = {}; struct usbatm_data *usbatm = instance->usbatm; struct atm_dev *atm_dev = usbatm->atm_dev; int keep_polling = 1; int ret; ret = cxacru_cm_get_array(instance, CM_REQUEST_CARD_INFO_GET, buf, CXINF_MAX); if (ret < 0) { if (ret != -ESHUTDOWN) atm_warn(usbatm, "poll status: error %d\n", ret); mutex_lock(&instance->poll_state_serialize); if (instance->poll_state != CXPOLL_SHUTDOWN) { instance->poll_state = CXPOLL_STOPPED; if (ret != -ESHUTDOWN) atm_warn(usbatm, "polling disabled, set adsl_state" " to 'start' or 'poll' to resume\n"); } mutex_unlock(&instance->poll_state_serialize); goto reschedule; } memcpy(instance->card_info, buf, sizeof(instance->card_info)); if (instance->adsl_status != buf[CXINF_LINE_STARTABLE]) { instance->adsl_status = buf[CXINF_LINE_STARTABLE]; switch (instance->adsl_status) { case 0: atm_info(usbatm, "ADSL state: running\n"); break; case 1: atm_info(usbatm, "ADSL state: stopped\n"); break; default: atm_info(usbatm, "Unknown adsl status %02x\n", instance->adsl_status); break; } } if (instance->line_status == buf[CXINF_LINE_STATUS]) goto reschedule; instance->line_status = buf[CXINF_LINE_STATUS]; switch (instance->line_status) { case 0: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: down\n"); break; case 1: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: attempting to activate\n"); break; case 2: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: training\n"); break; case 3: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: channel analysis\n"); break; case 4: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: exchange\n"); break; case 5: atm_dev->link_rate = buf[CXINF_DOWNSTREAM_RATE] * 1000 / 424; atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND); atm_info(usbatm, "ADSL line: up (%d kb/s down | %d kb/s up)\n", 
buf[CXINF_DOWNSTREAM_RATE], buf[CXINF_UPSTREAM_RATE]); break; case 6: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: waiting\n"); break; case 7: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: initializing\n"); break; default: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN); atm_info(usbatm, "Unknown line state %02x\n", instance->line_status); break; } reschedule: mutex_lock(&instance->poll_state_serialize); if (instance->poll_state == CXPOLL_STOPPING && instance->adsl_status == 1 && /* stopped */ instance->line_status == 0) /* down */ instance->poll_state = CXPOLL_STOPPED; if (instance->poll_state == CXPOLL_STOPPED) keep_polling = 0; mutex_unlock(&instance->poll_state_serialize); if (keep_polling) schedule_delayed_work(&instance->poll_work, round_jiffies_relative(POLL_INTERVAL*HZ)); } static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw, u8 code1, u8 code2, u32 addr, const u8 *data, int size) { int ret; u8 *buf; int offd, offb; const int stride = CMD_PACKET_SIZE - 8; buf = (u8 *) __get_free_page(GFP_KERNEL); if (!buf) return -ENOMEM; offb = offd = 0; do { int l = min_t(int, stride, size - offd); buf[offb++] = fw; buf[offb++] = l; buf[offb++] = code1; buf[offb++] = code2; put_unaligned(cpu_to_le32(addr), (__le32 *)(buf + offb)); offb += 4; addr += l; if (l) memcpy(buf + offb, data + offd, l); if (l < stride) memset(buf + offb + l, 0, stride - l); offb += stride; offd += stride; if ((offb >= PAGE_SIZE) || (offd >= size)) { ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD), buf, offb, NULL, CMD_TIMEOUT); if (ret < 0) { dev_dbg(&usb_dev->dev, "sending fw %#x failed\n", fw); goto cleanup; } offb = 0; } } while (offd < size); dev_dbg(&usb_dev->dev, "sent fw %#x\n", fw); ret = 0; cleanup: free_page((unsigned long) buf); return ret; } static void cxacru_upload_firmware(struct cxacru_data *instance, const struct firmware *fw, const struct firmware *bp) { int ret; struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; __le16 signature[] = { usb_dev->descriptor.idVendor, usb_dev->descriptor.idProduct }; __le32 val; usb_dbg(usbatm, "%s\n", __func__); /* FirmwarePllFClkValue */ val = cpu_to_le32(instance->modem_type->pll_f_clk); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLFCLK_ADDR, (u8 *) &val, 4); if (ret) { usb_err(usbatm, "FirmwarePllFClkValue failed: %d\n", ret); return; } /* FirmwarePllBClkValue */ val = cpu_to_le32(instance->modem_type->pll_b_clk); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLBCLK_ADDR, (u8 *) &val, 4); if (ret) { usb_err(usbatm, "FirmwarePllBClkValue failed: %d\n", ret); return; } /* Enable SDRAM */ val = cpu_to_le32(SDRAM_ENA); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SDRAMEN_ADDR, (u8 *) &val, 4); if (ret) { usb_err(usbatm, "Enable SDRAM failed: %d\n", ret); return; } /* Firmware */ usb_info(usbatm, "loading firmware\n"); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size); if (ret) { usb_err(usbatm, "Firmware upload failed: %d\n", ret); return; } /* Boot ROM patch */ if (instance->modem_type->boot_rom_patch) { usb_info(usbatm, "loading boot ROM patch\n"); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size); if (ret) { usb_err(usbatm, "Boot ROM patching failed: %d\n", ret); return; } } /* Signature */ ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SIG_ADDR, (u8 *) signature, 4); if (ret) { usb_err(usbatm, "Signature storing failed: %d\n", ret); return; } 
usb_info(usbatm, "starting device\n"); if (instance->modem_type->boot_rom_patch) { val = cpu_to_le32(BR_ADDR); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4); } else { ret = cxacru_fw(usb_dev, FW_GOTO_MEM, 0x0, 0x0, FW_ADDR, NULL, 0); } if (ret) { usb_err(usbatm, "Passing control to firmware failed: %d\n", ret); return; } /* Delay to allow firmware to start up. */ msleep_interruptible(1000); usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD)); usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD)); usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_DATA)); usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_DATA)); ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0); if (ret < 0) { usb_err(usbatm, "modem failed to initialize: %d\n", ret); return; } } static int cxacru_find_firmware(struct cxacru_data *instance, char *phase, const struct firmware **fw_p) { struct usbatm_data *usbatm = instance->usbatm; struct device *dev = &usbatm->usb_intf->dev; char buf[16]; sprintf(buf, "cxacru-%s.bin", phase); usb_dbg(usbatm, "cxacru_find_firmware: looking for %s\n", buf); if (request_firmware(fw_p, buf, dev)) { usb_dbg(usbatm, "no stage %s firmware found\n", phase); return -ENOENT; } usb_info(usbatm, "found firmware %s\n", buf); return 0; } static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, struct usb_interface *usb_intf) { const struct firmware *fw, *bp; struct cxacru_data *instance = usbatm_instance->driver_data; int ret = cxacru_find_firmware(instance, "fw", &fw); if (ret) { usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n"); return ret; } if (instance->modem_type->boot_rom_patch) { ret = cxacru_find_firmware(instance, "bp", &bp); if (ret) { usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n"); release_firmware(fw); return ret; } } cxacru_upload_firmware(instance, fw, bp); if (instance->modem_type->boot_rom_patch) release_firmware(bp); release_firmware(fw); ret = cxacru_card_status(instance); if (ret) usb_dbg(usbatm_instance, "modem initialisation failed\n"); else usb_dbg(usbatm_instance, "done setting up the modem\n"); return ret; } static int cxacru_bind(struct usbatm_data *usbatm_instance, struct usb_interface *intf, const struct usb_device_id *id) { struct cxacru_data *instance; struct usb_device *usb_dev = interface_to_usbdev(intf); struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; struct usb_endpoint_descriptor *in, *out; int ret; /* instance init */ instance = kzalloc(sizeof(*instance), GFP_KERNEL); if (!instance) return -ENOMEM; instance->usbatm = usbatm_instance; instance->modem_type = (struct cxacru_modem_type *) id->driver_info; mutex_init(&instance->poll_state_serialize); instance->poll_state = CXPOLL_STOPPED; instance->line_status = -1; instance->adsl_status = -1; mutex_init(&instance->adsl_state_serialize); instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL); if (!instance->rcv_buf) { usb_dbg(usbatm_instance, "cxacru_bind: no memory for rcv_buf\n"); ret = -ENOMEM; goto fail; } instance->snd_buf = (u8 *) __get_free_page(GFP_KERNEL); if (!instance->snd_buf) { usb_dbg(usbatm_instance, "cxacru_bind: no memory for snd_buf\n"); ret = -ENOMEM; goto fail; } instance->rcv_urb = usb_alloc_urb(0, GFP_KERNEL); if (!instance->rcv_urb) { ret = -ENOMEM; goto fail; } instance->snd_urb = usb_alloc_urb(0, GFP_KERNEL); if (!instance->snd_urb) { ret = -ENOMEM; goto fail; } if (!cmd_ep) { 
usb_dbg(usbatm_instance, "cxacru_bind: no command endpoint\n"); ret = -ENODEV; goto fail; } if (usb_endpoint_xfer_int(&cmd_ep->desc)) ret = usb_find_common_endpoints(intf->cur_altsetting, NULL, NULL, &in, &out); else ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL); if (ret) { usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n"); ret = -ENODEV; goto fail; } if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { usb_fill_int_urb(instance->rcv_urb, usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), instance->rcv_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->rcv_done, 1); usb_fill_int_urb(instance->snd_urb, usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), instance->snd_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->snd_done, 4); } else { usb_fill_bulk_urb(instance->rcv_urb, usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD), instance->rcv_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->rcv_done); usb_fill_bulk_urb(instance->snd_urb, usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD), instance->snd_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->snd_done); } mutex_init(&instance->cm_serialize); INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status); usbatm_instance->driver_data = instance; usbatm_instance->flags = (cxacru_card_status(instance) ? 0 : UDSL_SKIP_HEAVY_INIT); return 0; fail: free_page((unsigned long) instance->snd_buf); free_page((unsigned long) instance->rcv_buf); usb_free_urb(instance->snd_urb); usb_free_urb(instance->rcv_urb); kfree(instance); return ret; } static void cxacru_unbind(struct usbatm_data *usbatm_instance, struct usb_interface *intf) { struct cxacru_data *instance = usbatm_instance->driver_data; int is_polling = 1; usb_dbg(usbatm_instance, "cxacru_unbind entered\n"); if (!instance) { usb_dbg(usbatm_instance, "cxacru_unbind: NULL instance!\n"); return; } mutex_lock(&instance->poll_state_serialize); BUG_ON(instance->poll_state == CXPOLL_SHUTDOWN); /* ensure that status polling continues unless * it has already stopped */ if (instance->poll_state == CXPOLL_STOPPED) is_polling = 0; /* stop polling from being stopped or started */ instance->poll_state = CXPOLL_SHUTDOWN; mutex_unlock(&instance->poll_state_serialize); if (is_polling) cancel_delayed_work_sync(&instance->poll_work); usb_kill_urb(instance->snd_urb); usb_kill_urb(instance->rcv_urb); usb_free_urb(instance->snd_urb); usb_free_urb(instance->rcv_urb); free_page((unsigned long) instance->snd_buf); free_page((unsigned long) instance->rcv_buf); kfree(instance); usbatm_instance->driver_data = NULL; } static const struct cxacru_modem_type cxacru_cafe = { .pll_f_clk = 0x02d874df, .pll_b_clk = 0x0196a51a, .boot_rom_patch = 1, }; static const struct cxacru_modem_type cxacru_cb00 = { .pll_f_clk = 0x5, .pll_b_clk = 0x3, .boot_rom_patch = 0, }; static const struct usb_device_id cxacru_usb_ids[] = { { /* V = Conexant P = ADSL modem (Euphrates project) */ USB_DEVICE(0x0572, 0xcafe), .driver_info = (unsigned long) &cxacru_cafe }, { /* V = Conexant P = ADSL modem (Hasbani project) */ USB_DEVICE(0x0572, 0xcb00), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Conexant P = ADSL modem */ USB_DEVICE(0x0572, 0xcb01), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Conexant P = ADSL modem (Well PTI-800) */ USB_DEVICE(0x0572, 0xcb02), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Conexant P = ADSL modem */ USB_DEVICE(0x0572, 0xcb06), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = 
Conexant P = ADSL modem (ZTE ZXDSL 852) */ USB_DEVICE(0x0572, 0xcb07), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Olitec P = ADSL modem version 2 */ USB_DEVICE(0x08e3, 0x0100), .driver_info = (unsigned long) &cxacru_cafe }, { /* V = Olitec P = ADSL modem version 3 */ USB_DEVICE(0x08e3, 0x0102), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Trust/Amigo Technology Co. P = AMX-CA86U */ USB_DEVICE(0x0eb0, 0x3457), .driver_info = (unsigned long) &cxacru_cafe }, { /* V = Zoom P = 5510 */ USB_DEVICE(0x1803, 0x5510), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Draytek P = Vigor 318 */ USB_DEVICE(0x0675, 0x0200), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Zyxel P = 630-C1 aka OMNI ADSL USB (Annex A) */ USB_DEVICE(0x0586, 0x330a), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Zyxel P = 630-C3 aka OMNI ADSL USB (Annex B) */ USB_DEVICE(0x0586, 0x330b), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Aethra P = Starmodem UM1020 */ USB_DEVICE(0x0659, 0x0020), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Aztech Systems P = ? AKA Pirelli AUA-010 */ USB_DEVICE(0x0509, 0x0812), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Netopia P = Cayman 3341(Annex A)/3351(Annex B) */ USB_DEVICE(0x100d, 0xcb01), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Netopia P = Cayman 3342(Annex A)/3352(Annex B) */ USB_DEVICE(0x100d, 0x3342), .driver_info = (unsigned long) &cxacru_cb00 }, {} }; MODULE_DEVICE_TABLE(usb, cxacru_usb_ids); static struct usbatm_driver cxacru_driver = { .driver_name = cxacru_driver_name, .bind = cxacru_bind, .heavy_init = cxacru_heavy_init, .unbind = cxacru_unbind, .atm_start = cxacru_atm_start, .bulk_in = CXACRU_EP_DATA, .bulk_out = CXACRU_EP_DATA, .rx_padding = 3, .tx_padding = 11, }; static int cxacru_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); char buf[15]; /* Avoid ADSL routers (cx82310_eth). * Abort if bDeviceClass is 0xff and iProduct is "USB NET CARD". */ if (usb_dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC && usb_string(usb_dev, usb_dev->descriptor.iProduct, buf, sizeof(buf)) > 0) { if (!strcmp(buf, "USB NET CARD")) { dev_info(&intf->dev, "ignoring cx82310_eth device\n"); return -ENODEV; } } return usbatm_usb_probe(intf, id, &cxacru_driver); } static struct usb_driver cxacru_usb_driver = { .name = cxacru_driver_name, .probe = cxacru_usb_probe, .disconnect = usbatm_usb_disconnect, .id_table = cxacru_usb_ids, .dev_groups = cxacru_groups, }; module_usb_driver(cxacru_usb_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
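The driver above exposes its control interface through sysfs: adsl_state_store() accepts the commands "start", "stop", "restart" and "poll" (requiring CAP_NET_ADMIN), and adsl_config_store() takes space-separated hex index=value pairs. A minimal userspace sketch of driving a line restart follows; the sysfs path is a hypothetical example, since the real attribute lives in the directory of whichever USB interface the driver is bound to.

/* Minimal userspace sketch: request an ADSL line restart through the
 * driver's adsl_state sysfs attribute.  The device path below is a
 * hypothetical example; adjust it to the actual USB interface directory.
 * Requires CAP_NET_ADMIN, matching the check in adsl_state_store().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; depends on the USB bus topology */
	const char *attr = "/sys/bus/usb/devices/1-1:1.0/adsl_state";
	const char cmd[] = "restart\n";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open adsl_state");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write adsl_state");
	close(fd);
	return 0;
}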
832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 /* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth address family and sockets. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/stringify.h> #include <linux/sched/signal.h> #include <asm/ioctls.h> #include <net/bluetooth/bluetooth.h> #include <linux/proc_fs.h> #include "leds.h" #include "selftest.h" /* Bluetooth sockets */ #define BT_MAX_PROTO (BTPROTO_LAST + 1) static const struct net_proto_family *bt_proto[BT_MAX_PROTO]; static DEFINE_RWLOCK(bt_proto_lock); static struct lock_class_key bt_lock_key[BT_MAX_PROTO]; static const char *const bt_key_strings[BT_MAX_PROTO] = { "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP", "sk_lock-AF_BLUETOOTH-BTPROTO_HCI", "sk_lock-AF_BLUETOOTH-BTPROTO_SCO", "sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM", "sk_lock-AF_BLUETOOTH-BTPROTO_BNEP", "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP", "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP", "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP", "sk_lock-AF_BLUETOOTH-BTPROTO_ISO", }; static struct lock_class_key bt_slock_key[BT_MAX_PROTO]; static const char *const bt_slock_key_strings[BT_MAX_PROTO] = { "slock-AF_BLUETOOTH-BTPROTO_L2CAP", "slock-AF_BLUETOOTH-BTPROTO_HCI", "slock-AF_BLUETOOTH-BTPROTO_SCO", "slock-AF_BLUETOOTH-BTPROTO_RFCOMM", "slock-AF_BLUETOOTH-BTPROTO_BNEP", "slock-AF_BLUETOOTH-BTPROTO_CMTP", "slock-AF_BLUETOOTH-BTPROTO_HIDP", "slock-AF_BLUETOOTH-BTPROTO_AVDTP", "slock-AF_BLUETOOTH-BTPROTO_ISO", }; void bt_sock_reclassify_lock(struct sock *sk, int proto) { BUG_ON(!sk); BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, bt_slock_key_strings[proto], &bt_slock_key[proto], bt_key_strings[proto], &bt_lock_key[proto]); } EXPORT_SYMBOL(bt_sock_reclassify_lock); int bt_sock_register(int proto, const struct net_proto_family *ops) { int err = 0; if (proto < 0 || proto >= BT_MAX_PROTO) return -EINVAL; write_lock(&bt_proto_lock); if (bt_proto[proto]) err = -EEXIST; else bt_proto[proto] = ops; write_unlock(&bt_proto_lock); return err; } EXPORT_SYMBOL(bt_sock_register); void bt_sock_unregister(int proto) { if (proto < 0 || proto >= BT_MAX_PROTO) return; write_lock(&bt_proto_lock); bt_proto[proto] = NULL; write_unlock(&bt_proto_lock); } EXPORT_SYMBOL(bt_sock_unregister); static int bt_sock_create(struct net *net, struct socket *sock, int proto, int kern) { int err; if (net != &init_net) return -EAFNOSUPPORT; if (proto 
< 0 || proto >= BT_MAX_PROTO) return -EINVAL; if (!bt_proto[proto]) request_module("bt-proto-%d", proto); err = -EPROTONOSUPPORT; read_lock(&bt_proto_lock); if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { err = bt_proto[proto]->create(net, sock, proto, kern); if (!err) bt_sock_reclassify_lock(sock->sk, proto); module_put(bt_proto[proto]->owner); } read_unlock(&bt_proto_lock); return err; } struct sock *bt_sock_alloc(struct net *net, struct socket *sock, struct proto *prot, int proto, gfp_t prio, int kern) { struct sock *sk; sk = sk_alloc(net, PF_BLUETOOTH, prio, prot, kern); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = BT_OPEN; /* Init peer information so it can be properly monitored */ if (!kern) { spin_lock(&sk->sk_peer_lock); sk->sk_peer_pid = get_pid(task_tgid(current)); sk->sk_peer_cred = get_current_cred(); spin_unlock(&sk->sk_peer_lock); } return sk; } EXPORT_SYMBOL(bt_sock_alloc); void bt_sock_link(struct bt_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_add_node(sk, &l->head); write_unlock(&l->lock); } EXPORT_SYMBOL(bt_sock_link); void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_del_node_init(sk); write_unlock(&l->lock); } EXPORT_SYMBOL(bt_sock_unlink); bool bt_sock_linked(struct bt_sock_list *l, struct sock *s) { struct sock *sk; if (!l || !s) return false; read_lock(&l->lock); sk_for_each(sk, &l->head) { if (s == sk) { read_unlock(&l->lock); return true; } } read_unlock(&l->lock); return false; } EXPORT_SYMBOL(bt_sock_linked); void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh) { const struct cred *old_cred; struct pid *old_pid; BT_DBG("parent %p, sk %p", parent, sk); sock_hold(sk); if (bh) bh_lock_sock_nested(sk); else lock_sock_nested(sk, SINGLE_DEPTH_NESTING); list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); bt_sk(sk)->parent = parent; /* Copy credentials from parent since for incoming connections the * socket is allocated by the kernel. */ spin_lock(&sk->sk_peer_lock); old_pid = sk->sk_peer_pid; old_cred = sk->sk_peer_cred; sk->sk_peer_pid = get_pid(parent->sk_peer_pid); sk->sk_peer_cred = get_cred(parent->sk_peer_cred); spin_unlock(&sk->sk_peer_lock); put_pid(old_pid); put_cred(old_cred); if (bh) bh_unlock_sock(sk); else release_sock(sk); sk_acceptq_added(parent); } EXPORT_SYMBOL(bt_accept_enqueue); /* Calling function must hold the sk lock. * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list. */ void bt_accept_unlink(struct sock *sk) { BT_DBG("sk %p state %d", sk, sk->sk_state); list_del_init(&bt_sk(sk)->accept_q); sk_acceptq_removed(bt_sk(sk)->parent); bt_sk(sk)->parent = NULL; sock_put(sk); } EXPORT_SYMBOL(bt_accept_unlink); struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) { struct bt_sock *s, *n; struct sock *sk; BT_DBG("parent %p", parent); restart: list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { sk = (struct sock *)s; /* Prevent early freeing of sk due to unlink and sock_kill */ sock_hold(sk); lock_sock(sk); /* Check sk has not already been unlinked via * bt_accept_unlink() due to serialisation caused by sk locking */ if (!bt_sk(sk)->parent) { BT_DBG("sk %p, already unlinked", sk); release_sock(sk); sock_put(sk); /* Restart the loop as sk is no longer in the list * and also avoid a potential infinite loop because * list_for_each_entry_safe() is not thread safe. 
*/ goto restart; } /* sk is safely in the parent list so reduce reference count */ sock_put(sk); /* FIXME: Is this check still needed */ if (sk->sk_state == BT_CLOSED) { bt_accept_unlink(sk); release_sock(sk); continue; } if (sk->sk_state == BT_CONNECTED || !newsock || test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) { bt_accept_unlink(sk); if (newsock) sock_graft(sk, newsock); release_sock(sk); return sk; } release_sock(sk); } return NULL; } EXPORT_SYMBOL(bt_accept_dequeue); int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; size_t copied; size_t skblen; int err; BT_DBG("sock %p sk %p len %zu", sock, sk, len); if (flags & MSG_OOB) return -EOPNOTSUPP; skb = skb_recv_datagram(sk, flags, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) err = 0; return err; } skblen = skb->len; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err == 0) { sock_recv_cmsgs(msg, sk, skb); if (msg->msg_name && bt_sk(sk)->skb_msg_name) bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen); if (test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags)) { u8 pkt_status = hci_skb_pkt_status(skb); put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, sizeof(pkt_status), &pkt_status); } } skb_free_datagram(sk, skb); if (flags & MSG_TRUNC) copied = skblen; return err ? : copied; } EXPORT_SYMBOL(bt_sock_recvmsg); static long bt_sock_data_wait(struct sock *sk, long timeo) { DECLARE_WAITQUEUE(wait, current); add_wait_queue(sk_sleep(sk), &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (!skb_queue_empty(&sk->sk_receive_queue)) break; if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN)) break; if (signal_pending(current) || !timeo) break; sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return timeo; } int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; int err = 0; size_t target, copied = 0; long timeo; if (flags & MSG_OOB) return -EOPNOTSUPP; BT_DBG("sk %p size %zu", sk, size); lock_sock(sk); target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); do { struct sk_buff *skb; int chunk; skb = skb_dequeue(&sk->sk_receive_queue); if (!skb) { if (copied >= target) break; err = sock_error(sk); if (err) break; if (sk->sk_shutdown & RCV_SHUTDOWN) break; err = -EAGAIN; if (!timeo) break; timeo = bt_sock_data_wait(sk, timeo); if (signal_pending(current)) { err = sock_intr_errno(timeo); goto out; } continue; } chunk = min_t(unsigned int, skb->len, size); if (skb_copy_datagram_msg(skb, 0, msg, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (!copied) copied = -EFAULT; break; } copied += chunk; size -= chunk; sock_recv_cmsgs(msg, sk, skb); if (!(flags & MSG_PEEK)) { int skb_len = skb_headlen(skb); if (chunk <= skb_len) { __skb_pull(skb, chunk); } else { struct sk_buff *frag; __skb_pull(skb, skb_len); chunk -= skb_len; skb_walk_frags(skb, frag) { if (chunk <= frag->len) { /* Pulling partial data */ skb->len -= chunk; skb->data_len -= chunk; __skb_pull(frag, chunk); break; } else if (frag->len) { /* Pulling all frag data */ chunk -= frag->len; skb->len -= frag->len; skb->data_len -= frag->len; __skb_pull(frag, frag->len); } } } if 
(skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); break; } kfree_skb(skb); } else { /* put message back and return */ skb_queue_head(&sk->sk_receive_queue, skb); break; } } while (size); out: release_sock(sk); return copied ? : err; } EXPORT_SYMBOL(bt_sock_stream_recvmsg); static inline __poll_t bt_accept_poll(struct sock *parent) { struct bt_sock *s, *n; struct sock *sk; list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { sk = (struct sock *)s; if (sk->sk_state == BT_CONNECTED || (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) && sk->sk_state == BT_CONNECT2)) return EPOLLIN | EPOLLRDNORM; } return 0; } __poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == BT_LISTEN) return bt_accept_poll(sk); if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) mask |= EPOLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= EPOLLHUP; if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_state == BT_CLOSED) mask |= EPOLLHUP; if (sk->sk_state == BT_CONNECT || sk->sk_state == BT_CONNECT2 || sk->sk_state == BT_CONFIG) return mask; if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); return mask; } EXPORT_SYMBOL(bt_sock_poll); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct sk_buff *skb; long amount; int err; BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); switch (cmd) { case TIOCOUTQ: if (sk->sk_state == BT_LISTEN) return -EINVAL; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; err = put_user(amount, (int __user *)arg); break; case TIOCINQ: if (sk->sk_state == BT_LISTEN) return -EINVAL; spin_lock(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); amount = skb ? 
skb->len : 0; spin_unlock(&sk->sk_receive_queue.lock); err = put_user(amount, (int __user *)arg); break; default: err = -ENOIOCTLCMD; break; } return err; } EXPORT_SYMBOL(bt_sock_ioctl); /* This function expects the sk lock to be held when called */ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) { DECLARE_WAITQUEUE(wait, current); int err = 0; BT_DBG("sk %p", sk); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); while (sk->sk_state != state) { if (!timeo) { err = -EINPROGRESS; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } EXPORT_SYMBOL(bt_sock_wait_state); /* This function expects the sk lock to be held when called */ int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags) { DECLARE_WAITQUEUE(wait, current); unsigned long timeo; int err = 0; BT_DBG("sk %p", sk); timeo = sock_sndtimeo(sk, !!(msg_flags & MSG_DONTWAIT)); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) { if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } EXPORT_SYMBOL(bt_sock_wait_ready); #ifdef CONFIG_PROC_FS static void *bt_seq_start(struct seq_file *seq, loff_t *pos) __acquires(seq->private->l->lock) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); read_lock(&l->lock); return seq_hlist_start_head(&l->head, *pos); } static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); return seq_hlist_next(v, &l->head, pos); } static void bt_seq_stop(struct seq_file *seq, void *v) __releases(seq->private->l->lock) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); read_unlock(&l->lock); } static int bt_seq_show(struct seq_file *seq, void *v) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); if (v == SEQ_START_TOKEN) { seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent"); if (l->custom_seq_show) { seq_putc(seq, ' '); l->custom_seq_show(seq, v); } seq_putc(seq, '\n'); } else { struct sock *sk = sk_entry(v); struct bt_sock *bt = bt_sk(sk); seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu", sk, refcount_read(&sk->sk_refcnt), sk_rmem_alloc_get(sk), sk_wmem_alloc_get(sk), from_kuid(seq_user_ns(seq), sock_i_uid(sk)), sock_i_ino(sk), bt->parent ? 
sock_i_ino(bt->parent) : 0LU); if (l->custom_seq_show) { seq_putc(seq, ' '); l->custom_seq_show(seq, v); } seq_putc(seq, '\n'); } return 0; } static const struct seq_operations bt_seq_ops = { .start = bt_seq_start, .next = bt_seq_next, .stop = bt_seq_stop, .show = bt_seq_show, }; int bt_procfs_init(struct net *net, const char *name, struct bt_sock_list *sk_list, int (*seq_show)(struct seq_file *, void *)) { sk_list->custom_seq_show = seq_show; if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list)) return -ENOMEM; return 0; } void bt_procfs_cleanup(struct net *net, const char *name) { remove_proc_entry(name, net->proc_net); } #else int bt_procfs_init(struct net *net, const char *name, struct bt_sock_list *sk_list, int (*seq_show)(struct seq_file *, void *)) { return 0; } void bt_procfs_cleanup(struct net *net, const char *name) { } #endif EXPORT_SYMBOL(bt_procfs_init); EXPORT_SYMBOL(bt_procfs_cleanup); static const struct net_proto_family bt_sock_family_ops = { .owner = THIS_MODULE, .family = PF_BLUETOOTH, .create = bt_sock_create, }; struct dentry *bt_debugfs; EXPORT_SYMBOL_GPL(bt_debugfs); #define VERSION __stringify(BT_SUBSYS_VERSION) "." \ __stringify(BT_SUBSYS_REVISION) static int __init bt_init(void) { int err; sock_skb_cb_check_size(sizeof(struct bt_skb_cb)); BT_INFO("Core ver %s", VERSION); err = bt_selftest(); if (err < 0) return err; bt_debugfs = debugfs_create_dir("bluetooth", NULL); bt_leds_init(); err = bt_sysfs_init(); if (err < 0) goto cleanup_led; err = sock_register(&bt_sock_family_ops); if (err) goto cleanup_sysfs; BT_INFO("HCI device and connection manager initialized"); err = hci_sock_init(); if (err) goto unregister_socket; err = l2cap_init(); if (err) goto cleanup_socket; err = sco_init(); if (err) goto cleanup_cap; err = mgmt_init(); if (err) goto cleanup_sco; return 0; cleanup_sco: sco_exit(); cleanup_cap: l2cap_exit(); cleanup_socket: hci_sock_cleanup(); unregister_socket: sock_unregister(PF_BLUETOOTH); cleanup_sysfs: bt_sysfs_cleanup(); cleanup_led: bt_leds_cleanup(); debugfs_remove_recursive(bt_debugfs); return err; } static void __exit bt_exit(void) { iso_exit(); mgmt_exit(); sco_exit(); l2cap_exit(); hci_sock_cleanup(); sock_unregister(PF_BLUETOOTH); bt_sysfs_cleanup(); bt_leds_cleanup(); debugfs_remove_recursive(bt_debugfs); } subsys_initcall(bt_init); module_exit(bt_exit); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth Core ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
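bt_sock_create() above dispatches socket(AF_BLUETOOTH, ...) calls to whichever protocol registered itself via bt_sock_register(), and bt_sock_ioctl() provides the generic TIOCOUTQ/TIOCINQ handling. A minimal userspace sketch exercising that path is shown below; it assumes the kernel's BTPROTO_L2CAP value (0, defined locally so the sketch does not depend on the BlueZ development headers) and that the L2CAP socket's ioctl handler services TIOCINQ through the generic bt_sock_ioctl() shown above.

/* Minimal userspace sketch: create an AF_BLUETOOTH L2CAP socket (handled
 * by bt_sock_create() above) and query the receive queue length, which is
 * expected to reach the TIOCINQ case in bt_sock_ioctl().  BTPROTO_L2CAP is
 * defined locally and matches the kernel's value.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef BTPROTO_L2CAP
#define BTPROTO_L2CAP 0
#endif

int main(void)
{
	int pending = 0;
	int fd = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);

	if (fd < 0) {
		perror("socket(AF_BLUETOOTH)");
		return 1;
	}
	if (ioctl(fd, TIOCINQ, &pending) < 0)
		perror("ioctl(TIOCINQ)");
	else
		printf("bytes queued for read: %d\n", pending);
	close(fd);
	return 0;
}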
7187 24 6187 6192 6046 224 6205 5803 2977 2974 2969 2976 2978 728 725 729 258 6 619 641 619 2781 2792 2643 224 12 2800 2371 2134 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 /* SPDX-License-Identifier: GPL-2.0 */ /* * Percpu refcounts: * (C) 2012 Google, Inc. * Author: Kent Overstreet <koverstreet@google.com> * * This implements a refcount with similar semantics to atomic_t - atomic_inc(), * atomic_dec_and_test() - but percpu. * * There's one important difference between percpu refs and normal atomic_t * refcounts; you have to keep track of your initial refcount, and then when you * start shutting down you call percpu_ref_kill() _before_ dropping the initial * refcount. * * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less * than an atomic_t - this is because of the way shutdown works, see * percpu_ref_kill()/PERCPU_COUNT_BIAS. * * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill() * puts the ref back in single atomic_t mode, collecting the per cpu refs and * issuing the appropriate barriers, and then marks the ref as shutting down so * that percpu_ref_put() will check for the ref hitting 0. After it returns, * it's safe to drop the initial ref. * * USAGE: * * See fs/aio.c for some example usage; it's used there for struct kioctx, which * is created when userspaces calls io_setup(), and destroyed when userspace * calls io_destroy() or the process exits. * * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it * removes the kioctx from the proccess's table of kioctxs and kills percpu_ref. * After that, there can't be any new users of the kioctx (from lookup_ioctx()) * and it's then safe to drop the initial ref with percpu_ref_put(). * * Note that the free path, free_ioctx(), needs to go through explicit call_rcu() * to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't * imply RCU grace periods of any kind and if a user wants to combine percpu_ref * with RCU protection, it must be done explicitly. 
* * Code that does a two stage shutdown like this often needs some kind of * explicit synchronization to ensure the initial refcount can only be dropped * once - percpu_ref_kill() does this for you, it returns true once and false if * someone else already called it. The aio code uses it this way, but it's not * necessary if the code has some other mechanism to synchronize teardown. * around. */ #ifndef _LINUX_PERCPU_REFCOUNT_H #define _LINUX_PERCPU_REFCOUNT_H #include <linux/atomic.h> #include <linux/percpu.h> #include <linux/rcupdate.h> #include <linux/types.h> #include <linux/gfp.h> struct percpu_ref; typedef void (percpu_ref_func_t)(struct percpu_ref *); /* flags set in the lower bits of percpu_ref->percpu_count_ptr */ enum { __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */ __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */ __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD, __PERCPU_REF_FLAG_BITS = 2, }; /* @flags for percpu_ref_init() */ enum { /* * Start w/ ref == 1 in atomic mode. Can be switched to percpu * operation using percpu_ref_switch_to_percpu(). If initialized * with this flag, the ref will stay in atomic mode until * percpu_ref_switch_to_percpu() is invoked on it. * Implies ALLOW_REINIT. */ PERCPU_REF_INIT_ATOMIC = 1 << 0, /* * Start dead w/ ref == 0 in atomic mode. Must be revived with * percpu_ref_reinit() before used. Implies INIT_ATOMIC and * ALLOW_REINIT. */ PERCPU_REF_INIT_DEAD = 1 << 1, /* * Allow switching from atomic mode to percpu mode. */ PERCPU_REF_ALLOW_REINIT = 1 << 2, }; struct percpu_ref_data { atomic_long_t count; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic:1; bool allow_reinit:1; struct rcu_head rcu; struct percpu_ref *ref; }; struct percpu_ref { /* * The low bit of the pointer indicates whether the ref is in percpu * mode; if set, then get/put will manipulate the atomic_t. */ unsigned long percpu_count_ptr; /* * 'percpu_ref' is often embedded into user structure, and only * 'percpu_count_ptr' is required in fast path, move other fields * into 'percpu_ref_data', so we can reduce memory footprint in * fast path. */ struct percpu_ref_data *data; }; int __must_check percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, unsigned int flags, gfp_t gfp); void percpu_ref_exit(struct percpu_ref *ref); void percpu_ref_switch_to_atomic(struct percpu_ref *ref, percpu_ref_func_t *confirm_switch); void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref); void percpu_ref_switch_to_percpu(struct percpu_ref *ref); void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_func_t *confirm_kill); void percpu_ref_resurrect(struct percpu_ref *ref); void percpu_ref_reinit(struct percpu_ref *ref); bool percpu_ref_is_zero(struct percpu_ref *ref); /** * percpu_ref_kill - drop the initial ref * @ref: percpu_ref to kill * * Must be used to drop the initial ref on a percpu refcount; must be called * precisely once before shutdown. * * Switches @ref into atomic mode before gathering up the percpu counters * and dropping the initial ref. * * There are no implied RCU grace periods between kill and release. */ static inline void percpu_ref_kill(struct percpu_ref *ref) { percpu_ref_kill_and_confirm(ref, NULL); } /* * Internal helper. Don't use outside percpu-refcount proper. The * function doesn't return the pointer and let the caller test it for NULL * because doing so forces the compiler to generate two conditional * branches as it can't assume that @ref->percpu_count is not NULL. 
*/ static inline bool __ref_is_percpu(struct percpu_ref *ref, unsigned long __percpu **percpu_countp) { unsigned long percpu_ptr; /* * The value of @ref->percpu_count_ptr is tested for * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then * used as a pointer. If the compiler generates a separate fetch * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in * between contaminating the pointer value, meaning that * READ_ONCE() is required when fetching it. * * The dependency ordering from the READ_ONCE() pairs * with smp_store_release() in __percpu_ref_switch_to_percpu(). */ percpu_ptr = READ_ONCE(ref->percpu_count_ptr); /* * Theoretically, the following could test just ATOMIC; however, * then we'd have to mask off DEAD separately as DEAD may be * visible without ATOMIC if we race with percpu_ref_kill(). DEAD * implies ATOMIC anyway. Test them together. */ if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)) return false; *percpu_countp = (unsigned long __percpu *)percpu_ptr; return true; } /** * percpu_ref_get_many - increment a percpu refcount * @ref: percpu_ref to get * @nr: number of references to get * * Analogous to atomic_long_add(). * * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock(); if (__ref_is_percpu(ref, &percpu_count)) this_cpu_add(*percpu_count, nr); else atomic_long_add(nr, &ref->data->count); rcu_read_unlock(); } /** * percpu_ref_get - increment a percpu refcount * @ref: percpu_ref to get * * Analogous to atomic_long_inc(). * * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_get(struct percpu_ref *ref) { percpu_ref_get_many(ref, 1); } /** * percpu_ref_tryget_many - try to increment a percpu refcount * @ref: percpu_ref to try-get * @nr: number of references to get * * Increment a percpu refcount by @nr unless its count already reached zero. * Returns %true on success; %false on failure. * * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_tryget_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; bool ret; rcu_read_lock(); if (__ref_is_percpu(ref, &percpu_count)) { this_cpu_add(*percpu_count, nr); ret = true; } else { ret = atomic_long_add_unless(&ref->data->count, nr, 0); } rcu_read_unlock(); return ret; } /** * percpu_ref_tryget - try to increment a percpu refcount * @ref: percpu_ref to try-get * * Increment a percpu refcount unless its count already reached zero. * Returns %true on success; %false on failure. * * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_tryget(struct percpu_ref *ref) { return percpu_ref_tryget_many(ref, 1); } /** * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the * caller is responsible for taking RCU. * * This function is safe to call as long as @ref is between init and exit. 
*/ static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref) { unsigned long __percpu *percpu_count; bool ret = false; WARN_ON_ONCE(!rcu_read_lock_held()); if (likely(__ref_is_percpu(ref, &percpu_count))) { this_cpu_inc(*percpu_count); ret = true; } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) { ret = atomic_long_inc_not_zero(&ref->data->count); } return ret; } /** * percpu_ref_tryget_live - try to increment a live percpu refcount * @ref: percpu_ref to try-get * * Increment a percpu refcount unless it has already been killed. Returns * %true on success; %false on failure. * * Completion of percpu_ref_kill() in itself doesn't guarantee that this * function will fail. For such guarantee, percpu_ref_kill_and_confirm() * should be used. After the confirm_kill callback is invoked, it's * guaranteed that no new reference will be given out by * percpu_ref_tryget_live(). * * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) { bool ret = false; rcu_read_lock(); ret = percpu_ref_tryget_live_rcu(ref); rcu_read_unlock(); return ret; } /** * percpu_ref_put_many - decrement a percpu refcount * @ref: percpu_ref to put * @nr: number of references to put * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) * * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock(); if (__ref_is_percpu(ref, &percpu_count)) this_cpu_sub(*percpu_count, nr); else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count))) ref->data->release(ref); rcu_read_unlock(); } /** * percpu_ref_put - decrement a percpu refcount * @ref: percpu_ref to put * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) * * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_put(struct percpu_ref *ref) { percpu_ref_put_many(ref, 1); } /** * percpu_ref_is_dying - test whether a percpu refcount is dying or dead * @ref: percpu_ref to test * * Returns %true if @ref is dying or dead. * * This function is safe to call as long as @ref is between init and exit * and the caller is responsible for synchronizing against state changes. */ static inline bool percpu_ref_is_dying(struct percpu_ref *ref) { return ref->percpu_count_ptr & __PERCPU_REF_DEAD; } #endif
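The header above documents the whole lifecycle: percpu_ref_init() takes the initial reference, get/put/tryget operate on per-CPU counters in the fast path, percpu_ref_kill() drops the initial reference exactly once after switching to atomic mode, and percpu_ref_exit() frees the per-CPU counters. A minimal kernel-side sketch of that lifecycle follows; all names (my_obj, my_obj_create, ...) are hypothetical, and the release callback frees the object, which the API permits.

/* Minimal sketch of the percpu_ref lifecycle documented above.
 * All names (my_obj, my_obj_create, ...) are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref ref;
	/* ... payload ... */
};

static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	percpu_ref_exit(&obj->ref);	/* free the per-CPU counters */
	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* starts with one (the initial) reference, in percpu mode */
	if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

/* fast-path users take and drop temporary references */
static void my_obj_use(struct my_obj *obj)
{
	if (percpu_ref_tryget_live(&obj->ref)) {
		/* ... use obj ... */
		percpu_ref_put(&obj->ref);
	}
}

/* shutdown: drop the initial reference, exactly once */
static void my_obj_destroy(struct my_obj *obj)
{
	percpu_ref_kill(&obj->ref);
}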
// SPDX-License-Identifier: GPL-2.0-only

#include "netlink.h"
#include "common.h"
#include <linux/phy.h>

struct linkstate_req_info {
	struct ethnl_req_info		base;
};

struct linkstate_reply_data {
	struct ethnl_reply_data			base;
	int					link;
	int					sqi;
	int					sqi_max;
	struct ethtool_link_ext_stats		link_stats;
	bool					link_ext_state_provided;
	struct ethtool_link_ext_state_info	ethtool_link_ext_state_info;
};

#define LINKSTATE_REPDATA(__reply_base) \
	container_of(__reply_base, struct linkstate_reply_data, base)

const struct nla_policy ethnl_linkstate_get_policy[] = {
	[ETHTOOL_A_LINKSTATE_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy_stats),
};

static int linkstate_get_sqi(struct net_device *dev)
{
	struct phy_device *phydev = dev->phydev;
	int ret;

	if (!phydev)
		return -EOPNOTSUPP;

	mutex_lock(&phydev->lock);
	if (!phydev->drv || !phydev->drv->get_sqi)
		ret = -EOPNOTSUPP;
	else if (!phydev->link)
		ret = -ENETDOWN;
	else
		ret = phydev->drv->get_sqi(phydev);
	mutex_unlock(&phydev->lock);

	return ret;
}

static int linkstate_get_sqi_max(struct net_device *dev)
{
	struct phy_device *phydev = dev->phydev;
	int ret;

	if (!phydev)
		return -EOPNOTSUPP;

	mutex_lock(&phydev->lock);
	if (!phydev->drv || !phydev->drv->get_sqi_max)
		ret = -EOPNOTSUPP;
	else if (!phydev->link)
		ret = -ENETDOWN;
	else
		ret = phydev->drv->get_sqi_max(phydev);
	mutex_unlock(&phydev->lock);

	return ret;
};

static bool linkstate_sqi_critical_error(int sqi)
{
	return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN;
}

static bool linkstate_sqi_valid(struct linkstate_reply_data *data)
{
	return data->sqi >= 0 && data->sqi_max >= 0 &&
	       data->sqi <= data->sqi_max;
}

static int linkstate_get_link_ext_state(struct net_device *dev,
					struct linkstate_reply_data *data)
{
	int err;

	if (!dev->ethtool_ops->get_link_ext_state)
		return -EOPNOTSUPP;

	err = dev->ethtool_ops->get_link_ext_state(dev,
						   &data->ethtool_link_ext_state_info);
	if (err)
		return err;

	data->link_ext_state_provided = true;

	return 0;
}

static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
				  struct ethnl_reply_data *reply_base,
				  const struct genl_info *info)
{
	struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;

	data->link = __ethtool_get_link(dev);

	ret = linkstate_get_sqi(dev);
	if (linkstate_sqi_critical_error(ret))
		goto out;
	data->sqi = ret;

	ret = linkstate_get_sqi_max(dev);
	if (linkstate_sqi_critical_error(ret))
		goto out;
	data->sqi_max = ret;

	if (dev->flags & IFF_UP) {
		ret = linkstate_get_link_ext_state(dev, data);
		if (ret < 0 && ret != -EOPNOTSUPP && ret != -ENODATA)
			goto out;
	}

	ethtool_stats_init((u64 *)&data->link_stats,
			   sizeof(data->link_stats) / 8);

	if (req_base->flags &
ETHTOOL_FLAG_STATS) { if (dev->phydev) data->link_stats.link_down_events = READ_ONCE(dev->phydev->link_down_events); if (dev->ethtool_ops->get_link_ext_stats) dev->ethtool_ops->get_link_ext_stats(dev, &data->link_stats); } ret = 0; out: ethnl_ops_complete(dev); return ret; } static int linkstate_reply_size(const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base); int len; len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */ + 0; if (linkstate_sqi_valid(data)) { len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */ len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */ } if (data->link_ext_state_provided) len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */ if (data->ethtool_link_ext_state_info.__link_ext_substate) len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_SUBSTATE */ if (data->link_stats.link_down_events != ETHTOOL_STAT_NOT_SET) len += nla_total_size(sizeof(u32)); return len; } static int linkstate_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base); if (data->link >= 0 && nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link)) return -EMSGSIZE; if (linkstate_sqi_valid(data)) { if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi)) return -EMSGSIZE; if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max)) return -EMSGSIZE; } if (data->link_ext_state_provided) { if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE, data->ethtool_link_ext_state_info.link_ext_state)) return -EMSGSIZE; if (data->ethtool_link_ext_state_info.__link_ext_substate && nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, data->ethtool_link_ext_state_info.__link_ext_substate)) return -EMSGSIZE; } if (data->link_stats.link_down_events != ETHTOOL_STAT_NOT_SET) if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT, data->link_stats.link_down_events)) return -EMSGSIZE; return 0; } const struct ethnl_request_ops ethnl_linkstate_request_ops = { .request_cmd = ETHTOOL_MSG_LINKSTATE_GET, .reply_cmd = ETHTOOL_MSG_LINKSTATE_GET_REPLY, .hdr_attr = ETHTOOL_A_LINKSTATE_HEADER, .req_info_size = sizeof(struct linkstate_req_info), .reply_data_size = sizeof(struct linkstate_reply_data), .prepare_data = linkstate_prepare_data, .reply_size = linkstate_reply_size, .fill_reply = linkstate_fill_reply, };
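linkstate_get_sqi() and linkstate_get_sqi_max() above only report values when the attached PHY driver implements the corresponding callbacks. A minimal sketch of such driver hooks, assuming a hypothetical vendor register layout (register address, PHY ID, and names are illustrative):

#include <linux/phy.h>

/* Hypothetical vendor-specific SQI register and value range. */
#define EXAMPLE_PHY_SQI_REG	0x1e
#define EXAMPLE_PHY_SQI_MAX	7

/* Value surfaced as ETHTOOL_A_LINKSTATE_SQI via linkstate_get_sqi(). */
static int example_phy_get_sqi(struct phy_device *phydev)
{
	int val = phy_read(phydev, EXAMPLE_PHY_SQI_REG);

	if (val < 0)
		return val;
	return val & EXAMPLE_PHY_SQI_MAX;
}

/* Value surfaced as ETHTOOL_A_LINKSTATE_SQI_MAX via linkstate_get_sqi_max(). */
static int example_phy_get_sqi_max(struct phy_device *phydev)
{
	return EXAMPLE_PHY_SQI_MAX;
}

static struct phy_driver example_phy_driver = {
	.phy_id		= 0x00112233,	/* hypothetical PHY ID */
	.phy_id_mask	= 0xffffffff,
	.name		= "example-sqi-phy",
	.get_sqi	= example_phy_get_sqi,
	.get_sqi_max	= example_phy_get_sqi_max,
	/* a real driver would also fill in .features, .config_aneg, ... */
};
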
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/part_stat.h>
#include
<linux/uaccess.h> #include <linux/stat.h> #include "../fs/internal.h" #include "blk.h" /* Should we allow writing to mounted block devices? */ static bool bdev_allow_write_mounted = IS_ENABLED(CONFIG_BLK_DEV_WRITE_MOUNTED); struct bdev_inode { struct block_device bdev; struct inode vfs_inode; }; static inline struct bdev_inode *BDEV_I(struct inode *inode) { return container_of(inode, struct bdev_inode, vfs_inode); } static inline struct inode *BD_INODE(struct block_device *bdev) { return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode; } struct block_device *I_BDEV(struct inode *inode) { return &BDEV_I(inode)->bdev; } EXPORT_SYMBOL(I_BDEV); struct block_device *file_bdev(struct file *bdev_file) { return I_BDEV(bdev_file->f_mapping->host); } EXPORT_SYMBOL(file_bdev); static void bdev_write_inode(struct block_device *bdev) { struct inode *inode = BD_INODE(bdev); int ret; spin_lock(&inode->i_lock); while (inode->i_state & I_DIRTY) { spin_unlock(&inode->i_lock); ret = write_inode_now(inode, true); if (ret) pr_warn_ratelimited( "VFS: Dirty inode writeback failed for block device %pg (err=%d).\n", bdev, ret); spin_lock(&inode->i_lock); } spin_unlock(&inode->i_lock); } /* Kill _all_ buffers and pagecache , dirty or not.. */ static void kill_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_mapping; if (mapping_empty(mapping)) return; invalidate_bh_lrus(); truncate_inode_pages(mapping, 0); } /* Invalidate clean unused buffers and pagecache. */ void invalidate_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_mapping; if (mapping->nrpages) { invalidate_bh_lrus(); lru_add_drain_all(); /* make sure all lru add caches are flushed */ invalidate_mapping_pages(mapping, 0, -1); } } EXPORT_SYMBOL(invalidate_bdev); /* * Drop all buffers & page cache for given bdev range. This function bails * with error if bdev has other exclusive owner (such as filesystem). */ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, loff_t lstart, loff_t lend) { /* * If we don't hold exclusive handle for the device, upgrade to it * while we discard the buffer cache to avoid discarding buffers * under live filesystem. */ if (!(mode & BLK_OPEN_EXCL)) { int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL); if (err) goto invalidate; } truncate_inode_pages_range(bdev->bd_mapping, lstart, lend); if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, truncate_bdev_range); return 0; invalidate: /* * Someone else has handle exclusively open. Try invalidating instead. * The 'end' argument is inclusive so the rounding is safe. 
*/ return invalidate_inode_pages2_range(bdev->bd_mapping, lstart >> PAGE_SHIFT, lend >> PAGE_SHIFT); } static void set_init_blocksize(struct block_device *bdev) { unsigned int bsize = bdev_logical_block_size(bdev); loff_t size = i_size_read(BD_INODE(bdev)); while (bsize < PAGE_SIZE) { if (size & bsize) break; bsize <<= 1; } BD_INODE(bdev)->i_blkbits = blksize_bits(bsize); } int set_blocksize(struct file *file, int size) { struct inode *inode = file->f_mapping->host; struct block_device *bdev = I_BDEV(inode); /* Size must be a power of two, and between 512 and PAGE_SIZE */ if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size)) return -EINVAL; /* Size cannot be smaller than the size supported by the device */ if (size < bdev_logical_block_size(bdev)) return -EINVAL; if (!file->private_data) return -EINVAL; /* Don't change the size if it is same as current */ if (inode->i_blkbits != blksize_bits(size)) { sync_blockdev(bdev); inode->i_blkbits = blksize_bits(size); kill_bdev(bdev); } return 0; } EXPORT_SYMBOL(set_blocksize); int sb_set_blocksize(struct super_block *sb, int size) { if (set_blocksize(sb->s_bdev_file, size)) return 0; /* If we get here, we know size is power of two * and it's value is between 512 and PAGE_SIZE */ sb->s_blocksize = size; sb->s_blocksize_bits = blksize_bits(size); return sb->s_blocksize; } EXPORT_SYMBOL(sb_set_blocksize); int sb_min_blocksize(struct super_block *sb, int size) { int minsize = bdev_logical_block_size(sb->s_bdev); if (size < minsize) size = minsize; return sb_set_blocksize(sb, size); } EXPORT_SYMBOL(sb_min_blocksize); int sync_blockdev_nowait(struct block_device *bdev) { if (!bdev) return 0; return filemap_flush(bdev->bd_mapping); } EXPORT_SYMBOL_GPL(sync_blockdev_nowait); /* * Write out and wait upon all the dirty data associated with a block * device via its mapping. Does not take the superblock lock. */ int sync_blockdev(struct block_device *bdev) { if (!bdev) return 0; return filemap_write_and_wait(bdev->bd_mapping); } EXPORT_SYMBOL(sync_blockdev); int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) { return filemap_write_and_wait_range(bdev->bd_mapping, lstart, lend); } EXPORT_SYMBOL(sync_blockdev_range); /** * bdev_freeze - lock a filesystem and force it into a consistent state * @bdev: blockdevice to lock * * If a superblock is found on this device, we take the s_umount semaphore * on it to make sure nobody unmounts until the snapshot creation is done. * The reference counter (bd_fsfreeze_count) guarantees that only the last * unfreeze process can unfreeze the frozen filesystem actually when multiple * freeze requests arrive simultaneously. It counts up in bdev_freeze() and * count down in bdev_thaw(). When it becomes 0, thaw_bdev() will unfreeze * actually. * * Return: On success zero is returned, negative error code on failure. 
*/ int bdev_freeze(struct block_device *bdev) { int error = 0; mutex_lock(&bdev->bd_fsfreeze_mutex); if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) { mutex_unlock(&bdev->bd_fsfreeze_mutex); return 0; } mutex_lock(&bdev->bd_holder_lock); if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) { error = bdev->bd_holder_ops->freeze(bdev); lockdep_assert_not_held(&bdev->bd_holder_lock); } else { mutex_unlock(&bdev->bd_holder_lock); error = sync_blockdev(bdev); } if (error) atomic_dec(&bdev->bd_fsfreeze_count); mutex_unlock(&bdev->bd_fsfreeze_mutex); return error; } EXPORT_SYMBOL(bdev_freeze); /** * bdev_thaw - unlock filesystem * @bdev: blockdevice to unlock * * Unlocks the filesystem and marks it writeable again after bdev_freeze(). * * Return: On success zero is returned, negative error code on failure. */ int bdev_thaw(struct block_device *bdev) { int error = -EINVAL, nr_freeze; mutex_lock(&bdev->bd_fsfreeze_mutex); /* * If this returns < 0 it means that @bd_fsfreeze_count was * already 0 and no decrement was performed. */ nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count); if (nr_freeze < 0) goto out; error = 0; if (nr_freeze > 0) goto out; mutex_lock(&bdev->bd_holder_lock); if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) { error = bdev->bd_holder_ops->thaw(bdev); lockdep_assert_not_held(&bdev->bd_holder_lock); } else { mutex_unlock(&bdev->bd_holder_lock); } if (error) atomic_inc(&bdev->bd_fsfreeze_count); out: mutex_unlock(&bdev->bd_fsfreeze_mutex); return error; } EXPORT_SYMBOL(bdev_thaw); /* * pseudo-fs */ static __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock); static struct kmem_cache *bdev_cachep __ro_after_init; static struct inode *bdev_alloc_inode(struct super_block *sb) { struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL); if (!ei) return NULL; memset(&ei->bdev, 0, sizeof(ei->bdev)); if (security_bdev_alloc(&ei->bdev)) { kmem_cache_free(bdev_cachep, ei); return NULL; } return &ei->vfs_inode; } static void bdev_free_inode(struct inode *inode) { struct block_device *bdev = I_BDEV(inode); free_percpu(bdev->bd_stats); kfree(bdev->bd_meta_info); security_bdev_free(bdev); if (!bdev_is_partition(bdev)) { if (bdev->bd_disk && bdev->bd_disk->bdi) bdi_put(bdev->bd_disk->bdi); kfree(bdev->bd_disk); } if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR) blk_free_ext_minor(MINOR(bdev->bd_dev)); kmem_cache_free(bdev_cachep, BDEV_I(inode)); } static void init_once(void *data) { struct bdev_inode *ei = data; inode_init_once(&ei->vfs_inode); } static void bdev_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); /* is it needed here? 
*/ clear_inode(inode); } static const struct super_operations bdev_sops = { .statfs = simple_statfs, .alloc_inode = bdev_alloc_inode, .free_inode = bdev_free_inode, .drop_inode = generic_delete_inode, .evict_inode = bdev_evict_inode, }; static int bd_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC); if (!ctx) return -ENOMEM; fc->s_iflags |= SB_I_CGROUPWB; ctx->ops = &bdev_sops; return 0; } static struct file_system_type bd_type = { .name = "bdev", .init_fs_context = bd_init_fs_context, .kill_sb = kill_anon_super, }; struct super_block *blockdev_superblock __ro_after_init; static struct vfsmount *blockdev_mnt __ro_after_init; EXPORT_SYMBOL_GPL(blockdev_superblock); void __init bdev_cache_init(void) { int err; bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_ACCOUNT|SLAB_PANIC), init_once); err = register_filesystem(&bd_type); if (err) panic("Cannot register bdev pseudo-fs"); blockdev_mnt = kern_mount(&bd_type); if (IS_ERR(blockdev_mnt)) panic("Cannot create bdev pseudo-fs"); blockdev_superblock = blockdev_mnt->mnt_sb; /* For writeback */ } struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) { struct block_device *bdev; struct inode *inode; inode = new_inode(blockdev_superblock); if (!inode) return NULL; inode->i_mode = S_IFBLK; inode->i_rdev = 0; inode->i_data.a_ops = &def_blk_aops; mapping_set_gfp_mask(&inode->i_data, GFP_USER); bdev = I_BDEV(inode); mutex_init(&bdev->bd_fsfreeze_mutex); spin_lock_init(&bdev->bd_size_lock); mutex_init(&bdev->bd_holder_lock); atomic_set(&bdev->__bd_flags, partno); bdev->bd_mapping = &inode->i_data; bdev->bd_queue = disk->queue; if (partno && bdev_test_flag(disk->part0, BD_HAS_SUBMIT_BIO)) bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO); bdev->bd_stats = alloc_percpu(struct disk_stats); if (!bdev->bd_stats) { iput(inode); return NULL; } bdev->bd_disk = disk; return bdev; } void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors) { spin_lock(&bdev->bd_size_lock); i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT); bdev->bd_nr_sectors = sectors; spin_unlock(&bdev->bd_size_lock); } void bdev_add(struct block_device *bdev, dev_t dev) { struct inode *inode = BD_INODE(bdev); if (bdev_stable_writes(bdev)) mapping_set_stable_writes(bdev->bd_mapping); bdev->bd_dev = dev; inode->i_rdev = dev; inode->i_ino = dev; insert_inode_hash(inode); } void bdev_unhash(struct block_device *bdev) { remove_inode_hash(BD_INODE(bdev)); } void bdev_drop(struct block_device *bdev) { iput(BD_INODE(bdev)); } long nr_blockdev_pages(void) { struct inode *inode; long ret = 0; spin_lock(&blockdev_superblock->s_inode_list_lock); list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) ret += inode->i_mapping->nrpages; spin_unlock(&blockdev_superblock->s_inode_list_lock); return ret; } /** * bd_may_claim - test whether a block device can be claimed * @bdev: block device of interest * @holder: holder trying to claim @bdev * @hops: holder ops * * Test whether @bdev can be claimed by @holder. * * RETURNS: * %true if @bdev can be claimed, %false otherwise. */ static bool bd_may_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops) { struct block_device *whole = bdev_whole(bdev); lockdep_assert_held(&bdev_lock); if (bdev->bd_holder) { /* * The same holder can always re-claim. 
*/ if (bdev->bd_holder == holder) { if (WARN_ON_ONCE(bdev->bd_holder_ops != hops)) return false; return true; } return false; } /* * If the whole devices holder is set to bd_may_claim, a partition on * the device is claimed, but not the whole device. */ if (whole != bdev && whole->bd_holder && whole->bd_holder != bd_may_claim) return false; return true; } /** * bd_prepare_to_claim - claim a block device * @bdev: block device of interest * @holder: holder trying to claim @bdev * @hops: holder ops. * * Claim @bdev. This function fails if @bdev is already claimed by another * holder and waits if another claiming is in progress. return, the caller * has ownership of bd_claiming and bd_holder[s]. * * RETURNS: * 0 if @bdev can be claimed, -EBUSY otherwise. */ int bd_prepare_to_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops) { struct block_device *whole = bdev_whole(bdev); if (WARN_ON_ONCE(!holder)) return -EINVAL; retry: mutex_lock(&bdev_lock); /* if someone else claimed, fail */ if (!bd_may_claim(bdev, holder, hops)) { mutex_unlock(&bdev_lock); return -EBUSY; } /* if claiming is already in progress, wait for it to finish */ if (whole->bd_claiming) { wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming); DEFINE_WAIT(wait); prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); mutex_unlock(&bdev_lock); schedule(); finish_wait(wq, &wait); goto retry; } /* yay, all mine */ whole->bd_claiming = holder; mutex_unlock(&bdev_lock); return 0; } EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */ static void bd_clear_claiming(struct block_device *whole, void *holder) { lockdep_assert_held(&bdev_lock); /* tell others that we're done */ BUG_ON(whole->bd_claiming != holder); whole->bd_claiming = NULL; wake_up_var(&whole->bd_claiming); } /** * bd_finish_claiming - finish claiming of a block device * @bdev: block device of interest * @holder: holder that has claimed @bdev * @hops: block device holder operations * * Finish exclusive open of a block device. Mark the device as exlusively * open by the holder and wake up all waiters for exclusive open to finish. */ static void bd_finish_claiming(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops) { struct block_device *whole = bdev_whole(bdev); mutex_lock(&bdev_lock); BUG_ON(!bd_may_claim(bdev, holder, hops)); /* * Note that for a whole device bd_holders will be incremented twice, * and bd_holder will be set to bd_may_claim before being set to holder */ whole->bd_holders++; whole->bd_holder = bd_may_claim; bdev->bd_holders++; mutex_lock(&bdev->bd_holder_lock); bdev->bd_holder = holder; bdev->bd_holder_ops = hops; mutex_unlock(&bdev->bd_holder_lock); bd_clear_claiming(whole, holder); mutex_unlock(&bdev_lock); } /** * bd_abort_claiming - abort claiming of a block device * @bdev: block device of interest * @holder: holder that has claimed @bdev * * Abort claiming of a block device when the exclusive open failed. This can be * also used when exclusive open is not actually desired and we just needed * to block other exclusive openers for a while. */ void bd_abort_claiming(struct block_device *bdev, void *holder) { mutex_lock(&bdev_lock); bd_clear_claiming(bdev_whole(bdev), holder); mutex_unlock(&bdev_lock); } EXPORT_SYMBOL(bd_abort_claiming); static void bd_end_claim(struct block_device *bdev, void *holder) { struct block_device *whole = bdev_whole(bdev); bool unblock = false; /* * Release a claim on the device. The holder fields are protected with * bdev_lock. 
open_mutex is used to synchronize disk_holder unlinking. */ mutex_lock(&bdev_lock); WARN_ON_ONCE(bdev->bd_holder != holder); WARN_ON_ONCE(--bdev->bd_holders < 0); WARN_ON_ONCE(--whole->bd_holders < 0); if (!bdev->bd_holders) { mutex_lock(&bdev->bd_holder_lock); bdev->bd_holder = NULL; bdev->bd_holder_ops = NULL; mutex_unlock(&bdev->bd_holder_lock); if (bdev_test_flag(bdev, BD_WRITE_HOLDER)) unblock = true; } if (!whole->bd_holders) whole->bd_holder = NULL; mutex_unlock(&bdev_lock); /* * If this was the last claim, remove holder link and unblock evpoll if * it was a write holder. */ if (unblock) { disk_unblock_events(bdev->bd_disk); bdev_clear_flag(bdev, BD_WRITE_HOLDER); } } static void blkdev_flush_mapping(struct block_device *bdev) { WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); kill_bdev(bdev); bdev_write_inode(bdev); } static void blkdev_put_whole(struct block_device *bdev) { if (atomic_dec_and_test(&bdev->bd_openers)) blkdev_flush_mapping(bdev); if (bdev->bd_disk->fops->release) bdev->bd_disk->fops->release(bdev->bd_disk); } static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode) { struct gendisk *disk = bdev->bd_disk; int ret; if (disk->fops->open) { ret = disk->fops->open(disk, mode); if (ret) { /* avoid ghost partitions on a removed medium */ if (ret == -ENOMEDIUM && test_bit(GD_NEED_PART_SCAN, &disk->state)) bdev_disk_changed(disk, true); return ret; } } if (!atomic_read(&bdev->bd_openers)) set_init_blocksize(bdev); atomic_inc(&bdev->bd_openers); if (test_bit(GD_NEED_PART_SCAN, &disk->state)) { /* * Only return scanning errors if we are called from contexts * that explicitly want them, e.g. the BLKRRPART ioctl. */ ret = bdev_disk_changed(disk, false); if (ret && (mode & BLK_OPEN_STRICT_SCAN)) { blkdev_put_whole(bdev); return ret; } } return 0; } static int blkdev_get_part(struct block_device *part, blk_mode_t mode) { struct gendisk *disk = part->bd_disk; int ret; ret = blkdev_get_whole(bdev_whole(part), mode); if (ret) return ret; ret = -ENXIO; if (!bdev_nr_sectors(part)) goto out_blkdev_put; if (!atomic_read(&part->bd_openers)) { disk->open_partitions++; set_init_blocksize(part); } atomic_inc(&part->bd_openers); return 0; out_blkdev_put: blkdev_put_whole(bdev_whole(part)); return ret; } int bdev_permission(dev_t dev, blk_mode_t mode, void *holder) { int ret; ret = devcgroup_check_permission(DEVCG_DEV_BLOCK, MAJOR(dev), MINOR(dev), ((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) | ((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0)); if (ret) return ret; /* Blocking writes requires exclusive opener */ if (mode & BLK_OPEN_RESTRICT_WRITES && !holder) return -EINVAL; /* * We're using error pointers to indicate to ->release() when we * failed to open that block device. Also this doesn't make sense. 
*/ if (WARN_ON_ONCE(IS_ERR(holder))) return -EINVAL; return 0; } static void blkdev_put_part(struct block_device *part) { struct block_device *whole = bdev_whole(part); if (atomic_dec_and_test(&part->bd_openers)) { blkdev_flush_mapping(part); whole->bd_disk->open_partitions--; } blkdev_put_whole(whole); } struct block_device *blkdev_get_no_open(dev_t dev) { struct block_device *bdev; struct inode *inode; inode = ilookup(blockdev_superblock, dev); if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) { blk_request_module(dev); inode = ilookup(blockdev_superblock, dev); if (inode) pr_warn_ratelimited( "block device autoloading is deprecated and will be removed.\n"); } if (!inode) return NULL; /* switch from the inode reference to a device mode one: */ bdev = &BDEV_I(inode)->bdev; if (!kobject_get_unless_zero(&bdev->bd_device.kobj)) bdev = NULL; iput(inode); return bdev; } void blkdev_put_no_open(struct block_device *bdev) { put_device(&bdev->bd_device); } static bool bdev_writes_blocked(struct block_device *bdev) { return bdev->bd_writers < 0; } static void bdev_block_writes(struct block_device *bdev) { bdev->bd_writers--; } static void bdev_unblock_writes(struct block_device *bdev) { bdev->bd_writers++; } static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode) { if (bdev_allow_write_mounted) return true; /* Writes blocked? */ if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev)) return false; if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0) return false; return true; } static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode) { if (bdev_allow_write_mounted) return; /* Claim exclusive or shared write access. */ if (mode & BLK_OPEN_RESTRICT_WRITES) bdev_block_writes(bdev); else if (mode & BLK_OPEN_WRITE) bdev->bd_writers++; } static inline bool bdev_unclaimed(const struct file *bdev_file) { return bdev_file->private_data == BDEV_I(bdev_file->f_mapping->host); } static void bdev_yield_write_access(struct file *bdev_file) { struct block_device *bdev; if (bdev_allow_write_mounted) return; if (bdev_unclaimed(bdev_file)) return; bdev = file_bdev(bdev_file); if (bdev_file->f_mode & FMODE_WRITE_RESTRICTED) bdev_unblock_writes(bdev); else if (bdev_file->f_mode & FMODE_WRITE) bdev->bd_writers--; } /** * bdev_open - open a block device * @bdev: block device to open * @mode: open mode (BLK_OPEN_*) * @holder: exclusive holder identifier * @hops: holder operations * @bdev_file: file for the block device * * Open the block device. If @holder is not %NULL, the block device is opened * with exclusive access. Exclusive opens may nest for the same @holder. * * CONTEXT: * Might sleep. * * RETURNS: * zero on success, -errno on failure. 
*/ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops, struct file *bdev_file) { bool unblock_events = true; struct gendisk *disk = bdev->bd_disk; int ret; if (holder) { mode |= BLK_OPEN_EXCL; ret = bd_prepare_to_claim(bdev, holder, hops); if (ret) return ret; } else { if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL)) return -EIO; } disk_block_events(disk); mutex_lock(&disk->open_mutex); ret = -ENXIO; if (!disk_live(disk)) goto abort_claiming; if (!try_module_get(disk->fops->owner)) goto abort_claiming; ret = -EBUSY; if (!bdev_may_open(bdev, mode)) goto put_module; if (bdev_is_partition(bdev)) ret = blkdev_get_part(bdev, mode); else ret = blkdev_get_whole(bdev, mode); if (ret) goto put_module; bdev_claim_write_access(bdev, mode); if (holder) { bd_finish_claiming(bdev, holder, hops); /* * Block event polling for write claims if requested. Any write * holder makes the write_holder state stick until all are * released. This is good enough and tracking individual * writeable reference is too fragile given the way @mode is * used in blkdev_get/put(). */ if ((mode & BLK_OPEN_WRITE) && !bdev_test_flag(bdev, BD_WRITE_HOLDER) && (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) { bdev_set_flag(bdev, BD_WRITE_HOLDER); unblock_events = false; } } mutex_unlock(&disk->open_mutex); if (unblock_events) disk_unblock_events(disk); bdev_file->f_flags |= O_LARGEFILE; bdev_file->f_mode |= FMODE_CAN_ODIRECT; if (bdev_nowait(bdev)) bdev_file->f_mode |= FMODE_NOWAIT; if (mode & BLK_OPEN_RESTRICT_WRITES) bdev_file->f_mode |= FMODE_WRITE_RESTRICTED; bdev_file->f_mapping = bdev->bd_mapping; bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping); bdev_file->private_data = holder; return 0; put_module: module_put(disk->fops->owner); abort_claiming: if (holder) bd_abort_claiming(bdev, holder); mutex_unlock(&disk->open_mutex); disk_unblock_events(disk); return ret; } /* * If BLK_OPEN_WRITE_IOCTL is set then this is a historical quirk * associated with the floppy driver where it has allowed ioctls if the * file was opened for writing, but does not allow reads or writes. * Make sure that this quirk is reflected in @f_flags. * * It can also happen if a block device is opened as O_RDWR | O_WRONLY. */ static unsigned blk_to_file_flags(blk_mode_t mode) { unsigned int flags = 0; if ((mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) == (BLK_OPEN_READ | BLK_OPEN_WRITE)) flags |= O_RDWR; else if (mode & BLK_OPEN_WRITE_IOCTL) flags |= O_RDWR | O_WRONLY; else if (mode & BLK_OPEN_WRITE) flags |= O_WRONLY; else if (mode & BLK_OPEN_READ) flags |= O_RDONLY; /* homeopathic, because O_RDONLY is 0 */ else WARN_ON_ONCE(true); if (mode & BLK_OPEN_NDELAY) flags |= O_NDELAY; return flags; } struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops) { struct file *bdev_file; struct block_device *bdev; unsigned int flags; int ret; ret = bdev_permission(dev, mode, holder); if (ret) return ERR_PTR(ret); bdev = blkdev_get_no_open(dev); if (!bdev) return ERR_PTR(-ENXIO); flags = blk_to_file_flags(mode); bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev), blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops); if (IS_ERR(bdev_file)) { blkdev_put_no_open(bdev); return bdev_file; } ihold(BD_INODE(bdev)); ret = bdev_open(bdev, mode, holder, hops, bdev_file); if (ret) { /* We failed to open the block device. Let ->release() know. 
*/ bdev_file->private_data = ERR_PTR(ret); fput(bdev_file); return ERR_PTR(ret); } return bdev_file; } EXPORT_SYMBOL(bdev_file_open_by_dev); struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops) { struct file *file; dev_t dev; int error; error = lookup_bdev(path, &dev); if (error) return ERR_PTR(error); file = bdev_file_open_by_dev(dev, mode, holder, hops); if (!IS_ERR(file) && (mode & BLK_OPEN_WRITE)) { if (bdev_read_only(file_bdev(file))) { fput(file); file = ERR_PTR(-EACCES); } } return file; } EXPORT_SYMBOL(bdev_file_open_by_path); static inline void bd_yield_claim(struct file *bdev_file) { struct block_device *bdev = file_bdev(bdev_file); void *holder = bdev_file->private_data; lockdep_assert_held(&bdev->bd_disk->open_mutex); if (WARN_ON_ONCE(IS_ERR_OR_NULL(holder))) return; if (!bdev_unclaimed(bdev_file)) bd_end_claim(bdev, holder); } void bdev_release(struct file *bdev_file) { struct block_device *bdev = file_bdev(bdev_file); void *holder = bdev_file->private_data; struct gendisk *disk = bdev->bd_disk; /* We failed to open that block device. */ if (IS_ERR(holder)) goto put_no_open; /* * Sync early if it looks like we're the last one. If someone else * opens the block device between now and the decrement of bd_openers * then we did a sync that we didn't need to, but that's not the end * of the world and we want to avoid long (could be several minute) * syncs while holding the mutex. */ if (atomic_read(&bdev->bd_openers) == 1) sync_blockdev(bdev); mutex_lock(&disk->open_mutex); bdev_yield_write_access(bdev_file); if (holder) bd_yield_claim(bdev_file); /* * Trigger event checking and tell drivers to flush MEDIA_CHANGE * event. This is to ensure detection of media removal commanded * from userland - e.g. eject(1). */ disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE); if (bdev_is_partition(bdev)) blkdev_put_part(bdev); else blkdev_put_whole(bdev); mutex_unlock(&disk->open_mutex); module_put(disk->fops->owner); put_no_open: blkdev_put_no_open(bdev); } /** * bdev_fput - yield claim to the block device and put the file * @bdev_file: open block device * * Yield claim on the block device and put the file. Ensure that the * block device can be reclaimed before the file is closed which is a * deferred operation. */ void bdev_fput(struct file *bdev_file) { if (WARN_ON_ONCE(bdev_file->f_op != &def_blk_fops)) return; if (bdev_file->private_data) { struct block_device *bdev = file_bdev(bdev_file); struct gendisk *disk = bdev->bd_disk; mutex_lock(&disk->open_mutex); bdev_yield_write_access(bdev_file); bd_yield_claim(bdev_file); /* * Tell release we already gave up our hold on the * device and if write restrictions are available that * we already gave up write access to the device. */ bdev_file->private_data = BDEV_I(bdev_file->f_mapping->host); mutex_unlock(&disk->open_mutex); } fput(bdev_file); } EXPORT_SYMBOL(bdev_fput); /** * lookup_bdev() - Look up a struct block_device by name. * @pathname: Name of the block device in the filesystem. * @dev: Pointer to the block device's dev_t, if found. * * Lookup the block device's dev_t at @pathname in the current * namespace if possible and return it in @dev. * * Context: May sleep. * Return: 0 if succeeded, negative errno otherwise. 
*/ int lookup_bdev(const char *pathname, dev_t *dev) { struct inode *inode; struct path path; int error; if (!pathname || !*pathname) return -EINVAL; error = kern_path(pathname, LOOKUP_FOLLOW, &path); if (error) return error; inode = d_backing_inode(path.dentry); error = -ENOTBLK; if (!S_ISBLK(inode->i_mode)) goto out_path_put; error = -EACCES; if (!may_open_dev(&path)) goto out_path_put; *dev = inode->i_rdev; error = 0; out_path_put: path_put(&path); return error; } EXPORT_SYMBOL(lookup_bdev); /** * bdev_mark_dead - mark a block device as dead * @bdev: block device to operate on * @surprise: indicate a surprise removal * * Tell the file system that this devices or media is dead. If @surprise is set * to %true the device or media is already gone, if not we are preparing for an * orderly removal. * * This calls into the file system, which then typicall syncs out all dirty data * and writes back inodes and then invalidates any cached data in the inodes on * the file system. In addition we also invalidate the block device mapping. */ void bdev_mark_dead(struct block_device *bdev, bool surprise) { mutex_lock(&bdev->bd_holder_lock); if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead) bdev->bd_holder_ops->mark_dead(bdev, surprise); else { mutex_unlock(&bdev->bd_holder_lock); sync_blockdev(bdev); } invalidate_bdev(bdev); } /* * New drivers should not use this directly. There are some drivers however * that needs this for historical reasons. For example, the DASD driver has * historically had a shutdown to offline mode that doesn't actually remove the * gendisk that otherwise looks a lot like a safe device removal. */ EXPORT_SYMBOL_GPL(bdev_mark_dead); void sync_bdevs(bool wait) { struct inode *inode, *old_inode = NULL; spin_lock(&blockdev_superblock->s_inode_list_lock); list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { struct address_space *mapping = inode->i_mapping; struct block_device *bdev; spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || mapping->nrpages == 0) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&blockdev_superblock->s_inode_list_lock); /* * We hold a reference to 'inode' so it couldn't have been * removed from s_inodes list while we dropped the * s_inode_list_lock We cannot iput the inode now as we can * be holding the last reference and we cannot iput it under * s_inode_list_lock. So we keep the reference and iput it * later. */ iput(old_inode); old_inode = inode; bdev = I_BDEV(inode); mutex_lock(&bdev->bd_disk->open_mutex); if (!atomic_read(&bdev->bd_openers)) { ; /* skip */ } else if (wait) { /* * We keep the error status of individual mapping so * that applications can catch the writeback error using * fsync(2). See filemap_fdatawait_keep_errors() for * details. */ filemap_fdatawait_keep_errors(inode->i_mapping); } else { filemap_fdatawrite(inode->i_mapping); } mutex_unlock(&bdev->bd_disk->open_mutex); spin_lock(&blockdev_superblock->s_inode_list_lock); } spin_unlock(&blockdev_superblock->s_inode_list_lock); iput(old_inode); } /* * Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices. */ void bdev_statx(struct path *path, struct kstat *stat, u32 request_mask) { struct inode *backing_inode; struct block_device *bdev; if (!(request_mask & (STATX_DIOALIGN | STATX_WRITE_ATOMIC))) return; backing_inode = d_backing_inode(path->dentry); /* * Note that backing_inode is the inode of a block device node file, * not the block device's internal inode. 
Therefore it is *not* valid * to use I_BDEV() here; the block device has to be looked up by i_rdev * instead. */ bdev = blkdev_get_no_open(backing_inode->i_rdev); if (!bdev) return; if (request_mask & STATX_DIOALIGN) { stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; stat->dio_offset_align = bdev_logical_block_size(bdev); stat->result_mask |= STATX_DIOALIGN; } if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) { struct request_queue *bd_queue = bdev->bd_queue; generic_fill_statx_atomic_writes(stat, queue_atomic_write_unit_min_bytes(bd_queue), queue_atomic_write_unit_max_bytes(bd_queue)); } blkdev_put_no_open(bdev); } bool disk_live(struct gendisk *disk) { return !inode_unhashed(BD_INODE(disk->part0)); } EXPORT_SYMBOL_GPL(disk_live); unsigned int block_size(struct block_device *bdev) { return 1 << BD_INODE(bdev)->i_blkbits; } EXPORT_SYMBOL_GPL(block_size); static int __init setup_bdev_allow_write_mounted(char *str) { if (kstrtobool(str, &bdev_allow_write_mounted)) pr_warn("Invalid option string for bdev_allow_write_mounted:" " '%s'\n", str); return 1; } __setup("bdev_allow_write_mounted=", setup_bdev_allow_write_mounted);
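Most users reach this code through the file-based open API (bdev_file_open_by_path() and friends). A minimal sketch of an exclusive open/release cycle, assuming a hypothetical caller-supplied holder and with error handling trimmed to the essentials:

#include <linux/blkdev.h>
#include <linux/err.h>

/*
 * Illustrative only: open a block device exclusively, query its size,
 * then release it.  A real caller keeps bdev_file around for as long
 * as it uses the device and usually passes blk_holder_ops.
 */
static int example_use_bdev(const char *path, void *holder)
{
	struct file *bdev_file;
	struct block_device *bdev;

	/* A non-NULL @holder makes this an exclusive (claimed) open. */
	bdev_file = bdev_file_open_by_path(path,
					   BLK_OPEN_READ | BLK_OPEN_WRITE,
					   holder, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	bdev = file_bdev(bdev_file);
	pr_info("opened %pg, %llu sectors\n", bdev,
		(unsigned long long)bdev_nr_sectors(bdev));

	/* ... submit I/O against @bdev ... */

	/* Yields the claim and puts the file (see bdev_fput() above). */
	bdev_fput(bdev_file);
	return 0;
}
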
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 2003 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Sridhar Samudrala <sri@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/export.h>
#include <net/sctp/sctp.h>
#include <net/ip.h> /* for snmp_fold_field */

static const struct snmp_mib sctp_snmp_list[] = {
	SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
	SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
	SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
	SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS),
	SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS),
	SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES),
	SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS),
	SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS),
	SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS),
	SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS),
	SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS),
	SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS),
	SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS),
	SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS),
	SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
	SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
	SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
	SNMP_MIB_ITEM("SctpT1InitExpireds", SCTP_MIB_T1_INIT_EXPIREDS),
	SNMP_MIB_ITEM("SctpT1CookieExpireds", SCTP_MIB_T1_COOKIE_EXPIREDS),
	SNMP_MIB_ITEM("SctpT2ShutdownExpireds", SCTP_MIB_T2_SHUTDOWN_EXPIREDS),
	SNMP_MIB_ITEM("SctpT3RtxExpireds", SCTP_MIB_T3_RTX_EXPIREDS),
	SNMP_MIB_ITEM("SctpT4RtoExpireds", SCTP_MIB_T4_RTO_EXPIREDS),
	SNMP_MIB_ITEM("SctpT5ShutdownGuardExpireds",
		      SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS),
	SNMP_MIB_ITEM("SctpDelaySackExpireds",
SCTP_MIB_DELAY_SACK_EXPIREDS), SNMP_MIB_ITEM("SctpAutocloseExpireds", SCTP_MIB_AUTOCLOSE_EXPIREDS), SNMP_MIB_ITEM("SctpT3Retransmits", SCTP_MIB_T3_RETRANSMITS), SNMP_MIB_ITEM("SctpPmtudRetransmits", SCTP_MIB_PMTUD_RETRANSMITS), SNMP_MIB_ITEM("SctpFastRetransmits", SCTP_MIB_FAST_RETRANSMITS), SNMP_MIB_ITEM("SctpInPktSoftirq", SCTP_MIB_IN_PKT_SOFTIRQ), SNMP_MIB_ITEM("SctpInPktBacklog", SCTP_MIB_IN_PKT_BACKLOG), SNMP_MIB_ITEM("SctpInPktDiscards", SCTP_MIB_IN_PKT_DISCARDS), SNMP_MIB_ITEM("SctpInDataChunkDiscards", SCTP_MIB_IN_DATA_CHUNK_DISCARDS), SNMP_MIB_SENTINEL }; /* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */ static int sctp_snmp_seq_show(struct seq_file *seq, void *v) { unsigned long buff[SCTP_MIB_MAX]; struct net *net = seq->private; int i; memset(buff, 0, sizeof(unsigned long) * SCTP_MIB_MAX); snmp_get_cpu_field_batch(buff, sctp_snmp_list, net->sctp.sctp_statistics); for (i = 0; sctp_snmp_list[i].name; i++) seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, buff[i]); return 0; } /* Dump local addresses of an association/endpoint. */ static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb) { struct sctp_association *asoc; struct sctp_sockaddr_entry *laddr; struct sctp_transport *peer; union sctp_addr *addr, *primary = NULL; struct sctp_af *af; if (epb->type == SCTP_EP_TYPE_ASSOCIATION) { asoc = sctp_assoc(epb); peer = asoc->peer.primary_path; if (unlikely(peer == NULL)) { WARN(1, "Association %p with NULL primary path!\n", asoc); return; } primary = &peer->saddr; } rcu_read_lock(); list_for_each_entry_rcu(laddr, &epb->bind_addr.address_list, list) { if (!laddr->valid) continue; addr = &laddr->a; af = sctp_get_af_specific(addr->sa.sa_family); if (primary && af->cmp_addr(addr, primary)) { seq_printf(seq, "*"); } af->seq_dump_addr(seq, addr); } rcu_read_unlock(); } /* Dump remote addresses of an association. */ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_association *assoc) { struct sctp_transport *transport; union sctp_addr *addr, *primary; struct sctp_af *af; primary = &assoc->peer.primary_addr; list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list, transports) { addr = &transport->ipaddr; af = sctp_get_af_specific(addr->sa.sa_family); if (af->cmp_addr(addr, primary)) { seq_printf(seq, "*"); } af->seq_dump_addr(seq, addr); } } static void *sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos >= sctp_ep_hashsize) return NULL; if (*pos < 0) *pos = 0; if (*pos == 0) seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n"); return (void *)pos; } static void sctp_eps_seq_stop(struct seq_file *seq, void *v) { } static void *sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos) { if (++*pos >= sctp_ep_hashsize) return NULL; return pos; } /* Display sctp endpoints (/proc/net/sctp/eps). 
*/ static int sctp_eps_seq_show(struct seq_file *seq, void *v) { struct sctp_hashbucket *head; struct sctp_endpoint *ep; struct sock *sk; int hash = *(loff_t *)v; if (hash >= sctp_ep_hashsize) return -ENOMEM; head = &sctp_ep_hashtable[hash]; read_lock_bh(&head->lock); sctp_for_each_hentry(ep, &head->chain) { sk = ep->base.sk; if (!net_eq(sock_net(sk), seq_file_net(seq))) continue; seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk, sctp_sk(sk)->type, sk->sk_state, hash, ep->base.bind_addr.port, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), sock_i_ino(sk)); sctp_seq_dump_local_addrs(seq, &ep->base); seq_printf(seq, "\n"); } read_unlock_bh(&head->lock); return 0; } static const struct seq_operations sctp_eps_ops = { .start = sctp_eps_seq_start, .next = sctp_eps_seq_next, .stop = sctp_eps_seq_stop, .show = sctp_eps_seq_show, }; struct sctp_ht_iter { struct seq_net_private p; struct rhashtable_iter hti; }; static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) { struct sctp_ht_iter *iter = seq->private; sctp_transport_walk_start(&iter->hti); return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); } static void sctp_transport_seq_stop(struct seq_file *seq, void *v) { struct sctp_ht_iter *iter = seq->private; if (v && v != SEQ_START_TOKEN) { struct sctp_transport *transport = v; sctp_transport_put(transport); } sctp_transport_walk_stop(&iter->hti); } static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sctp_ht_iter *iter = seq->private; if (v && v != SEQ_START_TOKEN) { struct sctp_transport *transport = v; sctp_transport_put(transport); } ++*pos; return sctp_transport_get_next(seq_file_net(seq), &iter->hti); } /* Display sctp associations (/proc/net/sctp/assocs). */ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) { struct sctp_transport *transport; struct sctp_association *assoc; struct sctp_ep_common *epb; struct sock *sk; if (v == SEQ_START_TOKEN) { seq_printf(seq, " ASSOC SOCK STY SST ST HBKT " "ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " "RPORT LADDRS <-> RADDRS " "HBINT INS OUTS MAXRT T1X T2X RTXC " "wmema wmemq sndbuf rcvbuf\n"); return 0; } transport = (struct sctp_transport *)v; assoc = transport->asoc; epb = &assoc->base; sk = epb->sk; seq_printf(seq, "%8pK %8pK %-3d %-3d %-2d %-4d " "%4d %8d %8d %7u %5lu %-5d %5d ", assoc, sk, sctp_sk(sk)->type, sk->sk_state, assoc->state, 0, assoc->assoc_id, assoc->sndbuf_used, atomic_read(&assoc->rmem_alloc), from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), sock_i_ino(sk), epb->bind_addr.port, assoc->peer.port); seq_printf(seq, " "); sctp_seq_dump_local_addrs(seq, epb); seq_printf(seq, "<-> "); sctp_seq_dump_remote_addrs(seq, assoc); seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d " "%8d %8d %8d %8d", assoc->hbinterval, assoc->stream.incnt, assoc->stream.outcnt, assoc->max_retrans, assoc->init_retries, assoc->shutdown_retries, assoc->rtx_data_chunks, refcount_read(&sk->sk_wmem_alloc), READ_ONCE(sk->sk_wmem_queued), sk->sk_sndbuf, sk->sk_rcvbuf); seq_printf(seq, "\n"); return 0; } static const struct seq_operations sctp_assoc_ops = { .start = sctp_transport_seq_start, .next = sctp_transport_seq_next, .stop = sctp_transport_seq_stop, .show = sctp_assocs_seq_show, }; static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) { struct sctp_association *assoc; struct sctp_transport *transport, *tsp; if (v == SEQ_START_TOKEN) { seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX " "REM_ADDR_RTX START STATE\n"); return 0; } transport = (struct 
sctp_transport *)v; assoc = transport->asoc; list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, transports) { /* * The remote address (ADDR) */ tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr); seq_printf(seq, " "); /* * The association ID (ASSOC_ID) */ seq_printf(seq, "%d ", tsp->asoc->assoc_id); /* * If the Heartbeat is active (HB_ACT) * Note: 1 = Active, 0 = Inactive */ seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer)); /* * Retransmit time out (RTO) */ seq_printf(seq, "%lu ", tsp->rto); /* * Maximum path retransmit count (PATH_MAX_RTX) */ seq_printf(seq, "%d ", tsp->pathmaxrxt); /* * remote address retransmit count (REM_ADDR_RTX) * Note: We don't have a way to tally this at the moment * so lets just leave it as zero for the moment */ seq_puts(seq, "0 "); /* * remote address start time (START). This is also not * currently implemented, but we can record it with a * jiffies marker in a subsequent patch */ seq_puts(seq, "0 "); /* * The current state of this destination. I.e. * SCTP_ACTIVE, SCTP_INACTIVE, ... */ seq_printf(seq, "%d", tsp->state); seq_printf(seq, "\n"); } return 0; } static const struct seq_operations sctp_remaddr_ops = { .start = sctp_transport_seq_start, .next = sctp_transport_seq_next, .stop = sctp_transport_seq_stop, .show = sctp_remaddr_seq_show, }; /* Set up the proc fs entry for the SCTP protocol. */ int __net_init sctp_proc_init(struct net *net) { net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net); if (!net->sctp.proc_net_sctp) return -ENOMEM; if (!proc_create_net_single("snmp", 0444, net->sctp.proc_net_sctp, sctp_snmp_seq_show, NULL)) goto cleanup; if (!proc_create_net("eps", 0444, net->sctp.proc_net_sctp, &sctp_eps_ops, sizeof(struct seq_net_private))) goto cleanup; if (!proc_create_net("assocs", 0444, net->sctp.proc_net_sctp, &sctp_assoc_ops, sizeof(struct sctp_ht_iter))) goto cleanup; if (!proc_create_net("remaddr", 0444, net->sctp.proc_net_sctp, &sctp_remaddr_ops, sizeof(struct sctp_ht_iter))) goto cleanup; return 0; cleanup: remove_proc_subtree("sctp", net->proc_net); net->sctp.proc_net_sctp = NULL; return -ENOMEM; }
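sctp_proc_init() is written as an __net_init hook, so it is expected to run once per network namespace and to be undone when the namespace goes away. A simplified sketch of that pairing, assuming hypothetical pernet_operations (the real SCTP code registers its own, larger pernet ops):

#include <linux/proc_fs.h>
#include <net/net_namespace.h>
#include <net/sctp/sctp.h>

static int __net_init example_sctp_net_init(struct net *net)
{
	/* Creates /proc/net/sctp/{snmp,eps,assocs,remaddr} for @net. */
	return sctp_proc_init(net);
}

static void __net_exit example_sctp_net_exit(struct net *net)
{
	/* Mirrors the cleanup path used inside sctp_proc_init() above. */
	remove_proc_subtree("sctp", net->proc_net);
	net->sctp.proc_net_sctp = NULL;
}

static struct pernet_operations example_sctp_net_ops = {
	.init = example_sctp_net_init,
	.exit = example_sctp_net_exit,
};

/* register_pernet_subsys(&example_sctp_net_ops) would wire this up. */
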
// SPDX-License-Identifier: GPL-2.0-only /* * Line 6 Linux USB driver * * Copyright (C) 2004-2010 Markus Grabner (line6@grabner-graz.at) */ #include <linux/slab.h> #include <linux/export.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "capture.h" #include "driver.h" #include "playback.h" /* impulse response volume controls */ static int snd_line6_impulse_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 255; return 0; } static int snd_line6_impulse_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = line6pcm->impulse_volume; return 0; } static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); int value = ucontrol->value.integer.value[0]; int err; if (line6pcm->impulse_volume == value) return 0; line6pcm->impulse_volume = value;
if (value > 0) { err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE, true); if (err < 0) { line6pcm->impulse_volume = 0; return err; } } else { line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE); } return 1; } /* impulse response period controls */ static int snd_line6_impulse_period_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 2000; return 0; } static int snd_line6_impulse_period_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = line6pcm->impulse_period; return 0; } static int snd_line6_impulse_period_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); int value = ucontrol->value.integer.value[0]; if (line6pcm->impulse_period == value) return 0; line6pcm->impulse_period = value; return 1; } /* Unlink all currently active URBs. */ static void line6_unlink_audio_urbs(struct snd_line6_pcm *line6pcm, struct line6_pcm_stream *pcms) { int i; for (i = 0; i < line6pcm->line6->iso_buffers; i++) { if (test_bit(i, &pcms->active_urbs)) { if (!test_and_set_bit(i, &pcms->unlink_urbs)) usb_unlink_urb(pcms->urbs[i]); } } } /* Wait until unlinking of all currently active URBs has been finished. */ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm, struct line6_pcm_stream *pcms) { int timeout = HZ; int i; int alive; do { alive = 0; for (i = 0; i < line6pcm->line6->iso_buffers; i++) { if (test_bit(i, &pcms->active_urbs)) alive++; } if (!alive) break; set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(1); } while (--timeout > 0); if (alive) dev_err(line6pcm->line6->ifcdev, "timeout: still %d active urbs..\n", alive); } static inline struct line6_pcm_stream * get_stream(struct snd_line6_pcm *line6pcm, int direction) { return (direction == SNDRV_PCM_STREAM_PLAYBACK) ? &line6pcm->out : &line6pcm->in; } /* allocate a buffer if not opened yet; * call this in line6pcm.state_mutex */ static int line6_buffer_acquire(struct snd_line6_pcm *line6pcm, struct line6_pcm_stream *pstr, int direction, int type) { const int pkt_size = (direction == SNDRV_PCM_STREAM_PLAYBACK) ? 
line6pcm->max_packet_size_out : line6pcm->max_packet_size_in; /* Invoked multiple times in a row so allocate once only */ if (!test_and_set_bit(type, &pstr->opened) && !pstr->buffer) { pstr->buffer = kmalloc(array3_size(line6pcm->line6->iso_buffers, LINE6_ISO_PACKETS, pkt_size), GFP_KERNEL); if (!pstr->buffer) return -ENOMEM; } return 0; } /* free a buffer if all streams are closed; * call this in line6pcm.state_mutex */ static void line6_buffer_release(struct snd_line6_pcm *line6pcm, struct line6_pcm_stream *pstr, int type) { clear_bit(type, &pstr->opened); if (!pstr->opened) { line6_wait_clear_audio_urbs(line6pcm, pstr); kfree(pstr->buffer); pstr->buffer = NULL; } } /* start a PCM stream */ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction, int type) { unsigned long flags; struct line6_pcm_stream *pstr = get_stream(line6pcm, direction); int ret = 0; spin_lock_irqsave(&pstr->lock, flags); if (!test_and_set_bit(type, &pstr->running) && !(pstr->active_urbs || pstr->unlink_urbs)) { pstr->count = 0; /* Submit all currently available URBs */ if (direction == SNDRV_PCM_STREAM_PLAYBACK) ret = line6_submit_audio_out_all_urbs(line6pcm); else ret = line6_submit_audio_in_all_urbs(line6pcm); } if (ret < 0) clear_bit(type, &pstr->running); spin_unlock_irqrestore(&pstr->lock, flags); return ret; } /* stop a PCM stream; this doesn't sync with the unlinked URBs */ static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction, int type) { unsigned long flags; struct line6_pcm_stream *pstr = get_stream(line6pcm, direction); spin_lock_irqsave(&pstr->lock, flags); clear_bit(type, &pstr->running); if (!pstr->running) { spin_unlock_irqrestore(&pstr->lock, flags); line6_unlink_audio_urbs(line6pcm, pstr); spin_lock_irqsave(&pstr->lock, flags); if (direction == SNDRV_PCM_STREAM_CAPTURE) { line6pcm->prev_fbuf = NULL; line6pcm->prev_fsize = 0; } } spin_unlock_irqrestore(&pstr->lock, flags); } /* common PCM trigger callback */ int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); struct snd_pcm_substream *s; int err; clear_bit(LINE6_FLAG_PREPARED, &line6pcm->flags); snd_pcm_group_for_each_entry(s, substream) { if (s->pcm->card != substream->pcm->card) continue; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: if (s->stream == SNDRV_PCM_STREAM_CAPTURE && (line6pcm->line6->properties->capabilities & LINE6_CAP_IN_NEEDS_OUT)) { err = line6_stream_start(line6pcm, SNDRV_PCM_STREAM_PLAYBACK, LINE6_STREAM_CAPTURE_HELPER); if (err < 0) return err; } err = line6_stream_start(line6pcm, s->stream, LINE6_STREAM_PCM); if (err < 0) return err; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: if (s->stream == SNDRV_PCM_STREAM_CAPTURE && (line6pcm->line6->properties->capabilities & LINE6_CAP_IN_NEEDS_OUT)) { line6_stream_stop(line6pcm, SNDRV_PCM_STREAM_PLAYBACK, LINE6_STREAM_CAPTURE_HELPER); } line6_stream_stop(line6pcm, s->stream, LINE6_STREAM_PCM); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (s->stream != SNDRV_PCM_STREAM_PLAYBACK) return -EINVAL; set_bit(LINE6_FLAG_PAUSE_PLAYBACK, &line6pcm->flags); break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (s->stream != SNDRV_PCM_STREAM_PLAYBACK) return -EINVAL; clear_bit(LINE6_FLAG_PAUSE_PLAYBACK, &line6pcm->flags); break; default: return -EINVAL; } } return 0; } /* common PCM pointer callback */ snd_pcm_uframes_t snd_line6_pointer(struct snd_pcm_substream *substream) { struct snd_line6_pcm *line6pcm = 
snd_pcm_substream_chip(substream); struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream); return pstr->pos_done; } /* Acquire and optionally start duplex streams: * type is either LINE6_STREAM_IMPULSE or LINE6_STREAM_MONITOR */ int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int type, bool start) { struct line6_pcm_stream *pstr; int ret = 0, dir; /* TODO: We should assert SNDRV_PCM_STREAM_PLAYBACK/CAPTURE == 0/1 */ mutex_lock(&line6pcm->state_mutex); for (dir = 0; dir < 2; dir++) { pstr = get_stream(line6pcm, dir); ret = line6_buffer_acquire(line6pcm, pstr, dir, type); if (ret < 0) goto error; if (!pstr->running) line6_wait_clear_audio_urbs(line6pcm, pstr); } if (start) { for (dir = 0; dir < 2; dir++) { ret = line6_stream_start(line6pcm, dir, type); if (ret < 0) goto error; } } error: mutex_unlock(&line6pcm->state_mutex); if (ret < 0) line6_pcm_release(line6pcm, type); return ret; } EXPORT_SYMBOL_GPL(line6_pcm_acquire); /* Stop and release duplex streams */ void line6_pcm_release(struct snd_line6_pcm *line6pcm, int type) { struct line6_pcm_stream *pstr; int dir; mutex_lock(&line6pcm->state_mutex); for (dir = 0; dir < 2; dir++) line6_stream_stop(line6pcm, dir, type); for (dir = 0; dir < 2; dir++) { pstr = get_stream(line6pcm, dir); line6_buffer_release(line6pcm, pstr, type); } mutex_unlock(&line6pcm->state_mutex); } EXPORT_SYMBOL_GPL(line6_pcm_release); /* common PCM hw_params callback */ int snd_line6_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int ret; struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream); mutex_lock(&line6pcm->state_mutex); ret = line6_buffer_acquire(line6pcm, pstr, substream->stream, LINE6_STREAM_PCM); if (ret < 0) goto error; pstr->period = params_period_bytes(hw_params); error: mutex_unlock(&line6pcm->state_mutex); return ret; } /* common PCM hw_free callback */ int snd_line6_hw_free(struct snd_pcm_substream *substream) { struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream); mutex_lock(&line6pcm->state_mutex); line6_buffer_release(line6pcm, pstr, LINE6_STREAM_PCM); mutex_unlock(&line6pcm->state_mutex); return 0; } /* control info callback */ static int snd_line6_control_playback_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 256; return 0; } /* control get callback */ static int snd_line6_control_playback_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int i; struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); for (i = 0; i < 2; i++) ucontrol->value.integer.value[i] = line6pcm->volume_playback[i]; return 0; } /* control put callback */ static int snd_line6_control_playback_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int i, changed = 0; struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); for (i = 0; i < 2; i++) if (line6pcm->volume_playback[i] != ucontrol->value.integer.value[i]) { line6pcm->volume_playback[i] = ucontrol->value.integer.value[i]; changed = 1; } return changed; } /* control definition */ static const struct snd_kcontrol_new line6_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Playback Volume", .info = snd_line6_control_playback_info, .get = snd_line6_control_playback_get, .put = 
snd_line6_control_playback_put }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Impulse Response Volume", .info = snd_line6_impulse_volume_info, .get = snd_line6_impulse_volume_get, .put = snd_line6_impulse_volume_put }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Impulse Response Period", .info = snd_line6_impulse_period_info, .get = snd_line6_impulse_period_get, .put = snd_line6_impulse_period_put }, }; /* Cleanup the PCM device. */ static void cleanup_urbs(struct line6_pcm_stream *pcms, int iso_buffers) { int i; /* Most likely impossible in current code... */ if (pcms->urbs == NULL) return; for (i = 0; i < iso_buffers; i++) { if (pcms->urbs[i]) { usb_kill_urb(pcms->urbs[i]); usb_free_urb(pcms->urbs[i]); } } kfree(pcms->urbs); pcms->urbs = NULL; } static void line6_cleanup_pcm(struct snd_pcm *pcm) { struct snd_line6_pcm *line6pcm = snd_pcm_chip(pcm); cleanup_urbs(&line6pcm->out, line6pcm->line6->iso_buffers); cleanup_urbs(&line6pcm->in, line6pcm->line6->iso_buffers); kfree(line6pcm); } /* create a PCM device */ static int snd_line6_new_pcm(struct usb_line6 *line6, struct snd_pcm **pcm_ret) { struct snd_pcm *pcm; int err; err = snd_pcm_new(line6->card, (char *)line6->properties->name, 0, 1, 1, pcm_ret); if (err < 0) return err; pcm = *pcm_ret; strcpy(pcm->name, line6->properties->name); /* set operators */ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_line6_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_line6_capture_ops); /* pre-allocation of buffers */ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, NULL, 64 * 1024, 128 * 1024); return 0; } /* Sync with PCM stream stops. */ void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm) { line6_unlink_audio_urbs(line6pcm, &line6pcm->out); line6_unlink_audio_urbs(line6pcm, &line6pcm->in); line6_wait_clear_audio_urbs(line6pcm, &line6pcm->out); line6_wait_clear_audio_urbs(line6pcm, &line6pcm->in); } /* Create and register the PCM device and mixer entries. Create URBs for playback and capture. 
*/ int line6_init_pcm(struct usb_line6 *line6, struct line6_pcm_properties *properties) { int i, err; unsigned ep_read = line6->properties->ep_audio_r; unsigned ep_write = line6->properties->ep_audio_w; struct snd_pcm *pcm; struct snd_line6_pcm *line6pcm; if (!(line6->properties->capabilities & LINE6_CAP_PCM)) return 0; /* skip PCM initialization and report success */ err = snd_line6_new_pcm(line6, &pcm); if (err < 0) return err; line6pcm = kzalloc(sizeof(*line6pcm), GFP_KERNEL); if (!line6pcm) return -ENOMEM; mutex_init(&line6pcm->state_mutex); line6pcm->pcm = pcm; line6pcm->properties = properties; line6pcm->volume_playback[0] = line6pcm->volume_playback[1] = 255; line6pcm->volume_monitor = 255; line6pcm->line6 = line6; spin_lock_init(&line6pcm->out.lock); spin_lock_init(&line6pcm->in.lock); line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD; line6->line6pcm = line6pcm; pcm->private_data = line6pcm; pcm->private_free = line6_cleanup_pcm; line6pcm->max_packet_size_in = usb_maxpacket(line6->usbdev, usb_rcvisocpipe(line6->usbdev, ep_read)); line6pcm->max_packet_size_out = usb_maxpacket(line6->usbdev, usb_sndisocpipe(line6->usbdev, ep_write)); if (!line6pcm->max_packet_size_in || !line6pcm->max_packet_size_out) { dev_err(line6pcm->line6->ifcdev, "cannot get proper max packet size\n"); return -EINVAL; } err = line6_create_audio_out_urbs(line6pcm); if (err < 0) return err; err = line6_create_audio_in_urbs(line6pcm); if (err < 0) return err; /* mixer: */ for (i = 0; i < ARRAY_SIZE(line6_controls); i++) { err = snd_ctl_add(line6->card, snd_ctl_new1(&line6_controls[i], line6pcm)); if (err < 0) return err; } return 0; } EXPORT_SYMBOL_GPL(line6_init_pcm); /* prepare pcm callback */ int snd_line6_prepare(struct snd_pcm_substream *substream) { struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream); mutex_lock(&line6pcm->state_mutex); if (!pstr->running) line6_wait_clear_audio_urbs(line6pcm, pstr); if (!test_and_set_bit(LINE6_FLAG_PREPARED, &line6pcm->flags)) { line6pcm->out.count = 0; line6pcm->out.pos = 0; line6pcm->out.pos_done = 0; line6pcm->out.bytes = 0; line6pcm->in.count = 0; line6pcm->in.pos_done = 0; line6pcm->in.bytes = 0; } mutex_unlock(&line6pcm->state_mutex); return 0; }
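The exported pair line6_pcm_acquire()/line6_pcm_release() is what the device-specific drivers are expected to call around line6_init_pcm(); the sketch below is a hedged illustration of that wiring, not code from this file. foo_attach_pcm(), foo_detach_pcm() and foo_pcm_properties are hypothetical (the driver's local "driver.h"/"pcm.h" headers are assumed), and the monitor stream is started through the same acquire/release pair the impulse-response volume control above uses for LINE6_STREAM_IMPULSE.

/* Hedged sketch of a per-device driver using the helpers above. */
#include "driver.h"
#include "pcm.h"

/* hypothetical hardware description; real drivers fill in their own */
static struct line6_pcm_properties foo_pcm_properties;

static int foo_attach_pcm(struct usb_line6 *line6)
{
	int err;

	/* creates the PCM device, mixer controls and the ISO URBs */
	err = line6_init_pcm(line6, &foo_pcm_properties);
	if (err < 0)
		return err;

	/* no PCM capability: line6_init_pcm() returned 0 without a pcm */
	if (!line6->line6pcm)
		return 0;

	/* optionally start duplex software monitoring right away */
	return line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR, true);
}

static void foo_detach_pcm(struct usb_line6 *line6)
{
	if (line6->line6pcm)
		line6_pcm_release(line6->line6pcm, LINE6_STREAM_MONITOR);
}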
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_DRIVER_H #define _LINUX_TTY_DRIVER_H #include <linux/export.h> #include <linux/fs.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include <linux/termios.h> #include <linux/seq_file.h> struct tty_struct; struct tty_driver; struct serial_icounter_struct; struct serial_struct; /** * struct tty_operations -- interface between driver and tty * * @lookup: ``struct tty_struct *()(struct tty_driver *self, struct file *, * int idx)`` * * Return the tty device corresponding to @idx, %NULL if there is not * one currently in use and an %ERR_PTR value on error. Called under * %tty_mutex (for now!) * * Optional method. Default behaviour is to use the @self->ttys array. * * @install: ``int ()(struct tty_driver *self, struct tty_struct *tty)`` * * Install a new @tty into the @self's internal tables. Used in * conjunction with @lookup and @remove methods. * * Optional method. Default behaviour is to use the @self->ttys array. * * @remove: ``void ()(struct tty_driver *self, struct tty_struct *tty)`` * * Remove a closed @tty from the @self's internal tables. Used in * conjunction with @lookup and @remove methods. * * Optional method.
Default behaviour is to use the @self->ttys array. * * @open: ``int ()(struct tty_struct *tty, struct file *)`` * * This routine is called when a particular @tty device is opened. This * routine is mandatory; if this routine is not filled in, the attempted * open will fail with %ENODEV. * * Required method. Called with tty lock held. May sleep. * * @close: ``void ()(struct tty_struct *tty, struct file *)`` * * This routine is called when a particular @tty device is closed. At the * point of return from this call the driver must make no further ldisc * calls of any kind. * * Remark: called even if the corresponding @open() failed. * * Required method. Called with tty lock held. May sleep. * * @shutdown: ``void ()(struct tty_struct *tty)`` * * This routine is called under the tty lock when a particular @tty device * is closed for the last time. It executes before the @tty resources * are freed so may execute while another function holds a @tty kref. * * @cleanup: ``void ()(struct tty_struct *tty)`` * * This routine is called asynchronously when a particular @tty device * is closed for the last time freeing up the resources. This is * actually the second part of shutdown for routines that might sleep. * * @write: ``ssize_t ()(struct tty_struct *tty, const u8 *buf, size_t count)`` * * This routine is called by the kernel to write a series (@count) of * characters (@buf) to the @tty device. The characters may come from * user space or kernel space. This routine will return the * number of characters actually accepted for writing. * * May occur in parallel in special cases. Because this includes panic * paths drivers generally shouldn't try and do clever locking here. * * Optional: Required for writable devices. May not sleep. * * @put_char: ``int ()(struct tty_struct *tty, u8 ch)`` * * This routine is called by the kernel to write a single character @ch to * the @tty device. If the kernel uses this routine, it must call the * @flush_chars() routine (if defined) when it is done stuffing characters * into the driver. If there is no room in the queue, the character is * ignored. * * Optional: Kernel will use the @write method if not provided. Do not * call this function directly, call tty_put_char(). * * @flush_chars: ``void ()(struct tty_struct *tty)`` * * This routine is called by the kernel after it has written a * series of characters to the tty device using @put_char(). * * Optional. Do not call this function directly, call * tty_driver_flush_chars(). * * @write_room: ``unsigned int ()(struct tty_struct *tty)`` * * This routine returns the numbers of characters the @tty driver * will accept for queuing to be written. This number is subject * to change as output buffers get emptied, or if the output flow * control is acted. * * The ldisc is responsible for being intelligent about multi-threading of * write_room/write calls * * Required if @write method is provided else not needed. Do not call this * function directly, call tty_write_room() * * @chars_in_buffer: ``unsigned int ()(struct tty_struct *tty)`` * * This routine returns the number of characters in the device private * output queue. Used in tty_wait_until_sent() and for poll() * implementation. * * Optional: if not provided, it is assumed there is no queue on the * device. Do not call this function directly, call tty_chars_in_buffer(). * * @ioctl: ``int ()(struct tty_struct *tty, unsigned int cmd, * unsigned long arg)`` * * This routine allows the @tty driver to implement device-specific * ioctls. 
If the ioctl number passed in @cmd is not recognized by the * driver, it should return %ENOIOCTLCMD. * * Optional. * * @compat_ioctl: ``long ()(struct tty_struct *tty, unsigned int cmd, * unsigned long arg)`` * * Implement ioctl processing for 32 bit process on 64 bit system. * * Optional. * * @set_termios: ``void ()(struct tty_struct *tty, const struct ktermios *old)`` * * This routine allows the @tty driver to be notified when device's * termios settings have changed. New settings are in @tty->termios. * Previous settings are passed in the @old argument. * * The API is defined such that the driver should return the actual modes * selected. This means that the driver is responsible for modifying any * bits in @tty->termios it cannot fulfill to indicate the actual modes * being used. * * Optional. Called under the @tty->termios_rwsem. May sleep. * * @ldisc_ok: ``int ()(struct tty_struct *tty, int ldisc)`` * * This routine allows the @tty driver to decide if it can deal * with a particular @ldisc. * * Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem. * * @set_ldisc: ``void ()(struct tty_struct *tty)`` * * This routine allows the @tty driver to be notified when the device's * line discipline is being changed. At the point this is done the * discipline is not yet usable. * * Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem. * * @throttle: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that input buffers for the line * discipline are close to full, and it should somehow signal that no more * characters should be sent to the @tty. * * Serialization including with @unthrottle() is the job of the ldisc * layer. * * Optional: Always invoke via tty_throttle_safe(). Called under the * @tty->termios_rwsem. * * @unthrottle: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that it should signal that * characters can now be sent to the @tty without fear of overrunning the * input buffers of the line disciplines. * * Optional. Always invoke via tty_unthrottle(). Called under the * @tty->termios_rwsem. * * @stop: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that it should stop outputting * characters to the tty device. * * Called with @tty->flow.lock held. Serialized with @start() method. * * Optional. Always invoke via stop_tty(). * * @start: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that it resumed sending * characters to the @tty device. * * Called with @tty->flow.lock held. Serialized with stop() method. * * Optional. Always invoke via start_tty(). * * @hangup: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that it should hang up the @tty * device. * * Optional. Called with tty lock held. * * @break_ctl: ``int ()(struct tty_struct *tty, int state)`` * * This optional routine requests the @tty driver to turn on or off BREAK * status on the RS-232 port. If @state is -1, then the BREAK status * should be turned on; if @state is 0, then BREAK should be turned off. * * If this routine is implemented, the high-level tty driver will handle * the following ioctls: %TCSBRK, %TCSBRKP, %TIOCSBRK, %TIOCCBRK. * * If the driver sets %TTY_DRIVER_HARDWARE_BREAK in tty_alloc_driver(), * then the interface will also be called with actual times and the * hardware is expected to do the delay work itself. 0 and -1 are still * used for on/off. * * Optional: Required for %TCSBRK/%BRKP/etc. handling. May sleep. 
* * @flush_buffer: ``void ()(struct tty_struct *tty)`` * * This routine discards device private output buffer. Invoked on close, * hangup, to implement %TCOFLUSH ioctl and similar. * * Optional: if not provided, it is assumed there is no queue on the * device. Do not call this function directly, call * tty_driver_flush_buffer(). * * @wait_until_sent: ``void ()(struct tty_struct *tty, int timeout)`` * * This routine waits until the device has written out all of the * characters in its transmitter FIFO. Or until @timeout (in jiffies) is * reached. * * Optional: If not provided, the device is assumed to have no FIFO. * Usually correct to invoke via tty_wait_until_sent(). May sleep. * * @send_xchar: ``void ()(struct tty_struct *tty, u8 ch)`` * * This routine is used to send a high-priority XON/XOFF character (@ch) * to the @tty device. * * Optional: If not provided, then the @write method is called under * the @tty->atomic_write_lock to keep it serialized with the ldisc. * * @tiocmget: ``int ()(struct tty_struct *tty)`` * * This routine is used to obtain the modem status bits from the @tty * driver. * * Optional: If not provided, then %ENOTTY is returned from the %TIOCMGET * ioctl. Do not call this function directly, call tty_tiocmget(). * * @tiocmset: ``int ()(struct tty_struct *tty, * unsigned int set, unsigned int clear)`` * * This routine is used to set the modem status bits to the @tty driver. * First, @clear bits should be cleared, then @set bits set. * * Optional: If not provided, then %ENOTTY is returned from the %TIOCMSET * ioctl. Do not call this function directly, call tty_tiocmset(). * * @resize: ``int ()(struct tty_struct *tty, struct winsize *ws)`` * * Called when a termios request is issued which changes the requested * terminal geometry to @ws. * * Optional: the default action is to update the termios structure * without error. This is usually the correct behaviour. Drivers should * not force errors here if they are not resizable objects (e.g. a serial * line). See tty_do_resize() if you need to wrap the standard method * in your own logic -- the usual case. * * @get_icount: ``int ()(struct tty_struct *tty, * struct serial_icounter *icount)`` * * Called when the @tty device receives a %TIOCGICOUNT ioctl. Passed a * kernel structure @icount to complete. * * Optional: called only if provided, otherwise %ENOTTY will be returned. * * @get_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)`` * * Called when the @tty device receives a %TIOCGSERIAL ioctl. Passed a * kernel structure @p (&struct serial_struct) to complete. * * Optional: called only if provided, otherwise %ENOTTY will be returned. * Do not call this function directly, call tty_tiocgserial(). * * @set_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)`` * * Called when the @tty device receives a %TIOCSSERIAL ioctl. Passed a * kernel structure @p (&struct serial_struct) to set the values from. * * Optional: called only if provided, otherwise %ENOTTY will be returned. * Do not call this function directly, call tty_tiocsserial(). * * @show_fdinfo: ``void ()(struct tty_struct *tty, struct seq_file *m)`` * * Called when the @tty device file descriptor receives a fdinfo request * from VFS (to show in /proc/<pid>/fdinfo/). @m should be filled with * information. * * Optional: called only if provided, otherwise nothing is written to @m. * Do not call this function directly, call tty_show_fdinfo(). 
* * @poll_init: ``int ()(struct tty_driver *driver, int line, char *options)`` * * kgdboc support (Documentation/dev-tools/kgdb.rst). This routine is * called to initialize the HW for later use by calling @poll_get_char or * @poll_put_char. * * Optional: called only if provided, otherwise skipped as a non-polling * driver. * * @poll_get_char: ``int ()(struct tty_driver *driver, int line)`` * * kgdboc support (see @poll_init). @driver should read a character from a * tty identified by @line and return it. * * Optional: called only if @poll_init provided. * * @poll_put_char: ``void ()(struct tty_driver *driver, int line, char ch)`` * * kgdboc support (see @poll_init). @driver should write character @ch to * a tty identified by @line. * * Optional: called only if @poll_init provided. * * @proc_show: ``int ()(struct seq_file *m, void *driver)`` * * Driver @driver (cast to &struct tty_driver) can show additional info in * /proc/tty/driver/<driver_name>. It is enough to fill in the information * into @m. * * Optional: called only if provided, otherwise no /proc entry created. * * This structure defines the interface between the low-level tty driver and * the tty routines. These routines can be defined. Unless noted otherwise, * they are optional, and can be filled in with a %NULL pointer. */ struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *driver, struct file *filp, int idx); int (*install)(struct tty_driver *driver, struct tty_struct *tty); void (*remove)(struct tty_driver *driver, struct tty_struct *tty); int (*open)(struct tty_struct * tty, struct file * filp); void (*close)(struct tty_struct * tty, struct file * filp); void (*shutdown)(struct tty_struct *tty); void (*cleanup)(struct tty_struct *tty); ssize_t (*write)(struct tty_struct *tty, const u8 *buf, size_t count); int (*put_char)(struct tty_struct *tty, u8 ch); void (*flush_chars)(struct tty_struct *tty); unsigned int (*write_room)(struct tty_struct *tty); unsigned int (*chars_in_buffer)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); long (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, const struct ktermios *old); void (*throttle)(struct tty_struct * tty); void (*unthrottle)(struct tty_struct * tty); void (*stop)(struct tty_struct *tty); void (*start)(struct tty_struct *tty); void (*hangup)(struct tty_struct *tty); int (*break_ctl)(struct tty_struct *tty, int state); void (*flush_buffer)(struct tty_struct *tty); int (*ldisc_ok)(struct tty_struct *tty, int ldisc); void (*set_ldisc)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, int timeout); void (*send_xchar)(struct tty_struct *tty, u8 ch); int (*tiocmget)(struct tty_struct *tty); int (*tiocmset)(struct tty_struct *tty, unsigned int set, unsigned int clear); int (*resize)(struct tty_struct *tty, struct winsize *ws); int (*get_icount)(struct tty_struct *tty, struct serial_icounter_struct *icount); int (*get_serial)(struct tty_struct *tty, struct serial_struct *p); int (*set_serial)(struct tty_struct *tty, struct serial_struct *p); void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct tty_driver *driver, int line, char *options); int (*poll_get_char)(struct tty_driver *driver, int line); void (*poll_put_char)(struct tty_driver *driver, int line, char ch); #endif int (*proc_show)(struct seq_file *m, void *driver); } __randomize_layout; /** * struct 
tty_driver -- driver for TTY devices * * @kref: reference counting. Reaching zero frees all the internals and the * driver. * @cdevs: allocated/registered character /dev devices * @owner: modules owning this driver. Used drivers cannot be rmmod'ed. * Automatically set by tty_alloc_driver(). * @driver_name: name of the driver used in /proc/tty * @name: used for constructing /dev node name * @name_base: used as a number base for constructing /dev node name * @major: major /dev device number (zero for autoassignment) * @minor_start: the first minor /dev device number * @num: number of devices allocated * @type: type of tty driver (%TTY_DRIVER_TYPE_) * @subtype: subtype of tty driver (%SYSTEM_TYPE_, %PTY_TYPE_, %SERIAL_TYPE_) * @init_termios: termios to set to each tty initially (e.g. %tty_std_termios) * @flags: tty driver flags (%TTY_DRIVER_) * @proc_entry: proc fs entry, used internally * @other: driver of the linked tty; only used for the PTY driver * @ttys: array of active &struct tty_struct, set by tty_standard_install() * @ports: array of &struct tty_port; can be set during initialization by * tty_port_link_device() and similar * @termios: storage for termios at each TTY close for the next open * @driver_state: pointer to driver's arbitrary data * @ops: driver hooks for TTYs. Set them using tty_set_operations(). Use &struct * tty_port helpers in them as much as possible. * @tty_drivers: used internally to link tty_drivers together * * The usual handling of &struct tty_driver is to allocate it by * tty_alloc_driver(), set up all the necessary members, and register it by * tty_register_driver(). At last, the driver is torn down by calling * tty_unregister_driver() followed by tty_driver_kref_put(). * * The fields required to be set before calling tty_register_driver() include * @driver_name, @name, @type, @subtype, @init_termios, and @ops. */ struct tty_driver { struct kref kref; struct cdev **cdevs; struct module *owner; const char *driver_name; const char *name; int name_base; int major; int minor_start; unsigned int num; short type; short subtype; struct ktermios init_termios; unsigned long flags; struct proc_dir_entry *proc_entry; struct tty_driver *other; /* * Pointer to the tty data structures */ struct tty_struct **ttys; struct tty_port **ports; struct ktermios **termios; void *driver_state; /* * Driver methods */ const struct tty_operations *ops; struct list_head tty_drivers; } __randomize_layout; extern struct list_head tty_drivers; struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner, unsigned long flags); struct tty_driver *tty_find_polling_driver(char *name, int *line); void tty_driver_kref_put(struct tty_driver *driver); /* Use TTY_DRIVER_* flags below */ #define tty_alloc_driver(lines, flags) \ __tty_alloc_driver(lines, THIS_MODULE, flags) static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) { kref_get(&d->kref); return d; } static inline void tty_set_operations(struct tty_driver *driver, const struct tty_operations *op) { driver->ops = op; } /** * DOC: TTY Driver Flags * * TTY_DRIVER_RESET_TERMIOS * Requests the tty layer to reset the termios setting when the last * process has closed the device. Used for PTYs, in particular. 
* * TTY_DRIVER_REAL_RAW * Indicates that the driver will guarantee not to set any special * character handling flags if this is set for the tty: * * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)`` * * That is, if there is no reason for the driver to * send notifications of parity and break characters up to the line * driver, it won't do so. This allows the line driver to optimize for * this case if this flag is set. (Note that there is also a promise, if * the above case is true, not to signal overruns, either.) * * TTY_DRIVER_DYNAMIC_DEV * The individual tty devices need to be registered with a call to * tty_register_device() when the device is found in the system and * unregistered with a call to tty_unregister_device() so the devices * show up properly in sysfs. If not set, all &tty_driver.num entries * will be created by the tty core in sysfs when tty_register_driver() is * called. This is to be used by drivers that have tty devices that can * appear and disappear while the main tty driver is registered with the * tty core. * * TTY_DRIVER_DEVPTS_MEM * Don't use the standard arrays (&tty_driver.ttys and * &tty_driver.termios), instead use dynamic memory keyed through the * devpts filesystem. This is only applicable to the PTY driver. * * TTY_DRIVER_HARDWARE_BREAK * Hardware handles break signals. Pass the requested timeout to the * &tty_operations.break_ctl instead of using a simple on/off interface. * * TTY_DRIVER_DYNAMIC_ALLOC * Do not allocate structures which are needed per line for this driver * (&tty_driver.ports) as it would waste memory. The driver will take * care. This is only applicable to the PTY driver. * * TTY_DRIVER_UNNUMBERED_NODE * Do not create numbered ``/dev`` nodes. For example, create * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a * driver for a single tty device is being allocated.
*/ #define TTY_DRIVER_INSTALLED 0x0001 #define TTY_DRIVER_RESET_TERMIOS 0x0002 #define TTY_DRIVER_REAL_RAW 0x0004 #define TTY_DRIVER_DYNAMIC_DEV 0x0008 #define TTY_DRIVER_DEVPTS_MEM 0x0010 #define TTY_DRIVER_HARDWARE_BREAK 0x0020 #define TTY_DRIVER_DYNAMIC_ALLOC 0x0040 #define TTY_DRIVER_UNNUMBERED_NODE 0x0080 /* tty driver types */ #define TTY_DRIVER_TYPE_SYSTEM 0x0001 #define TTY_DRIVER_TYPE_CONSOLE 0x0002 #define TTY_DRIVER_TYPE_SERIAL 0x0003 #define TTY_DRIVER_TYPE_PTY 0x0004 #define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */ #define TTY_DRIVER_TYPE_SYSCONS 0x0006 /* system subtypes (magic, used by tty_io.c) */ #define SYSTEM_TYPE_TTY 0x0001 #define SYSTEM_TYPE_CONSOLE 0x0002 #define SYSTEM_TYPE_SYSCONS 0x0003 #define SYSTEM_TYPE_SYSPTMX 0x0004 /* pty subtypes (magic, used by tty_io.c) */ #define PTY_TYPE_MASTER 0x0001 #define PTY_TYPE_SLAVE 0x0002 /* serial subtype definitions */ #define SERIAL_TYPE_NORMAL 1 int tty_register_driver(struct tty_driver *driver); void tty_unregister_driver(struct tty_driver *driver); struct device *tty_register_device(struct tty_driver *driver, unsigned index, struct device *dev); struct device *tty_register_device_attr(struct tty_driver *driver, unsigned index, struct device *device, void *drvdata, const struct attribute_group **attr_grp); void tty_unregister_device(struct tty_driver *driver, unsigned index); #ifdef CONFIG_PROC_FS void proc_tty_register_driver(struct tty_driver *); void proc_tty_unregister_driver(struct tty_driver *); #else static inline void proc_tty_register_driver(struct tty_driver *d) {} static inline void proc_tty_unregister_driver(struct tty_driver *d) {} #endif #endif /* #ifdef _LINUX_TTY_DRIVER_H */
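The kernel-doc above lays out the usual life cycle: tty_alloc_driver(), fill in the required fields (@driver_name, @name, @type, @subtype, @init_termios, @ops), tty_register_driver(), and eventually tty_unregister_driver() followed by tty_driver_kref_put(). The following is a hedged, self-contained sketch of that flow; every foo_* symbol is hypothetical, the callbacks are stubs, and <linux/tty.h> is assumed for tty_std_termios.

/* Hedged sketch of the registration flow described in the kernel-doc above. */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/tty.h>

static int foo_open(struct tty_struct *tty, struct file *filp)
{
	return 0;			/* stub: nothing to set up */
}

static void foo_close(struct tty_struct *tty, struct file *filp)
{
}

static ssize_t foo_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
	return count;			/* stub: pretend all bytes were accepted */
}

static unsigned int foo_write_room(struct tty_struct *tty)
{
	return 4096;			/* stub: pretend there is always room */
}

static const struct tty_operations foo_ops = {
	.open		= foo_open,		/* mandatory */
	.close		= foo_close,		/* mandatory */
	.write		= foo_write,		/* required for a writable device */
	.write_room	= foo_write_room,	/* required when ->write is set */
};

static struct tty_driver *foo_driver;

static int __init foo_init(void)
{
	int ret;

	foo_driver = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW |
					 TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(foo_driver))
		return PTR_ERR(foo_driver);

	foo_driver->driver_name	 = "foo";	/* shows up in /proc/tty */
	foo_driver->name	 = "ttyFOO";	/* /dev node name prefix */
	foo_driver->major	 = 0;		/* 0 = pick a major automatically */
	foo_driver->type	 = TTY_DRIVER_TYPE_SERIAL;
	foo_driver->subtype	 = SERIAL_TYPE_NORMAL;
	foo_driver->init_termios = tty_std_termios;
	tty_set_operations(foo_driver, &foo_ops);

	ret = tty_register_driver(foo_driver);
	if (ret) {
		tty_driver_kref_put(foo_driver);
		return ret;
	}
	/*
	 * With TTY_DRIVER_DYNAMIC_DEV each line is added later with
	 * tty_register_device() once the hardware actually appears.
	 */
	return 0;
}

static void __exit foo_exit(void)
{
	tty_unregister_driver(foo_driver);
	tty_driver_kref_put(foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");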
// SPDX-License-Identifier: GPL-2.0 /* * fs/partitions/sysv68.c * * Copyright (C) 2007 Philippe De Muyter <phdm@macqel.be> */ #include "check.h" /* * Volume ID structure: on first 256-bytes sector of disk */ struct volumeid { u8 vid_unused[248]; u8 vid_mac[8]; /* ASCII string "MOTOROLA" */ }; /* * config block: second 256-bytes sector on disk */ struct dkconfig { u8 ios_unused0[128]; __be32 ios_slcblk; /* Slice table block number */ __be16 ios_slccnt; /* Number of entries in slice table */ u8 ios_unused1[122]; }; /* * combined volumeid and dkconfig block */ struct dkblk0 { struct volumeid dk_vid; struct dkconfig dk_ios; }; /* * Slice Table Structure */ struct slice { __be32 nblocks; /* slice size (in blocks) */ __be32 blkoff; /* block offset of slice */ }; int sysv68_partition(struct parsed_partitions *state) { int i, slices; int slot = 1; Sector sect; unsigned char *data; struct dkblk0 *b; struct slice *slice; char tmp[64]; data = read_part_sector(state, 0, &sect); if (!data) return -1; b = (struct dkblk0 *)data; if (memcmp(b->dk_vid.vid_mac, "MOTOROLA", sizeof(b->dk_vid.vid_mac))) { put_dev_sector(sect); return 0; } slices = be16_to_cpu(b->dk_ios.ios_slccnt); i = be32_to_cpu(b->dk_ios.ios_slcblk); put_dev_sector(sect); data = read_part_sector(state, i, &sect); if (!data) return -1; slices -= 1; /* last slice is the whole disk */ snprintf(tmp, sizeof(tmp), "sysV68: %s(s%u)", state->name, slices); strlcat(state->pp_buf, tmp, PAGE_SIZE); slice = (struct slice *)data; for (i = 0; i < slices; i++, slice++) { if (slot == state->limit) break; if (be32_to_cpu(slice->nblocks)) { put_partition(state, slot, be32_to_cpu(slice->blkoff), be32_to_cpu(slice->nblocks)); snprintf(tmp, sizeof(tmp), "(s%u)", i); strlcat(state->pp_buf, tmp, PAGE_SIZE); } slot++; } strlcat(state->pp_buf, "\n", PAGE_SIZE); put_dev_sector(sect); return 1; }
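As a worked illustration of the on-disk format sysv68_partition() parses, the block below invents a tiny slice table and traces what the function would register; all values are hypothetical and used only for illustration.

/*
 * Hedged, made-up example of a sysV68 disk (not real data):
 * sector 0 carries "MOTOROLA" in vid_mac, sector 1 carries the dkconfig,
 * and block 2 holds a three-entry slice table whose last entry describes
 * the whole disk.
 */
static const struct dkconfig example_ios = {
	.ios_slcblk = cpu_to_be32(2),	/* slice table lives in block 2 */
	.ios_slccnt = cpu_to_be16(3),	/* three entries, last one = whole disk */
};

static const struct slice example_slices[3] = {
	{ .nblocks = cpu_to_be32(0),     .blkoff = cpu_to_be32(0)  }, /* s0: empty, skipped */
	{ .nblocks = cpu_to_be32(2048),  .blkoff = cpu_to_be32(16) }, /* s1: 2048 blocks at 16 */
	{ .nblocks = cpu_to_be32(10240), .blkoff = cpu_to_be32(0)  }, /* s2: whole disk, dropped */
};

/*
 * For this layout sysv68_partition() reads block 2, drops the final entry
 * (slices -= 1), skips the empty slice 0 (the slot counter still advances),
 * and ends up registering a single partition in slot 2:
 *
 *	put_partition(state, 2, 16, 2048);
 */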
3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 
4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 
5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 
5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 
6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> Copyright (C) 2010 Google Inc. Copyright (C) 2011 ProFUSION Embedded Systems Copyright (c) 2012 Code Aurora Forum. All rights reserved. Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth L2CAP core. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/crc16.h> #include <linux/filter.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "smp.h" #define LE_FLOWCTL_MAX_CREDITS 65535 bool disable_ertm; bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED); static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; static LIST_HEAD(chan_list); static DEFINE_RWLOCK(chan_list_lock); static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data); static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data); static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size); static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err); static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event); static void l2cap_retrans_timeout(struct work_struct *work); static void l2cap_monitor_timeout(struct work_struct *work); static void l2cap_ack_timeout(struct work_struct *work); static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type) { if (link_type == LE_LINK) { if (bdaddr_type == ADDR_LE_DEV_PUBLIC) return BDADDR_LE_PUBLIC; else return BDADDR_LE_RANDOM; } return BDADDR_BREDR; } static inline u8 bdaddr_src_type(struct hci_conn *hcon) { return bdaddr_type(hcon->type, hcon->src_type); } static inline u8 bdaddr_dst_type(struct hci_conn *hcon) { return bdaddr_type(hcon->type, hcon->dst_type); } /* ---- L2CAP channels ---- */ static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; list_for_each_entry(c, &conn->chan_l, list) { if (c->dcid == cid) return c; } return NULL; } static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; list_for_each_entry(c, &conn->chan_l, list) { if (c->scid == cid) return c; } return NULL; } /* Find channel with given SCID. * Returns a reference locked channel. */ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_scid(conn, cid); if (c) { /* Only lock if chan reference is not 0 */ c = l2cap_chan_hold_unless_zero(c); if (c) l2cap_chan_lock(c); } mutex_unlock(&conn->chan_lock); return c; } /* Find channel with given DCID. * Returns a reference locked channel. 
*/ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_dcid(conn, cid); if (c) { /* Only lock if chan reference is not 0 */ c = l2cap_chan_hold_unless_zero(c); if (c) l2cap_chan_lock(c); } mutex_unlock(&conn->chan_lock); return c; } static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) { struct l2cap_chan *c; list_for_each_entry(c, &conn->chan_l, list) { if (c->ident == ident) return c; } return NULL; } static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src, u8 src_type) { struct l2cap_chan *c; list_for_each_entry(c, &chan_list, global_l) { if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR) continue; if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR) continue; if (c->sport == psm && !bacmp(&c->src, src)) return c; } return NULL; } int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) { int err; write_lock(&chan_list_lock); if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) { err = -EADDRINUSE; goto done; } if (psm) { chan->psm = psm; chan->sport = psm; err = 0; } else { u16 p, start, end, incr; if (chan->src_type == BDADDR_BREDR) { start = L2CAP_PSM_DYN_START; end = L2CAP_PSM_AUTO_END; incr = 2; } else { start = L2CAP_PSM_LE_DYN_START; end = L2CAP_PSM_LE_DYN_END; incr = 1; } err = -EINVAL; for (p = start; p <= end; p += incr) if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src, chan->src_type)) { chan->psm = cpu_to_le16(p); chan->sport = cpu_to_le16(p); err = 0; break; } } done: write_unlock(&chan_list_lock); return err; } EXPORT_SYMBOL_GPL(l2cap_add_psm); int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) { write_lock(&chan_list_lock); /* Override the defaults (which are for conn-oriented) */ chan->omtu = L2CAP_DEFAULT_MTU; chan->chan_type = L2CAP_CHAN_FIXED; chan->scid = scid; write_unlock(&chan_list_lock); return 0; } static u16 l2cap_alloc_cid(struct l2cap_conn *conn) { u16 cid, dyn_end; if (conn->hcon->type == LE_LINK) dyn_end = L2CAP_CID_LE_DYN_END; else dyn_end = L2CAP_CID_DYN_END; for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) { if (!__l2cap_get_chan_by_scid(conn, cid)) return cid; } return 0; } static void l2cap_state_change(struct l2cap_chan *chan, int state) { BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state), state_to_string(state)); chan->state = state; chan->ops->state_change(chan, state, 0); } static inline void l2cap_state_change_and_error(struct l2cap_chan *chan, int state, int err) { chan->state = state; chan->ops->state_change(chan, chan->state, err); } static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) { chan->ops->state_change(chan, chan->state, err); } static void __set_retrans_timer(struct l2cap_chan *chan) { if (!delayed_work_pending(&chan->monitor_timer) && chan->retrans_timeout) { l2cap_set_timer(chan, &chan->retrans_timer, msecs_to_jiffies(chan->retrans_timeout)); } } static void __set_monitor_timer(struct l2cap_chan *chan) { __clear_retrans_timer(chan); if (chan->monitor_timeout) { l2cap_set_timer(chan, &chan->monitor_timer, msecs_to_jiffies(chan->monitor_timeout)); } } static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, u16 seq) { struct sk_buff *skb; skb_queue_walk(head, skb) { if (bt_cb(skb)->l2cap.txseq == seq) return skb; } return NULL; } /* ---- L2CAP sequence number lists ---- */ /* For ERTM, ordered lists of sequence numbers must be tracked for * SREJ requests that are 
received and for frames that are to be * retransmitted. These seq_list functions implement a singly-linked * list in an array, where membership in the list can also be checked * in constant time. Items can also be added to the tail of the list * and removed from the head in constant time, without further memory * allocs or frees. */ static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size) { size_t alloc_size, i; /* Allocated size is a power of 2 to map sequence numbers * (which may be up to 14 bits) in to a smaller array that is * sized for the negotiated ERTM transmit windows. */ alloc_size = roundup_pow_of_two(size); seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL); if (!seq_list->list) return -ENOMEM; seq_list->mask = alloc_size - 1; seq_list->head = L2CAP_SEQ_LIST_CLEAR; seq_list->tail = L2CAP_SEQ_LIST_CLEAR; for (i = 0; i < alloc_size; i++) seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; return 0; } static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list) { kfree(seq_list->list); } static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list, u16 seq) { /* Constant-time check for list membership */ return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR; } static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list) { u16 seq = seq_list->head; u16 mask = seq_list->mask; seq_list->head = seq_list->list[seq & mask]; seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { seq_list->head = L2CAP_SEQ_LIST_CLEAR; seq_list->tail = L2CAP_SEQ_LIST_CLEAR; } return seq; } static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list) { u16 i; if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) return; for (i = 0; i <= seq_list->mask; i++) seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; seq_list->head = L2CAP_SEQ_LIST_CLEAR; seq_list->tail = L2CAP_SEQ_LIST_CLEAR; } static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq) { u16 mask = seq_list->mask; /* All appends happen in constant time */ if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR) return; if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR) seq_list->head = seq; else seq_list->list[seq_list->tail & mask] = seq; seq_list->tail = seq; seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL; } static void l2cap_chan_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, chan_timer.work); struct l2cap_conn *conn = chan->conn; int reason; BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); if (!conn) return; mutex_lock(&conn->chan_lock); /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling * this work. No need to call l2cap_chan_hold(chan) here again. 
*/ l2cap_chan_lock(chan); if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) reason = ECONNREFUSED; else if (chan->state == BT_CONNECT && chan->sec_level != BT_SECURITY_SDP) reason = ECONNREFUSED; else reason = ETIMEDOUT; l2cap_chan_close(chan, reason); chan->ops->close(chan); l2cap_chan_unlock(chan); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); } struct l2cap_chan *l2cap_chan_create(void) { struct l2cap_chan *chan; chan = kzalloc(sizeof(*chan), GFP_ATOMIC); if (!chan) return NULL; skb_queue_head_init(&chan->tx_q); skb_queue_head_init(&chan->srej_q); mutex_init(&chan->lock); /* Set default lock nesting level */ atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL); /* Available receive buffer space is initially unknown */ chan->rx_avail = -1; write_lock(&chan_list_lock); list_add(&chan->global_l, &chan_list); write_unlock(&chan_list_lock); INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); chan->state = BT_OPEN; kref_init(&chan->kref); /* This flag is cleared in l2cap_chan_ready() */ set_bit(CONF_NOT_COMPLETE, &chan->conf_state); BT_DBG("chan %p", chan); return chan; } EXPORT_SYMBOL_GPL(l2cap_chan_create); static void l2cap_chan_destroy(struct kref *kref) { struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref); BT_DBG("chan %p", chan); write_lock(&chan_list_lock); list_del(&chan->global_l); write_unlock(&chan_list_lock); kfree(chan); } void l2cap_chan_hold(struct l2cap_chan *c) { BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); kref_get(&c->kref); } struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c) { BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); if (!kref_get_unless_zero(&c->kref)) return NULL; return c; } void l2cap_chan_put(struct l2cap_chan *c) { BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); kref_put(&c->kref, l2cap_chan_destroy); } EXPORT_SYMBOL_GPL(l2cap_chan_put); void l2cap_chan_set_defaults(struct l2cap_chan *chan) { chan->fcs = L2CAP_FCS_CRC16; chan->max_tx = L2CAP_DEFAULT_MAX_TX; chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; chan->remote_max_tx = chan->max_tx; chan->remote_tx_win = chan->tx_win; chan->ack_win = L2CAP_DEFAULT_TX_WINDOW; chan->sec_level = BT_SECURITY_LOW; chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; chan->conf_state = 0; set_bit(CONF_NOT_COMPLETE, &chan->conf_state); set_bit(FLAG_FORCE_ACTIVE, &chan->flags); } EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults); static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan) { size_t sdu_len = chan->sdu ? chan->sdu->len : 0; if (chan->mps == 0) return 0; /* If we don't know the available space in the receiver buffer, give * enough credits for a full packet. */ if (chan->rx_avail == -1) return (chan->imtu / chan->mps) + 1; /* If we know how much space is available in the receive buffer, give * out as many credits as would fill the buffer. 
*/ if (chan->rx_avail <= sdu_len) return 0; return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps); } static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits) { chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; chan->tx_credits = tx_credits; /* Derive MPS from connection MTU to stop HCI fragmentation */ chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE); chan->rx_credits = l2cap_le_rx_credits(chan); skb_queue_head_init(&chan->tx_q); } static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits) { l2cap_le_flowctl_init(chan, tx_credits); /* L2CAP implementations shall support a minimum MPS of 64 octets */ if (chan->mps < L2CAP_ECRED_MIN_MPS) { chan->mps = L2CAP_ECRED_MIN_MPS; chan->rx_credits = l2cap_le_rx_credits(chan); } } void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, __le16_to_cpu(chan->psm), chan->dcid); conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; chan->conn = conn; switch (chan->chan_type) { case L2CAP_CHAN_CONN_ORIENTED: /* Alloc CID for connection-oriented socket */ chan->scid = l2cap_alloc_cid(conn); if (conn->hcon->type == ACL_LINK) chan->omtu = L2CAP_DEFAULT_MTU; break; case L2CAP_CHAN_CONN_LESS: /* Connectionless socket */ chan->scid = L2CAP_CID_CONN_LESS; chan->dcid = L2CAP_CID_CONN_LESS; chan->omtu = L2CAP_DEFAULT_MTU; break; case L2CAP_CHAN_FIXED: /* Caller will set CID and CID specific MTU values */ break; default: /* Raw socket can send/recv signalling messages only */ chan->scid = L2CAP_CID_SIGNALING; chan->dcid = L2CAP_CID_SIGNALING; chan->omtu = L2CAP_DEFAULT_MTU; } chan->local_id = L2CAP_BESTEFFORT_ID; chan->local_stype = L2CAP_SERV_BESTEFFORT; chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO; l2cap_chan_hold(chan); /* Only keep a reference for fixed channels if they requested it */ if (chan->chan_type != L2CAP_CHAN_FIXED || test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_hold(conn->hcon); list_add(&chan->list, &conn->chan_l); } void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { mutex_lock(&conn->chan_lock); __l2cap_chan_add(conn, chan); mutex_unlock(&conn->chan_lock); } void l2cap_chan_del(struct l2cap_chan *chan, int err) { struct l2cap_conn *conn = chan->conn; __clear_chan_timer(chan); BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err, state_to_string(chan->state)); chan->ops->teardown(chan, err); if (conn) { /* Delete from channel list */ list_del(&chan->list); l2cap_chan_put(chan); chan->conn = NULL; /* Reference was only held for non-fixed channels or * fixed channels that explicitly requested it using the * FLAG_HOLD_HCI_CONN flag. 
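* This balances the conditional hci_conn_hold() taken in
* __l2cap_chan_add().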
*/ if (chan->chan_type != L2CAP_CHAN_FIXED || test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_drop(conn->hcon); } if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) return; switch (chan->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_LE_FLOWCTL: case L2CAP_MODE_EXT_FLOWCTL: skb_queue_purge(&chan->tx_q); break; case L2CAP_MODE_ERTM: __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); skb_queue_purge(&chan->srej_q); l2cap_seq_list_free(&chan->srej_list); l2cap_seq_list_free(&chan->retrans_list); fallthrough; case L2CAP_MODE_STREAMING: skb_queue_purge(&chan->tx_q); break; } } EXPORT_SYMBOL_GPL(l2cap_chan_del); static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id, l2cap_chan_func_t func, void *data) { struct l2cap_chan *chan, *l; list_for_each_entry_safe(chan, l, &conn->chan_l, list) { if (chan->ident == id) func(chan, data); } } static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, void *data) { struct l2cap_chan *chan; list_for_each_entry(chan, &conn->chan_l, list) { func(chan, data); } } void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, void *data) { if (!conn) return; mutex_lock(&conn->chan_lock); __l2cap_chan_list(conn, func, data); mutex_unlock(&conn->chan_lock); } EXPORT_SYMBOL_GPL(l2cap_chan_list); static void l2cap_conn_update_id_addr(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, id_addr_timer.work); struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan; mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); bacpy(&chan->dst, &hcon->dst); chan->dst_type = bdaddr_dst_type(hcon); l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); } static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_le_conn_rsp rsp; u16 result; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) result = L2CAP_CR_LE_AUTHORIZATION; else result = L2CAP_CR_LE_BAD_PSM; l2cap_state_change(chan, BT_DISCONN); rsp.dcid = cpu_to_le16(chan->scid); rsp.mtu = cpu_to_le16(chan->imtu); rsp.mps = cpu_to_le16(chan->mps); rsp.credits = cpu_to_le16(chan->rx_credits); rsp.result = cpu_to_le16(result); l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp); } static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan) { l2cap_state_change(chan, BT_DISCONN); __l2cap_ecred_conn_rsp_defer(chan); } static void l2cap_chan_connect_reject(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_conn_rsp rsp; u16 result; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) result = L2CAP_CR_SEC_BLOCK; else result = L2CAP_CR_BAD_PSM; l2cap_state_change(chan, BT_DISCONN); rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(result); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); } void l2cap_chan_close(struct l2cap_chan *chan, int reason) { struct l2cap_conn *conn = chan->conn; BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); switch (chan->state) { case BT_LISTEN: chan->ops->teardown(chan, 0); break; case BT_CONNECTED: case BT_CONFIG: if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); l2cap_send_disconn_req(chan, reason); } else l2cap_chan_del(chan, reason); break; case BT_CONNECT2: if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { if (conn->hcon->type == ACL_LINK) 
l2cap_chan_connect_reject(chan); else if (conn->hcon->type == LE_LINK) { switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: l2cap_chan_le_connect_reject(chan); break; case L2CAP_MODE_EXT_FLOWCTL: l2cap_chan_ecred_connect_reject(chan); return; } } } l2cap_chan_del(chan, reason); break; case BT_CONNECT: case BT_DISCONN: l2cap_chan_del(chan, reason); break; default: chan->ops->teardown(chan, 0); break; } } EXPORT_SYMBOL(l2cap_chan_close); static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) { switch (chan->chan_type) { case L2CAP_CHAN_RAW: switch (chan->sec_level) { case BT_SECURITY_HIGH: case BT_SECURITY_FIPS: return HCI_AT_DEDICATED_BONDING_MITM; case BT_SECURITY_MEDIUM: return HCI_AT_DEDICATED_BONDING; default: return HCI_AT_NO_BONDING; } break; case L2CAP_CHAN_CONN_LESS: if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) { if (chan->sec_level == BT_SECURITY_LOW) chan->sec_level = BT_SECURITY_SDP; } if (chan->sec_level == BT_SECURITY_HIGH || chan->sec_level == BT_SECURITY_FIPS) return HCI_AT_NO_BONDING_MITM; else return HCI_AT_NO_BONDING; break; case L2CAP_CHAN_CONN_ORIENTED: if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) { if (chan->sec_level == BT_SECURITY_LOW) chan->sec_level = BT_SECURITY_SDP; if (chan->sec_level == BT_SECURITY_HIGH || chan->sec_level == BT_SECURITY_FIPS) return HCI_AT_NO_BONDING_MITM; else return HCI_AT_NO_BONDING; } fallthrough; default: switch (chan->sec_level) { case BT_SECURITY_HIGH: case BT_SECURITY_FIPS: return HCI_AT_GENERAL_BONDING_MITM; case BT_SECURITY_MEDIUM: return HCI_AT_GENERAL_BONDING; default: return HCI_AT_NO_BONDING; } break; } } /* Service level security */ int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator) { struct l2cap_conn *conn = chan->conn; __u8 auth_type; if (conn->hcon->type == LE_LINK) return smp_conn_security(conn->hcon, chan->sec_level); auth_type = l2cap_get_auth_type(chan); return hci_conn_security(conn->hcon, chan->sec_level, auth_type, initiator); } static u8 l2cap_get_ident(struct l2cap_conn *conn) { u8 id; /* Get next available identificator. * 1 - 128 are used by kernel. * 129 - 199 are reserved. * 200 - 254 are used by utilities like l2ping, etc. */ mutex_lock(&conn->ident_lock); if (++conn->tx_ident > 128) conn->tx_ident = 1; id = conn->tx_ident; mutex_unlock(&conn->ident_lock); return id; } static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) { struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); u8 flags; BT_DBG("code 0x%2.2x", code); if (!skb) return; /* Use NO_FLUSH if supported or we have an LE link (which does * not support auto-flushing packets) */ if (lmp_no_flush_capable(conn->hcon->hdev) || conn->hcon->type == LE_LINK) flags = ACL_START_NO_FLUSH; else flags = ACL_START; bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; skb->priority = HCI_PRIO_MAX; hci_send_acl(conn->hchan, skb, flags); } static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) { struct hci_conn *hcon = chan->conn->hcon; u16 flags; BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, skb->priority); /* Use NO_FLUSH for LE links (where this is the only option) or * if the BR/EDR link supports it and flushing has not been * explicitly requested (through FLAG_FLUSHABLE). 
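* The same ACL_START_NO_FLUSH vs ACL_START choice is made for signalling
* PDUs in l2cap_send_cmd() above.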
*/ if (hcon->type == LE_LINK || (!test_bit(FLAG_FLUSHABLE, &chan->flags) && lmp_no_flush_capable(hcon->hdev))) flags = ACL_START_NO_FLUSH; else flags = ACL_START; bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); hci_send_acl(chan->conn->hchan, skb, flags); } static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control) { control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT; if (enh & L2CAP_CTRL_FRAME_TYPE) { /* S-Frame */ control->sframe = 1; control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT; control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; control->sar = 0; control->txseq = 0; } else { /* I-Frame */ control->sframe = 0; control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; control->poll = 0; control->super = 0; } } static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control) { control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT; control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT; if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) { /* S-Frame */ control->sframe = 1; control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT; control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT; control->sar = 0; control->txseq = 0; } else { /* I-Frame */ control->sframe = 0; control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT; control->poll = 0; control->super = 0; } } static inline void __unpack_control(struct l2cap_chan *chan, struct sk_buff *skb) { if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { __unpack_extended_control(get_unaligned_le32(skb->data), &bt_cb(skb)->l2cap); skb_pull(skb, L2CAP_EXT_CTRL_SIZE); } else { __unpack_enhanced_control(get_unaligned_le16(skb->data), &bt_cb(skb)->l2cap); skb_pull(skb, L2CAP_ENH_CTRL_SIZE); } } static u32 __pack_extended_control(struct l2cap_ctrl *control) { u32 packed; packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT; packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT; if (control->sframe) { packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT; packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT; packed |= L2CAP_EXT_CTRL_FRAME_TYPE; } else { packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT; packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT; } return packed; } static u16 __pack_enhanced_control(struct l2cap_ctrl *control) { u16 packed; packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT; packed |= control->final << L2CAP_CTRL_FINAL_SHIFT; if (control->sframe) { packed |= control->poll << L2CAP_CTRL_POLL_SHIFT; packed |= control->super << L2CAP_CTRL_SUPER_SHIFT; packed |= L2CAP_CTRL_FRAME_TYPE; } else { packed |= control->sar << L2CAP_CTRL_SAR_SHIFT; packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT; } return packed; } static inline void __pack_control(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb) { if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { put_unaligned_le32(__pack_extended_control(control), skb->data + L2CAP_HDR_SIZE); } else { put_unaligned_le16(__pack_enhanced_control(control), skb->data + L2CAP_HDR_SIZE); } } static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan) { if (test_bit(FLAG_EXT_CTRL, &chan->flags)) return L2CAP_EXT_HDR_SIZE; else return L2CAP_ENH_HDR_SIZE; } static struct 
sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan, u32 control) { struct sk_buff *skb; struct l2cap_hdr *lh; int hlen = __ertm_hdr_size(chan); if (chan->fcs == L2CAP_FCS_CRC16) hlen += L2CAP_FCS_SIZE; skb = bt_skb_alloc(hlen, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); lh = skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); if (test_bit(FLAG_EXT_CTRL, &chan->flags)) put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); else put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); if (chan->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *)skb->data, skb->len); put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); } skb->priority = HCI_PRIO_MAX; return skb; } static void l2cap_send_sframe(struct l2cap_chan *chan, struct l2cap_ctrl *control) { struct sk_buff *skb; u32 control_field; BT_DBG("chan %p, control %p", chan, control); if (!control->sframe) return; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && !control->poll) control->final = 1; if (control->super == L2CAP_SUPER_RR) clear_bit(CONN_RNR_SENT, &chan->conn_state); else if (control->super == L2CAP_SUPER_RNR) set_bit(CONN_RNR_SENT, &chan->conn_state); if (control->super != L2CAP_SUPER_SREJ) { chan->last_acked_seq = control->reqseq; __clear_ack_timer(chan); } BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq, control->final, control->poll, control->super); if (test_bit(FLAG_EXT_CTRL, &chan->flags)) control_field = __pack_extended_control(control); else control_field = __pack_enhanced_control(control); skb = l2cap_create_sframe_pdu(chan, control_field); if (!IS_ERR(skb)) l2cap_do_send(chan, skb); } static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) { struct l2cap_ctrl control; BT_DBG("chan %p, poll %d", chan, poll); memset(&control, 0, sizeof(control)); control.sframe = 1; control.poll = poll; if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) control.super = L2CAP_SUPER_RNR; else control.super = L2CAP_SUPER_RR; control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &control); } static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) { if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) return true; return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); } void l2cap_send_conn_req(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_conn_req req; req.scid = cpu_to_le16(chan->scid); req.psm = chan->psm; chan->ident = l2cap_get_ident(conn); set_bit(CONF_CONNECT_PEND, &chan->conf_state); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); } static void l2cap_chan_ready(struct l2cap_chan *chan) { /* The channel may have already been flagged as connected in * case of receiving data before the L2CAP info req/rsp * procedure is complete. 
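* In that case the state is already BT_CONNECTED and the early return
* below avoids running the ready callback a second time.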
*/ if (chan->state == BT_CONNECTED) return; /* This clears all conf flags, including CONF_NOT_COMPLETE */ chan->conf_state = 0; __clear_chan_timer(chan); switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: case L2CAP_MODE_EXT_FLOWCTL: if (!chan->tx_credits) chan->ops->suspend(chan); break; } chan->state = BT_CONNECTED; chan->ops->ready(chan); } static void l2cap_le_connect(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_le_conn_req req; if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags)) return; if (!chan->imtu) chan->imtu = chan->conn->mtu; l2cap_le_flowctl_init(chan, 0); memset(&req, 0, sizeof(req)); req.psm = chan->psm; req.scid = cpu_to_le16(chan->scid); req.mtu = cpu_to_le16(chan->imtu); req.mps = cpu_to_le16(chan->mps); req.credits = cpu_to_le16(chan->rx_credits); chan->ident = l2cap_get_ident(conn); l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ, sizeof(req), &req); } struct l2cap_ecred_conn_data { struct { struct l2cap_ecred_conn_req_hdr req; __le16 scid[5]; } __packed pdu; struct l2cap_chan *chan; struct pid *pid; int count; }; static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data) { struct l2cap_ecred_conn_data *conn = data; struct pid *pid; if (chan == conn->chan) return; if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) return; pid = chan->ops->get_peer_pid(chan); /* Only add deferred channels with the same PID/PSM */ if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident || chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) return; if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) return; l2cap_ecred_init(chan, 0); /* Set the same ident so we can match on the rsp */ chan->ident = conn->chan->ident; /* Include all channels deferred */ conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid); conn->count++; } static void l2cap_ecred_connect(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_ecred_conn_data data; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) return; if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) return; l2cap_ecred_init(chan, 0); memset(&data, 0, sizeof(data)); data.pdu.req.psm = chan->psm; data.pdu.req.mtu = cpu_to_le16(chan->imtu); data.pdu.req.mps = cpu_to_le16(chan->mps); data.pdu.req.credits = cpu_to_le16(chan->rx_credits); data.pdu.scid[0] = cpu_to_le16(chan->scid); chan->ident = l2cap_get_ident(conn); data.count = 1; data.chan = chan; data.pid = chan->ops->get_peer_pid(chan); __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data); l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ, sizeof(data.pdu.req) + data.count * sizeof(__le16), &data.pdu); } static void l2cap_le_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; if (!smp_conn_security(conn->hcon, chan->sec_level)) return; if (!chan->psm) { l2cap_chan_ready(chan); return; } if (chan->state == BT_CONNECT) { if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) l2cap_ecred_connect(chan); else l2cap_le_connect(chan); } } static void l2cap_start_connection(struct l2cap_chan *chan) { if (chan->conn->hcon->type == LE_LINK) { l2cap_le_start(chan); } else { l2cap_send_conn_req(chan); } } static void l2cap_request_info(struct l2cap_conn *conn) { struct l2cap_info_req req; if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) return; req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); l2cap_send_cmd(conn, 
conn->info_ident, L2CAP_INFO_REQ, sizeof(req), &req); } static bool l2cap_check_enc_key_size(struct hci_conn *hcon) { /* The minimum encryption key size needs to be enforced by the * host stack before establishing any L2CAP connections. The * specification in theory allows a minimum of 1, but to align * BR/EDR and LE transports, a minimum of 7 is chosen. * * This check might also be called for unencrypted connections * that have no key size requirements. Ensure that the link is * actually encrypted before enforcing a key size. */ int min_key_size = hcon->hdev->min_enc_key_size; /* On FIPS security level, key size must be 16 bytes */ if (hcon->sec_level == BT_SECURITY_FIPS) min_key_size = 16; return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || hcon->enc_key_size >= min_key_size); } static void l2cap_do_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; if (conn->hcon->type == LE_LINK) { l2cap_le_start(chan); return; } if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) { l2cap_request_info(conn); return; } if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) return; if (!l2cap_chan_check_security(chan, true) || !__l2cap_no_conn_pending(chan)) return; if (l2cap_check_enc_key_size(conn->hcon)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); } static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) { u32 local_feat_mask = l2cap_feat_mask; if (!disable_ertm) local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; switch (mode) { case L2CAP_MODE_ERTM: return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; case L2CAP_MODE_STREAMING: return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; default: return 0x00; } } static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err) { struct l2cap_conn *conn = chan->conn; struct l2cap_disconn_req req; if (!conn) return; if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) { __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); } req.dcid = cpu_to_le16(chan->dcid); req.scid = cpu_to_le16(chan->scid); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, sizeof(req), &req); l2cap_state_change_and_error(chan, BT_DISCONN, err); } /* ---- L2CAP connections ---- */ static void l2cap_conn_start(struct l2cap_conn *conn) { struct l2cap_chan *chan, *tmp; BT_DBG("conn %p", conn); mutex_lock(&conn->chan_lock); list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { l2cap_chan_lock(chan); if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { l2cap_chan_ready(chan); l2cap_chan_unlock(chan); continue; } if (chan->state == BT_CONNECT) { if (!l2cap_chan_check_security(chan, true) || !__l2cap_no_conn_pending(chan)) { l2cap_chan_unlock(chan); continue; } if (!l2cap_mode_supported(chan->mode, conn->feat_mask) && test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { l2cap_chan_close(chan, ECONNRESET); l2cap_chan_unlock(chan); continue; } if (l2cap_check_enc_key_size(conn->hcon)) l2cap_start_connection(chan); else l2cap_chan_close(chan, ECONNREFUSED); } else if (chan->state == BT_CONNECT2) { struct l2cap_conn_rsp rsp; char buf[128]; rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); if (l2cap_chan_check_security(chan, false)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { rsp.result = cpu_to_le16(L2CAP_CR_PEND); rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); chan->ops->defer(chan); } else { l2cap_state_change(chan, BT_CONFIG); rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); } } else 
{ rsp.result = cpu_to_le16(L2CAP_CR_PEND); rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); } l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); if (test_bit(CONF_REQ_SENT, &chan->conf_state) || rsp.result != L2CAP_CR_SUCCESS) { l2cap_chan_unlock(chan); continue; } set_bit(CONF_REQ_SENT, &chan->conf_state); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); } static void l2cap_le_conn_ready(struct l2cap_conn *conn) { struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; BT_DBG("%s conn %p", hdev->name, conn); /* For outgoing pairing which doesn't necessarily have an * associated socket (e.g. mgmt_pair_device). */ if (hcon->out) smp_conn_security(hcon, hcon->pending_sec_level); /* For LE peripheral connections, make sure the connection interval * is in the range of the minimum and maximum interval that has * been configured for this connection. If not, then trigger * the connection update procedure. */ if (hcon->role == HCI_ROLE_SLAVE && (hcon->le_conn_interval < hcon->le_conn_min_interval || hcon->le_conn_interval > hcon->le_conn_max_interval)) { struct l2cap_conn_param_update_req req; req.min = cpu_to_le16(hcon->le_conn_min_interval); req.max = cpu_to_le16(hcon->le_conn_max_interval); req.latency = cpu_to_le16(hcon->le_conn_latency); req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req); } } static void l2cap_conn_ready(struct l2cap_conn *conn) { struct l2cap_chan *chan; struct hci_conn *hcon = conn->hcon; BT_DBG("conn %p", conn); if (hcon->type == ACL_LINK) l2cap_request_info(conn); mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); if (hcon->type == LE_LINK) { l2cap_le_start(chan); } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) l2cap_chan_ready(chan); } else if (chan->state == BT_CONNECT) { l2cap_do_start(chan); } l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); if (hcon->type == LE_LINK) l2cap_le_conn_ready(conn); queue_work(hcon->hdev->workqueue, &conn->pending_rx_work); } /* Notify sockets that we cannot guaranty reliability anymore */ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) { struct l2cap_chan *chan; BT_DBG("conn %p", conn); mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) l2cap_chan_set_err(chan, err); } mutex_unlock(&conn->chan_lock); } static void l2cap_info_timeout(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, info_timer.work); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; l2cap_conn_start(conn); } /* * l2cap_user * External modules can register l2cap_user objects on l2cap_conn. The ->probe * callback is called during registration. The ->remove callback is called * during unregistration. * An l2cap_user object can either be explicitly unregistered or when the * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon, * l2cap->hchan, .. are valid as long as the remove callback hasn't been called. * External modules must own a reference to the l2cap_conn object if they intend * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at * any time if they don't. 
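 *
 * A hypothetical user module would therefore typically initialise its
 * l2cap_user (INIT_LIST_HEAD() on ->list plus valid ->probe and ->remove
 * callbacks), hold its own reference on the connection via l2cap_conn_get()
 * for as long as it may still call l2cap_unregister_user(), and release it
 * with l2cap_conn_put() afterwards. Registration fails if ->probe returns an
 * error or if the connection has already been torn down.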
*/ int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user) { struct hci_dev *hdev = conn->hcon->hdev; int ret; /* We need to check whether l2cap_conn is registered. If it is not, we * must not register the l2cap_user. l2cap_conn_del() unregisters * l2cap_conn objects, but doesn't provide its own locking. Instead, it * relies on the parent hci_conn object to be locked. This itself relies * on the hci_dev object to be locked. So we must lock the hci device * here, too. */ hci_dev_lock(hdev); if (!list_empty(&user->list)) { ret = -EINVAL; goto out_unlock; } /* conn->hchan is NULL after l2cap_conn_del() was called */ if (!conn->hchan) { ret = -ENODEV; goto out_unlock; } ret = user->probe(conn, user); if (ret) goto out_unlock; list_add(&user->list, &conn->users); ret = 0; out_unlock: hci_dev_unlock(hdev); return ret; } EXPORT_SYMBOL(l2cap_register_user); void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user) { struct hci_dev *hdev = conn->hcon->hdev; hci_dev_lock(hdev); if (list_empty(&user->list)) goto out_unlock; list_del_init(&user->list); user->remove(conn, user); out_unlock: hci_dev_unlock(hdev); } EXPORT_SYMBOL(l2cap_unregister_user); static void l2cap_unregister_all_users(struct l2cap_conn *conn) { struct l2cap_user *user; while (!list_empty(&conn->users)) { user = list_first_entry(&conn->users, struct l2cap_user, list); list_del_init(&user->list); user->remove(conn, user); } } static void l2cap_conn_del(struct hci_conn *hcon, int err) { struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_chan *chan, *l; if (!conn) return; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); kfree_skb(conn->rx_skb); skb_queue_purge(&conn->pending_rx); /* We cannot call flush_work(&conn->pending_rx_work) here since we * might block if we are running on a worker from the same workqueue * pending_rx_work is waiting on. */ if (work_pending(&conn->pending_rx_work)) cancel_work_sync(&conn->pending_rx_work); cancel_delayed_work_sync(&conn->id_addr_timer); l2cap_unregister_all_users(conn); /* Force the connection to be immediately dropped */ hcon->disc_timeout = 0; mutex_lock(&conn->chan_lock); /* Kill channels */ list_for_each_entry_safe(chan, l, &conn->chan_l, list) { l2cap_chan_hold(chan); l2cap_chan_lock(chan); l2cap_chan_del(chan, err); chan->ops->close(chan); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } mutex_unlock(&conn->chan_lock); hci_chan_del(conn->hchan); if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) cancel_delayed_work_sync(&conn->info_timer); hcon->l2cap_data = NULL; conn->hchan = NULL; l2cap_conn_put(conn); } static void l2cap_conn_free(struct kref *ref) { struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref); hci_conn_put(conn->hcon); kfree(conn); } struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn) { kref_get(&conn->ref); return conn; } EXPORT_SYMBOL(l2cap_conn_get); void l2cap_conn_put(struct l2cap_conn *conn) { kref_put(&conn->ref, l2cap_conn_free); } EXPORT_SYMBOL(l2cap_conn_put); /* ---- Socket interface ---- */ /* Find socket with psm and source / destination bdaddr. * Returns closest match.
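 *
 * "Closest" here means: a channel whose source and destination addresses both
 * match exactly is returned at once; failing that, the last channel that
 * matches on one address while the other (or both) is the BDADDR_ANY wildcard
 * is remembered and returned instead, in either case with a reference taken
 * via l2cap_chan_hold_unless_zero().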
*/ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src, bdaddr_t *dst, u8 link_type) { struct l2cap_chan *c, *tmp, *c1 = NULL; read_lock(&chan_list_lock); list_for_each_entry_safe(c, tmp, &chan_list, global_l) { if (state && c->state != state) continue; if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR) continue; if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) continue; if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) { int src_match, dst_match; int src_any, dst_any; /* Exact match. */ src_match = !bacmp(&c->src, src); dst_match = !bacmp(&c->dst, dst); if (src_match && dst_match) { if (!l2cap_chan_hold_unless_zero(c)) continue; read_unlock(&chan_list_lock); return c; } /* Closest match */ src_any = !bacmp(&c->src, BDADDR_ANY); dst_any = !bacmp(&c->dst, BDADDR_ANY); if ((src_match && dst_any) || (src_any && dst_match) || (src_any && dst_any)) c1 = c; } } if (c1) c1 = l2cap_chan_hold_unless_zero(c1); read_unlock(&chan_list_lock); return c1; } static void l2cap_monitor_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, monitor_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); if (!chan->conn) { l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; } l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } static void l2cap_retrans_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, retrans_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); if (!chan->conn) { l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; } l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } static void l2cap_streaming_send(struct l2cap_chan *chan, struct sk_buff_head *skbs) { struct sk_buff *skb; struct l2cap_ctrl *control; BT_DBG("chan %p, skbs %p", chan, skbs); skb_queue_splice_tail_init(skbs, &chan->tx_q); while (!skb_queue_empty(&chan->tx_q)) { skb = skb_dequeue(&chan->tx_q); bt_cb(skb)->l2cap.retries = 1; control = &bt_cb(skb)->l2cap; control->reqseq = 0; control->txseq = chan->next_tx_seq; __pack_control(chan, control, skb); if (chan->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *) skb->data, skb->len); put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); } l2cap_do_send(chan, skb); BT_DBG("Sent txseq %u", control->txseq); chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); chan->frames_sent++; } } static int l2cap_ertm_send(struct l2cap_chan *chan) { struct sk_buff *skb, *tx_skb; struct l2cap_ctrl *control; int sent = 0; BT_DBG("chan %p", chan); if (chan->state != BT_CONNECTED) return -ENOTCONN; if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) return 0; while (chan->tx_send_head && chan->unacked_frames < chan->remote_tx_win && chan->tx_state == L2CAP_TX_STATE_XMIT) { skb = chan->tx_send_head; bt_cb(skb)->l2cap.retries = 1; control = &bt_cb(skb)->l2cap; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) control->final = 1; control->reqseq = chan->buffer_seq; chan->last_acked_seq = chan->buffer_seq; control->txseq = chan->next_tx_seq; __pack_control(chan, control, skb); if (chan->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *) skb->data, skb->len); put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); } /* Clone after data has been modified. Data is assumed to be read-only (for locking purposes) on cloned sk_buffs. 
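 *
 * The original skb stays on chan->tx_q so that it can be retransmitted later;
 * only the clone is handed down to the controller. If a retransmission later
 * needs to rewrite the control field of an skb that is still cloned,
 * l2cap_ertm_resend() falls back to skb_copy() to obtain a writeable buffer.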
*/ tx_skb = skb_clone(skb, GFP_KERNEL); if (!tx_skb) break; __set_retrans_timer(chan); chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); chan->unacked_frames++; chan->frames_sent++; sent++; if (skb_queue_is_last(&chan->tx_q, skb)) chan->tx_send_head = NULL; else chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); l2cap_do_send(chan, tx_skb); BT_DBG("Sent txseq %u", control->txseq); } BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent, chan->unacked_frames, skb_queue_len(&chan->tx_q)); return sent; } static void l2cap_ertm_resend(struct l2cap_chan *chan) { struct l2cap_ctrl control; struct sk_buff *skb; struct sk_buff *tx_skb; u16 seq; BT_DBG("chan %p", chan); if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) return; while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { seq = l2cap_seq_list_pop(&chan->retrans_list); skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); if (!skb) { BT_DBG("Error: Can't retransmit seq %d, frame missing", seq); continue; } bt_cb(skb)->l2cap.retries++; control = bt_cb(skb)->l2cap; if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries > chan->max_tx) { BT_DBG("Retry limit exceeded (%d)", chan->max_tx); l2cap_send_disconn_req(chan, ECONNRESET); l2cap_seq_list_clear(&chan->retrans_list); break; } control.reqseq = chan->buffer_seq; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) control.final = 1; else control.final = 0; if (skb_cloned(skb)) { /* Cloned sk_buffs are read-only, so we need a * writeable copy */ tx_skb = skb_copy(skb, GFP_KERNEL); } else { tx_skb = skb_clone(skb, GFP_KERNEL); } if (!tx_skb) { l2cap_seq_list_clear(&chan->retrans_list); break; } /* Update skb contents */ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { put_unaligned_le32(__pack_extended_control(&control), tx_skb->data + L2CAP_HDR_SIZE); } else { put_unaligned_le16(__pack_enhanced_control(&control), tx_skb->data + L2CAP_HDR_SIZE); } /* Update FCS */ if (chan->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len - L2CAP_FCS_SIZE); put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) - L2CAP_FCS_SIZE); } l2cap_do_send(chan, tx_skb); BT_DBG("Resent txseq %d", control.txseq); chan->last_acked_seq = chan->buffer_seq; } } static void l2cap_retransmit(struct l2cap_chan *chan, struct l2cap_ctrl *control) { BT_DBG("chan %p, control %p", chan, control); l2cap_seq_list_append(&chan->retrans_list, control->reqseq); l2cap_ertm_resend(chan); } static void l2cap_retransmit_all(struct l2cap_chan *chan, struct l2cap_ctrl *control) { struct sk_buff *skb; BT_DBG("chan %p, control %p", chan, control); if (control->poll) set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_seq_list_clear(&chan->retrans_list); if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) return; if (chan->unacked_frames) { skb_queue_walk(&chan->tx_q, skb) { if (bt_cb(skb)->l2cap.txseq == control->reqseq || skb == chan->tx_send_head) break; } skb_queue_walk_from(&chan->tx_q, skb) { if (skb == chan->tx_send_head) break; l2cap_seq_list_append(&chan->retrans_list, bt_cb(skb)->l2cap.txseq); } l2cap_ertm_resend(chan); } } static void l2cap_send_ack(struct l2cap_chan *chan) { struct l2cap_ctrl control; u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq, chan->last_acked_seq); int threshold; BT_DBG("chan %p last_acked_seq %d buffer_seq %d", chan, chan->last_acked_seq, chan->buffer_seq); memset(&control, 0, sizeof(control)); control.sframe = 1; if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && chan->rx_state == L2CAP_RX_STATE_RECV) { __clear_ack_timer(chan); control.super = L2CAP_SUPER_RNR; control.reqseq 
= chan->buffer_seq; l2cap_send_sframe(chan, &control); } else { if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) { l2cap_ertm_send(chan); /* If any i-frames were sent, they included an ack */ if (chan->buffer_seq == chan->last_acked_seq) frames_to_ack = 0; } /* Ack now if the window is 3/4ths full. * Calculate without mul or div */ threshold = chan->ack_win; threshold += threshold << 1; threshold >>= 2; BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack, threshold); if (frames_to_ack >= threshold) { __clear_ack_timer(chan); control.super = L2CAP_SUPER_RR; control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &control); frames_to_ack = 0; } if (frames_to_ack) __set_ack_timer(chan); } } static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb) { struct l2cap_conn *conn = chan->conn; struct sk_buff **frag; int sent = 0; if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter)) return -EFAULT; sent += count; len -= count; /* Continuation fragments (no L2CAP header) */ frag = &skb_shinfo(skb)->frag_list; while (len) { struct sk_buff *tmp; count = min_t(unsigned int, conn->mtu, len); tmp = chan->ops->alloc_skb(chan, 0, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(tmp)) return PTR_ERR(tmp); *frag = tmp; if (!copy_from_iter_full(skb_put(*frag, count), count, &msg->msg_iter)) return -EFAULT; sent += count; len -= count; skb->len += (*frag)->len; skb->data_len += (*frag)->len; frag = &(*frag)->next; } return sent; } static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; struct l2cap_hdr *lh; BT_DBG("chan %p psm 0x%2.2x len %zu", chan, __le16_to_cpu(chan->psm), len); count = min_t(unsigned int, (conn->mtu - hlen), len); skb = chan->ops->alloc_skb(chan, hlen, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(skb)) return skb; /* Create L2CAP header */ lh = skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE); put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE)); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } return skb; } static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; int err, count; struct l2cap_hdr *lh; BT_DBG("chan %p len %zu", chan, len); count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(skb)) return skb; /* Create L2CAP header */ lh = skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } return skb; } static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 sdulen) { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; int err, count, hlen; struct l2cap_hdr *lh; BT_DBG("chan %p len %zu", chan, len); if (!conn) return ERR_PTR(-ENOTCONN); hlen = __ertm_hdr_size(chan); if (sdulen) hlen += L2CAP_SDULEN_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) hlen += L2CAP_FCS_SIZE; count = min_t(unsigned int, (conn->mtu - hlen), len); skb = chan->ops->alloc_skb(chan, 
hlen, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(skb)) return skb; /* Create L2CAP header */ lh = skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); /* Control header is populated later */ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); else put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); if (sdulen) put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } bt_cb(skb)->l2cap.fcs = chan->fcs; bt_cb(skb)->l2cap.retries = 0; return skb; } static int l2cap_segment_sdu(struct l2cap_chan *chan, struct sk_buff_head *seg_queue, struct msghdr *msg, size_t len) { struct sk_buff *skb; u16 sdu_len; size_t pdu_len; u8 sar; BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); /* It is critical that ERTM PDUs fit in a single HCI fragment, * so fragmented skbs are not used. The HCI layer's handling * of fragmented skbs is not compatible with ERTM's queueing. */ /* PDU size is derived from the HCI MTU */ pdu_len = chan->conn->mtu; /* Constrain PDU size for BR/EDR connections */ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); /* Adjust for largest possible L2CAP overhead. */ if (chan->fcs) pdu_len -= L2CAP_FCS_SIZE; pdu_len -= __ertm_hdr_size(chan); /* Remote device may have requested smaller PDUs */ pdu_len = min_t(size_t, pdu_len, chan->remote_mps); if (len <= pdu_len) { sar = L2CAP_SAR_UNSEGMENTED; sdu_len = 0; pdu_len = len; } else { sar = L2CAP_SAR_START; sdu_len = len; } while (len > 0) { skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len); if (IS_ERR(skb)) { __skb_queue_purge(seg_queue); return PTR_ERR(skb); } bt_cb(skb)->l2cap.sar = sar; __skb_queue_tail(seg_queue, skb); len -= pdu_len; if (sdu_len) sdu_len = 0; if (len <= pdu_len) { sar = L2CAP_SAR_END; pdu_len = len; } else { sar = L2CAP_SAR_CONTINUE; } } return 0; } static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 sdulen) { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; int err, count, hlen; struct l2cap_hdr *lh; BT_DBG("chan %p len %zu", chan, len); if (!conn) return ERR_PTR(-ENOTCONN); hlen = L2CAP_HDR_SIZE; if (sdulen) hlen += L2CAP_SDULEN_SIZE; count = min_t(unsigned int, (conn->mtu - hlen), len); skb = chan->ops->alloc_skb(chan, hlen, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(skb)) return skb; /* Create L2CAP header */ lh = skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); if (sdulen) put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } return skb; } static int l2cap_segment_le_sdu(struct l2cap_chan *chan, struct sk_buff_head *seg_queue, struct msghdr *msg, size_t len) { struct sk_buff *skb; size_t pdu_len; u16 sdu_len; BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); sdu_len = len; pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE; while (len > 0) { if (len <= pdu_len) pdu_len = len; skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len); if (IS_ERR(skb)) { __skb_queue_purge(seg_queue); return PTR_ERR(skb); } __skb_queue_tail(seg_queue, skb); len -= pdu_len; if (sdu_len) { sdu_len = 0; pdu_len += L2CAP_SDULEN_SIZE; } } return 0; } static void l2cap_le_flowctl_send(struct l2cap_chan 
*chan) { int sent = 0; BT_DBG("chan %p", chan); while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) { l2cap_do_send(chan, skb_dequeue(&chan->tx_q)); chan->tx_credits--; sent++; } BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits, skb_queue_len(&chan->tx_q)); } int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct sk_buff *skb; int err; struct sk_buff_head seg_queue; if (!chan->conn) return -ENOTCONN; /* Connectionless channel */ if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { skb = l2cap_create_connless_pdu(chan, msg, len); if (IS_ERR(skb)) return PTR_ERR(skb); l2cap_do_send(chan, skb); return len; } switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: case L2CAP_MODE_EXT_FLOWCTL: /* Check outgoing MTU */ if (len > chan->omtu) return -EMSGSIZE; __skb_queue_head_init(&seg_queue); err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len); if (chan->state != BT_CONNECTED) { __skb_queue_purge(&seg_queue); err = -ENOTCONN; } if (err) return err; skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); l2cap_le_flowctl_send(chan); if (!chan->tx_credits) chan->ops->suspend(chan); err = len; break; case L2CAP_MODE_BASIC: /* Check outgoing MTU */ if (len > chan->omtu) return -EMSGSIZE; /* Create a basic PDU */ skb = l2cap_create_basic_pdu(chan, msg, len); if (IS_ERR(skb)) return PTR_ERR(skb); l2cap_do_send(chan, skb); err = len; break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: /* Check outgoing MTU */ if (len > chan->omtu) { err = -EMSGSIZE; break; } __skb_queue_head_init(&seg_queue); /* Do segmentation before calling in to the state machine, * since it's possible to block while waiting for memory * allocation. */ err = l2cap_segment_sdu(chan, &seg_queue, msg, len); if (err) break; if (chan->mode == L2CAP_MODE_ERTM) l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); else l2cap_streaming_send(chan, &seg_queue); err = len; /* If the skbs were not queued for sending, they'll still be in * seg_queue and need to be purged. */ __skb_queue_purge(&seg_queue); break; default: BT_DBG("bad state %1.1x", chan->mode); err = -EBADFD; } return err; } EXPORT_SYMBOL_GPL(l2cap_chan_send); static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) { struct l2cap_ctrl control; u16 seq; BT_DBG("chan %p, txseq %u", chan, txseq); memset(&control, 0, sizeof(control)); control.sframe = 1; control.super = L2CAP_SUPER_SREJ; for (seq = chan->expected_tx_seq; seq != txseq; seq = __next_seq(chan, seq)) { if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) { control.reqseq = seq; l2cap_send_sframe(chan, &control); l2cap_seq_list_append(&chan->srej_list, seq); } } chan->expected_tx_seq = __next_seq(chan, txseq); } static void l2cap_send_srej_tail(struct l2cap_chan *chan) { struct l2cap_ctrl control; BT_DBG("chan %p", chan); if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) return; memset(&control, 0, sizeof(control)); control.sframe = 1; control.super = L2CAP_SUPER_SREJ; control.reqseq = chan->srej_list.tail; l2cap_send_sframe(chan, &control); } static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq) { struct l2cap_ctrl control; u16 initial_head; u16 seq; BT_DBG("chan %p, txseq %u", chan, txseq); memset(&control, 0, sizeof(control)); control.sframe = 1; control.super = L2CAP_SUPER_SREJ; /* Capture initial list head to allow only one pass through the list. 
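 *
 * Entries that do not match txseq are popped and immediately re-appended
 * below, so remembering the initial head is what guarantees the loop
 * terminates after visiting each outstanding sequence number once.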
*/ initial_head = chan->srej_list.head; do { seq = l2cap_seq_list_pop(&chan->srej_list); if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) break; control.reqseq = seq; l2cap_send_sframe(chan, &control); l2cap_seq_list_append(&chan->srej_list, seq); } while (chan->srej_list.head != initial_head); } static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) { struct sk_buff *acked_skb; u16 ackseq; BT_DBG("chan %p, reqseq %u", chan, reqseq); if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) return; BT_DBG("expected_ack_seq %u, unacked_frames %u", chan->expected_ack_seq, chan->unacked_frames); for (ackseq = chan->expected_ack_seq; ackseq != reqseq; ackseq = __next_seq(chan, ackseq)) { acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq); if (acked_skb) { skb_unlink(acked_skb, &chan->tx_q); kfree_skb(acked_skb); chan->unacked_frames--; } } chan->expected_ack_seq = reqseq; if (chan->unacked_frames == 0) __clear_retrans_timer(chan); BT_DBG("unacked_frames %u", chan->unacked_frames); } static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) { BT_DBG("chan %p", chan); chan->expected_tx_seq = chan->buffer_seq; l2cap_seq_list_clear(&chan->srej_list); skb_queue_purge(&chan->srej_q); chan->rx_state = L2CAP_RX_STATE_RECV; } static void l2cap_tx_state_xmit(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event) { BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, event); switch (event) { case L2CAP_EV_DATA_REQUEST: if (chan->tx_send_head == NULL) chan->tx_send_head = skb_peek(skbs); skb_queue_splice_tail_init(skbs, &chan->tx_q); l2cap_ertm_send(chan); break; case L2CAP_EV_LOCAL_BUSY_DETECTED: BT_DBG("Enter LOCAL_BUSY"); set_bit(CONN_LOCAL_BUSY, &chan->conn_state); if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { /* The SREJ_SENT state must be aborted if we are to * enter the LOCAL_BUSY state. */ l2cap_abort_rx_srej_sent(chan); } l2cap_send_ack(chan); break; case L2CAP_EV_LOCAL_BUSY_CLEAR: BT_DBG("Exit LOCAL_BUSY"); clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { struct l2cap_ctrl local_control; memset(&local_control, 0, sizeof(local_control)); local_control.sframe = 1; local_control.super = L2CAP_SUPER_RR; local_control.poll = 1; local_control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &local_control); chan->retry_count = 1; __set_monitor_timer(chan); chan->tx_state = L2CAP_TX_STATE_WAIT_F; } break; case L2CAP_EV_RECV_REQSEQ_AND_FBIT: l2cap_process_reqseq(chan, control->reqseq); break; case L2CAP_EV_EXPLICIT_POLL: l2cap_send_rr_or_rnr(chan, 1); chan->retry_count = 1; __set_monitor_timer(chan); __clear_ack_timer(chan); chan->tx_state = L2CAP_TX_STATE_WAIT_F; break; case L2CAP_EV_RETRANS_TO: l2cap_send_rr_or_rnr(chan, 1); chan->retry_count = 1; __set_monitor_timer(chan); chan->tx_state = L2CAP_TX_STATE_WAIT_F; break; case L2CAP_EV_RECV_FBIT: /* Nothing to process */ break; default: break; } } static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event) { BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, event); switch (event) { case L2CAP_EV_DATA_REQUEST: if (chan->tx_send_head == NULL) chan->tx_send_head = skb_peek(skbs); /* Queue data, but don't send. 
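 *
 * In WAIT_F the local side has sent a poll (P=1) and is waiting for a frame
 * with the Final bit set; new SDUs are only appended to tx_q here, and they
 * are transmitted once the Final bit moves the channel back to
 * L2CAP_TX_STATE_XMIT and the transmit path runs again.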
*/ skb_queue_splice_tail_init(skbs, &chan->tx_q); break; case L2CAP_EV_LOCAL_BUSY_DETECTED: BT_DBG("Enter LOCAL_BUSY"); set_bit(CONN_LOCAL_BUSY, &chan->conn_state); if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { /* The SREJ_SENT state must be aborted if we are to * enter the LOCAL_BUSY state. */ l2cap_abort_rx_srej_sent(chan); } l2cap_send_ack(chan); break; case L2CAP_EV_LOCAL_BUSY_CLEAR: BT_DBG("Exit LOCAL_BUSY"); clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { struct l2cap_ctrl local_control; memset(&local_control, 0, sizeof(local_control)); local_control.sframe = 1; local_control.super = L2CAP_SUPER_RR; local_control.poll = 1; local_control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &local_control); chan->retry_count = 1; __set_monitor_timer(chan); chan->tx_state = L2CAP_TX_STATE_WAIT_F; } break; case L2CAP_EV_RECV_REQSEQ_AND_FBIT: l2cap_process_reqseq(chan, control->reqseq); fallthrough; case L2CAP_EV_RECV_FBIT: if (control && control->final) { __clear_monitor_timer(chan); if (chan->unacked_frames > 0) __set_retrans_timer(chan); chan->retry_count = 0; chan->tx_state = L2CAP_TX_STATE_XMIT; BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state); } break; case L2CAP_EV_EXPLICIT_POLL: /* Ignore */ break; case L2CAP_EV_MONITOR_TO: if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) { l2cap_send_rr_or_rnr(chan, 1); __set_monitor_timer(chan); chan->retry_count++; } else { l2cap_send_disconn_req(chan, ECONNABORTED); } break; default: break; } } static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event) { BT_DBG("chan %p, control %p, skbs %p, event %d, state %d", chan, control, skbs, event, chan->tx_state); switch (chan->tx_state) { case L2CAP_TX_STATE_XMIT: l2cap_tx_state_xmit(chan, control, skbs, event); break; case L2CAP_TX_STATE_WAIT_F: l2cap_tx_state_wait_f(chan, control, skbs, event); break; default: /* Ignore event */ break; } } static void l2cap_pass_to_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control) { BT_DBG("chan %p, control %p", chan, control); l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT); } static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan, struct l2cap_ctrl *control) { BT_DBG("chan %p, control %p", chan, control); l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT); } /* Copy frame to all raw sockets on that connection */ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) { struct sk_buff *nskb; struct l2cap_chan *chan; BT_DBG("conn %p", conn); mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { if (chan->chan_type != L2CAP_CHAN_RAW) continue; /* Don't send frame to the channel it came from */ if (bt_cb(skb)->l2cap.chan == chan) continue; nskb = skb_clone(skb, GFP_KERNEL); if (!nskb) continue; if (chan->ops->recv(chan, nskb)) kfree_skb(nskb); } mutex_unlock(&conn->chan_lock); } /* ---- L2CAP signalling commands ---- */ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data) { struct sk_buff *skb, **frag; struct l2cap_cmd_hdr *cmd; struct l2cap_hdr *lh; int len, count; BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u", conn, code, ident, dlen); if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE) return NULL; len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; count = min_t(unsigned int, conn->mtu, len); skb = bt_skb_alloc(count, GFP_KERNEL); if (!skb) return NULL; lh = skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE 
+ dlen); if (conn->hcon->type == LE_LINK) lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); else lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE); cmd->code = code; cmd->ident = ident; cmd->len = cpu_to_le16(dlen); if (dlen) { count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE; skb_put_data(skb, data, count); data += count; } len -= skb->len; /* Continuation fragments (no L2CAP header) */ frag = &skb_shinfo(skb)->frag_list; while (len) { count = min_t(unsigned int, conn->mtu, len); *frag = bt_skb_alloc(count, GFP_KERNEL); if (!*frag) goto fail; skb_put_data(*frag, data, count); len -= count; data += count; frag = &(*frag)->next; } return skb; fail: kfree_skb(skb); return NULL; } static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val) { struct l2cap_conf_opt *opt = *ptr; int len; len = L2CAP_CONF_OPT_SIZE + opt->len; *ptr += len; *type = opt->type; *olen = opt->len; switch (opt->len) { case 1: *val = *((u8 *) opt->val); break; case 2: *val = get_unaligned_le16(opt->val); break; case 4: *val = get_unaligned_le32(opt->val); break; default: *val = (unsigned long) opt->val; break; } BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val); return len; } static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size) { struct l2cap_conf_opt *opt = *ptr; BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val); if (size < L2CAP_CONF_OPT_SIZE + len) return; opt->type = type; opt->len = len; switch (len) { case 1: *((u8 *) opt->val) = val; break; case 2: put_unaligned_le16(val, opt->val); break; case 4: put_unaligned_le32(val, opt->val); break; default: memcpy(opt->val, (void *) val, len); break; } *ptr += L2CAP_CONF_OPT_SIZE + len; } static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size) { struct l2cap_conf_efs efs; switch (chan->mode) { case L2CAP_MODE_ERTM: efs.id = chan->local_id; efs.stype = chan->local_stype; efs.msdu = cpu_to_le16(chan->local_msdu); efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO); break; case L2CAP_MODE_STREAMING: efs.id = 1; efs.stype = L2CAP_SERV_BESTEFFORT; efs.msdu = cpu_to_le16(chan->local_msdu); efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); efs.acc_lat = 0; efs.flush_to = 0; break; default: return; } l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), (unsigned long) &efs, size); } static void l2cap_ack_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, ack_timer.work); u16 frames_to_ack; BT_DBG("chan %p", chan); l2cap_chan_lock(chan); frames_to_ack = __seq_offset(chan, chan->buffer_seq, chan->last_acked_seq); if (frames_to_ack) l2cap_send_rr_or_rnr(chan, 0); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } int l2cap_ertm_init(struct l2cap_chan *chan) { int err; chan->next_tx_seq = 0; chan->expected_tx_seq = 0; chan->expected_ack_seq = 0; chan->unacked_frames = 0; chan->buffer_seq = 0; chan->frames_sent = 0; chan->last_acked_seq = 0; chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; skb_queue_head_init(&chan->tx_q); if (chan->mode != L2CAP_MODE_ERTM) return 0; chan->rx_state = L2CAP_RX_STATE_RECV; chan->tx_state = L2CAP_TX_STATE_XMIT; skb_queue_head_init(&chan->srej_q); err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); if (err < 0) return err; err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); if (err < 0) l2cap_seq_list_free(&chan->srej_list); 
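/* At this point err is 0 on success; on failure the sequence lists have
 * already been released above, so no ERTM state is left behind.
 */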
return err; } static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) { switch (mode) { case L2CAP_MODE_STREAMING: case L2CAP_MODE_ERTM: if (l2cap_mode_supported(mode, remote_feat_mask)) return mode; fallthrough; default: return L2CAP_MODE_BASIC; } } static inline bool __l2cap_ews_supported(struct l2cap_conn *conn) { return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW); } static inline bool __l2cap_efs_supported(struct l2cap_conn *conn) { return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW); } static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan, struct l2cap_conf_rfc *rfc) { rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); } static inline void l2cap_txwin_setup(struct l2cap_chan *chan) { if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && __l2cap_ews_supported(chan->conn)) { /* use extended control field */ set_bit(FLAG_EXT_CTRL, &chan->flags); chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; } else { chan->tx_win = min_t(u16, chan->tx_win, L2CAP_DEFAULT_TX_WINDOW); chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; } chan->ack_win = chan->tx_win; } static void l2cap_mtu_auto(struct l2cap_chan *chan) { struct hci_conn *conn = chan->conn->hcon; chan->imtu = L2CAP_DEFAULT_MIN_MTU; /* The 2-DH1 packet has between 2 and 56 information bytes * (including the 2-byte payload header) */ if (!(conn->pkt_type & HCI_2DH1)) chan->imtu = 54; /* The 3-DH1 packet has between 2 and 85 information bytes * (including the 2-byte payload header) */ if (!(conn->pkt_type & HCI_3DH1)) chan->imtu = 83; /* The 2-DH3 packet has between 2 and 369 information bytes * (including the 2-byte payload header) */ if (!(conn->pkt_type & HCI_2DH3)) chan->imtu = 367; /* The 3-DH3 packet has between 2 and 554 information bytes * (including the 2-byte payload header) */ if (!(conn->pkt_type & HCI_3DH3)) chan->imtu = 552; /* The 2-DH5 packet has between 2 and 681 information bytes * (including the 2-byte payload header) */ if (!(conn->pkt_type & HCI_2DH5)) chan->imtu = 679; /* The 3-DH5 packet has between 2 and 1023 information bytes * (including the 2-byte payload header) */ if (!(conn->pkt_type & HCI_3DH5)) chan->imtu = 1021; } static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) { struct l2cap_conf_req *req = data; struct l2cap_conf_rfc rfc = { .mode = chan->mode }; void *ptr = req->data; void *endptr = data + data_size; u16 size; BT_DBG("chan %p", chan); if (chan->num_conf_req || chan->num_conf_rsp) goto done; switch (chan->mode) { case L2CAP_MODE_STREAMING: case L2CAP_MODE_ERTM: if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) break; if (__l2cap_efs_supported(chan->conn)) set_bit(FLAG_EFS_ENABLE, &chan->flags); fallthrough; default: chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); break; } done: if (chan->imtu != L2CAP_DEFAULT_MTU) { if (!chan->imtu) l2cap_mtu_auto(chan); l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr); } switch (chan->mode) { case L2CAP_MODE_BASIC: if (disable_ertm) break; if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) break; rfc.mode = L2CAP_MODE_BASIC; rfc.txwin_size = 0; rfc.max_transmit = 0; rfc.retrans_timeout = 0; rfc.monitor_timeout = 0; rfc.max_pdu_size = 0; l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); break; case L2CAP_MODE_ERTM: rfc.mode = L2CAP_MODE_ERTM; rfc.max_transmit = chan->max_tx; __l2cap_set_ertm_timeouts(chan, &rfc); size = min_t(u16, 
L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); rfc.max_pdu_size = cpu_to_le16(size); l2cap_txwin_setup(chan); rfc.txwin_size = min_t(u16, chan->tx_win, L2CAP_DEFAULT_TX_WINDOW); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) l2cap_add_opt_efs(&ptr, chan, endptr - ptr); if (test_bit(FLAG_EXT_CTRL, &chan->flags)) l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, chan->tx_win, endptr - ptr); if (chan->conn->feat_mask & L2CAP_FEAT_FCS) if (chan->fcs == L2CAP_FCS_NONE || test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { chan->fcs = L2CAP_FCS_NONE; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs, endptr - ptr); } break; case L2CAP_MODE_STREAMING: l2cap_txwin_setup(chan); rfc.mode = L2CAP_MODE_STREAMING; rfc.txwin_size = 0; rfc.max_transmit = 0; rfc.retrans_timeout = 0; rfc.monitor_timeout = 0; size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); rfc.max_pdu_size = cpu_to_le16(size); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) l2cap_add_opt_efs(&ptr, chan, endptr - ptr); if (chan->conn->feat_mask & L2CAP_FEAT_FCS) if (chan->fcs == L2CAP_FCS_NONE || test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { chan->fcs = L2CAP_FCS_NONE; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs, endptr - ptr); } break; } req->dcid = cpu_to_le16(chan->dcid); req->flags = cpu_to_le16(0); return ptr - data; } static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) { struct l2cap_conf_rsp *rsp = data; void *ptr = rsp->data; void *endptr = data + data_size; void *req = chan->conf_req; int len = chan->conf_len; int type, hint, olen; unsigned long val; struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; struct l2cap_conf_efs efs; u8 remote_efs = 0; u16 mtu = L2CAP_DEFAULT_MTU; u16 result = L2CAP_CONF_SUCCESS; u16 size; BT_DBG("chan %p", chan); while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&req, &type, &olen, &val); if (len < 0) break; hint = type & L2CAP_CONF_HINT; type &= L2CAP_CONF_MASK; switch (type) { case L2CAP_CONF_MTU: if (olen != 2) break; mtu = val; break; case L2CAP_CONF_FLUSH_TO: if (olen != 2) break; chan->flush_to = val; break; case L2CAP_CONF_QOS: break; case L2CAP_CONF_RFC: if (olen != sizeof(rfc)) break; memcpy(&rfc, (void *) val, olen); break; case L2CAP_CONF_FCS: if (olen != 1) break; if (val == L2CAP_FCS_NONE) set_bit(CONF_RECV_NO_FCS, &chan->conf_state); break; case L2CAP_CONF_EFS: if (olen != sizeof(efs)) break; remote_efs = 1; memcpy(&efs, (void *) val, olen); break; case L2CAP_CONF_EWS: if (olen != 2) break; return -ECONNREFUSED; default: if (hint) break; result = L2CAP_CONF_UNKNOWN; l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr); break; } } if (chan->num_conf_rsp || chan->num_conf_req > 1) goto done; switch (chan->mode) { case L2CAP_MODE_STREAMING: case L2CAP_MODE_ERTM: if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); break; } if (remote_efs) { if (__l2cap_efs_supported(chan->conn)) set_bit(FLAG_EFS_ENABLE, &chan->flags); else return -ECONNREFUSED; } if (chan->mode != rfc.mode) return -ECONNREFUSED; break; } done: if (chan->mode != rfc.mode) { result = L2CAP_CONF_UNACCEPT; rfc.mode = chan->mode; if (chan->num_conf_rsp == 1) return -ECONNREFUSED; 
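/* Propose our own mode back to the remote in an RFC option. Only a single
 * renegotiation round is allowed; the num_conf_rsp check above refuses the
 * connection if the modes still disagree after that.
 */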
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); } if (result == L2CAP_CONF_SUCCESS) { /* Configure output options and let the other side know * which ones we don't like. */ if (mtu < L2CAP_DEFAULT_MIN_MTU) result = L2CAP_CONF_UNACCEPT; else { chan->omtu = mtu; set_bit(CONF_MTU_DONE, &chan->conf_state); } l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr); if (remote_efs) { if (chan->local_stype != L2CAP_SERV_NOTRAFIC && efs.stype != L2CAP_SERV_NOTRAFIC && efs.stype != chan->local_stype) { result = L2CAP_CONF_UNACCEPT; if (chan->num_conf_req >= 1) return -ECONNREFUSED; l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), (unsigned long) &efs, endptr - ptr); } else { /* Send PENDING Conf Rsp */ result = L2CAP_CONF_PENDING; set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); } } switch (rfc.mode) { case L2CAP_MODE_BASIC: chan->fcs = L2CAP_FCS_NONE; set_bit(CONF_MODE_DONE, &chan->conf_state); break; case L2CAP_MODE_ERTM: if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) chan->remote_tx_win = rfc.txwin_size; else rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; chan->remote_max_tx = rfc.max_transmit; size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), chan->conn->mtu - L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); rfc.max_pdu_size = cpu_to_le16(size); chan->remote_mps = size; __l2cap_set_ertm_timeouts(chan, &rfc); set_bit(CONF_MODE_DONE, &chan->conf_state); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); if (remote_efs && test_bit(FLAG_EFS_ENABLE, &chan->flags)) { chan->remote_id = efs.id; chan->remote_stype = efs.stype; chan->remote_msdu = le16_to_cpu(efs.msdu); chan->remote_flush_to = le32_to_cpu(efs.flush_to); chan->remote_acc_lat = le32_to_cpu(efs.acc_lat); chan->remote_sdu_itime = le32_to_cpu(efs.sdu_itime); l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), (unsigned long) &efs, endptr - ptr); } break; case L2CAP_MODE_STREAMING: size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), chan->conn->mtu - L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); rfc.max_pdu_size = cpu_to_le16(size); chan->remote_mps = size; set_bit(CONF_MODE_DONE, &chan->conf_state); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); break; default: result = L2CAP_CONF_UNACCEPT; memset(&rfc, 0, sizeof(rfc)); rfc.mode = chan->mode; } if (result == L2CAP_CONF_SUCCESS) set_bit(CONF_OUTPUT_DONE, &chan->conf_state); } rsp->scid = cpu_to_le16(chan->dcid); rsp->result = cpu_to_le16(result); rsp->flags = cpu_to_le16(0); return ptr - data; } static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, size_t size, u16 *result) { struct l2cap_conf_req *req = data; void *ptr = req->data; void *endptr = data + size; int type, olen; unsigned long val; struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; struct l2cap_conf_efs efs; BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); if (len < 0) break; switch (type) { case L2CAP_CONF_MTU: if (olen != 2) break; if (val < L2CAP_DEFAULT_MIN_MTU) { *result = L2CAP_CONF_UNACCEPT; chan->imtu = L2CAP_DEFAULT_MIN_MTU; } else chan->imtu = val; l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr); break; case L2CAP_CONF_FLUSH_TO: if (olen != 2) break; chan->flush_to = val; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, chan->flush_to, endptr - ptr); break; case L2CAP_CONF_RFC: if (olen != 
sizeof(rfc)) break; memcpy(&rfc, (void *)val, olen); if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && rfc.mode != chan->mode) return -ECONNREFUSED; chan->fcs = 0; l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); break; case L2CAP_CONF_EWS: if (olen != 2) break; chan->ack_win = min_t(u16, val, chan->ack_win); l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, chan->tx_win, endptr - ptr); break; case L2CAP_CONF_EFS: if (olen != sizeof(efs)) break; memcpy(&efs, (void *)val, olen); if (chan->local_stype != L2CAP_SERV_NOTRAFIC && efs.stype != L2CAP_SERV_NOTRAFIC && efs.stype != chan->local_stype) return -ECONNREFUSED; l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), (unsigned long) &efs, endptr - ptr); break; case L2CAP_CONF_FCS: if (olen != 1) break; if (*result == L2CAP_CONF_PENDING) if (val == L2CAP_FCS_NONE) set_bit(CONF_RECV_NO_FCS, &chan->conf_state); break; } } if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode) return -ECONNREFUSED; chan->mode = rfc.mode; if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) { switch (rfc.mode) { case L2CAP_MODE_ERTM: chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); chan->mps = le16_to_cpu(rfc.max_pdu_size); if (!test_bit(FLAG_EXT_CTRL, &chan->flags)) chan->ack_win = min_t(u16, chan->ack_win, rfc.txwin_size); if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { chan->local_msdu = le16_to_cpu(efs.msdu); chan->local_sdu_itime = le32_to_cpu(efs.sdu_itime); chan->local_acc_lat = le32_to_cpu(efs.acc_lat); chan->local_flush_to = le32_to_cpu(efs.flush_to); } break; case L2CAP_MODE_STREAMING: chan->mps = le16_to_cpu(rfc.max_pdu_size); } } req->dcid = cpu_to_le16(chan->dcid); req->flags = cpu_to_le16(0); return ptr - data; } static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags) { struct l2cap_conf_rsp *rsp = data; void *ptr = rsp->data; BT_DBG("chan %p", chan); rsp->scid = cpu_to_le16(chan->dcid); rsp->result = cpu_to_le16(result); rsp->flags = cpu_to_le16(flags); return ptr - data; } void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan) { struct l2cap_le_conn_rsp rsp; struct l2cap_conn *conn = chan->conn; BT_DBG("chan %p", chan); rsp.dcid = cpu_to_le16(chan->scid); rsp.mtu = cpu_to_le16(chan->imtu); rsp.mps = cpu_to_le16(chan->mps); rsp.credits = cpu_to_le16(chan->rx_credits); rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp); } static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data) { int *result = data; if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) return; switch (chan->state) { case BT_CONNECT2: /* If channel still pending accept add to result */ (*result)++; return; case BT_CONNECTED: return; default: /* If not connected or pending accept it has been refused */ *result = -ECONNREFUSED; return; } } struct l2cap_ecred_rsp_data { struct { struct l2cap_ecred_conn_rsp_hdr rsp; __le16 scid[L2CAP_ECRED_MAX_CID]; } __packed pdu; int count; }; static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) { struct l2cap_ecred_rsp_data *rsp = data; struct l2cap_ecred_conn_rsp *rsp_flex = container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr); if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) return; /* Reset ident so only one response is sent */ chan->ident = 0; /* Include all channels pending with the same ident */ if (!rsp->pdu.rsp.result) rsp_flex->dcid[rsp->count++] = 
cpu_to_le16(chan->scid); else l2cap_chan_del(chan, ECONNRESET); } void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_ecred_rsp_data data; u16 id = chan->ident; int result = 0; if (!id) return; BT_DBG("chan %p id %d", chan, id); memset(&data, 0, sizeof(data)); data.pdu.rsp.mtu = cpu_to_le16(chan->imtu); data.pdu.rsp.mps = cpu_to_le16(chan->mps); data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits); data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); /* Verify that all channels are ready */ __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result); if (result > 0) return; if (result < 0) data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION); /* Build response */ __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data); l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP, sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)), &data.pdu); } void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) { struct l2cap_conn_rsp rsp; struct l2cap_conn *conn = chan->conn; u8 buf[128]; u8 rsp_code; rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); rsp_code = L2CAP_CONN_RSP; BT_DBG("chan %p rsp_code %u", chan, rsp_code); l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp); if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) return; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) { int type, olen; unsigned long val; /* Use sane default values in case a misbehaving remote device * did not send an RFC or extended window size option. 
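 *
 * The defaults below mirror what this side would have asked for: the current
 * channel mode, the default retransmit and monitor timeouts, the incoming MTU
 * as the maximum PDU size and the current ack window (capped at the default
 * TX window), so a response that omits these options still yields consistent
 * ERTM/streaming parameters.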
*/ u16 txwin_ext = chan->ack_win; struct l2cap_conf_rfc rfc = { .mode = chan->mode, .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO), .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO), .max_pdu_size = cpu_to_le16(chan->imtu), .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW), }; BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len); if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING)) return; while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); if (len < 0) break; switch (type) { case L2CAP_CONF_RFC: if (olen != sizeof(rfc)) break; memcpy(&rfc, (void *)val, olen); break; case L2CAP_CONF_EWS: if (olen != 2) break; txwin_ext = val; break; } } switch (rfc.mode) { case L2CAP_MODE_ERTM: chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); chan->mps = le16_to_cpu(rfc.max_pdu_size); if (test_bit(FLAG_EXT_CTRL, &chan->flags)) chan->ack_win = min_t(u16, chan->ack_win, txwin_ext); else chan->ack_win = min_t(u16, chan->ack_win, rfc.txwin_size); break; case L2CAP_MODE_STREAMING: chan->mps = le16_to_cpu(rfc.max_pdu_size); } } static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; if (cmd_len < sizeof(*rej)) return -EPROTO; if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD) return 0; if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && cmd->ident == conn->info_ident) { cancel_delayed_work(&conn->info_timer); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; l2cap_conn_start(conn); } return 0; } static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data, u8 rsp_code) { struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; struct l2cap_conn_rsp rsp; struct l2cap_chan *chan = NULL, *pchan = NULL; int result, status = L2CAP_CS_NO_INFO; u16 dcid = 0, scid = __le16_to_cpu(req->scid); __le16 psm = req->psm; BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid); /* Check if we have socket listening on psm */ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, &conn->hcon->dst, ACL_LINK); if (!pchan) { result = L2CAP_CR_BAD_PSM; goto response; } mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); /* Check if the ACL is secure enough (if not SDP) */ if (psm != cpu_to_le16(L2CAP_PSM_SDP) && !hci_conn_check_link_mode(conn->hcon)) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; goto response; } result = L2CAP_CR_NO_MEM; /* Check for valid dynamic CID range (as per Erratum 3253) */ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) { result = L2CAP_CR_INVALID_SCID; goto response; } /* Check if we already have channel with that dcid */ if (__l2cap_get_chan_by_dcid(conn, scid)) { result = L2CAP_CR_SCID_IN_USE; goto response; } chan = pchan->ops->new_connection(pchan); if (!chan) goto response; /* For certain devices (ex: HID mouse), support for authentication, * pairing and bonding is optional. For such devices, inorder to avoid * the ACL alive for too long after L2CAP disconnection, reset the ACL * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect. 
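 *
 * (HCI_DISCONN_TIMEOUT is the default timeout after which an unused ACL link
 * is dropped; resetting it here keeps a link to a device that never completes
 * pairing from lingering after the L2CAP channel goes away.)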
*/ conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; bacpy(&chan->src, &conn->hcon->src); bacpy(&chan->dst, &conn->hcon->dst); chan->src_type = bdaddr_src_type(conn->hcon); chan->dst_type = bdaddr_dst_type(conn->hcon); chan->psm = psm; chan->dcid = scid; __l2cap_chan_add(conn, chan); dcid = chan->scid; __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); chan->ident = cmd->ident; if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { if (l2cap_chan_check_security(chan, false)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { l2cap_state_change(chan, BT_CONNECT2); result = L2CAP_CR_PEND; status = L2CAP_CS_AUTHOR_PEND; chan->ops->defer(chan); } else { l2cap_state_change(chan, BT_CONFIG); result = L2CAP_CR_SUCCESS; status = L2CAP_CS_NO_INFO; } } else { l2cap_state_change(chan, BT_CONNECT2); result = L2CAP_CR_PEND; status = L2CAP_CS_AUTHEN_PEND; } } else { l2cap_state_change(chan, BT_CONNECT2); result = L2CAP_CR_PEND; status = L2CAP_CS_NO_INFO; } response: rsp.scid = cpu_to_le16(scid); rsp.dcid = cpu_to_le16(dcid); rsp.result = cpu_to_le16(result); rsp.status = cpu_to_le16(status); l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp); if (!pchan) return; if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { struct l2cap_info_req info; info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, sizeof(info), &info); } if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) && result == L2CAP_CR_SUCCESS) { u8 buf[128]; set_bit(CONF_REQ_SENT, &chan->conf_state); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); } static int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { if (cmd_len < sizeof(struct l2cap_conn_req)) return -EPROTO; l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP); return 0; } static int l2cap_connect_create_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; u16 scid, dcid, result, status; struct l2cap_chan *chan; u8 req[128]; int err; if (cmd_len < sizeof(*rsp)) return -EPROTO; scid = __le16_to_cpu(rsp->scid); dcid = __le16_to_cpu(rsp->dcid); result = __le16_to_cpu(rsp->result); status = __le16_to_cpu(rsp->status); if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START || dcid > L2CAP_CID_DYN_END)) return -EPROTO; BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); mutex_lock(&conn->chan_lock); if (scid) { chan = __l2cap_get_chan_by_scid(conn, scid); if (!chan) { err = -EBADSLT; goto unlock; } } else { chan = __l2cap_get_chan_by_ident(conn, cmd->ident); if (!chan) { err = -EBADSLT; goto unlock; } } chan = l2cap_chan_hold_unless_zero(chan); if (!chan) { err = -EBADSLT; goto unlock; } err = 0; l2cap_chan_lock(chan); switch (result) { case L2CAP_CR_SUCCESS: if (__l2cap_get_chan_by_dcid(conn, dcid)) { err = -EBADSLT; break; } l2cap_state_change(chan, BT_CONFIG); chan->ident = 0; chan->dcid = dcid; clear_bit(CONF_CONNECT_PEND, &chan->conf_state); if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) break; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, req, sizeof(req)), req); 
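/* The CONF_REQ_SENT test-and-set above ensures a duplicated success response
 * cannot trigger a second configuration request; num_conf_req is counted so
 * that later build/parse passes treat any further exchange as renegotiation.
 */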
chan->num_conf_req++; break; case L2CAP_CR_PEND: set_bit(CONF_CONNECT_PEND, &chan->conf_state); break; default: l2cap_chan_del(chan, ECONNREFUSED); break; } l2cap_chan_unlock(chan); l2cap_chan_put(chan); unlock: mutex_unlock(&conn->chan_lock); return err; } static inline void set_default_fcs(struct l2cap_chan *chan) { /* FCS is enabled only in ERTM or streaming mode, if one or both * sides request it. */ if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) chan->fcs = L2CAP_FCS_NONE; else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) chan->fcs = L2CAP_FCS_CRC16; } static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data, u8 ident, u16 flags) { struct l2cap_conn *conn = chan->conn; BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident, flags); clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); set_bit(CONF_OUTPUT_DONE, &chan->conf_state); l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags), data); } static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident, u16 scid, u16 dcid) { struct l2cap_cmd_rej_cid rej; rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); rej.scid = __cpu_to_le16(scid); rej.dcid = __cpu_to_le16(dcid); l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); } static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; u16 dcid, flags; u8 rsp[64]; struct l2cap_chan *chan; int len, err = 0; if (cmd_len < sizeof(*req)) return -EPROTO; dcid = __le16_to_cpu(req->dcid); flags = __le16_to_cpu(req->flags); BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); chan = l2cap_get_chan_by_scid(conn, dcid); if (!chan) { cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0); return 0; } if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && chan->state != BT_CONNECTED) { cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, chan->dcid); goto unlock; } /* Reject if config buffer is too small. */ len = cmd_len - sizeof(*req); if (chan->conf_len + len > sizeof(chan->conf_req)) { l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, L2CAP_CONF_REJECT, flags), rsp); goto unlock; } /* Store config. */ memcpy(chan->conf_req + chan->conf_len, req->data, len); chan->conf_len += len; if (flags & L2CAP_CONF_FLAG_CONTINUATION) { /* Incomplete config. Send empty response. */ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, L2CAP_CONF_SUCCESS, flags), rsp); goto unlock; } /* Complete config. */ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp)); if (len < 0) { l2cap_send_disconn_req(chan, ECONNRESET); goto unlock; } chan->ident = cmd->ident; l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP) chan->num_conf_rsp++; /* Reset config buffer. 
*/ chan->conf_len = 0; if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) goto unlock; if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { set_default_fcs(chan); if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); if (err < 0) l2cap_send_disconn_req(chan, -err); else l2cap_chan_ready(chan); goto unlock; } if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { u8 buf[64]; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } /* Got Conf Rsp PENDING from remote side and assume we sent Conf Rsp PENDING in the code above */ if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { /* check compatibility */ /* Send rsp for BR/EDR channel */ l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags); } unlock: l2cap_chan_unlock(chan); l2cap_chan_put(chan); return err; } static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; u16 scid, flags, result; struct l2cap_chan *chan; int len = cmd_len - sizeof(*rsp); int err = 0; if (cmd_len < sizeof(*rsp)) return -EPROTO; scid = __le16_to_cpu(rsp->scid); flags = __le16_to_cpu(rsp->flags); result = __le16_to_cpu(rsp->result); BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags, result, len); chan = l2cap_get_chan_by_scid(conn, scid); if (!chan) return 0; switch (result) { case L2CAP_CONF_SUCCESS: l2cap_conf_rfc_get(chan, rsp->data, len); clear_bit(CONF_REM_CONF_PEND, &chan->conf_state); break; case L2CAP_CONF_PENDING: set_bit(CONF_REM_CONF_PEND, &chan->conf_state); if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { char buf[64]; len = l2cap_parse_conf_rsp(chan, rsp->data, len, buf, sizeof(buf), &result); if (len < 0) { l2cap_send_disconn_req(chan, ECONNRESET); goto done; } l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0); } goto done; case L2CAP_CONF_UNKNOWN: case L2CAP_CONF_UNACCEPT: if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { char req[64]; if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { l2cap_send_disconn_req(chan, ECONNRESET); goto done; } /* throw out any old stored conf requests */ result = L2CAP_CONF_SUCCESS; len = l2cap_parse_conf_rsp(chan, rsp->data, len, req, sizeof(req), &result); if (len < 0) { l2cap_send_disconn_req(chan, ECONNRESET); goto done; } l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, len, req); chan->num_conf_req++; if (result != L2CAP_CONF_SUCCESS) goto done; break; } fallthrough; default: l2cap_chan_set_err(chan, ECONNRESET); __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT); l2cap_send_disconn_req(chan, ECONNRESET); goto done; } if (flags & L2CAP_CONF_FLAG_CONTINUATION) goto done; set_bit(CONF_INPUT_DONE, &chan->conf_state); if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { set_default_fcs(chan); if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); if (err < 0) l2cap_send_disconn_req(chan, -err); else l2cap_chan_ready(chan); } done: l2cap_chan_unlock(chan); l2cap_chan_put(chan); return err; } static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; struct l2cap_disconn_rsp rsp; u16 dcid, scid; struct l2cap_chan *chan; if (cmd_len != sizeof(*req)) return -EPROTO; scid = __le16_to_cpu(req->scid); dcid = 
__le16_to_cpu(req->dcid); BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); chan = l2cap_get_chan_by_scid(conn, dcid); if (!chan) { cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid); return 0; } rsp.dcid = cpu_to_le16(chan->scid); rsp.scid = cpu_to_le16(chan->dcid); l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); chan->ops->set_shutdown(chan); l2cap_chan_unlock(chan); mutex_lock(&conn->chan_lock); l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNRESET); mutex_unlock(&conn->chan_lock); chan->ops->close(chan); l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; u16 dcid, scid; struct l2cap_chan *chan; if (cmd_len != sizeof(*rsp)) return -EPROTO; scid = __le16_to_cpu(rsp->scid); dcid = __le16_to_cpu(rsp->dcid); BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); chan = l2cap_get_chan_by_scid(conn, scid); if (!chan) { return 0; } if (chan->state != BT_DISCONN) { l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } l2cap_chan_unlock(chan); mutex_lock(&conn->chan_lock); l2cap_chan_lock(chan); l2cap_chan_del(chan, 0); mutex_unlock(&conn->chan_lock); chan->ops->close(chan); l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_info_req *req = (struct l2cap_info_req *) data; u16 type; if (cmd_len != sizeof(*req)) return -EPROTO; type = __le16_to_cpu(req->type); BT_DBG("type 0x%4.4x", type); if (type == L2CAP_IT_FEAT_MASK) { u8 buf[8]; u32 feat_mask = l2cap_feat_mask; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; put_unaligned_le32(feat_mask, rsp->data); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else if (type == L2CAP_IT_FIXED_CHAN) { u8 buf[12]; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); rsp->data[0] = conn->local_fixed_chan; memset(rsp->data + 1, 0, 7); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else { struct l2cap_info_rsp rsp; rsp.type = cpu_to_le16(type); rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp); } return 0; } static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; u16 type, result; if (cmd_len < sizeof(*rsp)) return -EPROTO; type = __le16_to_cpu(rsp->type); result = __le16_to_cpu(rsp->result); BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); /* L2CAP Info req/rsp are unbound to channels, add extra checks */ if (cmd->ident != conn->info_ident || conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) return 0; cancel_delayed_work(&conn->info_timer); if (result != L2CAP_IR_SUCCESS) { conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; l2cap_conn_start(conn); return 0; } switch (type) { case L2CAP_IT_FEAT_MASK: conn->feat_mask = get_unaligned_le32(rsp->data); if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { struct l2cap_info_req req; req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); conn->info_ident 
= l2cap_get_ident(conn); l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, sizeof(req), &req); } else { conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; l2cap_conn_start(conn); } break; case L2CAP_IT_FIXED_CHAN: conn->remote_fixed_chan = rsp->data[0]; conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; l2cap_conn_start(conn); break; } return 0; } static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct hci_conn *hcon = conn->hcon; struct l2cap_conn_param_update_req *req; struct l2cap_conn_param_update_rsp rsp; u16 min, max, latency, to_multiplier; int err; if (hcon->role != HCI_ROLE_MASTER) return -EINVAL; if (cmd_len != sizeof(struct l2cap_conn_param_update_req)) return -EPROTO; req = (struct l2cap_conn_param_update_req *) data; min = __le16_to_cpu(req->min); max = __le16_to_cpu(req->max); latency = __le16_to_cpu(req->latency); to_multiplier = __le16_to_cpu(req->to_multiplier); BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x", min, max, latency, to_multiplier); memset(&rsp, 0, sizeof(rsp)); err = hci_check_conn_params(min, max, latency, to_multiplier); if (err) rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); else rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, sizeof(rsp), &rsp); if (!err) { u8 store_hint; store_hint = hci_le_conn_update(hcon, min, max, latency, to_multiplier); mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type, store_hint, min, max, latency, to_multiplier); } return 0; } static int l2cap_le_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data; struct hci_conn *hcon = conn->hcon; u16 dcid, mtu, mps, credits, result; struct l2cap_chan *chan; int err, sec_level; if (cmd_len < sizeof(*rsp)) return -EPROTO; dcid = __le16_to_cpu(rsp->dcid); mtu = __le16_to_cpu(rsp->mtu); mps = __le16_to_cpu(rsp->mps); credits = __le16_to_cpu(rsp->credits); result = __le16_to_cpu(rsp->result); if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 || dcid < L2CAP_CID_DYN_START || dcid > L2CAP_CID_LE_DYN_END)) return -EPROTO; BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x", dcid, mtu, mps, credits, result); mutex_lock(&conn->chan_lock); chan = __l2cap_get_chan_by_ident(conn, cmd->ident); if (!chan) { err = -EBADSLT; goto unlock; } err = 0; l2cap_chan_lock(chan); switch (result) { case L2CAP_CR_LE_SUCCESS: if (__l2cap_get_chan_by_dcid(conn, dcid)) { err = -EBADSLT; break; } chan->ident = 0; chan->dcid = dcid; chan->omtu = mtu; chan->remote_mps = mps; chan->tx_credits = credits; l2cap_chan_ready(chan); break; case L2CAP_CR_LE_AUTHENTICATION: case L2CAP_CR_LE_ENCRYPTION: /* If we already have MITM protection we can't do * anything. 
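 * Otherwise bump the required security level one step and let SMP
 * re-secure the link; clearing FLAG_LE_CONN_REQ_SENT below ensures a
 * fresh LE connect request is sent once the security procedure
 * completes.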
*/ if (hcon->sec_level > BT_SECURITY_MEDIUM) { l2cap_chan_del(chan, ECONNREFUSED); break; } sec_level = hcon->sec_level + 1; if (chan->sec_level < sec_level) chan->sec_level = sec_level; /* We'll need to send a new Connect Request */ clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags); smp_conn_security(hcon, chan->sec_level); break; default: l2cap_chan_del(chan, ECONNREFUSED); break; } l2cap_chan_unlock(chan); unlock: mutex_unlock(&conn->chan_lock); return err; } static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { int err = 0; switch (cmd->code) { case L2CAP_COMMAND_REJ: l2cap_command_rej(conn, cmd, cmd_len, data); break; case L2CAP_CONN_REQ: err = l2cap_connect_req(conn, cmd, cmd_len, data); break; case L2CAP_CONN_RSP: l2cap_connect_create_rsp(conn, cmd, cmd_len, data); break; case L2CAP_CONF_REQ: err = l2cap_config_req(conn, cmd, cmd_len, data); break; case L2CAP_CONF_RSP: l2cap_config_rsp(conn, cmd, cmd_len, data); break; case L2CAP_DISCONN_REQ: err = l2cap_disconnect_req(conn, cmd, cmd_len, data); break; case L2CAP_DISCONN_RSP: l2cap_disconnect_rsp(conn, cmd, cmd_len, data); break; case L2CAP_ECHO_REQ: l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data); break; case L2CAP_ECHO_RSP: break; case L2CAP_INFO_REQ: err = l2cap_information_req(conn, cmd, cmd_len, data); break; case L2CAP_INFO_RSP: l2cap_information_rsp(conn, cmd, cmd_len, data); break; default: BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); err = -EINVAL; break; } return err; } static int l2cap_le_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data; struct l2cap_le_conn_rsp rsp; struct l2cap_chan *chan, *pchan; u16 dcid, scid, credits, mtu, mps; __le16 psm; u8 result; if (cmd_len != sizeof(*req)) return -EPROTO; scid = __le16_to_cpu(req->scid); mtu = __le16_to_cpu(req->mtu); mps = __le16_to_cpu(req->mps); psm = req->psm; dcid = 0; credits = 0; if (mtu < 23 || mps < 23) return -EPROTO; BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), scid, mtu, mps); /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A * page 1059: * * Valid range: 0x0001-0x00ff * * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges */ if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { result = L2CAP_CR_LE_BAD_PSM; chan = NULL; goto response; } /* Check if we have socket listening on psm */ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, &conn->hcon->dst, LE_LINK); if (!pchan) { result = L2CAP_CR_LE_BAD_PSM; chan = NULL; goto response; } mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); if (!smp_sufficient_security(conn->hcon, pchan->sec_level, SMP_ALLOW_STK)) { result = L2CAP_CR_LE_AUTHENTICATION; chan = NULL; goto response_unlock; } /* Check for valid dynamic CID range */ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { result = L2CAP_CR_LE_INVALID_SCID; chan = NULL; goto response_unlock; } /* Check if we already have channel with that dcid */ if (__l2cap_get_chan_by_dcid(conn, scid)) { result = L2CAP_CR_LE_SCID_IN_USE; chan = NULL; goto response_unlock; } chan = pchan->ops->new_connection(pchan); if (!chan) { result = L2CAP_CR_LE_NO_MEM; goto response_unlock; } bacpy(&chan->src, &conn->hcon->src); bacpy(&chan->dst, &conn->hcon->dst); chan->src_type = bdaddr_src_type(conn->hcon); chan->dst_type = bdaddr_dst_type(conn->hcon); chan->psm = psm; chan->dcid = scid; chan->omtu = mtu; 
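/* LE credit based flow control parameters taken from the request: omtu
 * is the largest SDU the peer can reassemble, remote_mps the largest
 * K-frame payload it accepts, and the initial credits say how many
 * K-frames may be sent before it returns credits. For example, with an
 * MPS of 100 a 250 byte SDU goes out as three K-frames (2 byte SDU
 * length + 98, then 100, then 52 bytes of data), consuming three
 * credits.
 */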
chan->remote_mps = mps; __l2cap_chan_add(conn, chan); l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits)); dcid = chan->scid; credits = chan->rx_credits; __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); chan->ident = cmd->ident; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { l2cap_state_change(chan, BT_CONNECT2); /* The following result value is actually not defined * for LE CoC but we use it to let the function know * that it should bail out after doing its cleanup * instead of sending a response. */ result = L2CAP_CR_PEND; chan->ops->defer(chan); } else { l2cap_chan_ready(chan); result = L2CAP_CR_LE_SUCCESS; } response_unlock: l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); if (result == L2CAP_CR_PEND) return 0; response: if (chan) { rsp.mtu = cpu_to_le16(chan->imtu); rsp.mps = cpu_to_le16(chan->mps); } else { rsp.mtu = 0; rsp.mps = 0; } rsp.dcid = cpu_to_le16(dcid); rsp.credits = cpu_to_le16(credits); rsp.result = cpu_to_le16(result); l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp); return 0; } static inline int l2cap_le_credits(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_le_credits *pkt; struct l2cap_chan *chan; u16 cid, credits, max_credits; if (cmd_len != sizeof(*pkt)) return -EPROTO; pkt = (struct l2cap_le_credits *) data; cid = __le16_to_cpu(pkt->cid); credits = __le16_to_cpu(pkt->credits); BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits); chan = l2cap_get_chan_by_dcid(conn, cid); if (!chan) return -EBADSLT; max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits; if (credits > max_credits) { BT_ERR("LE credits overflow"); l2cap_send_disconn_req(chan, ECONNRESET); /* Return 0 so that we don't trigger an unnecessary * command reject packet. 
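 * The command itself was understood and the disconnect request above
 * already tells the peer what happened; returning an error here would
 * additionally make l2cap_le_sig_channel() answer with an
 * L2CAP_COMMAND_REJ.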
*/ goto unlock; } chan->tx_credits += credits; /* Resume sending */ l2cap_le_flowctl_send(chan); if (chan->tx_credits) chan->ops->resume(chan); unlock: l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_ecred_conn_req *req = (void *) data; DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID); struct l2cap_chan *chan, *pchan; u16 mtu, mps; __le16 psm; u8 result, len = 0; int i, num_scid; bool defer = false; if (!enable_ecred) return -EINVAL; if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { result = L2CAP_CR_LE_INVALID_PARAMS; goto response; } cmd_len -= sizeof(*req); num_scid = cmd_len / sizeof(u16); if (num_scid > L2CAP_ECRED_MAX_CID) { result = L2CAP_CR_LE_INVALID_PARAMS; goto response; } mtu = __le16_to_cpu(req->mtu); mps = __le16_to_cpu(req->mps); if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) { result = L2CAP_CR_LE_UNACCEPT_PARAMS; goto response; } psm = req->psm; /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A * page 1059: * * Valid range: 0x0001-0x00ff * * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges */ if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { result = L2CAP_CR_LE_BAD_PSM; goto response; } BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); memset(pdu, 0, sizeof(*pdu)); /* Check if we have socket listening on psm */ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, &conn->hcon->dst, LE_LINK); if (!pchan) { result = L2CAP_CR_LE_BAD_PSM; goto response; } mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); if (!smp_sufficient_security(conn->hcon, pchan->sec_level, SMP_ALLOW_STK)) { result = L2CAP_CR_LE_AUTHENTICATION; goto unlock; } result = L2CAP_CR_LE_SUCCESS; for (i = 0; i < num_scid; i++) { u16 scid = __le16_to_cpu(req->scid[i]); BT_DBG("scid[%d] 0x%4.4x", i, scid); pdu->dcid[i] = 0x0000; len += sizeof(*pdu->dcid); /* Check for valid dynamic CID range */ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { result = L2CAP_CR_LE_INVALID_SCID; continue; } /* Check if we already have channel with that dcid */ if (__l2cap_get_chan_by_dcid(conn, scid)) { result = L2CAP_CR_LE_SCID_IN_USE; continue; } chan = pchan->ops->new_connection(pchan); if (!chan) { result = L2CAP_CR_LE_NO_MEM; continue; } bacpy(&chan->src, &conn->hcon->src); bacpy(&chan->dst, &conn->hcon->dst); chan->src_type = bdaddr_src_type(conn->hcon); chan->dst_type = bdaddr_dst_type(conn->hcon); chan->psm = psm; chan->dcid = scid; chan->omtu = mtu; chan->remote_mps = mps; __l2cap_chan_add(conn, chan); l2cap_ecred_init(chan, __le16_to_cpu(req->credits)); /* Init response */ if (!pdu->credits) { pdu->mtu = cpu_to_le16(chan->imtu); pdu->mps = cpu_to_le16(chan->mps); pdu->credits = cpu_to_le16(chan->rx_credits); } pdu->dcid[i] = cpu_to_le16(chan->scid); __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); chan->ident = cmd->ident; chan->mode = L2CAP_MODE_EXT_FLOWCTL; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { l2cap_state_change(chan, BT_CONNECT2); defer = true; chan->ops->defer(chan); } else { l2cap_chan_ready(chan); } } unlock: l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); response: pdu->result = cpu_to_le16(result); if (defer) return 0; l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP, sizeof(*pdu) + len, pdu); return 0; } static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn, struct 
l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_ecred_conn_rsp *rsp = (void *) data; struct hci_conn *hcon = conn->hcon; u16 mtu, mps, credits, result; struct l2cap_chan *chan, *tmp; int err = 0, sec_level; int i = 0; if (cmd_len < sizeof(*rsp)) return -EPROTO; mtu = __le16_to_cpu(rsp->mtu); mps = __le16_to_cpu(rsp->mps); credits = __le16_to_cpu(rsp->credits); result = __le16_to_cpu(rsp->result); BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits, result); mutex_lock(&conn->chan_lock); cmd_len -= sizeof(*rsp); list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { u16 dcid; if (chan->ident != cmd->ident || chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state == BT_CONNECTED) continue; l2cap_chan_lock(chan); /* Check that there is a dcid for each pending channel */ if (cmd_len < sizeof(dcid)) { l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); continue; } dcid = __le16_to_cpu(rsp->dcid[i++]); cmd_len -= sizeof(u16); BT_DBG("dcid[%d] 0x%4.4x", i, dcid); /* Check if dcid is already in use */ if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) { /* If a device receives a * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an * already-assigned Destination CID, then both the * original channel and the new channel shall be * immediately discarded and not used. */ l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); chan = __l2cap_get_chan_by_dcid(conn, dcid); l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNRESET); l2cap_chan_unlock(chan); continue; } switch (result) { case L2CAP_CR_LE_AUTHENTICATION: case L2CAP_CR_LE_ENCRYPTION: /* If we already have MITM protection we can't do * anything. */ if (hcon->sec_level > BT_SECURITY_MEDIUM) { l2cap_chan_del(chan, ECONNREFUSED); break; } sec_level = hcon->sec_level + 1; if (chan->sec_level < sec_level) chan->sec_level = sec_level; /* We'll need to send a new Connect Request */ clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags); smp_conn_security(hcon, chan->sec_level); break; case L2CAP_CR_LE_BAD_PSM: l2cap_chan_del(chan, ECONNREFUSED); break; default: /* If dcid was not set it means channels was refused */ if (!dcid) { l2cap_chan_del(chan, ECONNREFUSED); break; } chan->ident = 0; chan->dcid = dcid; chan->omtu = mtu; chan->remote_mps = mps; chan->tx_credits = credits; l2cap_chan_ready(chan); break; } l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); return err; } static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_ecred_reconf_req *req = (void *) data; struct l2cap_ecred_reconf_rsp rsp; u16 mtu, mps, result; struct l2cap_chan *chan; int i, num_scid; if (!enable_ecred) return -EINVAL; if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) { result = L2CAP_CR_LE_INVALID_PARAMS; goto respond; } mtu = __le16_to_cpu(req->mtu); mps = __le16_to_cpu(req->mps); BT_DBG("mtu %u mps %u", mtu, mps); if (mtu < L2CAP_ECRED_MIN_MTU) { result = L2CAP_RECONF_INVALID_MTU; goto respond; } if (mps < L2CAP_ECRED_MIN_MPS) { result = L2CAP_RECONF_INVALID_MPS; goto respond; } cmd_len -= sizeof(*req); num_scid = cmd_len / sizeof(u16); result = L2CAP_RECONF_SUCCESS; for (i = 0; i < num_scid; i++) { u16 scid; scid = __le16_to_cpu(req->scid[i]); if (!scid) return -EPROTO; chan = __l2cap_get_chan_by_dcid(conn, scid); if (!chan) continue; /* If the MTU value is decreased for any of the included * channels, then the receiver shall disconnect all * included channels. 
*/ if (chan->omtu > mtu) { BT_ERR("chan %p decreased MTU %u -> %u", chan, chan->omtu, mtu); result = L2CAP_RECONF_INVALID_MTU; } chan->omtu = mtu; chan->remote_mps = mps; } respond: rsp.result = cpu_to_le16(result); l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp), &rsp); return 0; } static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_chan *chan, *tmp; struct l2cap_ecred_conn_rsp *rsp = (void *) data; u16 result; if (cmd_len < sizeof(*rsp)) return -EPROTO; result = __le16_to_cpu(rsp->result); BT_DBG("result 0x%4.4x", rsp->result); if (!result) return 0; list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { if (chan->ident != cmd->ident) continue; l2cap_chan_del(chan, ECONNRESET); } return 0; } static inline int l2cap_le_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; struct l2cap_chan *chan; if (cmd_len < sizeof(*rej)) return -EPROTO; mutex_lock(&conn->chan_lock); chan = __l2cap_get_chan_by_ident(conn, cmd->ident); if (!chan) goto done; chan = l2cap_chan_hold_unless_zero(chan); if (!chan) goto done; l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); l2cap_chan_put(chan); done: mutex_unlock(&conn->chan_lock); return 0; } static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { int err = 0; switch (cmd->code) { case L2CAP_COMMAND_REJ: l2cap_le_command_rej(conn, cmd, cmd_len, data); break; case L2CAP_CONN_PARAM_UPDATE_REQ: err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data); break; case L2CAP_CONN_PARAM_UPDATE_RSP: break; case L2CAP_LE_CONN_RSP: l2cap_le_connect_rsp(conn, cmd, cmd_len, data); break; case L2CAP_LE_CONN_REQ: err = l2cap_le_connect_req(conn, cmd, cmd_len, data); break; case L2CAP_LE_CREDITS: err = l2cap_le_credits(conn, cmd, cmd_len, data); break; case L2CAP_ECRED_CONN_REQ: err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data); break; case L2CAP_ECRED_CONN_RSP: err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data); break; case L2CAP_ECRED_RECONF_REQ: err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data); break; case L2CAP_ECRED_RECONF_RSP: err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data); break; case L2CAP_DISCONN_REQ: err = l2cap_disconnect_req(conn, cmd, cmd_len, data); break; case L2CAP_DISCONN_RSP: l2cap_disconnect_rsp(conn, cmd, cmd_len, data); break; default: BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code); err = -EINVAL; break; } return err; } static inline void l2cap_le_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) { struct hci_conn *hcon = conn->hcon; struct l2cap_cmd_hdr *cmd; u16 len; int err; if (hcon->type != LE_LINK) goto drop; if (skb->len < L2CAP_CMD_HDR_SIZE) goto drop; cmd = (void *) skb->data; skb_pull(skb, L2CAP_CMD_HDR_SIZE); len = le16_to_cpu(cmd->len); BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident); if (len != skb->len || !cmd->ident) { BT_DBG("corrupted command"); goto drop; } err = l2cap_le_sig_cmd(conn, cmd, len, skb->data); if (err) { struct l2cap_cmd_rej_unk rej; BT_ERR("Wrong link type (%d)", err); rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); } drop: kfree_skb(skb); } static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident) { struct l2cap_cmd_rej_unk rej; rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 
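/* The reject built here, like every signaling command parsed by the
 * handlers above and below, travels with a 4 byte command header:
 *
 *   code (1 byte) | ident (1 byte) | len (2 bytes, LE, payload only)
 *
 * An ident of 0 is reserved, which is why both signaling parsers treat
 * it as a corrupted command.
 */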
l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); } static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) { struct hci_conn *hcon = conn->hcon; struct l2cap_cmd_hdr *cmd; int err; l2cap_raw_recv(conn, skb); if (hcon->type != ACL_LINK) goto drop; while (skb->len >= L2CAP_CMD_HDR_SIZE) { u16 len; cmd = (void *) skb->data; skb_pull(skb, L2CAP_CMD_HDR_SIZE); len = le16_to_cpu(cmd->len); BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident); if (len > skb->len || !cmd->ident) { BT_DBG("corrupted command"); l2cap_sig_send_rej(conn, cmd->ident); skb_pull(skb, len > skb->len ? skb->len : len); continue; } err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data); if (err) { BT_ERR("Wrong link type (%d)", err); l2cap_sig_send_rej(conn, cmd->ident); } skb_pull(skb, len); } if (skb->len > 0) { BT_DBG("corrupted command"); l2cap_sig_send_rej(conn, 0); } drop: kfree_skb(skb); } static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) { u16 our_fcs, rcv_fcs; int hdr_size; if (test_bit(FLAG_EXT_CTRL, &chan->flags)) hdr_size = L2CAP_EXT_HDR_SIZE; else hdr_size = L2CAP_ENH_HDR_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) { skb_trim(skb, skb->len - L2CAP_FCS_SIZE); rcv_fcs = get_unaligned_le16(skb->data + skb->len); our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); if (our_fcs != rcv_fcs) return -EBADMSG; } return 0; } static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) { struct l2cap_ctrl control; BT_DBG("chan %p", chan); memset(&control, 0, sizeof(control)); control.sframe = 1; control.final = 1; control.reqseq = chan->buffer_seq; set_bit(CONN_SEND_FBIT, &chan->conn_state); if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { control.super = L2CAP_SUPER_RNR; l2cap_send_sframe(chan, &control); } if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && chan->unacked_frames > 0) __set_retrans_timer(chan); /* Send pending iframes */ l2cap_ertm_send(chan); if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && test_bit(CONN_SEND_FBIT, &chan->conn_state)) { /* F-bit wasn't sent in an s-frame or i-frame yet, so * send it now. 
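 * In ERTM a poll (P=1) from the peer must be answered by exactly one
 * frame with the F-bit set; if none of the I-frames flushed by
 * l2cap_ertm_send() above carried it, fall back to an RR supervisory
 * frame.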
*/ control.super = L2CAP_SUPER_RR; l2cap_send_sframe(chan, &control); } } static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag, struct sk_buff **last_frag) { /* skb->len reflects data in skb as well as all fragments * skb->data_len reflects only data in fragments */ if (!skb_has_frag_list(skb)) skb_shinfo(skb)->frag_list = new_frag; new_frag->next = NULL; (*last_frag)->next = new_frag; *last_frag = new_frag; skb->len += new_frag->len; skb->data_len += new_frag->len; skb->truesize += new_frag->truesize; } static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, struct l2cap_ctrl *control) { int err = -EINVAL; switch (control->sar) { case L2CAP_SAR_UNSEGMENTED: if (chan->sdu) break; err = chan->ops->recv(chan, skb); break; case L2CAP_SAR_START: if (chan->sdu) break; if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE)) break; chan->sdu_len = get_unaligned_le16(skb->data); skb_pull(skb, L2CAP_SDULEN_SIZE); if (chan->sdu_len > chan->imtu) { err = -EMSGSIZE; break; } if (skb->len >= chan->sdu_len) break; chan->sdu = skb; chan->sdu_last_frag = skb; skb = NULL; err = 0; break; case L2CAP_SAR_CONTINUE: if (!chan->sdu) break; append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag); skb = NULL; if (chan->sdu->len >= chan->sdu_len) break; err = 0; break; case L2CAP_SAR_END: if (!chan->sdu) break; append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag); skb = NULL; if (chan->sdu->len != chan->sdu_len) break; err = chan->ops->recv(chan, chan->sdu); if (!err) { /* Reassembly complete */ chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; } break; } if (err) { kfree_skb(skb); kfree_skb(chan->sdu); chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; } return err; } static int l2cap_resegment(struct l2cap_chan *chan) { /* Placeholder */ return 0; } void l2cap_chan_busy(struct l2cap_chan *chan, int busy) { u8 event; if (chan->mode != L2CAP_MODE_ERTM) return; event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; l2cap_tx(chan, NULL, NULL, event); } static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) { int err = 0; /* Pass sequential frames to l2cap_reassemble_sdu() * until a gap is encountered. 
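 * This runs in the SREJ_SENT recovery state: out-of-order I-frames were
 * parked on srej_q, and as the missing sequence numbers arrive the
 * queue is drained in txseq order. Once srej_q is empty the channel
 * drops back to L2CAP_RX_STATE_RECV and acknowledges what was received.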
*/ BT_DBG("chan %p", chan); while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { struct sk_buff *skb; BT_DBG("Searching for skb with txseq %d (queue len %d)", chan->buffer_seq, skb_queue_len(&chan->srej_q)); skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq); if (!skb) break; skb_unlink(skb, &chan->srej_q); chan->buffer_seq = __next_seq(chan, chan->buffer_seq); err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap); if (err) break; } if (skb_queue_empty(&chan->srej_q)) { chan->rx_state = L2CAP_RX_STATE_RECV; l2cap_send_ack(chan); } return err; } static void l2cap_handle_srej(struct l2cap_chan *chan, struct l2cap_ctrl *control) { struct sk_buff *skb; BT_DBG("chan %p, control %p", chan, control); if (control->reqseq == chan->next_tx_seq) { BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); l2cap_send_disconn_req(chan, ECONNRESET); return; } skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); if (skb == NULL) { BT_DBG("Seq %d not available for retransmission", control->reqseq); return; } if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) { BT_DBG("Retry limit exceeded (%d)", chan->max_tx); l2cap_send_disconn_req(chan, ECONNRESET); return; } clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (control->poll) { l2cap_pass_to_tx(chan, control); set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_retransmit(chan, control); l2cap_ertm_send(chan); if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { set_bit(CONN_SREJ_ACT, &chan->conn_state); chan->srej_save_reqseq = control->reqseq; } } else { l2cap_pass_to_tx_fbit(chan, control); if (control->final) { if (chan->srej_save_reqseq != control->reqseq || !test_and_clear_bit(CONN_SREJ_ACT, &chan->conn_state)) l2cap_retransmit(chan, control); } else { l2cap_retransmit(chan, control); if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { set_bit(CONN_SREJ_ACT, &chan->conn_state); chan->srej_save_reqseq = control->reqseq; } } } } static void l2cap_handle_rej(struct l2cap_chan *chan, struct l2cap_ctrl *control) { struct sk_buff *skb; BT_DBG("chan %p, control %p", chan, control); if (control->reqseq == chan->next_tx_seq) { BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); l2cap_send_disconn_req(chan, ECONNRESET); return; } skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); if (chan->max_tx && skb && bt_cb(skb)->l2cap.retries >= chan->max_tx) { BT_DBG("Retry limit exceeded (%d)", chan->max_tx); l2cap_send_disconn_req(chan, ECONNRESET); return; } clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); l2cap_pass_to_tx(chan, control); if (control->final) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) l2cap_retransmit_all(chan, control); } else { l2cap_retransmit_all(chan, control); l2cap_ertm_send(chan); if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) set_bit(CONN_REJ_ACT, &chan->conn_state); } } static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) { BT_DBG("chan %p, txseq %d", chan, txseq); BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq, chan->expected_tx_seq); if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { /* See notes below regarding "double poll" and * invalid packets. 
*/ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { BT_DBG("Invalid/Ignore - after SREJ"); return L2CAP_TXSEQ_INVALID_IGNORE; } else { BT_DBG("Invalid - in window after SREJ sent"); return L2CAP_TXSEQ_INVALID; } } if (chan->srej_list.head == txseq) { BT_DBG("Expected SREJ"); return L2CAP_TXSEQ_EXPECTED_SREJ; } if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) { BT_DBG("Duplicate SREJ - txseq already stored"); return L2CAP_TXSEQ_DUPLICATE_SREJ; } if (l2cap_seq_list_contains(&chan->srej_list, txseq)) { BT_DBG("Unexpected SREJ - not requested"); return L2CAP_TXSEQ_UNEXPECTED_SREJ; } } if (chan->expected_tx_seq == txseq) { if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { BT_DBG("Invalid - txseq outside tx window"); return L2CAP_TXSEQ_INVALID; } else { BT_DBG("Expected"); return L2CAP_TXSEQ_EXPECTED; } } if (__seq_offset(chan, txseq, chan->last_acked_seq) < __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) { BT_DBG("Duplicate - expected_tx_seq later than txseq"); return L2CAP_TXSEQ_DUPLICATE; } if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { /* A source of invalid packets is a "double poll" condition, * where delays cause us to send multiple poll packets. If * the remote stack receives and processes both polls, * sequence numbers can wrap around in such a way that a * resent frame has a sequence number that looks like new data * with a sequence gap. This would trigger an erroneous SREJ * request. * * Fortunately, this is impossible with a tx window that's * less than half of the maximum sequence number, which allows * invalid frames to be safely ignored. * * With tx window sizes greater than half of the tx window * maximum, the frame is invalid and cannot be ignored. This * causes a disconnect. */ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { BT_DBG("Invalid/Ignore - txseq outside tx window"); return L2CAP_TXSEQ_INVALID_IGNORE; } else { BT_DBG("Invalid - txseq outside tx window"); return L2CAP_TXSEQ_INVALID; } } else { BT_DBG("Unexpected - txseq indicates missing frames"); return L2CAP_TXSEQ_UNEXPECTED; } } static int l2cap_rx_state_recv(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { struct l2cap_ctrl local_control; int err = 0; bool skb_in_use = false; BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, event); switch (event) { case L2CAP_EV_RECV_IFRAME: switch (l2cap_classify_txseq(chan, control->txseq)) { case L2CAP_TXSEQ_EXPECTED: l2cap_pass_to_tx(chan, control); if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { BT_DBG("Busy, discarding expected seq %d", control->txseq); break; } chan->expected_tx_seq = __next_seq(chan, control->txseq); chan->buffer_seq = chan->expected_tx_seq; skb_in_use = true; /* l2cap_reassemble_sdu may free skb, hence invalidate * control, so make a copy in advance to use it after * l2cap_reassemble_sdu returns and to avoid the race * condition, for example: * * The current thread calls: * l2cap_reassemble_sdu * chan->ops->recv == l2cap_sock_recv_cb * __sock_queue_rcv_skb * Another thread calls: * bt_sock_recvmsg * skb_recv_datagram * skb_free_datagram * Then the current thread tries to access control, but * it was freed by skb_free_datagram. 
*/ local_control = *control; err = l2cap_reassemble_sdu(chan, skb, control); if (err) break; if (local_control.final) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) { local_control.final = 0; l2cap_retransmit_all(chan, &local_control); l2cap_ertm_send(chan); } } if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) l2cap_send_ack(chan); break; case L2CAP_TXSEQ_UNEXPECTED: l2cap_pass_to_tx(chan, control); /* Can't issue SREJ frames in the local busy state. * Drop this frame, it will be seen as missing * when local busy is exited. */ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { BT_DBG("Busy, discarding unexpected seq %d", control->txseq); break; } /* There was a gap in the sequence, so an SREJ * must be sent for each missing frame. The * current frame is stored for later use. */ skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); clear_bit(CONN_SREJ_ACT, &chan->conn_state); l2cap_seq_list_clear(&chan->srej_list); l2cap_send_srej(chan, control->txseq); chan->rx_state = L2CAP_RX_STATE_SREJ_SENT; break; case L2CAP_TXSEQ_DUPLICATE: l2cap_pass_to_tx(chan, control); break; case L2CAP_TXSEQ_INVALID_IGNORE: break; case L2CAP_TXSEQ_INVALID: default: l2cap_send_disconn_req(chan, ECONNRESET); break; } break; case L2CAP_EV_RECV_RR: l2cap_pass_to_tx(chan, control); if (control->final) { clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) { control->final = 0; l2cap_retransmit_all(chan, control); } l2cap_ertm_send(chan); } else if (control->poll) { l2cap_send_i_or_rr_or_rnr(chan); } else { if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && chan->unacked_frames) __set_retrans_timer(chan); l2cap_ertm_send(chan); } break; case L2CAP_EV_RECV_RNR: set_bit(CONN_REMOTE_BUSY, &chan->conn_state); l2cap_pass_to_tx(chan, control); if (control && control->poll) { set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_send_rr_or_rnr(chan, 0); } __clear_retrans_timer(chan); l2cap_seq_list_clear(&chan->retrans_list); break; case L2CAP_EV_RECV_REJ: l2cap_handle_rej(chan, control); break; case L2CAP_EV_RECV_SREJ: l2cap_handle_srej(chan, control); break; default: break; } if (skb && !skb_in_use) { BT_DBG("Freeing %p", skb); kfree_skb(skb); } return err; } static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { int err = 0; u16 txseq = control->txseq; bool skb_in_use = false; BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, event); switch (event) { case L2CAP_EV_RECV_IFRAME: switch (l2cap_classify_txseq(chan, txseq)) { case L2CAP_TXSEQ_EXPECTED: /* Keep frame for reassembly later */ l2cap_pass_to_tx(chan, control); skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); chan->expected_tx_seq = __next_seq(chan, txseq); break; case L2CAP_TXSEQ_EXPECTED_SREJ: l2cap_seq_list_pop(&chan->srej_list); l2cap_pass_to_tx(chan, control); skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); err = l2cap_rx_queued_iframes(chan); if (err) break; break; case L2CAP_TXSEQ_UNEXPECTED: /* Got a frame that can't be reassembled yet. * Save it for later, and send SREJs to cover * the missing frames. 
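 * l2cap_send_srej() emits one SREJ supervisory frame per missing txseq
 * and records each request in srej_list, so every gap is asked for once
 * and the matching retransmissions can be recognised when they arrive.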
*/ skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); l2cap_pass_to_tx(chan, control); l2cap_send_srej(chan, control->txseq); break; case L2CAP_TXSEQ_UNEXPECTED_SREJ: /* This frame was requested with an SREJ, but * some expected retransmitted frames are * missing. Request retransmission of missing * SREJ'd frames. */ skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); l2cap_pass_to_tx(chan, control); l2cap_send_srej_list(chan, control->txseq); break; case L2CAP_TXSEQ_DUPLICATE_SREJ: /* We've already queued this frame. Drop this copy. */ l2cap_pass_to_tx(chan, control); break; case L2CAP_TXSEQ_DUPLICATE: /* Expecting a later sequence number, so this frame * was already received. Ignore it completely. */ break; case L2CAP_TXSEQ_INVALID_IGNORE: break; case L2CAP_TXSEQ_INVALID: default: l2cap_send_disconn_req(chan, ECONNRESET); break; } break; case L2CAP_EV_RECV_RR: l2cap_pass_to_tx(chan, control); if (control->final) { clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) { control->final = 0; l2cap_retransmit_all(chan, control); } l2cap_ertm_send(chan); } else if (control->poll) { if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && chan->unacked_frames) { __set_retrans_timer(chan); } set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_send_srej_tail(chan); } else { if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && chan->unacked_frames) __set_retrans_timer(chan); l2cap_send_ack(chan); } break; case L2CAP_EV_RECV_RNR: set_bit(CONN_REMOTE_BUSY, &chan->conn_state); l2cap_pass_to_tx(chan, control); if (control->poll) { l2cap_send_srej_tail(chan); } else { struct l2cap_ctrl rr_control; memset(&rr_control, 0, sizeof(rr_control)); rr_control.sframe = 1; rr_control.super = L2CAP_SUPER_RR; rr_control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &rr_control); } break; case L2CAP_EV_RECV_REJ: l2cap_handle_rej(chan, control); break; case L2CAP_EV_RECV_SREJ: l2cap_handle_srej(chan, control); break; } if (skb && !skb_in_use) { BT_DBG("Freeing %p", skb); kfree_skb(skb); } return err; } static int l2cap_finish_move(struct l2cap_chan *chan) { BT_DBG("chan %p", chan); chan->rx_state = L2CAP_RX_STATE_RECV; chan->conn->mtu = chan->conn->hcon->mtu; return l2cap_resegment(chan); } static int l2cap_rx_state_wait_p(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { int err; BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, event); if (!control->poll) return -EPROTO; l2cap_process_reqseq(chan, control->reqseq); if (!skb_queue_empty(&chan->tx_q)) chan->tx_send_head = skb_peek(&chan->tx_q); else chan->tx_send_head = NULL; /* Rewind next_tx_seq to the point expected * by the receiver. 
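 * l2cap_process_reqseq() above already released everything the peer has
 * acknowledged, so the frames still sitting on tx_q are treated as
 * never sent (unacked_frames = 0) and will be renumbered from reqseq
 * onwards when transmission resumes.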
*/ chan->next_tx_seq = control->reqseq; chan->unacked_frames = 0; err = l2cap_finish_move(chan); if (err) return err; set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_send_i_or_rr_or_rnr(chan); if (event == L2CAP_EV_RECV_IFRAME) return -EPROTO; return l2cap_rx_state_recv(chan, control, NULL, event); } static int l2cap_rx_state_wait_f(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { int err; if (!control->final) return -EPROTO; clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); chan->rx_state = L2CAP_RX_STATE_RECV; l2cap_process_reqseq(chan, control->reqseq); if (!skb_queue_empty(&chan->tx_q)) chan->tx_send_head = skb_peek(&chan->tx_q); else chan->tx_send_head = NULL; /* Rewind next_tx_seq to the point expected * by the receiver. */ chan->next_tx_seq = control->reqseq; chan->unacked_frames = 0; chan->conn->mtu = chan->conn->hcon->mtu; err = l2cap_resegment(chan); if (!err) err = l2cap_rx_state_recv(chan, control, skb, event); return err; } static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) { /* Make sure reqseq is for a packet that has been sent but not acked */ u16 unacked; unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq); return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked; } static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { int err = 0; BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, control, skb, event, chan->rx_state); if (__valid_reqseq(chan, control->reqseq)) { switch (chan->rx_state) { case L2CAP_RX_STATE_RECV: err = l2cap_rx_state_recv(chan, control, skb, event); break; case L2CAP_RX_STATE_SREJ_SENT: err = l2cap_rx_state_srej_sent(chan, control, skb, event); break; case L2CAP_RX_STATE_WAIT_P: err = l2cap_rx_state_wait_p(chan, control, skb, event); break; case L2CAP_RX_STATE_WAIT_F: err = l2cap_rx_state_wait_f(chan, control, skb, event); break; default: /* shut it down */ break; } } else { BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", control->reqseq, chan->next_tx_seq, chan->expected_ack_seq); l2cap_send_disconn_req(chan, ECONNRESET); } return err; } static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb) { /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store * the txseq field in advance to use it after l2cap_reassemble_sdu * returns and to avoid the race condition, for example: * * The current thread calls: * l2cap_reassemble_sdu * chan->ops->recv == l2cap_sock_recv_cb * __sock_queue_rcv_skb * Another thread calls: * bt_sock_recvmsg * skb_recv_datagram * skb_free_datagram * Then the current thread tries to access control, but it was freed by * skb_free_datagram. 
*/ u16 txseq = control->txseq; BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, chan->rx_state); if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) { l2cap_pass_to_tx(chan, control); BT_DBG("buffer_seq %u->%u", chan->buffer_seq, __next_seq(chan, chan->buffer_seq)); chan->buffer_seq = __next_seq(chan, chan->buffer_seq); l2cap_reassemble_sdu(chan, skb, control); } else { if (chan->sdu) { kfree_skb(chan->sdu); chan->sdu = NULL; } chan->sdu_last_frag = NULL; chan->sdu_len = 0; if (skb) { BT_DBG("Freeing %p", skb); kfree_skb(skb); } } chan->last_acked_seq = txseq; chan->expected_tx_seq = __next_seq(chan, txseq); return 0; } static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) { struct l2cap_ctrl *control = &bt_cb(skb)->l2cap; u16 len; u8 event; __unpack_control(chan, skb); len = skb->len; /* * We can just drop the corrupted I-frame here. * Receiver will miss it and start proper recovery * procedures and ask for retransmission. */ if (l2cap_check_fcs(chan, skb)) goto drop; if (!control->sframe && control->sar == L2CAP_SAR_START) len -= L2CAP_SDULEN_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) len -= L2CAP_FCS_SIZE; if (len > chan->mps) { l2cap_send_disconn_req(chan, ECONNRESET); goto drop; } if (chan->ops->filter) { if (chan->ops->filter(chan, skb)) goto drop; } if (!control->sframe) { int err; BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d", control->sar, control->reqseq, control->final, control->txseq); /* Validate F-bit - F=0 always valid, F=1 only * valid in TX WAIT_F */ if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F) goto drop; if (chan->mode != L2CAP_MODE_STREAMING) { event = L2CAP_EV_RECV_IFRAME; err = l2cap_rx(chan, control, skb, event); } else { err = l2cap_stream_rx(chan, control, skb); } if (err) l2cap_send_disconn_req(chan, ECONNRESET); } else { const u8 rx_func_to_event[4] = { L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ }; /* Only I-frames are expected in streaming mode */ if (chan->mode == L2CAP_MODE_STREAMING) goto drop; BT_DBG("sframe reqseq %d, final %d, poll %d, super %d", control->reqseq, control->final, control->poll, control->super); if (len != 0) { BT_ERR("Trailing bytes: %d in sframe", len); l2cap_send_disconn_req(chan, ECONNRESET); goto drop; } /* Validate F and P bits */ if (control->final && (control->poll || chan->tx_state != L2CAP_TX_STATE_WAIT_F)) goto drop; event = rx_func_to_event[control->super]; if (l2cap_rx(chan, control, skb, event)) l2cap_send_disconn_req(chan, ECONNRESET); } return 0; drop: kfree_skb(skb); return 0; } static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_le_credits pkt; u16 return_credits = l2cap_le_rx_credits(chan); if (chan->rx_credits >= return_credits) return; return_credits -= chan->rx_credits; BT_DBG("chan %p returning %u credits to sender", chan, return_credits); chan->rx_credits += return_credits; pkt.cid = cpu_to_le16(chan->scid); pkt.credits = cpu_to_le16(return_credits); chan->ident = l2cap_get_ident(conn); l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt); } void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail) { if (chan->rx_avail == rx_avail) return; BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail); chan->rx_avail = rx_avail; if (chan->state == BT_CONNECTED) l2cap_chan_le_send_credits(chan); } static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb) { int err; BT_DBG("SDU reassemble complete: chan %p skb->len 
%u", chan, skb->len); /* Wait recv to confirm reception before updating the credits */ err = chan->ops->recv(chan, skb); if (err < 0 && chan->rx_avail != -1) { BT_ERR("Queueing received LE L2CAP data failed"); l2cap_send_disconn_req(chan, ECONNRESET); return err; } /* Update credits whenever an SDU is received */ l2cap_chan_le_send_credits(chan); return err; } static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) { int err; if (!chan->rx_credits) { BT_ERR("No credits to receive LE L2CAP data"); l2cap_send_disconn_req(chan, ECONNRESET); return -ENOBUFS; } if (chan->imtu < skb->len) { BT_ERR("Too big LE L2CAP PDU"); return -ENOBUFS; } chan->rx_credits--; BT_DBG("chan %p: rx_credits %u -> %u", chan, chan->rx_credits + 1, chan->rx_credits); /* Update if remote had run out of credits, this should only happens * if the remote is not using the entire MPS. */ if (!chan->rx_credits) l2cap_chan_le_send_credits(chan); err = 0; if (!chan->sdu) { u16 sdu_len; sdu_len = get_unaligned_le16(skb->data); skb_pull(skb, L2CAP_SDULEN_SIZE); BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u", sdu_len, skb->len, chan->imtu); if (sdu_len > chan->imtu) { BT_ERR("Too big LE L2CAP SDU length received"); err = -EMSGSIZE; goto failed; } if (skb->len > sdu_len) { BT_ERR("Too much LE L2CAP data received"); err = -EINVAL; goto failed; } if (skb->len == sdu_len) return l2cap_ecred_recv(chan, skb); chan->sdu = skb; chan->sdu_len = sdu_len; chan->sdu_last_frag = skb; /* Detect if remote is not able to use the selected MPS */ if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) { u16 mps_len = skb->len + L2CAP_SDULEN_SIZE; /* Adjust the number of credits */ BT_DBG("chan->mps %u -> %u", chan->mps, mps_len); chan->mps = mps_len; l2cap_chan_le_send_credits(chan); } return 0; } BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u", chan->sdu->len, skb->len, chan->sdu_len); if (chan->sdu->len + skb->len > chan->sdu_len) { BT_ERR("Too much LE L2CAP data received"); err = -EINVAL; goto failed; } append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag); skb = NULL; if (chan->sdu->len == chan->sdu_len) { err = l2cap_ecred_recv(chan, chan->sdu); if (!err) { chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; } } failed: if (err) { kfree_skb(skb); kfree_skb(chan->sdu); chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; } /* We can't return an error here since we took care of the skb * freeing internally. An error return would cause the caller to * do a double-free of the skb. */ return 0; } static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) { struct l2cap_chan *chan; chan = l2cap_get_chan_by_scid(conn, cid); if (!chan) { BT_DBG("unknown cid 0x%4.4x", cid); /* Drop packet and return */ kfree_skb(skb); return; } BT_DBG("chan %p, len %d", chan, skb->len); /* If we receive data on a fixed channel before the info req/rsp * procedure is done simply assume that the channel is supported * and mark it as ready. */ if (chan->chan_type == L2CAP_CHAN_FIXED) l2cap_chan_ready(chan); if (chan->state != BT_CONNECTED) goto drop; switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: case L2CAP_MODE_EXT_FLOWCTL: if (l2cap_ecred_data_rcv(chan, skb) < 0) goto drop; goto done; case L2CAP_MODE_BASIC: /* If socket recv buffers overflows we drop data here * which is *bad* because L2CAP has to be reliable. * But we don't have any other choice. L2CAP doesn't * provide flow control mechanism. 
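 * This only affects Basic mode: ERTM (handled below) can signal local
 * busy via RNR and recover through retransmission, and the credit based
 * modes (handled above) simply stop handing out credits when the
 * receiver runs short of buffer space.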
*/ if (chan->imtu < skb->len) { BT_ERR("Dropping L2CAP data: receive buffer overflow"); goto drop; } if (!chan->ops->recv(chan, skb)) goto done; break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: l2cap_data_rcv(chan, skb); goto done; default: BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode); break; } drop: kfree_skb(skb); done: l2cap_chan_unlock(chan); l2cap_chan_put(chan); } static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) { struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan; if (hcon->type != ACL_LINK) goto free_skb; chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst, ACL_LINK); if (!chan) goto free_skb; BT_DBG("chan %p, len %d", chan, skb->len); l2cap_chan_lock(chan); if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) goto drop; if (chan->imtu < skb->len) goto drop; /* Store remote BD_ADDR and PSM for msg_name */ bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst); bt_cb(skb)->l2cap.psm = psm; if (!chan->ops->recv(chan, skb)) { l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; } drop: l2cap_chan_unlock(chan); l2cap_chan_put(chan); free_skb: kfree_skb(skb); } static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) { struct l2cap_hdr *lh = (void *) skb->data; struct hci_conn *hcon = conn->hcon; u16 cid, len; __le16 psm; if (hcon->state != BT_CONNECTED) { BT_DBG("queueing pending rx skb"); skb_queue_tail(&conn->pending_rx, skb); return; } skb_pull(skb, L2CAP_HDR_SIZE); cid = __le16_to_cpu(lh->cid); len = __le16_to_cpu(lh->len); if (len != skb->len) { kfree_skb(skb); return; } /* Since we can't actively block incoming LE connections we must * at least ensure that we ignore incoming data from them. */ if (hcon->type == LE_LINK && hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst, bdaddr_dst_type(hcon))) { kfree_skb(skb); return; } BT_DBG("len %d, cid 0x%4.4x", len, cid); switch (cid) { case L2CAP_CID_SIGNALING: l2cap_sig_channel(conn, skb); break; case L2CAP_CID_CONN_LESS: psm = get_unaligned((__le16 *) skb->data); skb_pull(skb, L2CAP_PSMLEN_SIZE); l2cap_conless_channel(conn, psm, skb); break; case L2CAP_CID_LE_SIGNALING: l2cap_le_sig_channel(conn, skb); break; default: l2cap_data_channel(conn, cid, skb); break; } } static void process_pending_rx(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, pending_rx_work); struct sk_buff *skb; BT_DBG(""); while ((skb = skb_dequeue(&conn->pending_rx))) l2cap_recv_frame(conn, skb); } static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) { struct l2cap_conn *conn = hcon->l2cap_data; struct hci_chan *hchan; if (conn) return conn; hchan = hci_chan_create(hcon); if (!hchan) return NULL; conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) { hci_chan_del(hchan); return NULL; } kref_init(&conn->ref); hcon->l2cap_data = conn; conn->hcon = hci_conn_get(hcon); conn->hchan = hchan; BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); conn->mtu = hcon->mtu; conn->feat_mask = 0; conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS; if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) && (bredr_sc_enabled(hcon->hdev) || hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP))) conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; mutex_init(&conn->ident_lock); mutex_init(&conn->chan_lock); INIT_LIST_HEAD(&conn->chan_l); INIT_LIST_HEAD(&conn->users); INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); skb_queue_head_init(&conn->pending_rx); INIT_WORK(&conn->pending_rx_work, process_pending_rx); 
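/* Frames that arrive while the underlying hci_conn is still coming up
 * are parked on pending_rx by l2cap_recv_frame() above and replayed by
 * process_pending_rx() once the connection is ready.
 */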
INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr); conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; return conn; } static bool is_valid_psm(u16 psm, u8 dst_type) { if (!psm) return false; if (bdaddr_type_is_le(dst_type)) return (psm <= 0x00ff); /* PSM must be odd and lsb of upper byte must be 0 */ return ((psm & 0x0101) == 0x0001); } struct l2cap_chan_data { struct l2cap_chan *chan; struct pid *pid; int count; }; static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data) { struct l2cap_chan_data *d = data; struct pid *pid; if (chan == d->chan) return; if (!test_bit(FLAG_DEFER_SETUP, &chan->flags)) return; pid = chan->ops->get_peer_pid(chan); /* Only count deferred channels with the same PID/PSM */ if (d->pid != pid || chan->psm != d->chan->psm || chan->ident || chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) return; d->count++; } int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst, u8 dst_type, u16 timeout) { struct l2cap_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; int err; BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src, dst, dst_type, __le16_to_cpu(psm), chan->mode); hdev = hci_get_route(dst, &chan->src, chan->src_type); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid && chan->chan_type != L2CAP_CHAN_RAW) { err = -EINVAL; goto done; } if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) { err = -EINVAL; goto done; } if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) { err = -EINVAL; goto done; } switch (chan->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_LE_FLOWCTL: break; case L2CAP_MODE_EXT_FLOWCTL: if (!enable_ecred) { err = -EOPNOTSUPP; goto done; } break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) break; fallthrough; default: err = -EOPNOTSUPP; goto done; } switch (chan->state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ err = 0; goto done; case BT_CONNECTED: /* Already connected */ err = -EISCONN; goto done; case BT_OPEN: case BT_BOUND: /* Can connect */ break; default: err = -EBADFD; goto done; } /* Set destination address and psm */ bacpy(&chan->dst, dst); chan->dst_type = dst_type; chan->psm = psm; chan->dcid = cid; if (bdaddr_type_is_le(dst_type)) { /* Convert from L2CAP channel address type to HCI address type */ if (dst_type == BDADDR_LE_PUBLIC) dst_type = ADDR_LE_DEV_PUBLIC; else dst_type = ADDR_LE_DEV_RANDOM; if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) hcon = hci_connect_le(hdev, dst, dst_type, false, chan->sec_level, timeout, HCI_ROLE_SLAVE, 0, 0); else hcon = hci_connect_le_scan(hdev, dst, dst_type, chan->sec_level, timeout, CONN_REASON_L2CAP_CHAN); } else { u8 auth_type = l2cap_get_auth_type(chan); hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type, CONN_REASON_L2CAP_CHAN, timeout); } if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto done; } conn = l2cap_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); err = -ENOMEM; goto done; } if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) { struct l2cap_chan_data data; data.chan = chan; data.pid = chan->ops->get_peer_pid(chan); data.count = 1; l2cap_chan_list(conn, l2cap_chan_by_pid, &data); /* Check if there isn't too many channels being connected */ if (data.count > L2CAP_ECRED_CONN_SCID_MAX) { hci_conn_drop(hcon); err = -EPROTO; goto done; } } mutex_lock(&conn->chan_lock); l2cap_chan_lock(chan); if (cid && __l2cap_get_chan_by_dcid(conn, cid)) { hci_conn_drop(hcon); err = -EBUSY; goto 
chan_unlock; } /* Update source addr of the socket */ bacpy(&chan->src, &hcon->src); chan->src_type = bdaddr_src_type(hcon); __l2cap_chan_add(conn, chan); /* l2cap_chan_add takes its own ref so we can drop this one */ hci_conn_drop(hcon); l2cap_state_change(chan, BT_CONNECT); __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); /* Release chan->sport so that it can be reused by other * sockets (as it's only used for listening sockets). */ write_lock(&chan_list_lock); chan->sport = 0; write_unlock(&chan_list_lock); if (hcon->state == BT_CONNECTED) { if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { __clear_chan_timer(chan); if (l2cap_chan_check_security(chan, true)) l2cap_state_change(chan, BT_CONNECTED); } else l2cap_do_start(chan); } err = 0; chan_unlock: l2cap_chan_unlock(chan); mutex_unlock(&conn->chan_lock); done: hci_dev_unlock(hdev); hci_dev_put(hdev); return err; } EXPORT_SYMBOL_GPL(l2cap_chan_connect); static void l2cap_ecred_reconfigure(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1); pdu->mtu = cpu_to_le16(chan->imtu); pdu->mps = cpu_to_le16(chan->mps); pdu->scid[0] = cpu_to_le16(chan->scid); chan->ident = l2cap_get_ident(conn); l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ, sizeof(pdu), &pdu); } int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu) { if (chan->imtu > mtu) return -EINVAL; BT_DBG("chan %p mtu 0x%4.4x", chan, mtu); chan->imtu = mtu; l2cap_ecred_reconfigure(chan); return 0; } /* ---- L2CAP interface with lower layer (HCI) ---- */ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) { int exact = 0, lm1 = 0, lm2 = 0; struct l2cap_chan *c; BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); /* Find listening sockets and check their link_mode */ read_lock(&chan_list_lock); list_for_each_entry(c, &chan_list, global_l) { if (c->state != BT_LISTEN) continue; if (!bacmp(&c->src, &hdev->bdaddr)) { lm1 |= HCI_LM_ACCEPT; if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) lm1 |= HCI_LM_MASTER; exact++; } else if (!bacmp(&c->src, BDADDR_ANY)) { lm2 |= HCI_LM_ACCEPT; if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) lm2 |= HCI_LM_MASTER; } } read_unlock(&chan_list_lock); return exact ? lm1 : lm2; } /* Find the next fixed channel in BT_LISTEN state, continue iteration * from an existing channel in the list or from the beginning of the * global list (by passing NULL as first parameter). 
*/ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, struct hci_conn *hcon) { u8 src_type = bdaddr_src_type(hcon); read_lock(&chan_list_lock); if (c) c = list_next_entry(c, global_l); else c = list_entry(chan_list.next, typeof(*c), global_l); list_for_each_entry_from(c, &chan_list, global_l) { if (c->chan_type != L2CAP_CHAN_FIXED) continue; if (c->state != BT_LISTEN) continue; if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY)) continue; if (src_type != c->src_type) continue; c = l2cap_chan_hold_unless_zero(c); read_unlock(&chan_list_lock); return c; } read_unlock(&chan_list_lock); return NULL; } static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct hci_dev *hdev = hcon->hdev; struct l2cap_conn *conn; struct l2cap_chan *pchan; u8 dst_type; if (hcon->type != ACL_LINK && hcon->type != LE_LINK) return; BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); if (status) { l2cap_conn_del(hcon, bt_to_errno(status)); return; } conn = l2cap_conn_add(hcon); if (!conn) return; dst_type = bdaddr_dst_type(hcon); /* If device is blocked, do not create channels for it */ if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type)) return; /* Find fixed channels and notify them of the new connection. We * use multiple individual lookups, continuing each time where * we left off, because the list lock would prevent calling the * potentially sleeping l2cap_chan_lock() function. */ pchan = l2cap_global_fixed_chan(NULL, hcon); while (pchan) { struct l2cap_chan *chan, *next; /* Client fixed channels should override server ones */ if (__l2cap_get_chan_by_dcid(conn, pchan->scid)) goto next; l2cap_chan_lock(pchan); chan = pchan->ops->new_connection(pchan); if (chan) { bacpy(&chan->src, &hcon->src); bacpy(&chan->dst, &hcon->dst); chan->src_type = bdaddr_src_type(hcon); chan->dst_type = dst_type; __l2cap_chan_add(conn, chan); } l2cap_chan_unlock(pchan); next: next = l2cap_global_fixed_chan(pchan, hcon); l2cap_chan_put(pchan); pchan = next; } l2cap_conn_ready(conn); } int l2cap_disconn_ind(struct hci_conn *hcon) { struct l2cap_conn *conn = hcon->l2cap_data; BT_DBG("hcon %p", hcon); if (!conn) return HCI_ERROR_REMOTE_USER_TERM; return conn->disc_reason; } static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { if (hcon->type != ACL_LINK && hcon->type != LE_LINK) return; BT_DBG("hcon %p reason %d", hcon, reason); l2cap_conn_del(hcon, bt_to_errno(reason)); } static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) { if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) return; if (encrypt == 0x00) { if (chan->sec_level == BT_SECURITY_MEDIUM) { __set_chan_timer(chan, L2CAP_ENC_TIMEOUT); } else if (chan->sec_level == BT_SECURITY_HIGH || chan->sec_level == BT_SECURITY_FIPS) l2cap_chan_close(chan, ECONNREFUSED); } else { if (chan->sec_level == BT_SECURITY_MEDIUM) __clear_chan_timer(chan); } } static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) { struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_chan *chan; if (!conn) return; BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid, state_to_string(chan->state)); if (!status && encrypt) chan->sec_level = hcon->sec_level; if (!__l2cap_no_conn_pending(chan)) { l2cap_chan_unlock(chan); continue; } if (!status && (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)) { 
chan->ops->resume(chan); l2cap_check_encryption(chan, encrypt); l2cap_chan_unlock(chan); continue; } if (chan->state == BT_CONNECT) { if (!status && l2cap_check_enc_key_size(hcon)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); } else if (chan->state == BT_CONNECT2 && !(chan->mode == L2CAP_MODE_EXT_FLOWCTL || chan->mode == L2CAP_MODE_LE_FLOWCTL)) { struct l2cap_conn_rsp rsp; __u16 res, stat; if (!status && l2cap_check_enc_key_size(hcon)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; chan->ops->defer(chan); } else { l2cap_state_change(chan, BT_CONFIG); res = L2CAP_CR_SUCCESS; stat = L2CAP_CS_NO_INFO; } } else { l2cap_state_change(chan, BT_DISCONN); __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); res = L2CAP_CR_SEC_BLOCK; stat = L2CAP_CS_NO_INFO; } rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(res); rsp.status = cpu_to_le16(stat); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && res == L2CAP_CR_SUCCESS) { char buf[128]; set_bit(CONF_REQ_SENT, &chan->conf_state); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } } l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); } /* Append fragment into frame respecting the maximum len of rx_skb */ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb, u16 len) { if (!conn->rx_skb) { /* Allocate skb for the complete frame (with header) */ conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); if (!conn->rx_skb) return -ENOMEM; /* Init rx_len */ conn->rx_len = len; } /* Copy as much as the rx_skb can hold */ len = min_t(u16, len, skb->len); skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len); skb_pull(skb, len); conn->rx_len -= len; return len; } static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb) { struct sk_buff *rx_skb; int len; /* Append just enough to complete the header */ len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len); /* If header could not be read just continue */ if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE) return len; rx_skb = conn->rx_skb; len = get_unaligned_le16(rx_skb->data); /* Check if rx_skb has enough space to received all fragments */ if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) { /* Update expected len */ conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE); return L2CAP_LEN_SIZE; } /* Reset conn->rx_skb since it will need to be reallocated in order to * fit all fragments. 
*/ conn->rx_skb = NULL; /* Reallocates rx_skb using the exact expected length */ len = l2cap_recv_frag(conn, rx_skb, len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE)); kfree_skb(rx_skb); return len; } static void l2cap_recv_reset(struct l2cap_conn *conn) { kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; } void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { struct l2cap_conn *conn = hcon->l2cap_data; int len; if (!conn) conn = l2cap_conn_add(hcon); if (!conn) goto drop; BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags); switch (flags) { case ACL_START: case ACL_START_NO_FLUSH: case ACL_COMPLETE: if (conn->rx_skb) { BT_ERR("Unexpected start frame (len %d)", skb->len); l2cap_recv_reset(conn); l2cap_conn_unreliable(conn, ECOMM); } /* Start fragment may not contain the L2CAP length so just * copy the initial byte when that happens and use conn->mtu as * expected length. */ if (skb->len < L2CAP_LEN_SIZE) { l2cap_recv_frag(conn, skb, conn->mtu); break; } len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE; if (len == skb->len) { /* Complete frame received */ l2cap_recv_frame(conn, skb); return; } BT_DBG("Start: total len %d, frag len %u", len, skb->len); if (skb->len > len) { BT_ERR("Frame is too long (len %u, expected len %d)", skb->len, len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } /* Append fragment into frame (with header) */ if (l2cap_recv_frag(conn, skb, len) < 0) goto drop; break; case ACL_CONT: BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len); if (!conn->rx_skb) { BT_ERR("Unexpected continuation frame (len %d)", skb->len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } /* Complete the L2CAP length if it has not been read */ if (conn->rx_skb->len < L2CAP_LEN_SIZE) { if (l2cap_recv_len(conn, skb) < 0) { l2cap_conn_unreliable(conn, ECOMM); goto drop; } /* Header still could not be read just continue */ if (conn->rx_skb->len < L2CAP_LEN_SIZE) break; } if (skb->len > conn->rx_len) { BT_ERR("Fragment is too long (len %u, expected %u)", skb->len, conn->rx_len); l2cap_recv_reset(conn); l2cap_conn_unreliable(conn, ECOMM); goto drop; } /* Append fragment into frame (with header) */ l2cap_recv_frag(conn, skb, skb->len); if (!conn->rx_len) { /* Complete frame received. l2cap_recv_frame * takes ownership of the skb so set the global * rx_skb pointer to NULL first. 
*/ struct sk_buff *rx_skb = conn->rx_skb; conn->rx_skb = NULL; l2cap_recv_frame(conn, rx_skb); } break; } drop: kfree_skb(skb); } static struct hci_cb l2cap_cb = { .name = "L2CAP", .connect_cfm = l2cap_connect_cfm, .disconn_cfm = l2cap_disconn_cfm, .security_cfm = l2cap_security_cfm, }; static int l2cap_debugfs_show(struct seq_file *f, void *p) { struct l2cap_chan *c; read_lock(&chan_list_lock); list_for_each_entry(c, &chan_list, global_l) { seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", &c->src, c->src_type, &c->dst, c->dst_type, c->state, __le16_to_cpu(c->psm), c->scid, c->dcid, c->imtu, c->omtu, c->sec_level, c->mode); } read_unlock(&chan_list_lock); return 0; } DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs); static struct dentry *l2cap_debugfs; int __init l2cap_init(void) { int err; err = l2cap_init_sockets(); if (err < 0) return err; hci_register_cb(&l2cap_cb); if (IS_ERR_OR_NULL(bt_debugfs)) return 0; l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, NULL, &l2cap_debugfs_fops); return 0; } void l2cap_exit(void) { debugfs_remove(l2cap_debugfs); hci_unregister_cb(&l2cap_cb); l2cap_cleanup_sockets(); } module_param(disable_ertm, bool, 0644); MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); module_param(enable_ecred, bool, 0644); MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
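The l2cap_ecred_data_rcv() path in the listing above implements LE credit-based flow control: every incoming PDU consumes one receive credit, and credits are replenished when the pool runs dry or when a complete SDU is delivered. The following stand-alone sketch (illustrative only, not part of the listing; all demo_* names and values are hypothetical stand-ins) models that accounting:

#include <stdio.h>

struct demo_chan {
	unsigned int rx_credits;	/* PDUs the peer may still send */
	unsigned int rx_init;		/* amount granted when replenishing */
};

/* Stand-in for sending an L2CAP "credits" packet back to the peer. */
static void demo_send_credits(struct demo_chan *c)
{
	printf("  -> grant %u credits\n", c->rx_init);
	c->rx_credits += c->rx_init;
}

/* Accept one PDU; returns -1 if the peer sent without having a credit. */
static int demo_rx_pdu(struct demo_chan *c)
{
	if (!c->rx_credits)
		return -1;		/* flow-control violation: disconnect */
	c->rx_credits--;
	if (!c->rx_credits)		/* peer just ran out: top it up */
		demo_send_credits(c);
	return 0;
}

int main(void)
{
	struct demo_chan c = { .rx_credits = 2, .rx_init = 2 };
	for (int i = 0; i < 5; i++)
		printf("pdu %d: %s (credits left %u)\n", i,
		       demo_rx_pdu(&c) ? "rejected" : "accepted", c.rx_credits);
	return 0;
}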
// SPDX-License-Identifier: GPL-2.0-only /* Kernel module to match the bridge port in and * out device for IP packets coming into contact with a bridge. */ /* (C) 2001-2003 Bart De Schuymer <bdschuym@pandora.be> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/if.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter/x_tables.h> #include <uapi/linux/netfilter/xt_physdev.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>"); MODULE_DESCRIPTION("Xtables: Bridge physical device match"); MODULE_ALIAS("ipt_physdev"); MODULE_ALIAS("ip6t_physdev"); static bool physdev_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_physdev_info *info = par->matchinfo; const struct net_device *physdev; unsigned long ret; const char *indev, *outdev; /* Not a bridged IP packet or no info available yet: * LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if * the destination device will be a bridge. */ if (!nf_bridge_info_exists(skb)) { /* Return MATCH if the invert flags of the used options are on */ if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) && !(info->invert & XT_PHYSDEV_OP_BRIDGED)) return false; if ((info->bitmask & XT_PHYSDEV_OP_ISIN) && !(info->invert & XT_PHYSDEV_OP_ISIN)) return false; if ((info->bitmask & XT_PHYSDEV_OP_ISOUT) && !(info->invert & XT_PHYSDEV_OP_ISOUT)) return false; if ((info->bitmask & XT_PHYSDEV_OP_IN) && !(info->invert & XT_PHYSDEV_OP_IN)) return false; if ((info->bitmask & XT_PHYSDEV_OP_OUT) && !(info->invert & XT_PHYSDEV_OP_OUT)) return false; return true; } physdev = nf_bridge_get_physoutdev(skb); outdev = physdev ? physdev->name : NULL; /* This only makes sense in the FORWARD and POSTROUTING chains */ if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) && (!!outdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED))) return false; physdev = nf_bridge_get_physindev(skb, xt_net(par)); indev = physdev ?
physdev->name : NULL; if ((info->bitmask & XT_PHYSDEV_OP_ISIN && (!indev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) || (info->bitmask & XT_PHYSDEV_OP_ISOUT && (!outdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT)))) return false; if (!(info->bitmask & XT_PHYSDEV_OP_IN)) goto match_outdev; if (indev) { ret = ifname_compare_aligned(indev, info->physindev, info->in_mask); if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN)) return false; } match_outdev: if (!(info->bitmask & XT_PHYSDEV_OP_OUT)) return true; if (!outdev) return false; ret = ifname_compare_aligned(outdev, info->physoutdev, info->out_mask); return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT)); } static int physdev_mt_check(const struct xt_mtchk_param *par) { const struct xt_physdev_info *info = par->matchinfo; static bool brnf_probed __read_mostly; if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || info->bitmask & ~XT_PHYSDEV_OP_MASK) return -EINVAL; if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) && (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || info->invert & XT_PHYSDEV_OP_BRIDGED) && par->hook_mask & (1 << NF_INET_LOCAL_OUT)) { pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n"); return -EINVAL; } if (!brnf_probed) { brnf_probed = true; request_module("br_netfilter"); } return 0; } static struct xt_match physdev_mt_reg __read_mostly = { .name = "physdev", .revision = 0, .family = NFPROTO_UNSPEC, .checkentry = physdev_mt_check, .match = physdev_mt, .matchsize = sizeof(struct xt_physdev_info), .me = THIS_MODULE, }; static int __init physdev_mt_init(void) { return xt_register_match(&physdev_mt_reg); } static void __exit physdev_mt_exit(void) { xt_unregister_match(&physdev_mt_reg); } module_init(physdev_mt_init); module_exit(physdev_mt_exit);
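physdev_mt() above evaluates each selected option as "condition XOR invert bit", so a rule can require either the presence or the absence of a bridge in/out device. A minimal stand-alone sketch of that pattern (illustrative only; OPT_ISIN and the demo_* names are hypothetical stand-ins, not the kernel's XT_PHYSDEV_OP_* flags):

#include <stdbool.h>
#include <stdio.h>

#define OPT_ISIN 0x1	/* hypothetical stand-in for an "is-in" option flag */

/*
 * Same shape as the kernel test: if the option is not selected there is no
 * constraint; otherwise the condition is XORed with the invert bit.
 */
static bool demo_match(bool has_bridge_indev, unsigned int bitmask,
		       unsigned int invert)
{
	if (!(bitmask & OPT_ISIN))
		return true;
	return has_bridge_indev ^ !!(invert & OPT_ISIN);
}

int main(void)
{
	/* option set, not inverted: matches only packets with a bridge in-device */
	printf("%d\n", demo_match(true, OPT_ISIN, 0));		/* 1 */
	printf("%d\n", demo_match(false, OPT_ISIN, 0));		/* 0 */
	/* option set and inverted: matches only packets without one */
	printf("%d\n", demo_match(false, OPT_ISIN, OPT_ISIN));	/* 1 */
	return 0;
}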
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/char/ttyprintk.c * * Copyright (C) 2010 Samo Pogacnik */ /* * This pseudo device allows users to make printk messages. It is possible * to store "console" messages inline with kernel messages for better analysis * of the boot process, for example. */ #include <linux/console.h> #include <linux/device.h> #include <linux/serial.h> #include <linux/tty.h> #include <linux/module.h> #include <linux/spinlock.h> struct ttyprintk_port { struct tty_port port; spinlock_t spinlock; }; static struct ttyprintk_port tpk_port; /* * Our simple preformatting supports transparent output of (time-stamped) * printk messages (also suitable for logging service): * - any cr is replaced by nl * - adds a ttyprintk source tag in front of each line * - too long message is fragmented, with '\'nl between fragments * - TPK_STR_SIZE isn't really the write_room limiting factor, because * it is emptied on the fly during preformatting. */ #define TPK_STR_SIZE 508 /* should be bigger than max expected line length */ #define TPK_MAX_ROOM 4096 /* we could assume 4K for instance */ #define TPK_PREFIX KERN_SOH __stringify(CONFIG_TTY_PRINTK_LEVEL) static int tpk_curr; static u8 tpk_buffer[TPK_STR_SIZE + 4]; static void tpk_flush(void) { if (tpk_curr > 0) { tpk_buffer[tpk_curr] = '\0'; printk(TPK_PREFIX "[U] %s\n", tpk_buffer); tpk_curr = 0; } } static int tpk_printk(const u8 *buf, size_t count) { size_t i; for (i = 0; i < count; i++) { if (tpk_curr >= TPK_STR_SIZE) { /* end of tmp buffer reached: cut the message in two */ tpk_buffer[tpk_curr++] = '\\'; tpk_flush(); } switch (buf[i]) { case '\r': tpk_flush(); if ((i + 1) < count && buf[i + 1] == '\n') i++; break; case '\n': tpk_flush(); break; default: tpk_buffer[tpk_curr++] = buf[i]; break; } } return count; } /* * TTY operations open function. */ static int tpk_open(struct tty_struct *tty, struct file *filp) { tty->driver_data = &tpk_port; return tty_port_open(&tpk_port.port, tty, filp); } /* * TTY operations close function. */ static void tpk_close(struct tty_struct *tty, struct file *filp) { struct ttyprintk_port *tpkp = tty->driver_data; tty_port_close(&tpkp->port, tty, filp); } /* * TTY operations write function. */ static ssize_t tpk_write(struct tty_struct *tty, const u8 *buf, size_t count) { struct ttyprintk_port *tpkp = tty->driver_data; unsigned long flags; int ret; /* exclusive use of tpk_printk within this tty */ spin_lock_irqsave(&tpkp->spinlock, flags); ret = tpk_printk(buf, count); spin_unlock_irqrestore(&tpkp->spinlock, flags); return ret; } /* * TTY operations write_room function.
*/ static unsigned int tpk_write_room(struct tty_struct *tty) { return TPK_MAX_ROOM; } /* * TTY operations hangup function. */ static void tpk_hangup(struct tty_struct *tty) { struct ttyprintk_port *tpkp = tty->driver_data; tty_port_hangup(&tpkp->port); } /* * TTY port operations shutdown function. */ static void tpk_port_shutdown(struct tty_port *tport) { struct ttyprintk_port *tpkp = container_of(tport, struct ttyprintk_port, port); unsigned long flags; spin_lock_irqsave(&tpkp->spinlock, flags); tpk_flush(); spin_unlock_irqrestore(&tpkp->spinlock, flags); } static const struct tty_operations ttyprintk_ops = { .open = tpk_open, .close = tpk_close, .write = tpk_write, .write_room = tpk_write_room, .hangup = tpk_hangup, }; static const struct tty_port_operations tpk_port_ops = { .shutdown = tpk_port_shutdown, }; static struct tty_driver *ttyprintk_driver; static struct tty_driver *ttyprintk_console_device(struct console *c, int *index) { *index = 0; return ttyprintk_driver; } static struct console ttyprintk_console = { .name = "ttyprintk", .device = ttyprintk_console_device, }; static int __init ttyprintk_init(void) { int ret; spin_lock_init(&tpk_port.spinlock); ttyprintk_driver = tty_alloc_driver(1, TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_UNNUMBERED_NODE); if (IS_ERR(ttyprintk_driver)) return PTR_ERR(ttyprintk_driver); tty_port_init(&tpk_port.port); tpk_port.port.ops = &tpk_port_ops; ttyprintk_driver->driver_name = "ttyprintk"; ttyprintk_driver->name = "ttyprintk"; ttyprintk_driver->major = TTYAUX_MAJOR; ttyprintk_driver->minor_start = 3; ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE; ttyprintk_driver->init_termios = tty_std_termios; ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET; tty_set_operations(ttyprintk_driver, &ttyprintk_ops); tty_port_link_device(&tpk_port.port, ttyprintk_driver, 0); ret = tty_register_driver(ttyprintk_driver); if (ret < 0) { printk(KERN_ERR "Couldn't register ttyprintk driver\n"); goto error; } register_console(&ttyprintk_console); return 0; error: tty_driver_kref_put(ttyprintk_driver); tty_port_destroy(&tpk_port.port); return ret; } static void __exit ttyprintk_exit(void) { unregister_console(&ttyprintk_console); tty_unregister_driver(ttyprintk_driver); tty_driver_kref_put(ttyprintk_driver); tty_port_destroy(&tpk_port.port); } device_initcall(ttyprintk_init); module_exit(ttyprintk_exit); MODULE_DESCRIPTION("TTY driver to output user messages via printk"); MODULE_LICENSE("GPL");
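tpk_printk() above reformats user writes line by line: '\r' and '\n' flush the staging buffer as one printk() line, and a line longer than TPK_STR_SIZE is split with a trailing backslash. A user-space approximation of that preformatting (illustrative only; the tiny buffer size and demo_* names are stand-ins):

#include <stdio.h>
#include <string.h>

#define DEMO_STR_SIZE 16	/* stands in for TPK_STR_SIZE (508) */

static char demo_buf[DEMO_STR_SIZE + 2];
static size_t demo_len;

static void demo_flush(void)
{
	if (demo_len) {
		demo_buf[demo_len] = '\0';
		printf("[U] %s\n", demo_buf);	/* the driver uses printk() here */
		demo_len = 0;
	}
}

static void demo_write(const char *buf, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		if (demo_len >= DEMO_STR_SIZE) {	/* fragment over-long lines */
			demo_buf[demo_len++] = '\\';
			demo_flush();
		}
		if (buf[i] == '\r' || buf[i] == '\n')
			demo_flush();	/* CR and NL both end the line */
		else
			demo_buf[demo_len++] = buf[i];
	}
}

int main(void)
{
	const char *msg = "short line\rthis line is longer than the tiny demo buffer\n";
	demo_write(msg, strlen(msg));
	demo_flush();	/* emit any trailing partial line */
	return 0;
}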
// SPDX-License-Identifier: GPL-2.0-only /* * linux/net/sunrpc/clnt.c * * This file contains the high-level RPC interface. * It is modeled as a finite state machine to support both synchronous * and asynchronous requests. * * - RPC header generation and argument serialization. * - Credential refresh. * - TCP connect handling. * - Retry of operation when it is suspected the operation failed because * of uid squashing on the server, or when the credentials were stale * and need to be refreshed, or when a packet was damaged in transit. * This may have to be moved to the VFS layer.
* * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kallsyms.h> #include <linux/mm.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/utsname.h> #include <linux/workqueue.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/un.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/addr.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/sunrpc/metrics.h> #include <linux/sunrpc/bc_xprt.h> #include <trace/events/sunrpc.h> #include "sunrpc.h" #include "sysfs.h" #include "netns.h" #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define RPCDBG_FACILITY RPCDBG_CALL #endif static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); static void call_start(struct rpc_task *task); static void call_reserve(struct rpc_task *task); static void call_reserveresult(struct rpc_task *task); static void call_allocate(struct rpc_task *task); static void call_encode(struct rpc_task *task); static void call_decode(struct rpc_task *task); static void call_bind(struct rpc_task *task); static void call_bind_status(struct rpc_task *task); static void call_transmit(struct rpc_task *task); static void call_status(struct rpc_task *task); static void call_transmit_status(struct rpc_task *task); static void call_refresh(struct rpc_task *task); static void call_refreshresult(struct rpc_task *task); static void call_connect(struct rpc_task *task); static void call_connect_status(struct rpc_task *task); static int rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr); static int rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr); static int rpc_ping(struct rpc_clnt *clnt); static int rpc_ping_noreply(struct rpc_clnt *clnt); static void rpc_check_timeout(struct rpc_task *task); static void rpc_register_client(struct rpc_clnt *clnt) { struct net *net = rpc_net_ns(clnt); struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_add(&clnt->cl_clients, &sn->all_clients); spin_unlock(&sn->rpc_client_lock); } static void rpc_unregister_client(struct rpc_clnt *clnt) { struct net *net = rpc_net_ns(clnt); struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_del(&clnt->cl_clients); spin_unlock(&sn->rpc_client_lock); } static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) { rpc_remove_client_dir(clnt); } static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) { struct net *net = rpc_net_ns(clnt); struct super_block *pipefs_sb; pipefs_sb = rpc_get_sb_net(net); if (pipefs_sb) { if (pipefs_sb == clnt->pipefs_sb) __rpc_clnt_remove_pipedir(clnt); rpc_put_sb_net(net); } } static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb, struct rpc_clnt *clnt) { static uint32_t clntid; const char *dir_name = clnt->cl_program->pipe_dir_name; char name[15]; struct dentry *dir, *dentry; dir = rpc_d_lookup_sb(sb, dir_name); if (dir == NULL) { pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name); return dir; } for (;;) { snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); name[sizeof(name) - 1] = '\0'; dentry = rpc_create_client_dir(dir, name, clnt); if (!IS_ERR(dentry)) break; if (dentry == ERR_PTR(-EEXIST)) continue; printk(KERN_INFO "RPC: Couldn't create pipefs entry" " %s/%s, error %ld\n", dir_name, name, PTR_ERR(dentry)); break; } dput(dir); return dentry; } static int rpc_setup_pipedir(struct super_block 
*pipefs_sb, struct rpc_clnt *clnt) { struct dentry *dentry; clnt->pipefs_sb = pipefs_sb; if (clnt->cl_program->pipe_dir_name != NULL) { dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt); if (IS_ERR(dentry)) return PTR_ERR(dentry); } return 0; } static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event) { if (clnt->cl_program->pipe_dir_name == NULL) return 1; switch (event) { case RPC_PIPEFS_MOUNT: if (clnt->cl_pipedir_objects.pdh_dentry != NULL) return 1; if (refcount_read(&clnt->cl_count) == 0) return 1; break; case RPC_PIPEFS_UMOUNT: if (clnt->cl_pipedir_objects.pdh_dentry == NULL) return 1; break; } return 0; } static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event, struct super_block *sb) { struct dentry *dentry; switch (event) { case RPC_PIPEFS_MOUNT: dentry = rpc_setup_pipedir_sb(sb, clnt); if (!dentry) return -ENOENT; if (IS_ERR(dentry)) return PTR_ERR(dentry); break; case RPC_PIPEFS_UMOUNT: __rpc_clnt_remove_pipedir(clnt); break; default: printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); return -ENOTSUPP; } return 0; } static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, struct super_block *sb) { int error = 0; for (;; clnt = clnt->cl_parent) { if (!rpc_clnt_skip_event(clnt, event)) error = __rpc_clnt_handle_event(clnt, event, sb); if (error || clnt == clnt->cl_parent) break; } return error; } static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); struct rpc_clnt *clnt; spin_lock(&sn->rpc_client_lock); list_for_each_entry(clnt, &sn->all_clients, cl_clients) { if (rpc_clnt_skip_event(clnt, event)) continue; spin_unlock(&sn->rpc_client_lock); return clnt; } spin_unlock(&sn->rpc_client_lock); return NULL; } static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct super_block *sb = ptr; struct rpc_clnt *clnt; int error = 0; while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) { error = __rpc_pipefs_event(clnt, event, sb); if (error) break; } return error; } static struct notifier_block rpc_clients_block = { .notifier_call = rpc_pipefs_event, .priority = SUNRPC_PIPEFS_RPC_PRIO, }; int rpc_clients_notifier_register(void) { return rpc_pipefs_notifier_register(&rpc_clients_block); } void rpc_clients_notifier_unregister(void) { return rpc_pipefs_notifier_unregister(&rpc_clients_block); } static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt, struct rpc_xprt *xprt, const struct rpc_timeout *timeout) { struct rpc_xprt *old; spin_lock(&clnt->cl_lock); old = rcu_dereference_protected(clnt->cl_xprt, lockdep_is_held(&clnt->cl_lock)); if (!xprt_bound(xprt)) clnt->cl_autobind = 1; clnt->cl_timeout = timeout; rcu_assign_pointer(clnt->cl_xprt, xprt); spin_unlock(&clnt->cl_lock); return old; } static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename) { ssize_t copied; copied = strscpy(clnt->cl_nodename, nodename, sizeof(clnt->cl_nodename)); clnt->cl_nodelen = copied < 0 ? 
sizeof(clnt->cl_nodename) - 1 : copied; } static int rpc_client_register(struct rpc_clnt *clnt, rpc_authflavor_t pseudoflavor, const char *client_name) { struct rpc_auth_create_args auth_args = { .pseudoflavor = pseudoflavor, .target_name = client_name, }; struct rpc_auth *auth; struct net *net = rpc_net_ns(clnt); struct super_block *pipefs_sb; int err; rpc_clnt_debugfs_register(clnt); pipefs_sb = rpc_get_sb_net(net); if (pipefs_sb) { err = rpc_setup_pipedir(pipefs_sb, clnt); if (err) goto out; } rpc_register_client(clnt); if (pipefs_sb) rpc_put_sb_net(net); auth = rpcauth_create(&auth_args, clnt); if (IS_ERR(auth)) { dprintk("RPC: Couldn't create auth handle (flavor %u)\n", pseudoflavor); err = PTR_ERR(auth); goto err_auth; } return 0; err_auth: pipefs_sb = rpc_get_sb_net(net); rpc_unregister_client(clnt); __rpc_clnt_remove_pipedir(clnt); out: if (pipefs_sb) rpc_put_sb_net(net); rpc_sysfs_client_destroy(clnt); rpc_clnt_debugfs_unregister(clnt); return err; } static DEFINE_IDA(rpc_clids); void rpc_cleanup_clids(void) { ida_destroy(&rpc_clids); } static int rpc_alloc_clid(struct rpc_clnt *clnt) { int clid; clid = ida_alloc(&rpc_clids, GFP_KERNEL); if (clid < 0) return clid; clnt->cl_clid = clid; return 0; } static void rpc_free_clid(struct rpc_clnt *clnt) { ida_free(&rpc_clids, clnt->cl_clid); } static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt_switch *xps, struct rpc_xprt *xprt, struct rpc_clnt *parent) { const struct rpc_program *program = args->program; const struct rpc_version *version; struct rpc_clnt *clnt = NULL; const struct rpc_timeout *timeout; const char *nodename = args->nodename; int err; err = rpciod_up(); if (err) goto out_no_rpciod; err = -EINVAL; if (args->version >= program->nrvers) goto out_err; version = program->version[args->version]; if (version == NULL) goto out_err; err = -ENOMEM; clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); if (!clnt) goto out_err; clnt->cl_parent = parent ? : clnt; clnt->cl_xprtsec = args->xprtsec; err = rpc_alloc_clid(clnt); if (err) goto out_no_clid; clnt->cl_cred = get_cred(args->cred); clnt->cl_procinfo = version->procs; clnt->cl_maxproc = version->nrprocs; clnt->cl_prog = args->prognumber ? : program->number; clnt->cl_vers = version->number; clnt->cl_stats = args->stats ? 
: program->stats; clnt->cl_metrics = rpc_alloc_iostats(clnt); rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects); err = -ENOMEM; if (clnt->cl_metrics == NULL) goto out_no_stats; clnt->cl_program = program; INIT_LIST_HEAD(&clnt->cl_tasks); spin_lock_init(&clnt->cl_lock); timeout = xprt->timeout; if (args->timeout != NULL) { memcpy(&clnt->cl_timeout_default, args->timeout, sizeof(clnt->cl_timeout_default)); timeout = &clnt->cl_timeout_default; } rpc_clnt_set_transport(clnt, xprt, timeout); xprt->main = true; xprt_iter_init(&clnt->cl_xpi, xps); xprt_switch_put(xps); clnt->cl_rtt = &clnt->cl_rtt_default; rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); refcount_set(&clnt->cl_count, 1); if (nodename == NULL) nodename = utsname()->nodename; /* save the nodename */ rpc_clnt_set_nodename(clnt, nodename); rpc_sysfs_client_setup(clnt, xps, rpc_net_ns(clnt)); err = rpc_client_register(clnt, args->authflavor, args->client_name); if (err) goto out_no_path; if (parent) refcount_inc(&parent->cl_count); trace_rpc_clnt_new(clnt, xprt, args); return clnt; out_no_path: rpc_free_iostats(clnt->cl_metrics); out_no_stats: put_cred(clnt->cl_cred); rpc_free_clid(clnt); out_no_clid: kfree(clnt); out_err: rpciod_down(); out_no_rpciod: xprt_switch_put(xps); xprt_put(xprt); trace_rpc_clnt_new_err(program->name, args->servername, err); return ERR_PTR(err); } static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, struct rpc_xprt *xprt) { struct rpc_clnt *clnt = NULL; struct rpc_xprt_switch *xps; if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) { WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC)); xps = args->bc_xprt->xpt_bc_xps; xprt_switch_get(xps); } else { xps = xprt_switch_alloc(xprt, GFP_KERNEL); if (xps == NULL) { xprt_put(xprt); return ERR_PTR(-ENOMEM); } if (xprt->bc_xprt) { xprt_switch_get(xps); xprt->bc_xprt->xpt_bc_xps = xps; } } clnt = rpc_new_client(args, xps, xprt, NULL); if (IS_ERR(clnt)) return clnt; if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { int err = rpc_ping(clnt); if (err != 0) { rpc_shutdown_client(clnt); return ERR_PTR(err); } } else if (args->flags & RPC_CLNT_CREATE_CONNECTED) { int err = rpc_ping_noreply(clnt); if (err != 0) { rpc_shutdown_client(clnt); return ERR_PTR(err); } } clnt->cl_softrtry = 1; if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) { clnt->cl_softrtry = 0; if (args->flags & RPC_CLNT_CREATE_SOFTERR) clnt->cl_softerr = 1; } if (args->flags & RPC_CLNT_CREATE_AUTOBIND) clnt->cl_autobind = 1; if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT) clnt->cl_noretranstimeo = 1; if (args->flags & RPC_CLNT_CREATE_DISCRTRY) clnt->cl_discrtry = 1; if (!(args->flags & RPC_CLNT_CREATE_QUIET)) clnt->cl_chatty = 1; return clnt; } /** * rpc_create - create an RPC client and transport with one call * @args: rpc_clnt create argument structure * * Creates and initializes an RPC transport and an RPC client. * * It can ping the server in order to determine if it is up, and to see if * it supports this program and version. RPC_CLNT_CREATE_NOPING disables * this behavior so asynchronous tasks can also use rpc_create. 
*/ struct rpc_clnt *rpc_create(struct rpc_create_args *args) { struct rpc_xprt *xprt; struct xprt_create xprtargs = { .net = args->net, .ident = args->protocol, .srcaddr = args->saddress, .dstaddr = args->address, .addrlen = args->addrsize, .servername = args->servername, .bc_xprt = args->bc_xprt, .xprtsec = args->xprtsec, .connect_timeout = args->connect_timeout, .reconnect_timeout = args->reconnect_timeout, }; char servername[RPC_MAXNETNAMELEN]; struct rpc_clnt *clnt; int i; if (args->bc_xprt) { WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC)); xprt = args->bc_xprt->xpt_bc_xprt; if (xprt) { xprt_get(xprt); return rpc_create_xprt(args, xprt); } } if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT; /* * If the caller chooses not to specify a hostname, whip * up a string representation of the passed-in address. */ if (xprtargs.servername == NULL) { struct sockaddr_un *sun = (struct sockaddr_un *)args->address; struct sockaddr_in *sin = (struct sockaddr_in *)args->address; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)args->address; servername[0] = '\0'; switch (args->address->sa_family) { case AF_LOCAL: if (sun->sun_path[0]) snprintf(servername, sizeof(servername), "%s", sun->sun_path); else snprintf(servername, sizeof(servername), "@%s", sun->sun_path+1); break; case AF_INET: snprintf(servername, sizeof(servername), "%pI4", &sin->sin_addr.s_addr); break; case AF_INET6: snprintf(servername, sizeof(servername), "%pI6", &sin6->sin6_addr); break; default: /* caller wants default server name, but * address family isn't recognized. */ return ERR_PTR(-EINVAL); } xprtargs.servername = servername; } xprt = xprt_create_transport(&xprtargs); if (IS_ERR(xprt)) return (struct rpc_clnt *)xprt; /* * By default, kernel RPC client connects from a reserved port. * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, * but it is always enabled for rpciod, which handles the connect * operation. */ xprt->resvport = 1; if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT) xprt->resvport = 0; xprt->reuseport = 0; if (args->flags & RPC_CLNT_CREATE_REUSEPORT) xprt->reuseport = 1; clnt = rpc_create_xprt(args, xprt); if (IS_ERR(clnt) || args->nconnect <= 1) return clnt; for (i = 0; i < args->nconnect - 1; i++) { if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0) break; } return clnt; } EXPORT_SYMBOL_GPL(rpc_create); /* * This function clones the RPC client structure. It allows us to share the * same transport while varying parameters such as the authentication * flavour. 
*/ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, struct rpc_clnt *clnt) { struct rpc_xprt_switch *xps; struct rpc_xprt *xprt; struct rpc_clnt *new; int err; err = -ENOMEM; rcu_read_lock(); xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); rcu_read_unlock(); if (xprt == NULL || xps == NULL) { xprt_put(xprt); xprt_switch_put(xps); goto out_err; } args->servername = xprt->servername; args->nodename = clnt->cl_nodename; new = rpc_new_client(args, xps, xprt, clnt); if (IS_ERR(new)) return new; /* Turn off autobind on clones */ new->cl_autobind = 0; new->cl_softrtry = clnt->cl_softrtry; new->cl_softerr = clnt->cl_softerr; new->cl_noretranstimeo = clnt->cl_noretranstimeo; new->cl_discrtry = clnt->cl_discrtry; new->cl_chatty = clnt->cl_chatty; new->cl_principal = clnt->cl_principal; new->cl_max_connect = clnt->cl_max_connect; return new; out_err: trace_rpc_clnt_clone_err(clnt, err); return ERR_PTR(err); } /** * rpc_clone_client - Clone an RPC client structure * * @clnt: RPC client whose parameters are copied * * Returns a fresh RPC client or an ERR_PTR. */ struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) { struct rpc_create_args args = { .program = clnt->cl_program, .prognumber = clnt->cl_prog, .version = clnt->cl_vers, .authflavor = clnt->cl_auth->au_flavor, .cred = clnt->cl_cred, .stats = clnt->cl_stats, }; return __rpc_clone_client(&args, clnt); } EXPORT_SYMBOL_GPL(rpc_clone_client); /** * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth * * @clnt: RPC client whose parameters are copied * @flavor: security flavor for new client * * Returns a fresh RPC client or an ERR_PTR. */ struct rpc_clnt * rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) { struct rpc_create_args args = { .program = clnt->cl_program, .prognumber = clnt->cl_prog, .version = clnt->cl_vers, .authflavor = flavor, .cred = clnt->cl_cred, .stats = clnt->cl_stats, }; return __rpc_clone_client(&args, clnt); } EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth); /** * rpc_switch_client_transport: switch the RPC transport on the fly * @clnt: pointer to a struct rpc_clnt * @args: pointer to the new transport arguments * @timeout: pointer to the new timeout parameters * * This function allows the caller to switch the RPC transport for the * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS * server, for instance. It assumes that the caller has ensured that * there are no active RPC tasks by using some form of locking. * * Returns zero if "clnt" is now using the new xprt. Otherwise a * negative errno is returned, and "clnt" continues to use the old * xprt. */ int rpc_switch_client_transport(struct rpc_clnt *clnt, struct xprt_create *args, const struct rpc_timeout *timeout) { const struct rpc_timeout *old_timeo; rpc_authflavor_t pseudoflavor; struct rpc_xprt_switch *xps, *oldxps; struct rpc_xprt *xprt, *old; struct rpc_clnt *parent; int err; args->xprtsec = clnt->cl_xprtsec; xprt = xprt_create_transport(args); if (IS_ERR(xprt)) return PTR_ERR(xprt); xps = xprt_switch_alloc(xprt, GFP_KERNEL); if (xps == NULL) { xprt_put(xprt); return -ENOMEM; } pseudoflavor = clnt->cl_auth->au_flavor; old_timeo = clnt->cl_timeout; old = rpc_clnt_set_transport(clnt, xprt, timeout); oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps); rpc_unregister_client(clnt); __rpc_clnt_remove_pipedir(clnt); rpc_sysfs_client_destroy(clnt); rpc_clnt_debugfs_unregister(clnt); /* * A new transport was created. 
"clnt" therefore * becomes the root of a new cl_parent tree. clnt's * children, if it has any, still point to the old xprt. */ parent = clnt->cl_parent; clnt->cl_parent = clnt; /* * The old rpc_auth cache cannot be re-used. GSS * contexts in particular are between a single * client and server. */ err = rpc_client_register(clnt, pseudoflavor, NULL); if (err) goto out_revert; synchronize_rcu(); if (parent != clnt) rpc_release_client(parent); xprt_switch_put(oldxps); xprt_put(old); trace_rpc_clnt_replace_xprt(clnt); return 0; out_revert: xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps); rpc_clnt_set_transport(clnt, old, old_timeo); clnt->cl_parent = parent; rpc_client_register(clnt, pseudoflavor, NULL); xprt_switch_put(xps); xprt_put(xprt); trace_rpc_clnt_replace_xprt_err(clnt); return err; } EXPORT_SYMBOL_GPL(rpc_switch_client_transport); static struct rpc_xprt_switch *rpc_clnt_xprt_switch_get(struct rpc_clnt *clnt) { struct rpc_xprt_switch *xps; rcu_read_lock(); xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); rcu_read_unlock(); return xps; } static int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi, void func(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps)) { struct rpc_xprt_switch *xps; xps = rpc_clnt_xprt_switch_get(clnt); if (xps == NULL) return -EAGAIN; func(xpi, xps); xprt_switch_put(xps); return 0; } static int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi) { return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listall); } static int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi) { return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listoffline); } /** * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports * @clnt: pointer to client * @fn: function to apply * @data: void pointer to function data * * Iterates through the list of RPC transports currently attached to the * client and applies the function fn(clnt, xprt, data). * * On error, the iteration stops, and the function returns the error value. */ int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt, int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *), void *data) { struct rpc_xprt_iter xpi; int ret; ret = rpc_clnt_xprt_iter_init(clnt, &xpi); if (ret) return ret; for (;;) { struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); if (!xprt) break; ret = fn(clnt, xprt, data); xprt_put(xprt); if (ret < 0) break; } xprt_iter_destroy(&xpi); return ret; } EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt); /* * Kill all tasks for the given client. * XXX: kill their descendants as well? */ void rpc_killall_tasks(struct rpc_clnt *clnt) { struct rpc_task *rovr; if (list_empty(&clnt->cl_tasks)) return; /* * Spin lock all_tasks to prevent changes... */ trace_rpc_clnt_killall(clnt); spin_lock(&clnt->cl_lock); list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) rpc_signal_task(rovr); spin_unlock(&clnt->cl_lock); } EXPORT_SYMBOL_GPL(rpc_killall_tasks); /** * rpc_cancel_tasks - try to cancel a set of RPC tasks * @clnt: Pointer to RPC client * @error: RPC task error value to set * @fnmatch: Pointer to selector function * @data: User data * * Uses @fnmatch to define a set of RPC tasks that are to be cancelled. * The argument @error must be a negative error value. 
*/ unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error, bool (*fnmatch)(const struct rpc_task *, const void *), const void *data) { struct rpc_task *task; unsigned long count = 0; if (list_empty(&clnt->cl_tasks)) return 0; /* * Spin lock all_tasks to prevent changes... */ spin_lock(&clnt->cl_lock); list_for_each_entry(task, &clnt->cl_tasks, tk_task) { if (!RPC_IS_ACTIVATED(task)) continue; if (!fnmatch(task, data)) continue; rpc_task_try_cancel(task, error); count++; } spin_unlock(&clnt->cl_lock); return count; } EXPORT_SYMBOL_GPL(rpc_cancel_tasks); static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *dummy) { if (xprt_connected(xprt)) xprt_force_disconnect(xprt); return 0; } void rpc_clnt_disconnect(struct rpc_clnt *clnt) { rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL); } EXPORT_SYMBOL_GPL(rpc_clnt_disconnect); /* * Properly shut down an RPC client, terminating all outstanding * requests. */ void rpc_shutdown_client(struct rpc_clnt *clnt) { might_sleep(); trace_rpc_clnt_shutdown(clnt); while (!list_empty(&clnt->cl_tasks)) { rpc_killall_tasks(clnt); wait_event_timeout(destroy_wait, list_empty(&clnt->cl_tasks), 1*HZ); } rpc_release_client(clnt); } EXPORT_SYMBOL_GPL(rpc_shutdown_client); /* * Free an RPC client */ static void rpc_free_client_work(struct work_struct *work) { struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work); trace_rpc_clnt_free(clnt); /* These might block on processes that might allocate memory, * so they cannot be called in rpciod, so they are handled separately * here. */ rpc_sysfs_client_destroy(clnt); rpc_clnt_debugfs_unregister(clnt); rpc_free_clid(clnt); rpc_clnt_remove_pipedir(clnt); xprt_put(rcu_dereference_raw(clnt->cl_xprt)); kfree(clnt); rpciod_down(); } static struct rpc_clnt * rpc_free_client(struct rpc_clnt *clnt) { struct rpc_clnt *parent = NULL; trace_rpc_clnt_release(clnt); if (clnt->cl_parent != clnt) parent = clnt->cl_parent; rpc_unregister_client(clnt); rpc_free_iostats(clnt->cl_metrics); clnt->cl_metrics = NULL; xprt_iter_destroy(&clnt->cl_xpi); put_cred(clnt->cl_cred); INIT_WORK(&clnt->cl_work, rpc_free_client_work); schedule_work(&clnt->cl_work); return parent; } /* * Free an RPC client */ static struct rpc_clnt * rpc_free_auth(struct rpc_clnt *clnt) { /* * Note: RPCSEC_GSS may need to send NULL RPC calls in order to * release remaining GSS contexts. This mechanism ensures * that it can do so safely. */ if (clnt->cl_auth != NULL) { rpcauth_release(clnt->cl_auth); clnt->cl_auth = NULL; } if (refcount_dec_and_test(&clnt->cl_count)) return rpc_free_client(clnt); return NULL; } /* * Release reference to the RPC client */ void rpc_release_client(struct rpc_clnt *clnt) { do { if (list_empty(&clnt->cl_tasks)) wake_up(&destroy_wait); if (refcount_dec_not_one(&clnt->cl_count)) break; clnt = rpc_free_auth(clnt); } while (clnt != NULL); } EXPORT_SYMBOL_GPL(rpc_release_client); /** * rpc_bind_new_program - bind a new RPC program to an existing client * @old: old rpc_client * @program: rpc program to set * @vers: rpc program version * * Clones the rpc client and sets up a new RPC program. This is mainly * of use for enabling different RPC programs to share the same transport. * The Sun NFSv2/v3 ACL protocol can do this. 
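 *
 * For example (a sketch; "nfs_clnt" is the existing client passed as @old
 * and "nfsacl_program" stands in for whatever struct rpc_program the
 * caller supplies):
 *
 *	struct rpc_clnt *acl_clnt;
 *
 *	acl_clnt = rpc_bind_new_program(nfs_clnt, &nfsacl_program, 3);
 *	if (IS_ERR(acl_clnt))
 *		return PTR_ERR(acl_clnt);
 *
 * The new client reuses @old's transport and credentials.  Note that this
 * function also pings the server, so it can fail with the ping's error
 * even when the clone itself succeeded.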
*/ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, const struct rpc_program *program, u32 vers) { struct rpc_create_args args = { .program = program, .prognumber = program->number, .version = vers, .authflavor = old->cl_auth->au_flavor, .cred = old->cl_cred, .stats = old->cl_stats, .timeout = old->cl_timeout, }; struct rpc_clnt *clnt; int err; clnt = __rpc_clone_client(&args, old); if (IS_ERR(clnt)) goto out; err = rpc_ping(clnt); if (err != 0) { rpc_shutdown_client(clnt); clnt = ERR_PTR(err); } out: return clnt; } EXPORT_SYMBOL_GPL(rpc_bind_new_program); struct rpc_xprt * rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) { struct rpc_xprt_switch *xps; if (!xprt) return NULL; rcu_read_lock(); xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); atomic_long_inc(&xps->xps_queuelen); rcu_read_unlock(); atomic_long_inc(&xprt->queuelen); return xprt; } static void rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) { struct rpc_xprt_switch *xps; atomic_long_dec(&xprt->queuelen); rcu_read_lock(); xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); atomic_long_dec(&xps->xps_queuelen); rcu_read_unlock(); xprt_put(xprt); } void rpc_task_release_transport(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; if (xprt) { task->tk_xprt = NULL; if (task->tk_client) rpc_task_release_xprt(task->tk_client, xprt); else xprt_put(xprt); } } EXPORT_SYMBOL_GPL(rpc_task_release_transport); void rpc_task_release_client(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; rpc_task_release_transport(task); if (clnt != NULL) { /* Remove from client task list */ spin_lock(&clnt->cl_lock); list_del(&task->tk_task); spin_unlock(&clnt->cl_lock); task->tk_client = NULL; rpc_release_client(clnt); } } static struct rpc_xprt * rpc_task_get_first_xprt(struct rpc_clnt *clnt) { struct rpc_xprt *xprt; rcu_read_lock(); xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); rcu_read_unlock(); return rpc_task_get_xprt(clnt, xprt); } static struct rpc_xprt * rpc_task_get_next_xprt(struct rpc_clnt *clnt) { return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi)); } static void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt) { if (task->tk_xprt) { if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) && (task->tk_flags & RPC_TASK_MOVEABLE))) return; xprt_release(task); xprt_put(task->tk_xprt); } if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) task->tk_xprt = rpc_task_get_first_xprt(clnt); else task->tk_xprt = rpc_task_get_next_xprt(clnt); } static void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) { rpc_task_set_transport(task, clnt); task->tk_client = clnt; refcount_inc(&clnt->cl_count); if (clnt->cl_softrtry) task->tk_flags |= RPC_TASK_SOFT; if (clnt->cl_softerr) task->tk_flags |= RPC_TASK_TIMEOUT; if (clnt->cl_noretranstimeo) task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; /* Add to the client's list of all tasks */ spin_lock(&clnt->cl_lock); list_add_tail(&task->tk_task, &clnt->cl_tasks); spin_unlock(&clnt->cl_lock); } static void rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg) { if (msg != NULL) { task->tk_msg.rpc_proc = msg->rpc_proc; task->tk_msg.rpc_argp = msg->rpc_argp; task->tk_msg.rpc_resp = msg->rpc_resp; task->tk_msg.rpc_cred = msg->rpc_cred; if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) get_cred(task->tk_msg.rpc_cred); } } /* * Default callback for async RPC calls */ static void rpc_default_callback(struct rpc_task *task, void *data) { } static const struct rpc_call_ops rpc_default_ops = { 
.rpc_call_done = rpc_default_callback, }; /** * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it * @task_setup_data: pointer to task initialisation data */ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) { struct rpc_task *task; task = rpc_new_task(task_setup_data); if (IS_ERR(task)) return task; if (!RPC_IS_ASYNC(task)) task->tk_flags |= RPC_TASK_CRED_NOREF; rpc_task_set_client(task, task_setup_data->rpc_client); rpc_task_set_rpc_message(task, task_setup_data->rpc_message); if (task->tk_action == NULL) rpc_call_start(task); atomic_inc(&task->tk_count); rpc_execute(task); return task; } EXPORT_SYMBOL_GPL(rpc_run_task); /** * rpc_call_sync - Perform a synchronous RPC call * @clnt: pointer to RPC client * @msg: RPC call parameters * @flags: RPC call flags */ int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags) { struct rpc_task *task; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = msg, .callback_ops = &rpc_default_ops, .flags = flags, }; int status; WARN_ON_ONCE(flags & RPC_TASK_ASYNC); if (flags & RPC_TASK_ASYNC) { rpc_release_calldata(task_setup_data.callback_ops, task_setup_data.callback_data); return -EINVAL; } task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); return status; } EXPORT_SYMBOL_GPL(rpc_call_sync); /** * rpc_call_async - Perform an asynchronous RPC call * @clnt: pointer to RPC client * @msg: RPC call parameters * @flags: RPC call flags * @tk_ops: RPC call ops * @data: user call data */ int rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, const struct rpc_call_ops *tk_ops, void *data) { struct rpc_task *task; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = msg, .callback_ops = tk_ops, .callback_data = data, .flags = flags|RPC_TASK_ASYNC, }; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); rpc_put_task(task); return 0; } EXPORT_SYMBOL_GPL(rpc_call_async); #if defined(CONFIG_SUNRPC_BACKCHANNEL) static void call_bc_encode(struct rpc_task *task); /** * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run * rpc_execute against it * @req: RPC request * @timeout: timeout values to use for this task */ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, struct rpc_timeout *timeout) { struct rpc_task *task; struct rpc_task_setup task_setup_data = { .callback_ops = &rpc_default_ops, .flags = RPC_TASK_SOFTCONN | RPC_TASK_NO_RETRANS_TIMEOUT, }; dprintk("RPC: rpc_run_bc_task req= %p\n", req); /* * Create an rpc_task to send the data */ task = rpc_new_task(&task_setup_data); if (IS_ERR(task)) { xprt_free_bc_request(req); return task; } xprt_init_bc_request(req, task, timeout); task->tk_action = call_bc_encode; atomic_inc(&task->tk_count); WARN_ON_ONCE(atomic_read(&task->tk_count) != 2); rpc_execute(task); dprintk("RPC: rpc_run_bc_task: task= %p\n", task); return task; } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ /** * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages * @req: RPC request to prepare * @pages: vector of struct page pointers * @base: offset in first page where receive should start, in bytes * @len: expected size of the upper layer data payload, in bytes * @hdrsize: expected size of upper layer reply header, in XDR words * */ void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, unsigned int base, unsigned int len, unsigned int hdrsize) { hdrsize 
+= RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign; xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len); trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf); } EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages); void rpc_call_start(struct rpc_task *task) { task->tk_action = call_start; } EXPORT_SYMBOL_GPL(rpc_call_start); /** * rpc_peeraddr - extract remote peer address from clnt's xprt * @clnt: RPC client structure * @buf: target buffer * @bufsize: length of target buffer * * Returns the number of bytes that are actually in the stored address. */ size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) { size_t bytes; struct rpc_xprt *xprt; rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); bytes = xprt->addrlen; if (bytes > bufsize) bytes = bufsize; memcpy(buf, &xprt->addr, bytes); rcu_read_unlock(); return bytes; } EXPORT_SYMBOL_GPL(rpc_peeraddr); /** * rpc_peeraddr2str - return remote peer address in printable format * @clnt: RPC client structure * @format: address format * * NB: the lifetime of the memory referenced by the returned pointer is * the same as the rpc_xprt itself. As long as the caller uses this * pointer, it must hold the RCU read lock. */ const char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format) { struct rpc_xprt *xprt; xprt = rcu_dereference(clnt->cl_xprt); if (xprt->address_strings[format] != NULL) return xprt->address_strings[format]; else return "unprintable"; } EXPORT_SYMBOL_GPL(rpc_peeraddr2str); static const struct sockaddr_in rpc_inaddr_loopback = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), }; static const struct sockaddr_in6 rpc_in6addr_loopback = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, }; /* * Try a getsockname() on a connected datagram socket. Using a * connected datagram socket prevents leaving a socket in TIME_WAIT. * This conserves the ephemeral port number space. * * Returns zero and fills in "buf" if successful; otherwise, a * negative errno is returned. */ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, struct sockaddr *buf) { struct socket *sock; int err; err = __sock_create(net, sap->sa_family, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); if (err < 0) { dprintk("RPC: can't create UDP socket (%d)\n", err); goto out; } switch (sap->sa_family) { case AF_INET: err = kernel_bind(sock, (struct sockaddr *)&rpc_inaddr_loopback, sizeof(rpc_inaddr_loopback)); break; case AF_INET6: err = kernel_bind(sock, (struct sockaddr *)&rpc_in6addr_loopback, sizeof(rpc_in6addr_loopback)); break; default: err = -EAFNOSUPPORT; goto out_release; } if (err < 0) { dprintk("RPC: can't bind UDP socket (%d)\n", err); goto out_release; } err = kernel_connect(sock, sap, salen, 0); if (err < 0) { dprintk("RPC: can't connect UDP socket (%d)\n", err); goto out_release; } err = kernel_getsockname(sock, buf); if (err < 0) { dprintk("RPC: getsockname failed (%d)\n", err); goto out_release; } err = 0; if (buf->sa_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf; sin6->sin6_scope_id = 0; } dprintk("RPC: %s succeeded\n", __func__); out_release: sock_release(sock); out: return err; } /* * Scraping a connected socket failed, so we don't have a useable * local address. Fallback: generate an address that will prevent * the server from calling us back. * * Returns zero and fills in "buf" if successful; otherwise, a * negative errno is returned. 
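 *
 * Concretely: despite their "_loopback" names, rpc_inaddr_loopback and
 * rpc_in6addr_loopback above hold the wildcard address, so an AF_INET
 * caller gets back 0.0.0.0 and an AF_INET6 caller gets back ::, neither
 * of which the server can use to call us back.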
*/ static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen) { switch (family) { case AF_INET: if (buflen < sizeof(rpc_inaddr_loopback)) return -EINVAL; memcpy(buf, &rpc_inaddr_loopback, sizeof(rpc_inaddr_loopback)); break; case AF_INET6: if (buflen < sizeof(rpc_in6addr_loopback)) return -EINVAL; memcpy(buf, &rpc_in6addr_loopback, sizeof(rpc_in6addr_loopback)); break; default: dprintk("RPC: %s: address family not supported\n", __func__); return -EAFNOSUPPORT; } dprintk("RPC: %s: succeeded\n", __func__); return 0; } /** * rpc_localaddr - discover local endpoint address for an RPC client * @clnt: RPC client structure * @buf: target buffer * @buflen: size of target buffer, in bytes * * Returns zero and fills in "buf" and "buflen" if successful; * otherwise, a negative errno is returned. * * This works even if the underlying transport is not currently connected, * or if the upper layer never previously provided a source address. * * The result of this function call is transient: multiple calls in * succession may give different results, depending on how local * networking configuration changes over time. */ int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen) { struct sockaddr_storage address; struct sockaddr *sap = (struct sockaddr *)&address; struct rpc_xprt *xprt; struct net *net; size_t salen; int err; rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); salen = xprt->addrlen; memcpy(sap, &xprt->addr, salen); net = get_net(xprt->xprt_net); rcu_read_unlock(); rpc_set_port(sap, 0); err = rpc_sockname(net, sap, salen, buf); put_net(net); if (err != 0) /* Couldn't discover local address, return ANYADDR */ return rpc_anyaddr(sap->sa_family, buf, buflen); return 0; } EXPORT_SYMBOL_GPL(rpc_localaddr); void rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) { struct rpc_xprt *xprt; rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); if (xprt->ops->set_buffer_size) xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rpc_setbufsize); /** * rpc_net_ns - Get the network namespace for this RPC client * @clnt: RPC client to query * */ struct net *rpc_net_ns(struct rpc_clnt *clnt) { struct net *ret; rcu_read_lock(); ret = rcu_dereference(clnt->cl_xprt)->xprt_net; rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rpc_net_ns); /** * rpc_max_payload - Get maximum payload size for a transport, in bytes * @clnt: RPC client to query * * For stream transports, this is one RPC record fragment (see RFC * 1831), as we don't support multi-record requests yet. For datagram * transports, this is the size of an IP packet minus the IP, UDP, and * RPC header sizes. 
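 *
 * A typical use (sketch only; "wsize" just stands for an upper-layer
 * transfer-size tunable being clamped to what the transport can carry):
 *
 *	size_t maxpayload = rpc_max_payload(clnt);
 *
 *	if (wsize > maxpayload)
 *		wsize = maxpayload;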
*/ size_t rpc_max_payload(struct rpc_clnt *clnt) { size_t ret; rcu_read_lock(); ret = rcu_dereference(clnt->cl_xprt)->max_payload; rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rpc_max_payload); /** * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes * @clnt: RPC client to query */ size_t rpc_max_bc_payload(struct rpc_clnt *clnt) { struct rpc_xprt *xprt; size_t ret; rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); ret = xprt->ops->bc_maxpayload(xprt); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rpc_max_bc_payload); unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt) { struct rpc_xprt *xprt; unsigned int ret; rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); ret = xprt->ops->bc_num_slots(xprt); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rpc_num_bc_slots); /** * rpc_force_rebind - force transport to check that remote port is unchanged * @clnt: client to rebind * */ void rpc_force_rebind(struct rpc_clnt *clnt) { if (clnt->cl_autobind) { rcu_read_lock(); xprt_clear_bound(rcu_dereference(clnt->cl_xprt)); rcu_read_unlock(); } } EXPORT_SYMBOL_GPL(rpc_force_rebind); static int __rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *)) { task->tk_status = 0; task->tk_rpc_status = 0; task->tk_action = action; return 1; } /* * Restart an (async) RPC call. Usually called from within the * exit handler. */ int rpc_restart_call(struct rpc_task *task) { return __rpc_restart_call(task, call_start); } EXPORT_SYMBOL_GPL(rpc_restart_call); /* * Restart an (async) RPC call from the call_prepare state. * Usually called from within the exit handler. */ int rpc_restart_call_prepare(struct rpc_task *task) { if (task->tk_ops->rpc_call_prepare != NULL) return __rpc_restart_call(task, rpc_prepare_task); return rpc_restart_call(task); } EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); const char *rpc_proc_name(const struct rpc_task *task) { const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; if (proc) { if (proc->p_name) return proc->p_name; else return "NULL"; } else return "no proc"; } static void __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status) { trace_rpc_call_rpcerror(task, tk_status, rpc_status); rpc_task_set_rpc_status(task, rpc_status); rpc_exit(task, tk_status); } static void rpc_call_rpcerror(struct rpc_task *task, int status) { __rpc_call_rpcerror(task, status, status); } /* * 0. Initial state * * Other FSM states can be visited zero or more times, but * this state is visited exactly once for each RPC. */ static void call_start(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; int idx = task->tk_msg.rpc_proc->p_statidx; trace_rpc_request(task); if (task->tk_client->cl_shutdown) { rpc_call_rpcerror(task, -EIO); return; } /* Increment call count (version might not be valid for ping) */ if (clnt->cl_program->version[clnt->cl_vers]) clnt->cl_program->version[clnt->cl_vers]->counts[idx]++; clnt->cl_stats->rpccnt++; task->tk_action = call_reserve; rpc_task_set_transport(task, clnt); } /* * 1. Reserve an RPC call slot */ static void call_reserve(struct rpc_task *task) { task->tk_status = 0; task->tk_action = call_reserveresult; xprt_reserve(task); } static void call_retry_reserve(struct rpc_task *task); /* * 1b. Grok the result of xprt_reserve() */ static void call_reserveresult(struct rpc_task *task) { int status = task->tk_status; /* * After a call to xprt_reserve(), we must have either * a request slot or else an error status. 
*/ task->tk_status = 0; if (status >= 0) { if (task->tk_rqstp) { task->tk_action = call_refresh; return; } rpc_call_rpcerror(task, -EIO); return; } switch (status) { case -ENOMEM: rpc_delay(task, HZ >> 2); fallthrough; case -EAGAIN: /* woken up; retry */ task->tk_action = call_retry_reserve; return; default: rpc_call_rpcerror(task, status); } } /* * 1c. Retry reserving an RPC call slot */ static void call_retry_reserve(struct rpc_task *task) { task->tk_status = 0; task->tk_action = call_reserveresult; xprt_retry_reserve(task); } /* * 2. Bind and/or refresh the credentials */ static void call_refresh(struct rpc_task *task) { task->tk_action = call_refreshresult; task->tk_status = 0; task->tk_client->cl_stats->rpcauthrefresh++; rpcauth_refreshcred(task); } /* * 2a. Process the results of a credential refresh */ static void call_refreshresult(struct rpc_task *task) { int status = task->tk_status; task->tk_status = 0; task->tk_action = call_refresh; switch (status) { case 0: if (rpcauth_uptodatecred(task)) { task->tk_action = call_allocate; return; } /* Use rate-limiting and a max number of retries if refresh * had status 0 but failed to update the cred. */ fallthrough; case -ETIMEDOUT: rpc_delay(task, 3*HZ); fallthrough; case -EAGAIN: status = -EACCES; fallthrough; case -EKEYEXPIRED: if (!task->tk_cred_retry) break; task->tk_cred_retry--; trace_rpc_retry_refresh_status(task); return; case -ENOMEM: rpc_delay(task, HZ >> 4); return; } trace_rpc_refresh_status(task); rpc_call_rpcerror(task, status); } /* * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc. * (Note: buffer memory is freed in xprt_release). */ static void call_allocate(struct rpc_task *task) { const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; int status; task->tk_status = 0; task->tk_action = call_encode; if (req->rq_buffer) return; /* * Calculate the size (in quads) of the RPC call * and reply headers, and convert both values * to byte sizes. */ req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) + proc->p_arglen; req->rq_callsize <<= 2; /* * Note: the reply buffer must at minimum allocate enough space * for the 'struct accepted_reply' from RFC5531. */ req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \ max_t(size_t, proc->p_replen, 2); req->rq_rcvsize <<= 2; status = xprt->ops->buf_alloc(task); trace_rpc_buf_alloc(task, status); if (status == 0) return; if (status != -ENOMEM) { rpc_call_rpcerror(task, status); return; } if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { task->tk_action = call_allocate; rpc_delay(task, HZ>>4); return; } rpc_call_rpcerror(task, -ERESTARTSYS); } static int rpc_task_need_encode(struct rpc_task *task) { return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 && (!(task->tk_flags & RPC_TASK_SENT) || !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) || xprt_request_need_retransmit(task)); } static void rpc_xdr_encode(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct xdr_stream xdr; xdr_buf_init(&req->rq_snd_buf, req->rq_buffer, req->rq_callsize); xdr_buf_init(&req->rq_rcv_buf, req->rq_rbuffer, req->rq_rcvsize); req->rq_reply_bytes_recvd = 0; req->rq_snd_buf.head[0].iov_len = 0; xdr_init_encode(&xdr, &req->rq_snd_buf, req->rq_snd_buf.head[0].iov_base, req); if (rpc_encode_header(task, &xdr)) return; task->tk_status = rpcauth_wrap_req(task, &xdr); } /* * 3. 
Encode arguments of an RPC call */ static void call_encode(struct rpc_task *task) { if (!rpc_task_need_encode(task)) goto out; /* Dequeue task from the receive queue while we're encoding */ xprt_request_dequeue_xprt(task); /* Encode here so that rpcsec_gss can use correct sequence number. */ rpc_xdr_encode(task); /* Add task to reply queue before transmission to avoid races */ if (task->tk_status == 0 && rpc_reply_expected(task)) task->tk_status = xprt_request_enqueue_receive(task); /* Did the encode result in an error condition? */ if (task->tk_status != 0) { /* Was the error nonfatal? */ switch (task->tk_status) { case -EAGAIN: case -ENOMEM: rpc_delay(task, HZ >> 4); break; case -EKEYEXPIRED: if (!task->tk_cred_retry) { rpc_call_rpcerror(task, task->tk_status); } else { task->tk_action = call_refresh; task->tk_cred_retry--; trace_rpc_retry_refresh_status(task); } break; default: rpc_call_rpcerror(task, task->tk_status); } return; } xprt_request_enqueue_transmit(task); out: task->tk_action = call_transmit; /* Check that the connection is OK */ if (!xprt_bound(task->tk_xprt)) task->tk_action = call_bind; else if (!xprt_connected(task->tk_xprt)) task->tk_action = call_connect; } /* * Helpers to check if the task was already transmitted, and * to take action when that is the case. */ static bool rpc_task_transmitted(struct rpc_task *task) { return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate); } static void rpc_task_handle_transmitted(struct rpc_task *task) { xprt_end_transmit(task); task->tk_action = call_transmit_status; } /* * 4. Get the server port number if not yet set */ static void call_bind(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; if (rpc_task_transmitted(task)) { rpc_task_handle_transmitted(task); return; } if (xprt_bound(xprt)) { task->tk_action = call_connect; return; } task->tk_action = call_bind_status; if (!xprt_prepare_transmit(task)) return; xprt->ops->rpcbind(task); } /* * 4a. Sort out bind result */ static void call_bind_status(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; int status = -EIO; if (rpc_task_transmitted(task)) { rpc_task_handle_transmitted(task); return; } if (task->tk_status >= 0) goto out_next; if (xprt_bound(xprt)) { task->tk_status = 0; goto out_next; } switch (task->tk_status) { case -ENOMEM: rpc_delay(task, HZ >> 2); goto retry_timeout; case -EACCES: trace_rpcb_prog_unavail_err(task); /* fail immediately if this is an RPC ping */ if (task->tk_msg.rpc_proc->p_proc == 0) { status = -EOPNOTSUPP; break; } rpc_delay(task, 3*HZ); goto retry_timeout; case -ENOBUFS: rpc_delay(task, HZ >> 2); goto retry_timeout; case -EAGAIN: goto retry_timeout; case -ETIMEDOUT: trace_rpcb_timeout_err(task); goto retry_timeout; case -EPFNOSUPPORT: /* server doesn't support any rpcbind version we know of */ trace_rpcb_bind_version_err(task); break; case -EPROTONOSUPPORT: trace_rpcb_bind_version_err(task); goto retry_timeout; case -ECONNREFUSED: /* connection problems */ case -ECONNRESET: case -ECONNABORTED: case -ENOTCONN: case -EHOSTDOWN: case -ENETDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -EPIPE: trace_rpcb_unreachable_err(task); if (!RPC_IS_SOFTCONN(task)) { rpc_delay(task, 5*HZ); goto retry_timeout; } status = task->tk_status; break; default: trace_rpcb_unrecognized_err(task); } rpc_call_rpcerror(task, status); return; out_next: task->tk_action = call_connect; return; retry_timeout: task->tk_status = 0; task->tk_action = call_bind; rpc_check_timeout(task); } /* * 4b. 
Connect to the RPC server */ static void call_connect(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; if (rpc_task_transmitted(task)) { rpc_task_handle_transmitted(task); return; } if (xprt_connected(xprt)) { task->tk_action = call_transmit; return; } task->tk_action = call_connect_status; if (task->tk_status < 0) return; if (task->tk_flags & RPC_TASK_NOCONNECT) { rpc_call_rpcerror(task, -ENOTCONN); return; } if (!xprt_prepare_transmit(task)) return; xprt_connect(task); } /* * 4c. Sort out connect result */ static void call_connect_status(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; struct rpc_clnt *clnt = task->tk_client; int status = task->tk_status; if (rpc_task_transmitted(task)) { rpc_task_handle_transmitted(task); return; } trace_rpc_connect_status(task); if (task->tk_status == 0) { clnt->cl_stats->netreconn++; goto out_next; } if (xprt_connected(xprt)) { task->tk_status = 0; goto out_next; } task->tk_status = 0; switch (status) { case -ECONNREFUSED: case -ECONNRESET: /* A positive refusal suggests a rebind is needed. */ if (RPC_IS_SOFTCONN(task)) break; if (clnt->cl_autobind) { rpc_force_rebind(clnt); goto out_retry; } fallthrough; case -ECONNABORTED: case -ENETDOWN: case -ENETUNREACH: case -EHOSTUNREACH: case -EPIPE: case -EPROTO: xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, task->tk_rqstp->rq_connect_cookie); if (RPC_IS_SOFTCONN(task)) break; /* retry with existing socket, after a delay */ rpc_delay(task, 3*HZ); fallthrough; case -EADDRINUSE: case -ENOTCONN: case -EAGAIN: case -ETIMEDOUT: if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) && (task->tk_flags & RPC_TASK_MOVEABLE) && test_bit(XPRT_REMOVE, &xprt->state)) { struct rpc_xprt *saved = task->tk_xprt; struct rpc_xprt_switch *xps; xps = rpc_clnt_xprt_switch_get(clnt); if (xps->xps_nxprts > 1) { long value; xprt_release(task); value = atomic_long_dec_return(&xprt->queuelen); if (value == 0) rpc_xprt_switch_remove_xprt(xps, saved, true); xprt_put(saved); task->tk_xprt = NULL; task->tk_action = call_start; } xprt_switch_put(xps); if (!task->tk_xprt) goto out; } goto out_retry; case -ENOBUFS: rpc_delay(task, HZ >> 2); goto out_retry; } rpc_call_rpcerror(task, status); return; out_next: task->tk_action = call_transmit; return; out_retry: /* Check for timeouts before looping back to call_bind */ task->tk_action = call_bind; out: rpc_check_timeout(task); } /* * 5. Transmit the RPC request, and wait for reply */ static void call_transmit(struct rpc_task *task) { if (rpc_task_transmitted(task)) { rpc_task_handle_transmitted(task); return; } task->tk_action = call_transmit_status; if (!xprt_prepare_transmit(task)) return; task->tk_status = 0; if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { if (!xprt_connected(task->tk_xprt)) { task->tk_status = -ENOTCONN; return; } xprt_transmit(task); } xprt_end_transmit(task); } /* * 5a. Handle cleanup after a transmission */ static void call_transmit_status(struct rpc_task *task) { task->tk_action = call_status; /* * Common case: success. Force the compiler to put this * test first. */ if (rpc_task_transmitted(task)) { task->tk_status = 0; xprt_request_wait_receive(task); return; } switch (task->tk_status) { default: break; case -EBADMSG: task->tk_status = 0; task->tk_action = call_encode; break; /* * Special cases: if we've been waiting on the * socket's write_space() callback, or if the * socket just returned a connection error, * then hold onto the transport lock. 
*/ case -ENOMEM: case -ENOBUFS: rpc_delay(task, HZ>>2); fallthrough; case -EBADSLT: case -EAGAIN: task->tk_action = call_transmit; task->tk_status = 0; break; case -EHOSTDOWN: case -ENETDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -EPERM: break; case -ECONNREFUSED: if (RPC_IS_SOFTCONN(task)) { if (!task->tk_msg.rpc_proc->p_proc) trace_xprt_ping(task->tk_xprt, task->tk_status); rpc_call_rpcerror(task, task->tk_status); return; } fallthrough; case -ECONNRESET: case -ECONNABORTED: case -EADDRINUSE: case -ENOTCONN: case -EPIPE: task->tk_action = call_bind; task->tk_status = 0; break; } rpc_check_timeout(task); } #if defined(CONFIG_SUNRPC_BACKCHANNEL) static void call_bc_transmit(struct rpc_task *task); static void call_bc_transmit_status(struct rpc_task *task); static void call_bc_encode(struct rpc_task *task) { xprt_request_enqueue_transmit(task); task->tk_action = call_bc_transmit; } /* * 5b. Send the backchannel RPC reply. On error, drop the reply. In * addition, disconnect on connectivity errors. */ static void call_bc_transmit(struct rpc_task *task) { task->tk_action = call_bc_transmit_status; if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { if (!xprt_prepare_transmit(task)) return; task->tk_status = 0; xprt_transmit(task); } xprt_end_transmit(task); } static void call_bc_transmit_status(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; if (rpc_task_transmitted(task)) task->tk_status = 0; switch (task->tk_status) { case 0: /* Success */ case -ENETDOWN: case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -ECONNRESET: case -ECONNREFUSED: case -EADDRINUSE: case -ENOTCONN: case -EPIPE: break; case -ENOMEM: case -ENOBUFS: rpc_delay(task, HZ>>2); fallthrough; case -EBADSLT: case -EAGAIN: task->tk_status = 0; task->tk_action = call_bc_transmit; return; case -ETIMEDOUT: /* * Problem reaching the server. Disconnect and let the * forechannel reestablish the connection. The server will * have to retransmit the backchannel request and we'll * reprocess it. Since these ops are idempotent, there's no * need to cache our reply at this time. */ printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); xprt_conditional_disconnect(req->rq_xprt, req->rq_connect_cookie); break; default: /* * We were unable to reply and will have to drop the * request. The server should reconnect and retransmit. */ printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); break; } task->tk_action = rpc_exit_task; } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ /* * 6. Sort out the RPC call status */ static void call_status(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; int status; if (!task->tk_msg.rpc_proc->p_proc) trace_xprt_ping(task->tk_xprt, task->tk_status); status = task->tk_status; if (status >= 0) { task->tk_action = call_decode; return; } trace_rpc_call_status(task); task->tk_status = 0; switch(status) { case -EHOSTDOWN: case -ENETDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -EPERM: if (RPC_IS_SOFTCONN(task)) goto out_exit; /* * Delay any retries for 3 seconds, then handle as if it * were a timeout. 
*/ rpc_delay(task, 3*HZ); fallthrough; case -ETIMEDOUT: break; case -ECONNREFUSED: case -ECONNRESET: case -ECONNABORTED: case -ENOTCONN: rpc_force_rebind(clnt); break; case -EADDRINUSE: rpc_delay(task, 3*HZ); fallthrough; case -EPIPE: case -EAGAIN: break; case -ENFILE: case -ENOBUFS: case -ENOMEM: rpc_delay(task, HZ>>2); break; case -EIO: /* shutdown or soft timeout */ goto out_exit; default: if (clnt->cl_chatty) printk("%s: RPC call returned error %d\n", clnt->cl_program->name, -status); goto out_exit; } task->tk_action = call_encode; rpc_check_timeout(task); return; out_exit: rpc_call_rpcerror(task, status); } static bool rpc_check_connected(const struct rpc_rqst *req) { /* No allocated request or transport? return true */ if (!req || !req->rq_xprt) return true; return xprt_connected(req->rq_xprt); } static void rpc_check_timeout(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; if (RPC_SIGNALLED(task)) return; if (xprt_adjust_timeout(task->tk_rqstp) == 0) return; trace_rpc_timeout_status(task); task->tk_timeouts++; if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) { rpc_call_rpcerror(task, -ETIMEDOUT); return; } if (RPC_IS_SOFT(task)) { /* * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has * been sent, it should time out only if the transport * connection gets terminally broken. */ if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) && rpc_check_connected(task->tk_rqstp)) return; if (clnt->cl_chatty) { pr_notice_ratelimited( "%s: server %s not responding, timed out\n", clnt->cl_program->name, task->tk_xprt->servername); } if (task->tk_flags & RPC_TASK_TIMEOUT) rpc_call_rpcerror(task, -ETIMEDOUT); else __rpc_call_rpcerror(task, -EIO, -ETIMEDOUT); return; } if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { task->tk_flags |= RPC_CALL_MAJORSEEN; if (clnt->cl_chatty) { pr_notice_ratelimited( "%s: server %s not responding, still trying\n", clnt->cl_program->name, task->tk_xprt->servername); } } rpc_force_rebind(clnt); /* * Did our request time out due to an RPCSEC_GSS out-of-sequence * event? RFC2203 requires the server to drop all such requests. */ rpcauth_invalcred(task); } /* * 7. Decode the RPC reply */ static void call_decode(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; struct xdr_stream xdr; int err; if (!task->tk_msg.rpc_proc->p_decode) { task->tk_action = rpc_exit_task; return; } if (task->tk_flags & RPC_CALL_MAJORSEEN) { if (clnt->cl_chatty) { pr_notice_ratelimited("%s: server %s OK\n", clnt->cl_program->name, task->tk_xprt->servername); } task->tk_flags &= ~RPC_CALL_MAJORSEEN; } /* * Did we ever call xprt_complete_rqst()? If not, we should assume * the message is incomplete. */ err = -EAGAIN; if (!req->rq_reply_bytes_recvd) goto out; /* Ensure that we see all writes made by xprt_complete_rqst() * before it changed req->rq_reply_bytes_recvd. 
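 * (This read barrier pairs with the corresponding write barrier on the
 * xprt_complete_rqst() side, so once rq_reply_bytes_recvd is seen as
 * non-zero here the copied reply data is visible as well.)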
*/ smp_rmb(); req->rq_rcv_buf.len = req->rq_private_buf.len; trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf); /* Check that the softirq receive buffer is valid */ WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, sizeof(req->rq_rcv_buf)) != 0); xdr_init_decode(&xdr, &req->rq_rcv_buf, req->rq_rcv_buf.head[0].iov_base, req); err = rpc_decode_header(task, &xdr); out: switch (err) { case 0: task->tk_action = rpc_exit_task; task->tk_status = rpcauth_unwrap_resp(task, &xdr); xdr_finish_decode(&xdr); return; case -EAGAIN: task->tk_status = 0; if (task->tk_client->cl_discrtry) xprt_conditional_disconnect(req->rq_xprt, req->rq_connect_cookie); task->tk_action = call_encode; rpc_check_timeout(task); break; case -EKEYREJECTED: task->tk_action = call_reserve; rpc_check_timeout(task); rpcauth_invalcred(task); /* Ensure we obtain a new XID if we retry! */ xprt_release(task); } } static int rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; __be32 *p; int error; error = -EMSGSIZE; p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2); if (!p) goto out_fail; *p++ = req->rq_xid; *p++ = rpc_call; *p++ = cpu_to_be32(RPC_VERSION); *p++ = cpu_to_be32(clnt->cl_prog); *p++ = cpu_to_be32(clnt->cl_vers); *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc); error = rpcauth_marshcred(task, xdr); if (error < 0) goto out_fail; return 0; out_fail: trace_rpc_bad_callhdr(task); rpc_call_rpcerror(task, error); return error; } static noinline int rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr) { struct rpc_clnt *clnt = task->tk_client; int error; __be32 *p; /* RFC-1014 says that the representation of XDR data must be a * multiple of four bytes * - if it isn't pointer subtraction in the NFS client may give * undefined results */ if (task->tk_rqstp->rq_rcv_buf.len & 3) goto out_unparsable; p = xdr_inline_decode(xdr, 3 * sizeof(*p)); if (!p) goto out_unparsable; p++; /* skip XID */ if (*p++ != rpc_reply) goto out_unparsable; if (*p++ != rpc_msg_accepted) goto out_msg_denied; error = rpcauth_checkverf(task, xdr); if (error) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; if (!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) { rpcauth_invalcred(task); if (!task->tk_cred_retry) goto out_err; task->tk_cred_retry--; trace_rpc__stale_creds(task); return -EKEYREJECTED; } goto out_verifier; } p = xdr_inline_decode(xdr, sizeof(*p)); if (!p) goto out_unparsable; switch (*p) { case rpc_success: return 0; case rpc_prog_unavail: trace_rpc__prog_unavail(task); error = -EPFNOSUPPORT; goto out_err; case rpc_prog_mismatch: trace_rpc__prog_mismatch(task); error = -EPROTONOSUPPORT; goto out_err; case rpc_proc_unavail: trace_rpc__proc_unavail(task); error = -EOPNOTSUPP; goto out_err; case rpc_garbage_args: case rpc_system_err: trace_rpc__garbage_args(task); error = -EIO; break; default: goto out_unparsable; } out_garbage: clnt->cl_stats->rpcgarbage++; if (task->tk_garb_retry) { task->tk_garb_retry--; task->tk_action = call_encode; return -EAGAIN; } out_err: rpc_call_rpcerror(task, error); return error; out_unparsable: trace_rpc__unparsable(task); error = -EIO; goto out_garbage; out_verifier: trace_rpc_bad_verifier(task); switch (error) { case -EPROTONOSUPPORT: goto out_err; case -EACCES: /* Re-encode with a fresh cred */ fallthrough; default: goto out_garbage; } out_msg_denied: error = -EACCES; p = xdr_inline_decode(xdr, sizeof(*p)); if (!p) goto out_unparsable; switch (*p++) { case rpc_auth_error: break; case rpc_mismatch: 
trace_rpc__mismatch(task); error = -EPROTONOSUPPORT; goto out_err; default: goto out_unparsable; } p = xdr_inline_decode(xdr, sizeof(*p)); if (!p) goto out_unparsable; switch (*p++) { case rpc_autherr_rejectedcred: case rpc_autherr_rejectedverf: case rpcsec_gsserr_credproblem: case rpcsec_gsserr_ctxproblem: rpcauth_invalcred(task); if (!task->tk_cred_retry) break; task->tk_cred_retry--; trace_rpc__stale_creds(task); return -EKEYREJECTED; case rpc_autherr_badcred: case rpc_autherr_badverf: /* possibly garbled cred/verf? */ if (!task->tk_garb_retry) break; task->tk_garb_retry--; trace_rpc__bad_creds(task); task->tk_action = call_encode; return -EAGAIN; case rpc_autherr_tooweak: trace_rpc__auth_tooweak(task); pr_warn("RPC: server %s requires stronger authentication.\n", task->tk_xprt->servername); break; default: goto out_unparsable; } goto out_err; } static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, const void *obj) { } static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *obj) { return 0; } static const struct rpc_procinfo rpcproc_null = { .p_encode = rpcproc_encode_null, .p_decode = rpcproc_decode_null, }; static const struct rpc_procinfo rpcproc_null_noreply = { .p_encode = rpcproc_encode_null, }; static void rpc_null_call_prepare(struct rpc_task *task, void *data) { task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT; rpc_call_start(task); } static const struct rpc_call_ops rpc_null_ops = { .rpc_call_prepare = rpc_null_call_prepare, .rpc_call_done = rpc_default_callback, }; static struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt, struct rpc_xprt *xprt, struct rpc_cred *cred, int flags, const struct rpc_call_ops *ops, void *data) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, }; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_xprt = xprt, .rpc_message = &msg, .rpc_op_cred = cred, .callback_ops = ops ?: &rpc_null_ops, .callback_data = data, .flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS, }; return rpc_run_task(&task_setup_data); } struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) { return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL); } EXPORT_SYMBOL_GPL(rpc_call_null); static int rpc_ping(struct rpc_clnt *clnt) { struct rpc_task *task; int status; if (clnt->cl_auth->au_ops->ping) return clnt->cl_auth->au_ops->ping(clnt); task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL); if (IS_ERR(task)) return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); return status; } static int rpc_ping_noreply(struct rpc_clnt *clnt) { struct rpc_message msg = { .rpc_proc = &rpcproc_null_noreply, }; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = &msg, .callback_ops = &rpc_null_ops, .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS, }; struct rpc_task *task; int status; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); return status; } struct rpc_cb_add_xprt_calldata { struct rpc_xprt_switch *xps; struct rpc_xprt *xprt; }; static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata) { struct rpc_cb_add_xprt_calldata *data = calldata; if (task->tk_status == 0) rpc_xprt_switch_add_xprt(data->xps, data->xprt); } static void rpc_cb_add_xprt_release(void *calldata) { struct rpc_cb_add_xprt_calldata *data = calldata; xprt_put(data->xprt); xprt_switch_put(data->xps); kfree(data); } static const struct 
rpc_call_ops rpc_cb_add_xprt_call_ops = { .rpc_call_prepare = rpc_null_call_prepare, .rpc_call_done = rpc_cb_add_xprt_done, .rpc_release = rpc_cb_add_xprt_release, }; /** * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt * @clnt: pointer to struct rpc_clnt * @xps: pointer to struct rpc_xprt_switch, * @xprt: pointer struct rpc_xprt * @in_max_connect: pointer to the max_connect value for the passed in xprt transport */ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt_switch *xps, struct rpc_xprt *xprt, void *in_max_connect) { struct rpc_cb_add_xprt_calldata *data; struct rpc_task *task; int max_connect = clnt->cl_max_connect; if (in_max_connect) max_connect = *(int *)in_max_connect; if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) { rcu_read_lock(); pr_warn("SUNRPC: reached max allowed number (%d) did not add " "transport to server: %s\n", max_connect, rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); rcu_read_unlock(); return -EINVAL; } data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->xps = xprt_switch_get(xps); data->xprt = xprt_get(xprt); if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) { rpc_cb_add_xprt_release(data); goto success; } task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC, &rpc_cb_add_xprt_call_ops, data); if (IS_ERR(task)) return PTR_ERR(task); data->xps->xps_nunique_destaddr_xprts++; rpc_put_task(task); success: return 1; } EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt); static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt, struct rpc_xprt *xprt, struct rpc_add_xprt_test *data) { struct rpc_task *task; int status = -EADDRINUSE; /* Test the connection */ task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL); if (IS_ERR(task)) return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); if (status < 0) return status; /* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */ data->add_xprt_test(clnt, xprt, data->data); return 0; } /** * rpc_clnt_setup_test_and_add_xprt() * * This is an rpc_clnt_add_xprt setup() function which returns 1 so: * 1) caller of the test function must dereference the rpc_xprt_switch * and the rpc_xprt. * 2) test function must call rpc_xprt_switch_add_xprt, usually in * the rpc_call_done routine. 
* * Upon success (return of 1), the test function adds the new * transport to the rpc_clnt xprt switch * * @clnt: struct rpc_clnt to get the new transport * @xps: the rpc_xprt_switch to hold the new transport * @xprt: the rpc_xprt to test * @data: a struct rpc_add_xprt_test pointer that holds the test function * and test function call data */ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt_switch *xps, struct rpc_xprt *xprt, void *data) { int status = -EADDRINUSE; xprt = xprt_get(xprt); xprt_switch_get(xps); if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr)) goto out_err; status = rpc_clnt_add_xprt_helper(clnt, xprt, data); if (status < 0) goto out_err; status = 1; out_err: xprt_put(xprt); xprt_switch_put(xps); if (status < 0) pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not " "added\n", status, xprt->address_strings[RPC_DISPLAY_ADDR]); /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */ return status; } EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt); /** * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt * @clnt: pointer to struct rpc_clnt * @xprtargs: pointer to struct xprt_create * @setup: callback to test and/or set up the connection * @data: pointer to setup function data * * Creates a new transport using the parameters set in args and * adds it to clnt. * If ping is set, then test that connectivity succeeds before * adding the new transport. * */ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, struct xprt_create *xprtargs, int (*setup)(struct rpc_clnt *, struct rpc_xprt_switch *, struct rpc_xprt *, void *), void *data) { struct rpc_xprt_switch *xps; struct rpc_xprt *xprt; unsigned long connect_timeout; unsigned long reconnect_timeout; unsigned char resvport, reuseport; int ret = 0, ident; rcu_read_lock(); xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); xprt = xprt_iter_xprt(&clnt->cl_xpi); if (xps == NULL || xprt == NULL) { rcu_read_unlock(); xprt_switch_put(xps); return -EAGAIN; } resvport = xprt->resvport; reuseport = xprt->reuseport; connect_timeout = xprt->connect_timeout; reconnect_timeout = xprt->max_reconnect_timeout; ident = xprt->xprt_class->ident; rcu_read_unlock(); if (!xprtargs->ident) xprtargs->ident = ident; xprtargs->xprtsec = clnt->cl_xprtsec; xprt = xprt_create_transport(xprtargs); if (IS_ERR(xprt)) { ret = PTR_ERR(xprt); goto out_put_switch; } xprt->resvport = resvport; xprt->reuseport = reuseport; if (xprtargs->connect_timeout) connect_timeout = xprtargs->connect_timeout; if (xprtargs->reconnect_timeout) reconnect_timeout = xprtargs->reconnect_timeout; if (xprt->ops->set_connect_timeout != NULL) xprt->ops->set_connect_timeout(xprt, connect_timeout, reconnect_timeout); rpc_xprt_switch_set_roundrobin(xps); if (setup) { ret = setup(clnt, xps, xprt, data); if (ret != 0) goto out_put_xprt; } rpc_xprt_switch_add_xprt(xps, xprt); out_put_xprt: xprt_put(xprt); out_put_switch: xprt_switch_put(xps); return ret; } EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt, struct rpc_xprt *xprt, struct rpc_add_xprt_test *data) { struct rpc_xprt *main_xprt; int status = 0; xprt_get(xprt); rcu_read_lock(); main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); status = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, (struct sockaddr *)&main_xprt->addr); rcu_read_unlock(); xprt_put(main_xprt); if (status || !test_bit(XPRT_OFFLINE, &xprt->state)) goto out; status = rpc_clnt_add_xprt_helper(clnt, xprt, data); out: xprt_put(xprt); return status; } /* 
rpc_clnt_probe_trunked_xprt -- probe offlined transport for session trunking * @clnt rpc_clnt structure * * For each offlined transport found in the rpc_clnt structure call * the function rpc_xprt_probe_trunked() which will determine if this * transport still belongs to the trunking group. */ void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt, struct rpc_add_xprt_test *data) { struct rpc_xprt_iter xpi; int ret; ret = rpc_clnt_xprt_iter_offline_init(clnt, &xpi); if (ret) return; for (;;) { struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); if (!xprt) break; ret = rpc_xprt_probe_trunked(clnt, xprt, data); xprt_put(xprt); if (ret < 0) break; xprt_iter_rewind(&xpi); } xprt_iter_destroy(&xpi); } EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts); static int rpc_xprt_offline(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *data) { struct rpc_xprt *main_xprt; struct rpc_xprt_switch *xps; int err = 0; xprt_get(xprt); rcu_read_lock(); main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); err = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, (struct sockaddr *)&main_xprt->addr); rcu_read_unlock(); xprt_put(main_xprt); if (err) goto out; if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) { err = -EINTR; goto out; } xprt_set_offline_locked(xprt, xps); xprt_release_write(xprt, NULL); out: xprt_put(xprt); xprt_switch_put(xps); return err; } /* rpc_clnt_manage_trunked_xprts -- offline trunked transports * @clnt rpc_clnt structure * * For each active transport found in the rpc_clnt structure call * the function rpc_xprt_offline() which will identify trunked transports * and will mark them offline. */ void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt) { rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL); } EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts); struct connect_timeout_data { unsigned long connect_timeout; unsigned long reconnect_timeout; }; static int rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *data) { struct connect_timeout_data *timeo = data; if (xprt->ops->set_connect_timeout) xprt->ops->set_connect_timeout(xprt, timeo->connect_timeout, timeo->reconnect_timeout); return 0; } void rpc_set_connect_timeout(struct rpc_clnt *clnt, unsigned long connect_timeout, unsigned long reconnect_timeout) { struct connect_timeout_data timeout = { .connect_timeout = connect_timeout, .reconnect_timeout = reconnect_timeout, }; rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_set_connect_timeout, &timeout); } EXPORT_SYMBOL_GPL(rpc_set_connect_timeout); void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt) { struct rpc_xprt_switch *xps; xps = rpc_clnt_xprt_switch_get(clnt); xprt_set_online_locked(xprt, xps); xprt_switch_put(xps); } void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) { struct rpc_xprt_switch *xps; if (rpc_clnt_xprt_switch_has_addr(clnt, (const struct sockaddr *)&xprt->addr)) { return rpc_clnt_xprt_set_online(clnt, xprt); } xps = rpc_clnt_xprt_switch_get(clnt); rpc_xprt_switch_add_xprt(xps, xprt); xprt_switch_put(xps); } EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) { struct rpc_xprt_switch *xps; rcu_read_lock(); xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); rpc_xprt_switch_remove_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), xprt, 0); xps->xps_nunique_destaddr_xprts--; rcu_read_unlock(); } 
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt); bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, const struct sockaddr *sap) { struct rpc_xprt_switch *xps; bool ret; rcu_read_lock(); xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); ret = rpc_xprt_switch_has_addr(xps, sap); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static void rpc_show_header(void) { printk(KERN_INFO "-pid- flgs status -client- --rqstp- " "-timeout ---ops--\n"); } static void rpc_show_task(const struct rpc_clnt *clnt, const struct rpc_task *task) { const char *rpc_waitq = "none"; if (RPC_IS_QUEUED(task)) rpc_waitq = rpc_qname(task->tk_waitqueue); printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", task->tk_pid, task->tk_flags, task->tk_status, clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops, clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), task->tk_action, rpc_waitq); } void rpc_show_tasks(struct net *net) { struct rpc_clnt *clnt; struct rpc_task *task; int header = 0; struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_for_each_entry(clnt, &sn->all_clients, cl_clients) { spin_lock(&clnt->cl_lock); list_for_each_entry(task, &clnt->cl_tasks, tk_task) { if (!header) { rpc_show_header(); header++; } rpc_show_task(clnt, task); } spin_unlock(&clnt->cl_lock); } spin_unlock(&sn->rpc_client_lock); } #endif #if IS_ENABLED(CONFIG_SUNRPC_SWAP) static int rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *dummy) { return xprt_enable_swap(xprt); } int rpc_clnt_swap_activate(struct rpc_clnt *clnt) { while (clnt != clnt->cl_parent) clnt = clnt->cl_parent; if (atomic_inc_return(&clnt->cl_swapper) == 1) return rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_swap_activate_callback, NULL); return 0; } EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate); static int rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *dummy) { xprt_disable_swap(xprt); return 0; } void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) { while (clnt != clnt->cl_parent) clnt = clnt->cl_parent; if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_swap_deactivate_callback, NULL); } EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate); #endif /* CONFIG_SUNRPC_SWAP */
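/*
 * Usage sketch (assumptions noted): how a caller such as an NFS client probing for
 * session trunking might pair rpc_clnt_add_xprt() with the
 * rpc_clnt_setup_test_and_add_xprt() helper above. The example_* names, the TCP
 * transport choice and the empty trunk test are hypothetical, and the
 * struct xprt_create / struct rpc_add_xprt_test fields are assumed from the SUNRPC
 * headers rather than shown in this file.
 */
static void example_trunk_test(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
			       void *calldata)
{
	/* A real test would issue an RPC probe (e.g. an EXCHANGE_ID) here. */
}

static int example_add_trunked_xprt(struct rpc_clnt *clnt, struct net *net,
				    struct sockaddr *sap, size_t salen,
				    const char *hostname, void *calldata)
{
	struct rpc_add_xprt_test test = {
		.add_xprt_test	= example_trunk_test,
		.data		= calldata,
	};
	struct xprt_create xprt_args = {
		.ident		= XPRT_TRANSPORT_TCP,
		.net		= net,
		.dstaddr	= sap,
		.addrlen	= salen,
		.servername	= hostname,
	};

	/*
	 * Per the comment above, on success (return of 1) the test path itself
	 * adds the transport to the rpc_clnt's xprt switch, so rpc_clnt_add_xprt()
	 * does not add it a second time.
	 */
	return rpc_clnt_add_xprt(clnt, &xprt_args,
				 rpc_clnt_setup_test_and_add_xprt, &test);
}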
/* SPDX-License-Identifier: GPL-2.0 */ /* File: linux/xattr.h Extended attributes handling. Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org> Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> */ #ifndef _LINUX_XATTR_H #define _LINUX_XATTR_H #include <linux/slab.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/user_namespace.h> #include <uapi/linux/xattr.h> /* List of all open_how "versions". */ #define XATTR_ARGS_SIZE_VER0 16 /* sizeof first published struct */ #define XATTR_ARGS_SIZE_LATEST XATTR_ARGS_SIZE_VER0 struct inode; struct dentry; static inline bool is_posix_acl_xattr(const char *name) { return (strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) == 0) || (strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0); } /* * struct xattr_handler: When @name is set, match attributes with exactly that * name. When @prefix is set instead, match attributes with that prefix and * with a non-empty suffix. */ struct xattr_handler { const char *name; const char *prefix; int flags; /* fs private flags */ bool (*list)(struct dentry *dentry); int (*get)(const struct xattr_handler *, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size); int (*set)(const struct xattr_handler *, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *buffer, size_t size, int flags); }; /** * xattr_handler_can_list - check whether xattr can be listed * @handler: handler for this type of xattr * @dentry: dentry whose inode xattr to list * * Determine whether the xattr associated with @dentry can be listed given * @handler. * * Return: true if xattr can be listed, false if not.
*/ static inline bool xattr_handler_can_list(const struct xattr_handler *handler, struct dentry *dentry) { return handler && (!handler->list || handler->list(dentry)); } const char *xattr_full_name(const struct xattr_handler *, const char *); struct xattr { const char *name; void *value; size_t value_len; }; ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t); ssize_t vfs_getxattr(struct mnt_idmap *, struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); int __vfs_setxattr(struct mnt_idmap *, struct dentry *, struct inode *, const char *, const void *, size_t, int); int __vfs_setxattr_noperm(struct mnt_idmap *, struct dentry *, const char *, const void *, size_t, int); int __vfs_setxattr_locked(struct mnt_idmap *, struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct mnt_idmap *, struct dentry *, const char *, const void *, size_t, int); int __vfs_removexattr(struct mnt_idmap *, struct dentry *, const char *); int __vfs_removexattr_locked(struct mnt_idmap *, struct dentry *, const char *, struct inode **); int vfs_removexattr(struct mnt_idmap *, struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); int vfs_getxattr_alloc(struct mnt_idmap *idmap, struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); int xattr_supports_user_prefix(struct inode *inode); static inline const char *xattr_prefix(const struct xattr_handler *handler) { return handler->prefix ?: handler->name; } struct simple_xattrs { struct rb_root rb_root; rwlock_t lock; }; struct simple_xattr { struct rb_node rb_node; char *name; size_t size; char value[]; }; void simple_xattrs_init(struct simple_xattrs *xattrs); void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space); size_t simple_xattr_space(const char *name, size_t size); struct simple_xattr *simple_xattr_alloc(const void *value, size_t size); void simple_xattr_free(struct simple_xattr *xattr); int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, void *buffer, size_t size); struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs, const char *name, const void *value, size_t size, int flags); ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size); void simple_xattr_add(struct simple_xattrs *xattrs, struct simple_xattr *new_xattr); int xattr_list_one(char **buffer, ssize_t *remaining_size, const char *name); #endif /* _LINUX_XATTR_H */
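/*
 * Usage sketch: a minimal xattr_handler as a filesystem might define it, matching
 * the callback prototypes declared above. The example_* names and the choice of
 * the "user." namespace are hypothetical.
 */
static int example_xattr_get(const struct xattr_handler *handler,
			     struct dentry *dentry, struct inode *inode,
			     const char *name, void *buffer, size_t size)
{
	/* A real filesystem would copy the value for @name into @buffer. */
	return -EOPNOTSUPP;
}

static int example_xattr_set(const struct xattr_handler *handler,
			     struct mnt_idmap *idmap,
			     struct dentry *dentry, struct inode *inode,
			     const char *name, const void *value,
			     size_t size, int flags)
{
	/* A NULL @value conventionally means "remove this attribute". */
	return -EOPNOTSUPP;
}

/*
 * With @prefix set (and @name left NULL), this handler matches every attribute
 * in the "user." namespace that has a non-empty suffix, as described in the
 * struct xattr_handler comment above.
 */
static const struct xattr_handler example_user_xattr_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.get	= example_xattr_get,
	.set	= example_xattr_set,
};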
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MEMORY_HOTPLUG_H #define __LINUX_MEMORY_HOTPLUG_H #include <linux/mmzone.h> #include <linux/spinlock.h> #include <linux/notifier.h> #include <linux/bug.h> struct page; struct zone; struct pglist_data; struct mem_section; struct memory_group; struct resource; struct vmem_altmap; struct dev_pagemap; #ifdef CONFIG_MEMORY_HOTPLUG struct page *pfn_to_online_page(unsigned long pfn); /* Types for control the zone type of onlined and offlined memory */ enum { /* Offline the memory. */ MMOP_OFFLINE = 0, /* Online the memory. Zone depends, see default_zone_for_pfn(). */ MMOP_ONLINE, /* Online the memory to ZONE_NORMAL. */ MMOP_ONLINE_KERNEL, /* Online the memory to ZONE_MOVABLE. */ MMOP_ONLINE_MOVABLE, }; /* Flags for add_memory() and friends to specify memory hotplug details. */ typedef int __bitwise mhp_t; /* No special request */ #define MHP_NONE ((__force mhp_t)0) /* * Allow merging of the added System RAM resource with adjacent, * mergeable resources. After a successful call to add_memory_resource() * with this flag set, the resource pointer must no longer be used as it * might be stale, or the resource might have changed. */ #define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) /* * We want memmap (struct page array) to be self contained. * To do so, we will use the beginning of the hot-added range to build * the page tables for the memmap array that describes the entire range. * Only selected architectures support it with SPARSE_VMEMMAP. * This is only a hint, the core kernel can decide to not do this based on * different alignment checks. */ #define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1)) /* * The nid field specifies a memory group id (mgid) instead. The memory group * implies the node id (nid). */ #define MHP_NID_IS_MGID ((__force mhp_t)BIT(2)) /* * The hotplugged memory is completely inaccessible while the memory is * offline. The memory provider will handle MEM_PREPARE_ONLINE / * MEM_FINISH_OFFLINE notifications and make the memory accessible. * * This flag is only relevant when used along with MHP_MEMMAP_ON_MEMORY, * because the altmap cannot be written (e.g., poisoned) when adding * memory -- before it is set online. * * This allows for adding memory with an altmap that is not currently * made available by a hypervisor.
When onlining that memory, the * hypervisor can be instructed to make that memory available, and * the onlining phase will not require any memory allocations, which is * helpful in low-memory situations. */ #define MHP_OFFLINE_INACCESSIBLE ((__force mhp_t)BIT(3)) /* * Extended parameters for memory hotplug: * altmap: alternative allocator for memmap array (optional) * pgprot: page protection flags to apply to newly created page tables * (required) */ struct mhp_params { struct vmem_altmap *altmap; pgprot_t pgprot; struct dev_pagemap *pgmap; }; bool mhp_range_allowed(u64 start, u64 size, bool need_mapping); struct range mhp_get_pluggable_range(bool need_mapping); bool mhp_supports_memmap_on_memory(void); /* * Zone resizing functions * * Note: any attempt to resize a zone should have both pgdat_resize_lock() and * zone_span_writelock() held. This ensures the size of a zone * can't be changed while pgdat_resize_lock() is held. */ static inline unsigned zone_span_seqbegin(struct zone *zone) { return read_seqbegin(&zone->span_seqlock); } static inline int zone_span_seqretry(struct zone *zone, unsigned iv) { return read_seqretry(&zone->span_seqlock, iv); } static inline void zone_span_writelock(struct zone *zone) { write_seqlock(&zone->span_seqlock); } static inline void zone_span_writeunlock(struct zone *zone) { write_sequnlock(&zone->span_seqlock); } static inline void zone_seqlock_init(struct zone *zone) { seqlock_init(&zone->span_seqlock); } extern void adjust_present_page_count(struct page *page, struct memory_group *group, long nr_pages); /* VM interface that may be used by firmware interface */ extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, struct zone *zone, bool mhp_off_inaccessible); extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone, struct memory_group *group); extern unsigned long __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn); typedef void (*online_page_callback_t)(struct page *page, unsigned int order); extern void generic_online_page(struct page *page, unsigned int order); extern int set_online_page_callback(online_page_callback_t callback); extern int restore_online_page_callback(online_page_callback_t callback); extern int try_online_node(int nid); extern int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params); extern u64 max_mem_size; extern int mhp_online_type_from_str(const char *str); /* Default online_type (MMOP_*) when new memory blocks are added.
*/ extern int mhp_default_online_type; /* If movable_node boot option specified */ extern bool movable_node_enabled; static inline bool movable_node_is_enabled(void) { return movable_node_enabled; } extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap); extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); /* reasonably generic interface to expand the physical pages */ extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_params *params); #ifndef CONFIG_ARCH_HAS_ADD_PAGES static inline int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_params *params) { return __add_pages(nid, start_pfn, nr_pages, params); } #else /* ARCH_HAS_ADD_PAGES */ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_params *params); #endif /* ARCH_HAS_ADD_PAGES */ void get_online_mems(void); void put_online_mems(void); void mem_hotplug_begin(void); void mem_hotplug_done(void); /* See kswapd_is_running() */ static inline void pgdat_kswapd_lock(pg_data_t *pgdat) { mutex_lock(&pgdat->kswapd_lock); } static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) { mutex_unlock(&pgdat->kswapd_lock); } static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) { mutex_init(&pgdat->kswapd_lock); } #else /* ! CONFIG_MEMORY_HOTPLUG */ #define pfn_to_online_page(pfn) \ ({ \ struct page *___page = NULL; \ if (pfn_valid(pfn)) \ ___page = pfn_to_page(pfn); \ ___page; \ }) static inline unsigned zone_span_seqbegin(struct zone *zone) { return 0; } static inline int zone_span_seqretry(struct zone *zone, unsigned iv) { return 0; } static inline void zone_span_writelock(struct zone *zone) {} static inline void zone_span_writeunlock(struct zone *zone) {} static inline void zone_seqlock_init(struct zone *zone) {} static inline int try_online_node(int nid) { return 0; } static inline void get_online_mems(void) {} static inline void put_online_mems(void) {} static inline void mem_hotplug_begin(void) {} static inline void mem_hotplug_done(void) {} static inline bool movable_node_is_enabled(void) { return false; } static inline bool mhp_supports_memmap_on_memory(void) { return false; } static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {} static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {} static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {} #endif /* ! CONFIG_MEMORY_HOTPLUG */ /* * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some * platforms might override and use arch_get_mappable_range() * for internal non memory hotplug purposes. 
*/ struct range arch_get_mappable_range(void); #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* * pgdat resizing functions */ static inline void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) { spin_lock_irqsave(&pgdat->node_size_lock, *flags); } static inline void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) { spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); } static inline void pgdat_resize_init(struct pglist_data *pgdat) { spin_lock_init(&pgdat->node_size_lock); } #else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ /* * Stub functions for when hotplug is off */ static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} static inline void pgdat_resize_init(struct pglist_data *pgdat) {} #endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ #ifdef CONFIG_MEMORY_HOTREMOVE extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages, struct zone *zone, struct memory_group *group); extern int remove_memory(u64 start, u64 size); extern void __remove_memory(u64 start, u64 size); extern int offline_and_remove_memory(u64 start, u64 size); #else static inline void try_offline_node(int nid) {} static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages, struct zone *zone, struct memory_group *group) { return -EINVAL; } static inline int remove_memory(u64 start, u64 size) { return -EBUSY; } static inline void __remove_memory(u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_MEMORY_HOTPLUG extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat); extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); extern int add_memory_resource(int nid, struct resource *resource, mhp_t mhp_flags); extern int add_memory_driver_managed(int nid, u64 start, u64 size, const char *resource_name, mhp_t mhp_flags); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap, int migratetype); extern void remove_pfn_range_from_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); extern int sparse_add_section(int nid, unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap, struct dev_pagemap *pgmap); extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); extern struct zone *zone_for_pfn_range(int online_type, int nid, struct memory_group *group, unsigned long start_pfn, unsigned long nr_pages); extern int arch_create_linear_mapping(int nid, u64 start, u64 size, struct mhp_params *params); void arch_remove_linear_mapping(u64 start, u64 size); #endif /* CONFIG_MEMORY_HOTPLUG */ #endif /* __LINUX_MEMORY_HOTPLUG_H */
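/*
 * Usage sketch (hypothetical helper names): how a hotplug driver might add and
 * later remove a block of System RAM with the interfaces declared above. Real
 * users (ACPI, virtio-mem, dax/kmem) wrap these calls in much more policy, and
 * offline_and_remove_memory() additionally depends on CONFIG_MEMORY_HOTREMOVE.
 */
static int example_hotplug_range(int nid, u64 start, u64 size)
{
	/*
	 * MHP_MERGE_RESOURCE lets the new resource merge with adjacent
	 * mergeable resources, as documented for the flag above.
	 */
	return add_memory(nid, start, size, MHP_MERGE_RESOURCE);
}

static int example_hotunplug_range(u64 start, u64 size)
{
	/* Offline the range first, then remove its sections and resource. */
	return offline_and_remove_memory(start, size);
}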
// SPDX-License-Identifier: GPL-2.0 // // Register cache access API // // Copyright 2011 Wolfson Microelectronics plc // // Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> #include <linux/bsearch.h> #include <linux/device.h> #include
<linux/export.h> #include <linux/slab.h> #include <linux/sort.h> #include "trace.h" #include "internal.h" static const struct regcache_ops *cache_types[] = { &regcache_rbtree_ops, &regcache_maple_ops, &regcache_flat_ops, }; static int regcache_hw_init(struct regmap *map) { int i, j; int ret; int count; unsigned int reg, val; void *tmp_buf; if (!map->num_reg_defaults_raw) return -EINVAL; /* calculate the size of reg_defaults */ for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) if (regmap_readable(map, i * map->reg_stride) && !regmap_volatile(map, i * map->reg_stride)) count++; /* all registers are unreadable or volatile, so just bypass */ if (!count) { map->cache_bypass = true; return 0; } map->num_reg_defaults = count; map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default), GFP_KERNEL); if (!map->reg_defaults) return -ENOMEM; if (!map->reg_defaults_raw) { bool cache_bypass = map->cache_bypass; dev_warn(map->dev, "No cache defaults, reading back from HW\n"); /* Bypass the cache access till data read from HW */ map->cache_bypass = true; tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); if (!tmp_buf) { ret = -ENOMEM; goto err_free; } ret = regmap_raw_read(map, 0, tmp_buf, map->cache_size_raw); map->cache_bypass = cache_bypass; if (ret == 0) { map->reg_defaults_raw = tmp_buf; map->cache_free = true; } else { kfree(tmp_buf); } } /* fill the reg_defaults */ for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { reg = i * map->reg_stride; if (!regmap_readable(map, reg)) continue; if (regmap_volatile(map, reg)) continue; if (map->reg_defaults_raw) { val = regcache_get_val(map, map->reg_defaults_raw, i); } else { bool cache_bypass = map->cache_bypass; map->cache_bypass = true; ret = regmap_read(map, reg, &val); map->cache_bypass = cache_bypass; if (ret != 0) { dev_err(map->dev, "Failed to read %d: %d\n", reg, ret); goto err_free; } } map->reg_defaults[j].reg = reg; map->reg_defaults[j].def = val; j++; } return 0; err_free: kfree(map->reg_defaults); return ret; } int regcache_init(struct regmap *map, const struct regmap_config *config) { int ret; int i; void *tmp_buf; if (map->cache_type == REGCACHE_NONE) { if (config->reg_defaults || config->num_reg_defaults_raw) dev_warn(map->dev, "No cache used with register defaults set!\n"); map->cache_bypass = true; return 0; } if (config->reg_defaults && !config->num_reg_defaults) { dev_err(map->dev, "Register defaults are set without the number!\n"); return -EINVAL; } if (config->num_reg_defaults && !config->reg_defaults) { dev_err(map->dev, "Register defaults number are set without the reg!\n"); return -EINVAL; } for (i = 0; i < config->num_reg_defaults; i++) if (config->reg_defaults[i].reg % map->reg_stride) return -EINVAL; for (i = 0; i < ARRAY_SIZE(cache_types); i++) if (cache_types[i]->type == map->cache_type) break; if (i == ARRAY_SIZE(cache_types)) { dev_err(map->dev, "Could not match cache type: %d\n", map->cache_type); return -EINVAL; } map->num_reg_defaults = config->num_reg_defaults; map->num_reg_defaults_raw = config->num_reg_defaults_raw; map->reg_defaults_raw = config->reg_defaults_raw; map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8); map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw; map->cache = NULL; map->cache_ops = cache_types[i]; if (!map->cache_ops->read || !map->cache_ops->write || !map->cache_ops->name) return -EINVAL; /* We still need to ensure that the reg_defaults * won't vanish from under us. We'll need to make * a copy of it. 
*/ if (config->reg_defaults) { tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults, sizeof(*map->reg_defaults), GFP_KERNEL); if (!tmp_buf) return -ENOMEM; map->reg_defaults = tmp_buf; } else if (map->num_reg_defaults_raw) { /* Some devices such as PMICs don't have cache defaults, * we cope with this by reading back the HW registers and * crafting the cache defaults by hand. */ ret = regcache_hw_init(map); if (ret < 0) return ret; if (map->cache_bypass) return 0; } if (!map->max_register_is_set && map->num_reg_defaults_raw) { map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride; map->max_register_is_set = true; } if (map->cache_ops->init) { dev_dbg(map->dev, "Initializing %s cache\n", map->cache_ops->name); map->lock(map->lock_arg); ret = map->cache_ops->init(map); map->unlock(map->lock_arg); if (ret) goto err_free; } return 0; err_free: kfree(map->reg_defaults); if (map->cache_free) kfree(map->reg_defaults_raw); return ret; } void regcache_exit(struct regmap *map) { if (map->cache_type == REGCACHE_NONE) return; BUG_ON(!map->cache_ops); kfree(map->reg_defaults); if (map->cache_free) kfree(map->reg_defaults_raw); if (map->cache_ops->exit) { dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name); map->lock(map->lock_arg); map->cache_ops->exit(map); map->unlock(map->lock_arg); } } /** * regcache_read - Fetch the value of a given register from the cache. * * @map: map to configure. * @reg: The register index. * @value: The value to be returned. * * Return a negative value on failure, 0 on success. */ int regcache_read(struct regmap *map, unsigned int reg, unsigned int *value) { int ret; if (map->cache_type == REGCACHE_NONE) return -EINVAL; BUG_ON(!map->cache_ops); if (!regmap_volatile(map, reg)) { ret = map->cache_ops->read(map, reg, value); if (ret == 0) trace_regmap_reg_read_cache(map, reg, *value); return ret; } return -EINVAL; } /** * regcache_write - Set the value of a given register in the cache. * * @map: map to configure. * @reg: The register index. * @value: The new register value. * * Return a negative value on failure, 0 on success. */ int regcache_write(struct regmap *map, unsigned int reg, unsigned int value) { if (map->cache_type == REGCACHE_NONE) return 0; BUG_ON(!map->cache_ops); if (!regmap_volatile(map, reg)) return map->cache_ops->write(map, reg, value); return 0; } bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg, unsigned int val) { int ret; if (!regmap_writeable(map, reg)) return false; /* If we don't know the chip just got reset, then sync everything. */ if (!map->no_sync_defaults) return true; /* Is this the hardware default? If so skip. */ ret = regcache_lookup_reg(map, reg); if (ret >= 0 && val == map->reg_defaults[ret].def) return false; return true; } static int regcache_default_sync(struct regmap *map, unsigned int min, unsigned int max) { unsigned int reg; for (reg = min; reg <= max; reg += map->reg_stride) { unsigned int val; int ret; if (regmap_volatile(map, reg) || !regmap_writeable(map, reg)) continue; ret = regcache_read(map, reg, &val); if (ret == -ENOENT) continue; if (ret) return ret; if (!regcache_reg_needs_sync(map, reg, val)) continue; map->cache_bypass = true; ret = _regmap_write(map, reg, val); map->cache_bypass = false; if (ret) { dev_err(map->dev, "Unable to sync register %#x. 
%d\n", reg, ret); return ret; } dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); } return 0; } static int rbtree_all(const void *key, const struct rb_node *node) { return 0; } /** * regcache_sync - Sync the register cache with the hardware. * * @map: map to configure. * * Any registers that should not be synced should be marked as * volatile. In general drivers can choose not to use the provided * syncing functionality if they so require. * * Return a negative value on failure, 0 on success. */ int regcache_sync(struct regmap *map) { int ret = 0; unsigned int i; const char *name; bool bypass; struct rb_node *node; if (WARN_ON(map->cache_type == REGCACHE_NONE)) return -EINVAL; BUG_ON(!map->cache_ops); map->lock(map->lock_arg); /* Remember the initial bypass state */ bypass = map->cache_bypass; dev_dbg(map->dev, "Syncing %s cache\n", map->cache_ops->name); name = map->cache_ops->name; trace_regcache_sync(map, name, "start"); if (!map->cache_dirty) goto out; /* Apply any patch first */ map->cache_bypass = true; for (i = 0; i < map->patch_regs; i++) { ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def); if (ret != 0) { dev_err(map->dev, "Failed to write %x = %x: %d\n", map->patch[i].reg, map->patch[i].def, ret); goto out; } } map->cache_bypass = false; if (map->cache_ops->sync) ret = map->cache_ops->sync(map, 0, map->max_register); else ret = regcache_default_sync(map, 0, map->max_register); if (ret == 0) map->cache_dirty = false; out: /* Restore the bypass state */ map->cache_bypass = bypass; map->no_sync_defaults = false; /* * If we did any paging with cache bypassed and a cached * paging register then the register and cache state might * have gone out of sync, force writes of all the paging * registers. */ rb_for_each(node, NULL, &map->range_tree, rbtree_all) { struct regmap_range_node *this = rb_entry(node, struct regmap_range_node, node); /* If there's nothing in the cache there's nothing to sync */ if (regcache_read(map, this->selector_reg, &i) != 0) continue; ret = _regmap_write(map, this->selector_reg, i); if (ret != 0) { dev_err(map->dev, "Failed to write %x = %x: %d\n", this->selector_reg, i, ret); break; } } map->unlock(map->lock_arg); regmap_async_complete(map); trace_regcache_sync(map, name, "stop"); return ret; } EXPORT_SYMBOL_GPL(regcache_sync); /** * regcache_sync_region - Sync part of the register cache with the hardware. * * @map: map to sync. * @min: first register to sync * @max: last register to sync * * Write all non-default register values in the specified region to * the hardware. * * Return a negative value on failure, 0 on success. 
*/ int regcache_sync_region(struct regmap *map, unsigned int min, unsigned int max) { int ret = 0; const char *name; bool bypass; if (WARN_ON(map->cache_type == REGCACHE_NONE)) return -EINVAL; BUG_ON(!map->cache_ops); map->lock(map->lock_arg); /* Remember the initial bypass state */ bypass = map->cache_bypass; name = map->cache_ops->name; dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); trace_regcache_sync(map, name, "start region"); if (!map->cache_dirty) goto out; map->async = true; if (map->cache_ops->sync) ret = map->cache_ops->sync(map, min, max); else ret = regcache_default_sync(map, min, max); out: /* Restore the bypass state */ map->cache_bypass = bypass; map->async = false; map->no_sync_defaults = false; map->unlock(map->lock_arg); regmap_async_complete(map); trace_regcache_sync(map, name, "stop region"); return ret; } EXPORT_SYMBOL_GPL(regcache_sync_region); /** * regcache_drop_region - Discard part of the register cache * * @map: map to operate on * @min: first register to discard * @max: last register to discard * * Discard part of the register cache. * * Return a negative value on failure, 0 on success. */ int regcache_drop_region(struct regmap *map, unsigned int min, unsigned int max) { int ret = 0; if (!map->cache_ops || !map->cache_ops->drop) return -EINVAL; map->lock(map->lock_arg); trace_regcache_drop_region(map, min, max); ret = map->cache_ops->drop(map, min, max); map->unlock(map->lock_arg); return ret; } EXPORT_SYMBOL_GPL(regcache_drop_region); /** * regcache_cache_only - Put a register map into cache only mode * * @map: map to configure * @enable: flag if changes should be written to the hardware * * When a register map is marked as cache only writes to the register * map API will only update the register cache, they will not cause * any hardware changes. This is useful for allowing portions of * drivers to act as though the device were functioning as normal when * it is disabled for power saving reasons. */ void regcache_cache_only(struct regmap *map, bool enable) { map->lock(map->lock_arg); WARN_ON(map->cache_type != REGCACHE_NONE && map->cache_bypass && enable); map->cache_only = enable; trace_regmap_cache_only(map, enable); map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_cache_only); /** * regcache_mark_dirty - Indicate that HW registers were reset to default values * * @map: map to mark * * Inform regcache that the device has been powered down or reset, so that * on resume, regcache_sync() knows to write out all non-default values * stored in the cache. * * If this function is not called, regcache_sync() will assume that * the hardware state still matches the cache state, modulo any writes that * happened when cache_only was true. */ void regcache_mark_dirty(struct regmap *map) { map->lock(map->lock_arg); map->cache_dirty = true; map->no_sync_defaults = true; map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_mark_dirty); /** * regcache_cache_bypass - Put a register map into cache bypass mode * * @map: map to configure * @enable: flag if changes should not be written to the cache * * When a register map is marked with the cache bypass option, writes * to the register map API will only update the hardware and not * the cache directly. This is useful when syncing the cache back to * the hardware. 
*/ void regcache_cache_bypass(struct regmap *map, bool enable) { map->lock(map->lock_arg); WARN_ON(map->cache_only && enable); map->cache_bypass = enable; trace_regmap_cache_bypass(map, enable); map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_cache_bypass); /** * regcache_reg_cached - Check if a register is cached * * @map: map to check * @reg: register to check * * Reports if a register is cached. */ bool regcache_reg_cached(struct regmap *map, unsigned int reg) { unsigned int val; int ret; map->lock(map->lock_arg); ret = regcache_read(map, reg, &val); map->unlock(map->lock_arg); return ret == 0; } EXPORT_SYMBOL_GPL(regcache_reg_cached); void regcache_set_val(struct regmap *map, void *base, unsigned int idx, unsigned int val) { /* Use device native format if possible */ if (map->format.format_val) { map->format.format_val(base + (map->cache_word_size * idx), val, 0); return; } switch (map->cache_word_size) { case 1: { u8 *cache = base; cache[idx] = val; break; } case 2: { u16 *cache = base; cache[idx] = val; break; } case 4: { u32 *cache = base; cache[idx] = val; break; } default: BUG(); } } unsigned int regcache_get_val(struct regmap *map, const void *base, unsigned int idx) { if (!base) return -EINVAL; /* Use device native format if possible */ if (map->format.parse_val) return map->format.parse_val(regcache_get_val_addr(map, base, idx)); switch (map->cache_word_size) { case 1: { const u8 *cache = base; return cache[idx]; } case 2: { const u16 *cache = base; return cache[idx]; } case 4: { const u32 *cache = base; return cache[idx]; } default: BUG(); } /* unreachable */ return -1; } static int regcache_default_cmp(const void *a, const void *b) { const struct reg_default *_a = a; const struct reg_default *_b = b; return _a->reg - _b->reg; } int regcache_lookup_reg(struct regmap *map, unsigned int reg) { struct reg_default key; struct reg_default *r; key.reg = reg; key.def = 0; r = bsearch(&key, map->reg_defaults, map->num_reg_defaults, sizeof(struct reg_default), regcache_default_cmp); if (r) return r - map->reg_defaults; else return -ENOENT; } static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx) { if (!cache_present) return true; return test_bit(idx, cache_present); } int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val) { int ret; if (!regcache_reg_needs_sync(map, reg, val)) return 0; map->cache_bypass = true; ret = _regmap_write(map, reg, val); map->cache_bypass = false; if (ret != 0) { dev_err(map->dev, "Unable to sync register %#x. 
%d\n", reg, ret); return ret; } dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); return 0; } static int regcache_sync_block_single(struct regmap *map, void *block, unsigned long *cache_present, unsigned int block_base, unsigned int start, unsigned int end) { unsigned int i, regtmp, val; int ret; for (i = start; i < end; i++) { regtmp = block_base + (i * map->reg_stride); if (!regcache_reg_present(cache_present, i) || !regmap_writeable(map, regtmp)) continue; val = regcache_get_val(map, block, i); ret = regcache_sync_val(map, regtmp, val); if (ret != 0) return ret; } return 0; } static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, unsigned int base, unsigned int cur) { size_t val_bytes = map->format.val_bytes; int ret, count; if (*data == NULL) return 0; count = (cur - base) / map->reg_stride; dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", count * val_bytes, count, base, cur - map->reg_stride); map->cache_bypass = true; ret = _regmap_raw_write(map, base, *data, count * val_bytes, false); if (ret) dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n", base, cur - map->reg_stride, ret); map->cache_bypass = false; *data = NULL; return ret; } static int regcache_sync_block_raw(struct regmap *map, void *block, unsigned long *cache_present, unsigned int block_base, unsigned int start, unsigned int end) { unsigned int i, val; unsigned int regtmp = 0; unsigned int base = 0; const void *data = NULL; int ret; for (i = start; i < end; i++) { regtmp = block_base + (i * map->reg_stride); if (!regcache_reg_present(cache_present, i) || !regmap_writeable(map, regtmp)) { ret = regcache_sync_block_raw_flush(map, &data, base, regtmp); if (ret != 0) return ret; continue; } val = regcache_get_val(map, block, i); if (!regcache_reg_needs_sync(map, regtmp, val)) { ret = regcache_sync_block_raw_flush(map, &data, base, regtmp); if (ret != 0) return ret; continue; } if (!data) { data = regcache_get_val_addr(map, block, i); base = regtmp; } } return regcache_sync_block_raw_flush(map, &data, base, regtmp + map->reg_stride); } int regcache_sync_block(struct regmap *map, void *block, unsigned long *cache_present, unsigned int block_base, unsigned int start, unsigned int end) { if (regmap_can_raw_write(map) && !map->use_single_write) return regcache_sync_block_raw(map, block, cache_present, block_base, start, end); else return regcache_sync_block_single(map, block, cache_present, block_base, start, end); }
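/*
 * Usage sketch (hypothetical "example_dev" holding a struct regmap pointer): the
 * suspend/resume pattern that regcache_cache_only(), regcache_mark_dirty() and
 * regcache_sync() above are designed for. A real driver would also manage its
 * regulators and clocks around these calls.
 */
#include <linux/regmap.h>

struct example_dev {
	struct regmap *regmap;
};

static int example_suspend(struct example_dev *edev)
{
	/* Route further register writes into the cache only... */
	regcache_cache_only(edev->regmap, true);
	/* ...and record that the hardware will lose its state. */
	regcache_mark_dirty(edev->regmap);
	return 0;
}

static int example_resume(struct example_dev *edev)
{
	/* Power is back: allow hardware access again... */
	regcache_cache_only(edev->regmap, false);
	/* ...and write out every cached value that is not a hardware default. */
	return regcache_sync(edev->regmap);
}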
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/spinlock.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/init.h> static LIST_HEAD(ax25_dev_list); DEFINE_SPINLOCK(ax25_dev_lock); ax25_dev *ax25_addr_ax25dev(ax25_address *addr) { ax25_dev *ax25_dev, *res = NULL; spin_lock_bh(&ax25_dev_lock); list_for_each_entry(ax25_dev, &ax25_dev_list, list) if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) { res = ax25_dev; ax25_dev_hold(ax25_dev); break; } spin_unlock_bh(&ax25_dev_lock); return res; } /* * This is called when an interface is brought up. These are * reasonable defaults.
*/ void ax25_dev_device_up(struct net_device *dev) { ax25_dev *ax25_dev; ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_KERNEL); if (!ax25_dev) { printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n"); return; } refcount_set(&ax25_dev->refcount, 1); ax25_dev->dev = dev; netdev_hold(dev, &ax25_dev->dev_tracker, GFP_KERNEL); ax25_dev->forward = NULL; ax25_dev->device_up = true; ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE; ax25_dev->values[AX25_VALUES_AXDEFMODE] = AX25_DEF_AXDEFMODE; ax25_dev->values[AX25_VALUES_BACKOFF] = AX25_DEF_BACKOFF; ax25_dev->values[AX25_VALUES_CONMODE] = AX25_DEF_CONMODE; ax25_dev->values[AX25_VALUES_WINDOW] = AX25_DEF_WINDOW; ax25_dev->values[AX25_VALUES_EWINDOW] = AX25_DEF_EWINDOW; ax25_dev->values[AX25_VALUES_T1] = AX25_DEF_T1; ax25_dev->values[AX25_VALUES_T2] = AX25_DEF_T2; ax25_dev->values[AX25_VALUES_T3] = AX25_DEF_T3; ax25_dev->values[AX25_VALUES_IDLE] = AX25_DEF_IDLE; ax25_dev->values[AX25_VALUES_N2] = AX25_DEF_N2; ax25_dev->values[AX25_VALUES_PACLEN] = AX25_DEF_PACLEN; ax25_dev->values[AX25_VALUES_PROTOCOL] = AX25_DEF_PROTOCOL; #ifdef CONFIG_AX25_DAMA_SLAVE ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT; #endif #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_ds_setup_timer(ax25_dev); #endif spin_lock_bh(&ax25_dev_lock); list_add(&ax25_dev->list, &ax25_dev_list); dev->ax25_ptr = ax25_dev; spin_unlock_bh(&ax25_dev_lock); ax25_register_dev_sysctl(ax25_dev); } void ax25_dev_device_down(struct net_device *dev) { ax25_dev *s, *ax25_dev; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; ax25_unregister_dev_sysctl(ax25_dev); spin_lock_bh(&ax25_dev_lock); #ifdef CONFIG_AX25_DAMA_SLAVE timer_shutdown_sync(&ax25_dev->dama.slave_timer); #endif /* * Remove any packet forwarding that points to this device. */ list_for_each_entry(s, &ax25_dev_list, list) if (s->forward == dev) s->forward = NULL; list_for_each_entry(s, &ax25_dev_list, list) { if (s == ax25_dev) { list_del(&s->list); break; } } dev->ax25_ptr = NULL; spin_unlock_bh(&ax25_dev_lock); netdev_put(dev, &ax25_dev->dev_tracker); ax25_dev_put(ax25_dev); } int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) { ax25_dev *ax25_dev, *fwd_dev; if ((ax25_dev = ax25_addr_ax25dev(&fwd->port_from)) == NULL) return -EINVAL; switch (cmd) { case SIOCAX25ADDFWD: fwd_dev = ax25_addr_ax25dev(&fwd->port_to); if (!fwd_dev) { ax25_dev_put(ax25_dev); return -EINVAL; } if (ax25_dev->forward) { ax25_dev_put(fwd_dev); ax25_dev_put(ax25_dev); return -EINVAL; } ax25_dev->forward = fwd_dev->dev; ax25_dev_put(fwd_dev); ax25_dev_put(ax25_dev); break; case SIOCAX25DELFWD: if (!ax25_dev->forward) { ax25_dev_put(ax25_dev); return -EINVAL; } ax25_dev->forward = NULL; ax25_dev_put(ax25_dev); break; default: ax25_dev_put(ax25_dev); return -EINVAL; } return 0; } struct net_device *ax25_fwd_dev(struct net_device *dev) { ax25_dev *ax25_dev; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return dev; if (ax25_dev->forward == NULL) return dev; return ax25_dev->forward; } /* * Free all memory associated with device structures. */ void __exit ax25_dev_free(void) { ax25_dev *s, *n; spin_lock_bh(&ax25_dev_lock); list_for_each_entry_safe(s, n, &ax25_dev_list, list) { netdev_put(s->dev, &s->dev_tracker); list_del(&s->list); ax25_dev_put(s); } spin_unlock_bh(&ax25_dev_lock); }
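/*
 * Usage sketch (hypothetical helper): ax25_addr_ax25dev() above returns the
 * device with its refcount held, so every caller must balance the lookup with
 * ax25_dev_put(), as the ioctl paths in this file do.
 */
static bool example_port_has_forwarding(ax25_address *addr)
{
	ax25_dev *ax25_dev = ax25_addr_ax25dev(addr);
	bool forwards;

	if (!ax25_dev)
		return false;

	forwards = ax25_dev->forward != NULL;
	ax25_dev_put(ax25_dev);		/* drop the hold taken by the lookup */
	return forwards;
}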
// SPDX-License-Identifier: GPL-2.0 or MIT /* * Copyright 2018 Noralf Trønnes */ #include <linux/iosys-map.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <drm/drm_client.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem.h> #include <drm/drm_mode.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" #include "drm_internal.h" /** * DOC: overview * * This library provides support for clients running in the kernel like fbdev and bootsplash. * * GEM drivers which provide a GEM based dumb buffer with a virtual address are supported. */ static int drm_client_open(struct drm_client_dev *client) { struct drm_device *dev = client->dev; struct drm_file *file; file = drm_file_alloc(dev->primary); if (IS_ERR(file)) return PTR_ERR(file); mutex_lock(&dev->filelist_mutex); list_add(&file->lhead, &dev->filelist_internal); mutex_unlock(&dev->filelist_mutex); client->file = file; return 0; } static void drm_client_close(struct drm_client_dev *client) { struct drm_device *dev = client->dev; mutex_lock(&dev->filelist_mutex); list_del(&client->file->lhead); mutex_unlock(&dev->filelist_mutex); drm_file_free(client->file); } /** * drm_client_init - Initialise a DRM client * @dev: DRM device * @client: DRM client * @name: Client name * @funcs: DRM client functions (optional) * * This initialises the client and opens a &drm_file. * Use drm_client_register() to complete the process. * The caller needs to hold a reference on @dev before calling this function.
* The client is freed when the &drm_device is unregistered. See drm_client_release(). * * Returns: * Zero on success or negative error code on failure. */ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, const char *name, const struct drm_client_funcs *funcs) { int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create) return -EOPNOTSUPP; client->dev = dev; client->name = name; client->funcs = funcs; ret = drm_client_modeset_create(client); if (ret) return ret; ret = drm_client_open(client); if (ret) goto err_free; drm_dev_get(dev); return 0; err_free: drm_client_modeset_free(client); return ret; } EXPORT_SYMBOL(drm_client_init); /** * drm_client_register - Register client * @client: DRM client * * Add the client to the &drm_device client list to activate its callbacks. * @client must be initialized by a call to drm_client_init(). After * drm_client_register() it is no longer permissible to call drm_client_release() * directly (outside the unregister callback), instead cleanup will happen * automatically on driver unload. * * Registering a client generates a hotplug event that allows the client * to set up its display from pre-existing outputs. The client must have * initialized its state to able to handle the hotplug event successfully. */ void drm_client_register(struct drm_client_dev *client) { struct drm_device *dev = client->dev; int ret; mutex_lock(&dev->clientlist_mutex); list_add(&client->list, &dev->clientlist); if (client->funcs && client->funcs->hotplug) { /* * Perform an initial hotplug event to pick up the * display configuration for the client. This step * has to be performed *after* registering the client * in the list of clients, or a concurrent hotplug * event might be lost; leaving the display off. * * Hold the clientlist_mutex as for a regular hotplug * event. */ ret = client->funcs->hotplug(client); if (ret) drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); } mutex_unlock(&dev->clientlist_mutex); } EXPORT_SYMBOL(drm_client_register); /** * drm_client_release - Release DRM client resources * @client: DRM client * * Releases resources by closing the &drm_file that was opened by drm_client_init(). * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set. * * This function should only be called from the unregister callback. An exception * is fbdev which cannot free the buffer if userspace has open file descriptors. * * Note: * Clients cannot initiate a release by themselves. This is done to keep the code simple. * The driver has to be unloaded before the client can be unloaded. 
*/ void drm_client_release(struct drm_client_dev *client) { struct drm_device *dev = client->dev; drm_dbg_kms(dev, "%s\n", client->name); drm_client_modeset_free(client); drm_client_close(client); drm_dev_put(dev); } EXPORT_SYMBOL(drm_client_release); static void drm_client_buffer_delete(struct drm_client_buffer *buffer) { if (buffer->gem) { drm_gem_vunmap_unlocked(buffer->gem, &buffer->map); drm_gem_object_put(buffer->gem); } kfree(buffer); } static struct drm_client_buffer * drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format, u32 *handle) { const struct drm_format_info *info = drm_format_info(format); struct drm_mode_create_dumb dumb_args = { }; struct drm_device *dev = client->dev; struct drm_client_buffer *buffer; struct drm_gem_object *obj; int ret; buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); buffer->client = client; dumb_args.width = width; dumb_args.height = height; dumb_args.bpp = drm_format_info_bpp(info, 0); ret = drm_mode_create_dumb(dev, &dumb_args, client->file); if (ret) goto err_delete; obj = drm_gem_object_lookup(client->file, dumb_args.handle); if (!obj) { ret = -ENOENT; goto err_delete; } buffer->pitch = dumb_args.pitch; buffer->gem = obj; *handle = dumb_args.handle; return buffer; err_delete: drm_client_buffer_delete(buffer); return ERR_PTR(ret); } /** * drm_client_buffer_vmap_local - Map DRM client buffer into address space * @buffer: DRM client buffer * @map_copy: Returns the mapped memory's address * * This function maps a client buffer into kernel address space. If the * buffer is already mapped, it returns the existing mapping's address. * * Client buffer mappings are not ref'counted. Each call to * drm_client_buffer_vmap_local() should be closely followed by a call to * drm_client_buffer_vunmap_local(). See drm_client_buffer_vmap() for * long-term mappings. * * The returned address is a copy of the internal value. In contrast to * other vmap interfaces, you don't need it for the client's vunmap * function. So you can modify it at will during blit and draw operations. * * Returns: * 0 on success, or a negative errno code otherwise. */ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, struct iosys_map *map_copy) { struct drm_gem_object *gem = buffer->gem; struct iosys_map *map = &buffer->map; int ret; drm_gem_lock(gem); ret = drm_gem_vmap(gem, map); if (ret) goto err_drm_gem_vmap_unlocked; *map_copy = *map; return 0; err_drm_gem_vmap_unlocked: drm_gem_unlock(gem); return ret; } EXPORT_SYMBOL(drm_client_buffer_vmap_local); /** * drm_client_buffer_vunmap_local - Unmap DRM client buffer * @buffer: DRM client buffer * * This function removes a client buffer's memory mapping established * with drm_client_buffer_vunmap_local(). Calling this function is only * required by clients that manage their buffer mappings by themselves. */ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer) { struct drm_gem_object *gem = buffer->gem; struct iosys_map *map = &buffer->map; drm_gem_vunmap(gem, map); drm_gem_unlock(gem); } EXPORT_SYMBOL(drm_client_buffer_vunmap_local); /** * drm_client_buffer_vmap - Map DRM client buffer into address space * @buffer: DRM client buffer * @map_copy: Returns the mapped memory's address * * This function maps a client buffer into kernel address space. If the * buffer is already mapped, it returns the existing mapping's address. * * Client buffer mappings are not ref'counted. 
Each call to * drm_client_buffer_vmap() should be followed by a call to * drm_client_buffer_vunmap(); or the client buffer should be mapped * throughout its lifetime. * * The returned address is a copy of the internal value. In contrast to * other vmap interfaces, you don't need it for the client's vunmap * function. So you can modify it at will during blit and draw operations. * * Returns: * 0 on success, or a negative errno code otherwise. */ int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct iosys_map *map_copy) { struct drm_gem_object *gem = buffer->gem; struct iosys_map *map = &buffer->map; int ret; drm_gem_lock(gem); ret = drm_gem_pin_locked(gem); if (ret) goto err_drm_gem_pin_locked; ret = drm_gem_vmap(gem, map); if (ret) goto err_drm_gem_vmap; drm_gem_unlock(gem); *map_copy = *map; return 0; err_drm_gem_vmap: drm_gem_unpin_locked(buffer->gem); err_drm_gem_pin_locked: drm_gem_unlock(gem); return ret; } EXPORT_SYMBOL(drm_client_buffer_vmap); /** * drm_client_buffer_vunmap - Unmap DRM client buffer * @buffer: DRM client buffer * * This function removes a client buffer's memory mapping. Calling this * function is only required by clients that manage their buffer mappings * by themselves. */ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { struct drm_gem_object *gem = buffer->gem; struct iosys_map *map = &buffer->map; drm_gem_lock(gem); drm_gem_vunmap(gem, map); drm_gem_unpin_locked(gem); drm_gem_unlock(gem); } EXPORT_SYMBOL(drm_client_buffer_vunmap); static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer) { int ret; if (!buffer->fb) return; ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file); if (ret) drm_err(buffer->client->dev, "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret); buffer->fb = NULL; } static int drm_client_buffer_addfb(struct drm_client_buffer *buffer, u32 width, u32 height, u32 format, u32 handle) { struct drm_client_dev *client = buffer->client; struct drm_mode_fb_cmd2 fb_req = { }; int ret; fb_req.width = width; fb_req.height = height; fb_req.pixel_format = format; fb_req.handles[0] = handle; fb_req.pitches[0] = buffer->pitch; ret = drm_mode_addfb2(client->dev, &fb_req, client->file); if (ret) return ret; buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id); if (WARN_ON(!buffer->fb)) return -ENOENT; /* drop the reference we picked up in framebuffer lookup */ drm_framebuffer_put(buffer->fb); strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN); return 0; } /** * drm_client_framebuffer_create - Create a client framebuffer * @client: DRM client * @width: Framebuffer width * @height: Framebuffer height * @format: Buffer format * * This function creates a &drm_client_buffer which consists of a * &drm_framebuffer backed by a dumb buffer. * Call drm_client_framebuffer_delete() to free the buffer. * * Returns: * Pointer to a client buffer or an error pointer on failure. */ struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format) { struct drm_client_buffer *buffer; u32 handle; int ret; buffer = drm_client_buffer_create(client, width, height, format, &handle); if (IS_ERR(buffer)) return buffer; ret = drm_client_buffer_addfb(buffer, width, height, format, handle); /* * The handle is only needed for creating the framebuffer, destroy it * again to solve a circular dependency should anybody export the GEM * object as DMA-buf. 
The framebuffer and our buffer structure are still * holding references to the GEM object to prevent its destruction. */ drm_mode_destroy_dumb(client->dev, handle, client->file); if (ret) { drm_client_buffer_delete(buffer); return ERR_PTR(ret); } return buffer; } EXPORT_SYMBOL(drm_client_framebuffer_create); /** * drm_client_framebuffer_delete - Delete a client framebuffer * @buffer: DRM client buffer (can be NULL) */ void drm_client_framebuffer_delete(struct drm_client_buffer *buffer) { if (!buffer) return; drm_client_buffer_rmfb(buffer); drm_client_buffer_delete(buffer); } EXPORT_SYMBOL(drm_client_framebuffer_delete); /** * drm_client_framebuffer_flush - Manually flush client framebuffer * @buffer: DRM client buffer (can be NULL) * @rect: Damage rectangle (if NULL flushes all) * * This calls &drm_framebuffer_funcs->dirty (if present) to flush buffer changes * for drivers that need it. * * Returns: * Zero on success or negative error code on failure. */ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect) { if (!buffer || !buffer->fb || !buffer->fb->funcs->dirty) return 0; if (rect) { struct drm_clip_rect clip = { .x1 = rect->x1, .y1 = rect->y1, .x2 = rect->x2, .y2 = rect->y2, }; return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file, 0, 0, &clip, 1); } return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file, 0, 0, NULL, 0); } EXPORT_SYMBOL(drm_client_framebuffer_flush);
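/*
 * Usage sketch: how the client helpers above are typically combined.
 * example_client_clear() is hypothetical; only the drm_client_*() calls are
 * taken from the interfaces above, while the drawing step (DRM_FORMAT_XRGB8888,
 * iosys_map_memset()) is an assumed example of what a client might do with
 * the short-term mapping.
 */
#include <drm/drm_client.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <linux/err.h>
#include <linux/iosys-map.h>

static int example_client_clear(struct drm_client_dev *client,
				u32 width, u32 height)
{
	struct drm_rect rect = DRM_RECT_INIT(0, 0, width, height);
	struct drm_client_buffer *buffer;
	struct iosys_map map;
	int ret;

	/* Dumb buffer plus framebuffer in one step. */
	buffer = drm_client_framebuffer_create(client, width, height,
					       DRM_FORMAT_XRGB8888);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* Short-term mapping, bracketed tightly around the drawing. */
	ret = drm_client_buffer_vmap_local(buffer, &map);
	if (ret)
		goto err_delete;

	iosys_map_memset(&map, 0, 0, buffer->pitch * height);
	drm_client_buffer_vunmap_local(buffer);

	/* Flush the damage for drivers that need manual flushing. */
	ret = drm_client_framebuffer_flush(buffer, &rect);

err_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}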
7 7 7 7 3 7 7 7 5 5 5 5 5 5 2 2 2 2 2 2 2 2 2 2 2 2 11 11 11 1 11 6 4 1 9 9 2 11 11 11 11 11 2 2 2 1 2 11 6 6 11 11 11 11 11 11 11 11 11 11 11 5 11 11 11 18 18 18 18 18 2 2 2 2 2 2 2 2 18 18 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 // SPDX-License-Identifier: GPL-2.0 #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/wext.h> #include <net/hotdata.h> #include "dev.h" static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos) { unsigned long ifindex = *pos; struct net_device *dev; for_each_netdev_dump(seq_file_net(seq), dev, ifindex) { *pos = dev->ifindex; return dev; } return NULL; } static void *dev_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); if (!*pos) return SEQ_START_TOKEN; return dev_seq_from_index(seq, pos); } static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return dev_seq_from_index(seq, pos); } static void dev_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", dev->name, stats->rx_bytes, stats->rx_packets, stats->rx_errors, stats->rx_dropped + stats->rx_missed_errors, stats->rx_fifo_errors, stats->rx_length_errors + stats->rx_over_errors + stats->rx_crc_errors + stats->rx_frame_errors, stats->rx_compressed, stats->multicast, stats->tx_bytes, stats->tx_packets, stats->tx_errors, stats->tx_dropped, stats->tx_fifo_errors, stats->collisions, stats->tx_carrier_errors + stats->tx_aborted_errors + stats->tx_window_errors + stats->tx_heartbeat_errors, stats->tx_compressed); } /* * Called from the PROCfs module. 
This now uses the new arbitrary sized * /proc/net interface to create /proc/net/dev */ static int dev_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Inter-| Receive " " | Transmit\n" " face |bytes packets errs drop fifo frame " "compressed multicast|bytes packets errs " "drop fifo colls carrier compressed\n"); else dev_seq_printf_stats(seq, v); return 0; } static u32 softnet_input_pkt_queue_len(struct softnet_data *sd) { return skb_queue_len_lockless(&sd->input_pkt_queue); } static u32 softnet_process_queue_len(struct softnet_data *sd) { return skb_queue_len_lockless(&sd->process_queue); } static struct softnet_data *softnet_get_online(loff_t *pos) { struct softnet_data *sd = NULL; while (*pos < nr_cpu_ids) if (cpu_online(*pos)) { sd = &per_cpu(softnet_data, *pos); break; } else ++*pos; return sd; } static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) { return softnet_get_online(pos); } static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return softnet_get_online(pos); } static void softnet_seq_stop(struct seq_file *seq, void *v) { } static int softnet_seq_show(struct seq_file *seq, void *v) { struct softnet_data *sd = v; u32 input_qlen = softnet_input_pkt_queue_len(sd); u32 process_qlen = softnet_process_queue_len(sd); unsigned int flow_limit_count = 0; #ifdef CONFIG_NET_FLOW_LIMIT struct sd_flow_limit *fl; rcu_read_lock(); fl = rcu_dereference(sd->flow_limit); if (fl) flow_limit_count = fl->count; rcu_read_unlock(); #endif /* the index is the CPU id owing this sd. Since offline CPUs are not * displayed, it would be othrwise not trivial for the user-space * mapping the data a specific CPU */ seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x " "%08x %08x\n", sd->processed, atomic_read(&sd->dropped), sd->time_squeeze, 0, 0, 0, 0, 0, /* was fastroute */ 0, /* was cpu_collision */ sd->received_rps, flow_limit_count, input_qlen + process_qlen, (int)seq->index, input_qlen, process_qlen); return 0; } static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = dev_seq_show, }; static const struct seq_operations softnet_seq_ops = { .start = softnet_seq_start, .next = softnet_seq_next, .stop = softnet_seq_stop, .show = softnet_seq_show, }; static void *ptype_get_idx(struct seq_file *seq, loff_t pos) { struct list_head *ptype_list = NULL; struct packet_type *pt = NULL; struct net_device *dev; loff_t i = 0; int t; for_each_netdev_rcu(seq_file_net(seq), dev) { ptype_list = &dev->ptype_all; list_for_each_entry_rcu(pt, ptype_list, list) { if (i == pos) return pt; ++i; } } list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) { if (i == pos) return pt; ++i; } for (t = 0; t < PTYPE_HASH_SIZE; t++) { list_for_each_entry_rcu(pt, &ptype_base[t], list) { if (i == pos) return pt; ++i; } } return NULL; } static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return *pos ? 
ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net_device *dev; struct packet_type *pt; struct list_head *nxt; int hash; ++*pos; if (v == SEQ_START_TOKEN) return ptype_get_idx(seq, 0); pt = v; nxt = pt->list.next; if (pt->dev) { if (nxt != &pt->dev->ptype_all) goto found; dev = pt->dev; for_each_netdev_continue_rcu(seq_file_net(seq), dev) { if (!list_empty(&dev->ptype_all)) { nxt = dev->ptype_all.next; goto found; } } nxt = net_hotdata.ptype_all.next; goto ptype_all; } if (pt->type == htons(ETH_P_ALL)) { ptype_all: if (nxt != &net_hotdata.ptype_all) goto found; hash = 0; nxt = ptype_base[0].next; } else hash = ntohs(pt->type) & PTYPE_HASH_MASK; while (nxt == &ptype_base[hash]) { if (++hash >= PTYPE_HASH_SIZE) return NULL; nxt = ptype_base[hash].next; } found: return list_entry(nxt, struct packet_type, list); } static void ptype_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int ptype_seq_show(struct seq_file *seq, void *v) { struct packet_type *pt = v; if (v == SEQ_START_TOKEN) seq_puts(seq, "Type Device Function\n"); else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) { if (pt->type == htons(ETH_P_ALL)) seq_puts(seq, "ALL "); else seq_printf(seq, "%04x", ntohs(pt->type)); seq_printf(seq, " %-8s %ps\n", pt->dev ? pt->dev->name : "", pt->func); } return 0; } static const struct seq_operations ptype_seq_ops = { .start = ptype_seq_start, .next = ptype_seq_next, .stop = ptype_seq_stop, .show = ptype_seq_show, }; static int __net_init dev_proc_net_init(struct net *net) { int rc = -ENOMEM; if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops, sizeof(struct seq_net_private))) goto out; if (!proc_create_seq("softnet_stat", 0444, net->proc_net, &softnet_seq_ops)) goto out_dev; if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops, sizeof(struct seq_net_private))) goto out_softnet; if (wext_proc_init(net)) goto out_ptype; rc = 0; out: return rc; out_ptype: remove_proc_entry("ptype", net->proc_net); out_softnet: remove_proc_entry("softnet_stat", net->proc_net); out_dev: remove_proc_entry("dev", net->proc_net); goto out; } static void __net_exit dev_proc_net_exit(struct net *net) { wext_proc_exit(net); remove_proc_entry("ptype", net->proc_net); remove_proc_entry("softnet_stat", net->proc_net); remove_proc_entry("dev", net->proc_net); } static struct pernet_operations __net_initdata dev_proc_ops = { .init = dev_proc_net_init, .exit = dev_proc_net_exit, }; static int dev_mc_seq_show(struct seq_file *seq, void *v) { struct netdev_hw_addr *ha; struct net_device *dev = v; if (v == SEQ_START_TOKEN) return 0; netif_addr_lock_bh(dev); netdev_for_each_mc_addr(ha, dev) { seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n", dev->ifindex, dev->name, ha->refcount, ha->global_use, (int)dev->addr_len, ha->addr); } netif_addr_unlock_bh(dev); return 0; } static const struct seq_operations dev_mc_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = dev_mc_seq_show, }; static int __net_init dev_mc_net_init(struct net *net) { if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops, sizeof(struct seq_net_private))) return -ENOMEM; return 0; } static void __net_exit dev_mc_net_exit(struct net *net) { remove_proc_entry("dev_mcast", net->proc_net); } static struct pernet_operations __net_initdata dev_mc_net_ops = { .init = dev_mc_net_init, .exit = 
dev_mc_net_exit, }; int __init dev_proc_init(void) { int ret = register_pernet_subsys(&dev_proc_ops); if (!ret) return register_pernet_subsys(&dev_mc_net_ops); return ret; }
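/*
 * Usage sketch: the same proc_create_net()/pernet_operations pattern used
 * above, reduced to the single-show case. All "example_*" names are
 * hypothetical; proc_create_net_single() and seq_file_single_net() are the
 * stock helpers for per-netns files that do not need their own iterator.
 */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/seq_file_net.h>
#include <net/net_namespace.h>

static int example_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_single_net(seq);

	seq_printf(seq, "loopback ifindex: %d\n", net->loopback_dev->ifindex);
	return 0;
}

static int __net_init example_proc_net_init(struct net *net)
{
	if (!proc_create_net_single("example", 0444, net->proc_net,
				    example_seq_show, NULL))
		return -ENOMEM;
	return 0;
}

static void __net_exit example_proc_net_exit(struct net *net)
{
	remove_proc_entry("example", net->proc_net);
}

/* Registered with register_pernet_subsys(), mirroring dev_proc_ops above. */
static struct pernet_operations example_proc_net_ops = {
	.init = example_proc_net_init,
	.exit = example_proc_net_exit,
};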
7 6 6 7 7 6 2 6 1 6 1 6 6 6 5 5 3 3 2 3 1 3 3 1 3 2 3 3 7 2 3 1 2 2 2 1 2 2 2 2 2 18 18 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 // SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/act_skbmod.c skb data modifier * * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com> */ #include <linux/module.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <net/inet_ecn.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/tc_wrapper.h> #include <linux/tc_act/tc_skbmod.h> #include <net/tc_act/tc_skbmod.h> static struct tc_action_ops act_skbmod_ops; TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_skbmod *d = to_skbmod(a); int action, max_edit_len, err; struct tcf_skbmod_params *p; u64 flags; tcf_lastuse_update(&d->tcf_tm); bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb); action = READ_ONCE(d->tcf_action); if (unlikely(action == TC_ACT_SHOT)) goto drop; max_edit_len = skb_mac_header_len(skb); p = rcu_dereference_bh(d->skbmod_p); flags = p->flags; /* tcf_skbmod_init() guarantees "flags" to be one of the following: * 1. a combination of SKBMOD_F_{DMAC,SMAC,ETYPE} * 2. SKBMOD_F_SWAPMAC * 3. SKBMOD_F_ECN * SKBMOD_F_ECN only works with IP packets; all other flags only work with Ethernet * packets. 
*/ if (flags == SKBMOD_F_ECN) { switch (skb_protocol(skb, true)) { case cpu_to_be16(ETH_P_IP): case cpu_to_be16(ETH_P_IPV6): max_edit_len += skb_network_header_len(skb); break; default: goto out; } } else if (!skb->dev || skb->dev->type != ARPHRD_ETHER) { goto out; } err = skb_ensure_writable(skb, max_edit_len); if (unlikely(err)) /* best policy is to drop on the floor */ goto drop; if (flags & SKBMOD_F_DMAC) ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst); if (flags & SKBMOD_F_SMAC) ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src); if (flags & SKBMOD_F_ETYPE) eth_hdr(skb)->h_proto = p->eth_type; if (flags & SKBMOD_F_SWAPMAC) { u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */ /*XXX: I am sure we can come up with more efficient swapping*/ ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest); ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source); ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr); } if (flags & SKBMOD_F_ECN) INET_ECN_set_ce(skb); out: return action; drop: qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats)); return TC_ACT_SHOT; } static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = { [TCA_SKBMOD_PARMS] = { .len = sizeof(struct tc_skbmod) }, [TCA_SKBMOD_DMAC] = { .len = ETH_ALEN }, [TCA_SKBMOD_SMAC] = { .len = ETH_ALEN }, [TCA_SKBMOD_ETYPE] = { .type = NLA_U16 }, }; static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); bool ovr = flags & TCA_ACT_FLAGS_REPLACE; bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_SKBMOD_MAX + 1]; struct tcf_skbmod_params *p, *p_old; struct tcf_chain *goto_ch = NULL; struct tc_skbmod *parm; u32 lflags = 0, index; struct tcf_skbmod *d; bool exists = false; u8 *daddr = NULL; u8 *saddr = NULL; u16 eth_type = 0; int ret = 0, err; if (!nla) return -EINVAL; err = nla_parse_nested_deprecated(tb, TCA_SKBMOD_MAX, nla, skbmod_policy, NULL); if (err < 0) return err; if (!tb[TCA_SKBMOD_PARMS]) return -EINVAL; if (tb[TCA_SKBMOD_DMAC]) { daddr = nla_data(tb[TCA_SKBMOD_DMAC]); lflags |= SKBMOD_F_DMAC; } if (tb[TCA_SKBMOD_SMAC]) { saddr = nla_data(tb[TCA_SKBMOD_SMAC]); lflags |= SKBMOD_F_SMAC; } if (tb[TCA_SKBMOD_ETYPE]) { eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]); lflags |= SKBMOD_F_ETYPE; } parm = nla_data(tb[TCA_SKBMOD_PARMS]); index = parm->index; if (parm->flags & SKBMOD_F_SWAPMAC) lflags = SKBMOD_F_SWAPMAC; if (parm->flags & SKBMOD_F_ECN) lflags = SKBMOD_F_ECN; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) return err; exists = err; if (exists && bind) return ACT_P_BOUND; if (!lflags) { if (exists) tcf_idr_release(*a, bind); else tcf_idr_cleanup(tn, index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, index, est, a, &act_skbmod_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret = ACT_P_CREATED; } else if (!ovr) { tcf_idr_release(*a, bind); return -EEXIST; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; d = to_skbmod(*a); p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); if (unlikely(!p)) { err = -ENOMEM; goto put_chain; } p->flags = lflags; if (ovr) spin_lock_bh(&d->tcf_lock); /* Protected by tcf_lock if overwriting existing action. 
*/ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); p_old = rcu_dereference_protected(d->skbmod_p, 1); if (lflags & SKBMOD_F_DMAC) ether_addr_copy(p->eth_dst, daddr); if (lflags & SKBMOD_F_SMAC) ether_addr_copy(p->eth_src, saddr); if (lflags & SKBMOD_F_ETYPE) p->eth_type = htons(eth_type); rcu_assign_pointer(d->skbmod_p, p); if (ovr) spin_unlock_bh(&d->tcf_lock); if (p_old) kfree_rcu(p_old, rcu); if (goto_ch) tcf_chain_put_by_act(goto_ch); return ret; put_chain: if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: tcf_idr_release(*a, bind); return err; } static void tcf_skbmod_cleanup(struct tc_action *a) { struct tcf_skbmod *d = to_skbmod(a); struct tcf_skbmod_params *p; p = rcu_dereference_protected(d->skbmod_p, 1); if (p) kfree_rcu(p, rcu); } static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { struct tcf_skbmod *d = to_skbmod(a); unsigned char *b = skb_tail_pointer(skb); struct tcf_skbmod_params *p; struct tc_skbmod opt; struct tcf_t t; memset(&opt, 0, sizeof(opt)); opt.index = d->tcf_index; opt.refcnt = refcount_read(&d->tcf_refcnt) - ref; opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind; spin_lock_bh(&d->tcf_lock); opt.action = d->tcf_action; p = rcu_dereference_protected(d->skbmod_p, lockdep_is_held(&d->tcf_lock)); opt.flags = p->flags; if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt)) goto nla_put_failure; if ((p->flags & SKBMOD_F_DMAC) && nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst)) goto nla_put_failure; if ((p->flags & SKBMOD_F_SMAC) && nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src)) goto nla_put_failure; if ((p->flags & SKBMOD_F_ETYPE) && nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type))) goto nla_put_failure; tcf_tm_dump(&t, &d->tcf_tm); if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD)) goto nla_put_failure; spin_unlock_bh(&d->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&d->tcf_lock); nlmsg_trim(skb, b); return -1; } static struct tc_action_ops act_skbmod_ops = { .kind = "skbmod", .id = TCA_ACT_SKBMOD, .owner = THIS_MODULE, .act = tcf_skbmod_act, .dump = tcf_skbmod_dump, .init = tcf_skbmod_init, .cleanup = tcf_skbmod_cleanup, .size = sizeof(struct tcf_skbmod), }; MODULE_ALIAS_NET_ACT("skbmod"); static __net_init int skbmod_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); return tc_action_net_init(net, tn, &act_skbmod_ops); } static void __net_exit skbmod_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_skbmod_ops.net_id); } static struct pernet_operations skbmod_net_ops = { .init = skbmod_init_net, .exit_batch = skbmod_exit_net, .id = &act_skbmod_ops.net_id, .size = sizeof(struct tc_action_net), }; MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>"); MODULE_DESCRIPTION("SKB data mod-ing"); MODULE_LICENSE("GPL"); static int __init skbmod_init_module(void) { return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops); } static void __exit skbmod_cleanup_module(void) { tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops); } module_init(skbmod_init_module); module_exit(skbmod_cleanup_module);
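/*
 * Sketch of the parameter-update scheme act_skbmod relies on, reduced to its
 * core: the datapath reads the parameter block under RCU, while writers
 * publish a replacement with rcu_assign_pointer() and free the old block only
 * after a grace period. The "example_*" names are hypothetical and illustrate
 * only the pattern, not the tc action API itself.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_params {
	u64 flags;
	struct rcu_head rcu;
};

struct example_action {
	struct example_params __rcu *params;
	spinlock_t lock;	/* assumed spin_lock_init()ed at setup */
};

static u64 example_read_flags(struct example_action *a)
{
	u64 flags;

	/* Datapath read side: no locks, only an RCU (BH) read section. */
	rcu_read_lock_bh();
	flags = rcu_dereference_bh(a->params)->flags;
	rcu_read_unlock_bh();
	return flags;
}

static int example_update_flags(struct example_action *a, u64 flags)
{
	struct example_params *new, *old;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->flags = flags;

	spin_lock_bh(&a->lock);	/* serialize concurrent writers */
	old = rcu_dereference_protected(a->params,
					lockdep_is_held(&a->lock));
	rcu_assign_pointer(a->params, new);
	spin_unlock_bh(&a->lock);

	if (old)
		kfree_rcu(old, rcu);	/* readers may still be using "old" */
	return 0;
}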
767 60 64 66 66 81 82 36 721 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* delayacct.h - per-task delay accounting * * Copyright (C) Shailabh Nagar, IBM Corp. 2006 */ #ifndef _LINUX_DELAYACCT_H #define _LINUX_DELAYACCT_H #include <uapi/linux/taskstats.h> #ifdef CONFIG_TASK_DELAY_ACCT struct task_delay_info { raw_spinlock_t lock; /* For each stat XXX, add following, aligned appropriately * * struct timespec XXX_start, XXX_end; * u64 XXX_delay; * u32 XXX_count; * * Atomicity of updates to XXX_delay, XXX_count protected by * single lock above (split into XXX_lock if contention is an issue). */ /* * XXX_count is incremented on every XXX operation, the delay * associated with the operation is added to XXX_delay. * XXX_delay contains the accumulated delay time in nanoseconds. */ u64 blkio_start; u64 blkio_delay; /* wait for sync block io completion */ u64 swapin_start; u64 swapin_delay; /* wait for swapin */ u32 blkio_count; /* total count of the number of sync block */ /* io operations performed */ u32 swapin_count; /* total count of swapin */ u64 freepages_start; u64 freepages_delay; /* wait for memory reclaim */ u64 thrashing_start; u64 thrashing_delay; /* wait for thrashing page */ u64 compact_start; u64 compact_delay; /* wait for memory compact */ u64 wpcopy_start; u64 wpcopy_delay; /* wait for write-protect copy */ u64 irq_delay; /* wait for IRQ/SOFTIRQ */ u32 freepages_count; /* total count of memory reclaim */ u32 thrashing_count; /* total count of thrash waits */ u32 compact_count; /* total count of memory compact */ u32 wpcopy_count; /* total count of write-protect copy */ u32 irq_count; /* total count of IRQ/SOFTIRQ */ }; #endif #include <linux/sched.h> #include <linux/slab.h> #include <linux/jump_label.h> #ifdef CONFIG_TASK_DELAY_ACCT DECLARE_STATIC_KEY_FALSE(delayacct_key); extern int delayacct_on; /* Delay accounting turned on/off */ extern struct kmem_cache *delayacct_cache; extern void delayacct_init(void); extern void __delayacct_tsk_init(struct task_struct *); extern void __delayacct_tsk_exit(struct task_struct *); extern void __delayacct_blkio_start(void); extern void __delayacct_blkio_end(struct task_struct *); extern int delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); extern void __delayacct_freepages_start(void); extern void __delayacct_freepages_end(void); extern void __delayacct_thrashing_start(bool *in_thrashing); extern void __delayacct_thrashing_end(bool *in_thrashing); extern void __delayacct_swapin_start(void); extern 
void __delayacct_swapin_end(void); extern void __delayacct_compact_start(void); extern void __delayacct_compact_end(void); extern void __delayacct_wpcopy_start(void); extern void __delayacct_wpcopy_end(void); extern void __delayacct_irq(struct task_struct *task, u32 delta); static inline void delayacct_tsk_init(struct task_struct *tsk) { /* reinitialize in case parent's non-null pointer was dup'ed*/ tsk->delays = NULL; if (delayacct_on) __delayacct_tsk_init(tsk); } /* Free tsk->delays. Called from bad fork and __put_task_struct * where there's no risk of tsk->delays being accessed elsewhere */ static inline void delayacct_tsk_free(struct task_struct *tsk) { if (tsk->delays) kmem_cache_free(delayacct_cache, tsk->delays); tsk->delays = NULL; } static inline void delayacct_blkio_start(void) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_blkio_start(); } static inline void delayacct_blkio_end(struct task_struct *p) { if (!static_branch_unlikely(&delayacct_key)) return; if (p->delays) __delayacct_blkio_end(p); } static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) { if (tsk->delays) return __delayacct_blkio_ticks(tsk); return 0; } static inline void delayacct_freepages_start(void) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_freepages_start(); } static inline void delayacct_freepages_end(void) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_freepages_end(); } static inline void delayacct_thrashing_start(bool *in_thrashing) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_thrashing_start(in_thrashing); } static inline void delayacct_thrashing_end(bool *in_thrashing) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_thrashing_end(in_thrashing); } static inline void delayacct_swapin_start(void) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_swapin_start(); } static inline void delayacct_swapin_end(void) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_swapin_end(); } static inline void delayacct_compact_start(void) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_compact_start(); } static inline void delayacct_compact_end(void) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_compact_end(); } static inline void delayacct_wpcopy_start(void) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_wpcopy_start(); } static inline void delayacct_wpcopy_end(void) { if (!static_branch_unlikely(&delayacct_key)) return; if (current->delays) __delayacct_wpcopy_end(); } static inline void delayacct_irq(struct task_struct *task, u32 delta) { if (!static_branch_unlikely(&delayacct_key)) return; if (task->delays) __delayacct_irq(task, delta); } #else static inline void delayacct_init(void) {} static inline void delayacct_tsk_init(struct task_struct *tsk) {} static inline void delayacct_tsk_free(struct task_struct *tsk) {} static inline void delayacct_blkio_start(void) {} static inline void delayacct_blkio_end(struct task_struct *p) {} static inline int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) { return 0; } static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) { return 0; } static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { return 0; } static inline void 
delayacct_freepages_start(void) {} static inline void delayacct_freepages_end(void) {} static inline void delayacct_thrashing_start(bool *in_thrashing) {} static inline void delayacct_thrashing_end(bool *in_thrashing) {} static inline void delayacct_swapin_start(void) {} static inline void delayacct_swapin_end(void) {} static inline void delayacct_compact_start(void) {} static inline void delayacct_compact_end(void) {} static inline void delayacct_wpcopy_start(void) {} static inline void delayacct_wpcopy_end(void) {} static inline void delayacct_irq(struct task_struct *task, u32 delta) {} #endif /* CONFIG_TASK_DELAY_ACCT */ #endif
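/*
 * Usage sketch for the wrappers above: a blocking region is bracketed by a
 * start/end pair so the wait is charged to current's task_delay_info. The
 * functions below are hypothetical callers (the real ones live in mm/ and the
 * block layer); the thrashing pair passes a bool cookie so nested thrashing
 * waits are not double-counted.
 */
#include <linux/delayacct.h>

static void example_account_reclaim_wait(void)
{
	delayacct_freepages_start();
	/* ... direct-reclaim work that may block goes here ... */
	delayacct_freepages_end();
}

static void example_account_thrashing_wait(void)
{
	bool in_thrashing;

	delayacct_thrashing_start(&in_thrashing);
	/* ... wait for a page that is being refaulted ... */
	delayacct_thrashing_end(&in_thrashing);
}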
139 32 99 99 44 135 103 139 13 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_PORT_H #define _LINUX_TTY_PORT_H #include <linux/kfifo.h> #include <linux/kref.h> #include <linux/mutex.h> #include <linux/tty_buffer.h> #include <linux/wait.h> struct attribute_group; struct tty_driver; struct tty_port; struct tty_struct; /** * struct tty_port_operations -- operations on tty_port * @carrier_raised: return true if the carrier is raised on @port * @dtr_rts: raise the DTR line if @active is true, otherwise lower DTR * @shutdown: called when the last close completes or a hangup finishes IFF the * port was initialized. Do not use to free resources. Turn off the device * only. Called under the port mutex to serialize against @activate and * @shutdown. * @activate: called under the port mutex from tty_port_open(), serialized using * the port mutex. Supposed to turn on the device. * * FIXME: long term getting the tty argument *out* of this would be good * for consoles. * * @destruct: called on the final put of a port. Free resources, possibly incl. * the port itself. */ struct tty_port_operations { bool (*carrier_raised)(struct tty_port *port); void (*dtr_rts)(struct tty_port *port, bool active); void (*shutdown)(struct tty_port *port); int (*activate)(struct tty_port *port, struct tty_struct *tty); void (*destruct)(struct tty_port *port); }; struct tty_port_client_operations { size_t (*receive_buf)(struct tty_port *port, const u8 *cp, const u8 *fp, size_t count); void (*lookahead_buf)(struct tty_port *port, const u8 *cp, const u8 *fp, size_t count); void (*write_wakeup)(struct tty_port *port); }; extern const struct tty_port_client_operations tty_port_default_client_ops; /** * struct tty_port -- port level information * * @buf: buffer for this port, locked internally * @tty: back pointer to &struct tty_struct, valid only if the tty is open. Use * tty_port_tty_get() to obtain it (and tty_kref_put() to release). * @itty: internal back pointer to &struct tty_struct. Avoid this. It should be * eliminated in the long term. * @ops: tty port operations (like activate, shutdown), see &struct * tty_port_operations * @client_ops: tty port client operations (like receive_buf, write_wakeup). * By default, tty_port_default_client_ops is used. * @lock: lock protecting @tty * @blocked_open: # of procs waiting for open in tty_port_block_til_ready() * @count: usage count * @open_wait: open waiters queue (waiting e.g. 
for a carrier) * @delta_msr_wait: modem status change queue (waiting for MSR changes) * @flags: user TTY flags (%ASYNC_) * @iflags: internal flags (%TTY_PORT_) * @console: when set, the port is a console * @mutex: locking, for open, shutdown and other port operations * @buf_mutex: @xmit_buf alloc lock * @xmit_buf: optional xmit buffer used by some drivers * @xmit_fifo: optional xmit buffer used by some drivers * @close_delay: delay in jiffies to wait when closing the port * @closing_wait: delay in jiffies for output to be sent before closing * @drain_delay: set to zero if no pure time based drain is needed else set to * size of fifo * @kref: references counter. Reaching zero calls @ops->destruct() if non-%NULL * or frees the port otherwise. * @client_data: pointer to private data, for @client_ops * * Each device keeps its own port level information. &struct tty_port was * introduced as a common structure for such information. As every TTY device * shall have a backing tty_port structure, every driver can use these members. * * The tty port has a different lifetime to the tty so must be kept apart. * In addition be careful as tty -> port mappings are valid for the life * of the tty object but in many cases port -> tty mappings are valid only * until a hangup so don't use the wrong path. * * Tty port shall be initialized by tty_port_init() and shut down either by * tty_port_destroy() (refcounting not used), or tty_port_put() (refcounting). * * There is a lot of helpers around &struct tty_port too. To name the most * significant ones: tty_port_open(), tty_port_close() (or * tty_port_close_start() and tty_port_close_end() separately if need be), and * tty_port_hangup(). These call @ops->activate() and @ops->shutdown() as * needed. */ struct tty_port { struct tty_bufhead buf; struct tty_struct *tty; struct tty_struct *itty; const struct tty_port_operations *ops; const struct tty_port_client_operations *client_ops; spinlock_t lock; int blocked_open; int count; wait_queue_head_t open_wait; wait_queue_head_t delta_msr_wait; unsigned long flags; unsigned long iflags; unsigned char console:1; struct mutex mutex; struct mutex buf_mutex; u8 *xmit_buf; DECLARE_KFIFO_PTR(xmit_fifo, u8); unsigned int close_delay; unsigned int closing_wait; int drain_delay; struct kref kref; void *client_data; }; /* tty_port::iflags bits -- use atomic bit ops */ #define TTY_PORT_INITIALIZED 0 /* device is initialized */ #define TTY_PORT_SUSPENDED 1 /* device is suspended */ #define TTY_PORT_ACTIVE 2 /* device is open */ /* * uart drivers: use the uart_port::status field and the UPSTAT_* defines * for s/w-based flow control steering and carrier detection status */ #define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ #define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ #define TTY_PORT_KOPENED 5 /* device exclusively opened by kernel */ void tty_port_init(struct tty_port *port); void tty_port_link_device(struct tty_port *port, struct tty_driver *driver, unsigned index); struct device *tty_port_register_device(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *device); struct device *tty_port_register_device_attr(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *device, void *drvdata, const struct attribute_group **attr_grp); struct device *tty_port_register_device_serdev(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *host, struct device *parent); struct device *tty_port_register_device_attr_serdev(struct tty_port *port, 
struct tty_driver *driver, unsigned index, struct device *host, struct device *parent, void *drvdata, const struct attribute_group **attr_grp); void tty_port_unregister_device(struct tty_port *port, struct tty_driver *driver, unsigned index); int tty_port_alloc_xmit_buf(struct tty_port *port); void tty_port_free_xmit_buf(struct tty_port *port); void tty_port_destroy(struct tty_port *port); void tty_port_put(struct tty_port *port); static inline struct tty_port *tty_port_get(struct tty_port *port) { if (port && kref_get_unless_zero(&port->kref)) return port; return NULL; } /* If the cts flow control is enabled, return true. */ static inline bool tty_port_cts_enabled(const struct tty_port *port) { return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); } static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) { assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val); } static inline bool tty_port_active(const struct tty_port *port) { return test_bit(TTY_PORT_ACTIVE, &port->iflags); } static inline void tty_port_set_active(struct tty_port *port, bool val) { assign_bit(TTY_PORT_ACTIVE, &port->iflags, val); } static inline bool tty_port_check_carrier(const struct tty_port *port) { return test_bit(TTY_PORT_CHECK_CD, &port->iflags); } static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) { assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val); } static inline bool tty_port_suspended(const struct tty_port *port) { return test_bit(TTY_PORT_SUSPENDED, &port->iflags); } static inline void tty_port_set_suspended(struct tty_port *port, bool val) { assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val); } static inline bool tty_port_initialized(const struct tty_port *port) { return test_bit(TTY_PORT_INITIALIZED, &port->iflags); } static inline void tty_port_set_initialized(struct tty_port *port, bool val) { assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val); } static inline bool tty_port_kopened(const struct tty_port *port) { return test_bit(TTY_PORT_KOPENED, &port->iflags); } static inline void tty_port_set_kopened(struct tty_port *port, bool val) { assign_bit(TTY_PORT_KOPENED, &port->iflags, val); } struct tty_struct *tty_port_tty_get(struct tty_port *port); void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); bool tty_port_carrier_raised(struct tty_port *port); void tty_port_raise_dtr_rts(struct tty_port *port); void tty_port_lower_dtr_rts(struct tty_port *port); void tty_port_hangup(struct tty_port *port); void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); void tty_port_tty_wakeup(struct tty_port *port); int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty, struct file *filp); int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct file *filp); void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); void tty_port_close(struct tty_port *port, struct tty_struct *tty, struct file *filp); int tty_port_install(struct tty_port *port, struct tty_driver *driver, struct tty_struct *tty); int tty_port_open(struct tty_port *port, struct tty_struct *tty, struct file *filp); static inline int tty_port_users(struct tty_port *port) { return port->count + port->blocked_open; } #endif
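/*
 * Driver-side sketch of how struct tty_port is typically embedded and wired
 * up. "example_uart" and its hooks are hypothetical; only the tty_port_*()
 * helpers and the tty_port_operations callbacks come from the header above.
 */
#include <linux/tty.h>
#include <linux/tty_port.h>

struct example_uart {
	struct tty_port port;
	/* device state: registers, clocks, ... */
};

static int example_activate(struct tty_port *port, struct tty_struct *tty)
{
	/* First open: power up and start the hardware. Called by
	 * tty_port_open() under the port mutex. */
	return 0;
}

static void example_shutdown(struct tty_port *port)
{
	/* Last close or hangup: quiesce the hardware, do not free it. */
}

static const struct tty_port_operations example_port_ops = {
	.activate = example_activate,
	.shutdown = example_shutdown,
};

static void example_uart_port_setup(struct example_uart *uart)
{
	tty_port_init(&uart->port);
	uart->port.ops = &example_port_ops;
}

/* The tty_operations open/close hooks then delegate to the port helpers: */
static int example_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct example_uart *uart = tty->driver_data;

	return tty_port_open(&uart->port, tty, filp);
}

static void example_tty_close(struct tty_struct *tty, struct file *filp)
{
	struct example_uart *uart = tty->driver_data;

	tty_port_close(&uart->port, tty, filp);
}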
870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 // SPDX-License-Identifier: GPL-2.0-only /* * Fence mechanism for dma-buf and to allow for asynchronous dma access * * Copyright (C) 2012 Canonical Ltd * Copyright (C) 2012 Texas Instruments * * Authors: * Rob Clark <robdclark@gmail.com> * Maarten Lankhorst <maarten.lankhorst@canonical.com> */ #include <linux/slab.h> #include <linux/export.h> #include <linux/atomic.h> #include <linux/dma-fence.h> #include <linux/sched/signal.h> #include <linux/seq_file.h> #define CREATE_TRACE_POINTS #include <trace/events/dma_fence.h> EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal); EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled); static DEFINE_SPINLOCK(dma_fence_stub_lock); static struct dma_fence dma_fence_stub; /* * fence context counter: each execution context should have its own * fence context, this allows checking if fences belong to the same * context or not. One device can have multiple separate contexts, * and they're used if some engine can run independently of another. */ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1); /** * DOC: DMA fences overview * * DMA fences, represented by &struct dma_fence, are the kernel internal * synchronization primitive for DMA operations like GPU rendering, video * encoding/decoding, or displaying buffers on a screen. * * A fence is initialized using dma_fence_init() and completed using * dma_fence_signal(). Fences are associated with a context, allocated through * dma_fence_context_alloc(), and all fences on the same context are * fully ordered. * * Since the purposes of fences is to facilitate cross-device and * cross-application synchronization, there's multiple ways to use one: * * - Individual fences can be exposed as a &sync_file, accessed as a file * descriptor from userspace, created by calling sync_file_create(). This is * called explicit fencing, since userspace passes around explicit * synchronization points. * * - Some subsystems also have their own explicit fencing primitives, like * &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying * fence to be updated. * * - Then there's also implicit fencing, where the synchronization points are * implicitly passed around as part of shared &dma_buf instances. Such * implicit fences are stored in &struct dma_resv through the * &dma_buf.resv pointer. */ /** * DOC: fence cross-driver contract * * Since &dma_fence provide a cross driver contract, all drivers must follow the * same rules: * * * Fences must complete in a reasonable time. Fences which represent kernels * and shaders submitted by userspace, which could run forever, must be backed * up by timeout and gpu hang recovery code. Minimally that code must prevent * further command submission and force complete all in-flight fences, e.g. * when the driver or hardware do not support gpu reset, or if the gpu reset * failed for some reason. 
Ideally the driver supports gpu recovery which only * affects the offending userspace context, and no other userspace * submissions. * * * Drivers may have different ideas of what completion within a reasonable * time means. Some hang recovery code uses a fixed timeout, others a mix * between observing forward progress and increasingly strict timeouts. * Drivers should not try to second guess timeout handling of fences from * other drivers. * * * To ensure there's no deadlocks of dma_fence_wait() against other locks * drivers should annotate all code required to reach dma_fence_signal(), * which completes the fences, with dma_fence_begin_signalling() and * dma_fence_end_signalling(). * * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock(). * This means any code required for fence completion cannot acquire a * &dma_resv lock. Note that this also pulls in the entire established * locking hierarchy around dma_resv_lock() and dma_resv_unlock(). * * * Drivers are allowed to call dma_fence_wait() from their &shrinker * callbacks. This means any code required for fence completion cannot * allocate memory with GFP_KERNEL. * * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier * respectively &mmu_interval_notifier callbacks. This means any code required * for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO. * Only GFP_ATOMIC is permissible, which might fail. * * Note that only GPU drivers have a reasonable excuse for both requiring * &mmu_interval_notifier and &shrinker callbacks at the same time as having to * track asynchronous compute work using &dma_fence. No driver outside of * drivers/gpu should ever call dma_fence_wait() in such contexts. */ static const char *dma_fence_stub_get_name(struct dma_fence *fence) { return "stub"; } static const struct dma_fence_ops dma_fence_stub_ops = { .get_driver_name = dma_fence_stub_get_name, .get_timeline_name = dma_fence_stub_get_name, }; /** * dma_fence_get_stub - return a signaled fence * * Return a stub fence which is already signaled. The fence's * timestamp corresponds to the first time after boot this * function is called. */ struct dma_fence *dma_fence_get_stub(void) { spin_lock(&dma_fence_stub_lock); if (!dma_fence_stub.ops) { dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops, &dma_fence_stub_lock, 0, 0); set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &dma_fence_stub.flags); dma_fence_signal_locked(&dma_fence_stub); } spin_unlock(&dma_fence_stub_lock); return dma_fence_get(&dma_fence_stub); } EXPORT_SYMBOL(dma_fence_get_stub); /** * dma_fence_allocate_private_stub - return a private, signaled fence * @timestamp: timestamp when the fence was signaled * * Return a newly allocated and signaled stub fence. */ struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp) { struct dma_fence *fence; fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (fence == NULL) return NULL; dma_fence_init(fence, &dma_fence_stub_ops, &dma_fence_stub_lock, 0, 0); set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); dma_fence_signal_timestamp(fence, timestamp); return fence; } EXPORT_SYMBOL(dma_fence_allocate_private_stub); /** * dma_fence_context_alloc - allocate an array of fence contexts * @num: amount of contexts to allocate * * This function will return the first index of the number of fence contexts * allocated. The fence context is used for setting &dma_fence.context to a * unique number by passing the context to dma_fence_init(). 
*/ u64 dma_fence_context_alloc(unsigned num) { WARN_ON(!num); return atomic64_fetch_add(num, &dma_fence_context_counter); } EXPORT_SYMBOL(dma_fence_context_alloc); /** * DOC: fence signalling annotation * * Proving correctness of all the kernel code around &dma_fence through code * review and testing is tricky for a few reasons: * * * It is a cross-driver contract, and therefore all drivers must follow the * same rules for lock nesting order, calling contexts for various functions * and anything else significant for in-kernel interfaces. But it is also * impossible to test all drivers in a single machine, hence brute-force N vs. * N testing of all combinations is impossible. Even just limiting to the * possible combinations is infeasible. * * * There is an enormous amount of driver code involved. For render drivers * there's the tail of command submission, after fences are published, * scheduler code, interrupt and workers to process job completion, * and timeout, gpu reset and gpu hang recovery code. Plus for integration * with core mm with have &mmu_notifier, respectively &mmu_interval_notifier, * and &shrinker. For modesetting drivers there's the commit tail functions * between when fences for an atomic modeset are published, and when the * corresponding vblank completes, including any interrupt processing and * related workers. Auditing all that code, across all drivers, is not * feasible. * * * Due to how many other subsystems are involved and the locking hierarchies * this pulls in there is extremely thin wiggle-room for driver-specific * differences. &dma_fence interacts with almost all of the core memory * handling through page fault handlers via &dma_resv, dma_resv_lock() and * dma_resv_unlock(). On the other side it also interacts through all * allocation sites through &mmu_notifier and &shrinker. * * Furthermore lockdep does not handle cross-release dependencies, which means * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught * at runtime with some quick testing. The simplest example is one thread * waiting on a &dma_fence while holding a lock:: * * lock(A); * dma_fence_wait(B); * unlock(A); * * while the other thread is stuck trying to acquire the same lock, which * prevents it from signalling the fence the previous thread is stuck waiting * on:: * * lock(A); * unlock(A); * dma_fence_signal(B); * * By manually annotating all code relevant to signalling a &dma_fence we can * teach lockdep about these dependencies, which also helps with the validation * headache since now lockdep can check all the rules for us:: * * cookie = dma_fence_begin_signalling(); * lock(A); * unlock(A); * dma_fence_signal(B); * dma_fence_end_signalling(cookie); * * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to * annotate critical sections the following rules need to be observed: * * * All code necessary to complete a &dma_fence must be annotated, from the * point where a fence is accessible to other threads, to the point where * dma_fence_signal() is called. Un-annotated code can contain deadlock issues, * and due to the very strict rules and many corner cases it is infeasible to * catch these just with review or normal stress testing. * * * &struct dma_resv deserves a special note, since the readers are only * protected by rcu. This means the signalling critical section starts as soon * as the new fences are installed, even before dma_resv_unlock() is called. 
* * * The only exception are fast paths and opportunistic signalling code, which * calls dma_fence_signal() purely as an optimization, but is not required to * guarantee completion of a &dma_fence. The usual example is a wait IOCTL * which calls dma_fence_signal(), while the mandatory completion path goes * through a hardware interrupt and possible job completion worker. * * * To aid composability of code, the annotations can be freely nested, as long * as the overall locking hierarchy is consistent. The annotations also work * both in interrupt and process context. Due to implementation details this * requires that callers pass an opaque cookie from * dma_fence_begin_signalling() to dma_fence_end_signalling(). * * * Validation against the cross driver contract is implemented by priming * lockdep with the relevant hierarchy at boot-up. This means even just * testing with a single device is enough to validate a driver, at least as * far as deadlocks with dma_fence_wait() against dma_fence_signal() are * concerned. */ #ifdef CONFIG_LOCKDEP static struct lockdep_map dma_fence_lockdep_map = { .name = "dma_fence_map" }; /** * dma_fence_begin_signalling - begin a critical DMA fence signalling section * * Drivers should use this to annotate the beginning of any code section * required to eventually complete &dma_fence by calling dma_fence_signal(). * * The end of these critical sections are annotated with * dma_fence_end_signalling(). * * Returns: * * Opaque cookie needed by the implementation, which needs to be passed to * dma_fence_end_signalling(). */ bool dma_fence_begin_signalling(void) { /* explicitly nesting ... */ if (lock_is_held_type(&dma_fence_lockdep_map, 1)) return true; /* rely on might_sleep check for soft/hardirq locks */ if (in_atomic()) return true; /* ... and non-recursive successful read_trylock */ lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_); return false; } EXPORT_SYMBOL(dma_fence_begin_signalling); /** * dma_fence_end_signalling - end a critical DMA fence signalling section * @cookie: opaque cookie from dma_fence_begin_signalling() * * Closes a critical section annotation opened by dma_fence_begin_signalling(). */ void dma_fence_end_signalling(bool cookie) { if (cookie) return; lock_release(&dma_fence_lockdep_map, _RET_IP_); } EXPORT_SYMBOL(dma_fence_end_signalling); void __dma_fence_might_wait(void) { bool tmp; tmp = lock_is_held_type(&dma_fence_lockdep_map, 1); if (tmp) lock_release(&dma_fence_lockdep_map, _THIS_IP_); lock_map_acquire(&dma_fence_lockdep_map); lock_map_release(&dma_fence_lockdep_map); if (tmp) lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _THIS_IP_); } #endif /** * dma_fence_signal_timestamp_locked - signal completion of a fence * @fence: the fence to signal * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. Set the timestamp provided as the fence * signal timestamp. * * Unlike dma_fence_signal_timestamp(), this function must be called with * &dma_fence.lock held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. 
*/ int dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp) { struct dma_fence_cb *cur, *tmp; struct list_head cb_list; lockdep_assert_held(fence->lock); if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) return -EINVAL; /* Stash the cb_list before replacing it with the timestamp */ list_replace(&fence->cb_list, &cb_list); fence->timestamp = timestamp; set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); trace_dma_fence_signaled(fence); list_for_each_entry_safe(cur, tmp, &cb_list, node) { INIT_LIST_HEAD(&cur->node); cur->func(fence, cur); } return 0; } EXPORT_SYMBOL(dma_fence_signal_timestamp_locked); /** * dma_fence_signal_timestamp - signal completion of a fence * @fence: the fence to signal * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. Set the timestamp provided as the fence * signal timestamp. * * Returns 0 on success and a negative error value when @fence has been * signalled already. */ int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) { unsigned long flags; int ret; if (WARN_ON(!fence)) return -EINVAL; spin_lock_irqsave(fence->lock, flags); ret = dma_fence_signal_timestamp_locked(fence, timestamp); spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_signal_timestamp); /** * dma_fence_signal_locked - signal completion of a fence * @fence: the fence to signal * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. * * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock * held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. */ int dma_fence_signal_locked(struct dma_fence *fence) { return dma_fence_signal_timestamp_locked(fence, ktime_get()); } EXPORT_SYMBOL(dma_fence_signal_locked); /** * dma_fence_signal - signal completion of a fence * @fence: the fence to signal * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. * * Returns 0 on success and a negative error value when @fence has been * signalled already. 
*/ int dma_fence_signal(struct dma_fence *fence) { unsigned long flags; int ret; bool tmp; if (WARN_ON(!fence)) return -EINVAL; tmp = dma_fence_begin_signalling(); spin_lock_irqsave(fence->lock, flags); ret = dma_fence_signal_timestamp_locked(fence, ktime_get()); spin_unlock_irqrestore(fence->lock, flags); dma_fence_end_signalling(tmp); return ret; } EXPORT_SYMBOL(dma_fence_signal); /** * dma_fence_wait_timeout - sleep until the fence gets signaled * or until timeout elapses * @fence: the fence to wait on * @intr: if true, do an interruptible wait * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the * remaining timeout in jiffies on success. Other error values may be * returned on custom implementations. * * Performs a synchronous wait on this fence. It is assumed the caller * directly or indirectly (buf-mgr between reservation and committing) * holds a reference to the fence, otherwise the fence might be * freed before return, resulting in undefined behavior. * * See also dma_fence_wait() and dma_fence_wait_any_timeout(). */ signed long dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) { signed long ret; if (WARN_ON(timeout < 0)) return -EINVAL; might_sleep(); __dma_fence_might_wait(); dma_fence_enable_sw_signaling(fence); trace_dma_fence_wait_start(fence); if (fence->ops->wait) ret = fence->ops->wait(fence, intr, timeout); else ret = dma_fence_default_wait(fence, intr, timeout); trace_dma_fence_wait_end(fence); return ret; } EXPORT_SYMBOL(dma_fence_wait_timeout); /** * dma_fence_release - default release function for fences * @kref: &dma_fence.refcount * * This is the default release function for &dma_fence. Drivers shouldn't call * this directly, but instead call dma_fence_put(). */ void dma_fence_release(struct kref *kref) { struct dma_fence *fence = container_of(kref, struct dma_fence, refcount); trace_dma_fence_destroy(fence); if (WARN(!list_empty(&fence->cb_list) && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags), "Fence %s:%s:%llx:%llx released with pending signals!\n", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->context, fence->seqno)) { unsigned long flags; /* * Failed to signal before release, likely a refcounting issue. * * This should never happen, but if it does make sure that we * don't leave chains dangling. We set the error flag first * so that the callbacks know this signal is due to an error. */ spin_lock_irqsave(fence->lock, flags); fence->error = -EDEADLK; dma_fence_signal_locked(fence); spin_unlock_irqrestore(fence->lock, flags); } if (fence->ops->release) fence->ops->release(fence); else dma_fence_free(fence); } EXPORT_SYMBOL(dma_fence_release); /** * dma_fence_free - default release function for &dma_fence. * @fence: fence to release * * This is the default implementation for &dma_fence_ops.release. It calls * kfree_rcu() on @fence.
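 *
 * Note that this default only works when the &dma_fence is the start of its
 * own allocation. A driver embedding the fence in a larger object would
 * typically provide its own &dma_fence_ops.release instead; a hedged sketch
 * (struct my_job, with a struct rcu_head member named rcu, is purely
 * illustrative):
 *
 *	static void my_job_fence_release(struct dma_fence *f)
 *	{
 *		struct my_job *job = container_of(f, struct my_job, fence);
 *
 *		kfree_rcu(job, rcu);
 *	}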
*/ void dma_fence_free(struct dma_fence *fence) { kfree_rcu(fence, rcu); } EXPORT_SYMBOL(dma_fence_free); static bool __dma_fence_enable_signaling(struct dma_fence *fence) { bool was_set; lockdep_assert_held(fence->lock); was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return false; if (!was_set && fence->ops->enable_signaling) { trace_dma_fence_enable_signal(fence); if (!fence->ops->enable_signaling(fence)) { dma_fence_signal_locked(fence); return false; } } return true; } /** * dma_fence_enable_sw_signaling - enable signaling on fence * @fence: the fence to enable * * This will request for sw signaling to be enabled, to make the fence * complete as soon as possible. This calls &dma_fence_ops.enable_signaling * internally. */ void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags; spin_lock_irqsave(fence->lock, flags); __dma_fence_enable_signaling(fence); spin_unlock_irqrestore(fence->lock, flags); } EXPORT_SYMBOL(dma_fence_enable_sw_signaling); /** * dma_fence_add_callback - add a callback to be called when the fence * is signaled * @fence: the fence to wait on * @cb: the callback to register * @func: the function to call * * Add a software callback to the fence. The caller should keep a reference to * the fence. * * @cb will be initialized by dma_fence_add_callback(), no initialization * by the caller is required. Any number of callbacks can be registered * to a fence, but a callback can only be registered to one fence at a time. * * If fence is already signaled, this function will return -ENOENT (and * *not* call the callback). * * Note that the callback can be called from an atomic context or irq context. * * Returns 0 in case of success, -ENOENT if the fence is already signaled * and -EINVAL in case of error. */ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, dma_fence_func_t func) { unsigned long flags; int ret = 0; if (WARN_ON(!fence || !func)) return -EINVAL; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { INIT_LIST_HEAD(&cb->node); return -ENOENT; } spin_lock_irqsave(fence->lock, flags); if (__dma_fence_enable_signaling(fence)) { cb->func = func; list_add_tail(&cb->node, &fence->cb_list); } else { INIT_LIST_HEAD(&cb->node); ret = -ENOENT; } spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_add_callback); /** * dma_fence_get_status - returns the status upon completion * @fence: the dma_fence to query * * This wraps dma_fence_get_status_locked() to return the error status * condition on a signaled fence. See dma_fence_get_status_locked() for more * details. * * Returns 0 if the fence has not yet been signaled, 1 if the fence has * been signaled without an error condition, or a negative error code * if the fence has been completed in err. */ int dma_fence_get_status(struct dma_fence *fence) { unsigned long flags; int status; spin_lock_irqsave(fence->lock, flags); status = dma_fence_get_status_locked(fence); spin_unlock_irqrestore(fence->lock, flags); return status; } EXPORT_SYMBOL(dma_fence_get_status); /** * dma_fence_remove_callback - remove a callback from the signaling list * @fence: the fence to wait on * @cb: the callback to remove * * Remove a previously queued callback from the fence. This function returns * true if the callback is successfully removed, or false if the fence has * already been signaled. 
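 *
 * A hedged cancellation sketch (see also the warning below; struct my_waiter
 * and its members are illustrative only, assuming the registered callback
 * completes @done):
 *
 *	if (dma_fence_remove_callback(fence, &waiter->cb))
 *		kfree(waiter);				// callback will never run
 *	else
 *		wait_for_completion(&waiter->done);	// it ran, or is running now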
* * *WARNING*: * Cancelling a callback should only be done if you really know what you're * doing, since deadlocks and race conditions could occur all too easily. For * this reason, it should only ever be done on hardware lockup recovery, * with a reference held to the fence. * * Behaviour is undefined if @cb has not been added to @fence using * dma_fence_add_callback() beforehand. */ bool dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb) { unsigned long flags; bool ret; spin_lock_irqsave(fence->lock, flags); ret = !list_empty(&cb->node); if (ret) list_del_init(&cb->node); spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_remove_callback); struct default_wait_cb { struct dma_fence_cb base; struct task_struct *task; }; static void dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct default_wait_cb *wait = container_of(cb, struct default_wait_cb, base); wake_up_state(wait->task, TASK_NORMAL); } /** * dma_fence_default_wait - default sleep until the fence gets signaled * or until timeout elapses * @fence: the fence to wait on * @intr: if true, do an interruptible wait * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the * remaining timeout in jiffies on success. If timeout is zero the value one is * returned if the fence is already signaled for consistency with other * functions taking a jiffies timeout. */ signed long dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) { struct default_wait_cb cb; unsigned long flags; signed long ret = timeout ? timeout : 1; spin_lock_irqsave(fence->lock, flags); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) goto out; if (intr && signal_pending(current)) { ret = -ERESTARTSYS; goto out; } if (!timeout) { ret = 0; goto out; } cb.base.func = dma_fence_default_wait_cb; cb.task = current; list_add(&cb.base.node, &fence->cb_list); while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { if (intr) __set_current_state(TASK_INTERRUPTIBLE); else __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irqrestore(fence->lock, flags); ret = schedule_timeout(ret); spin_lock_irqsave(fence->lock, flags); if (ret > 0 && intr && signal_pending(current)) ret = -ERESTARTSYS; } if (!list_empty(&cb.base.node)) list_del(&cb.base.node); __set_current_state(TASK_RUNNING); out: spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_default_wait); static bool dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, uint32_t *idx) { int i; for (i = 0; i < count; ++i) { struct dma_fence *fence = fences[i]; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { if (idx) *idx = i; return true; } } return false; } /** * dma_fence_wait_any_timeout - sleep until any fence gets signaled * or until timeout elapses * @fences: array of fences to wait on * @count: number of fences to wait on * @intr: if true, do an interruptible wait * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * @idx: used to store the first signaled fence index, meaningful only on * positive return * * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies * on success. * * Synchronous waits for the first fence in the array to be signaled. 
The * caller needs to hold a reference to all fences in the array, otherwise a * fence might be freed before return, resulting in undefined behavior. * * See also dma_fence_wait() and dma_fence_wait_timeout(). */ signed long dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, bool intr, signed long timeout, uint32_t *idx) { struct default_wait_cb *cb; signed long ret = timeout; unsigned i; if (WARN_ON(!fences || !count || timeout < 0)) return -EINVAL; if (timeout == 0) { for (i = 0; i < count; ++i) if (dma_fence_is_signaled(fences[i])) { if (idx) *idx = i; return 1; } return 0; } cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL); if (cb == NULL) { ret = -ENOMEM; goto err_free_cb; } for (i = 0; i < count; ++i) { struct dma_fence *fence = fences[i]; cb[i].task = current; if (dma_fence_add_callback(fence, &cb[i].base, dma_fence_default_wait_cb)) { /* This fence is already signaled */ if (idx) *idx = i; goto fence_rm_cb; } } while (ret > 0) { if (intr) set_current_state(TASK_INTERRUPTIBLE); else set_current_state(TASK_UNINTERRUPTIBLE); if (dma_fence_test_signaled_any(fences, count, idx)) break; ret = schedule_timeout(ret); if (ret > 0 && intr && signal_pending(current)) ret = -ERESTARTSYS; } __set_current_state(TASK_RUNNING); fence_rm_cb: while (i-- > 0) dma_fence_remove_callback(fences[i], &cb[i].base); err_free_cb: kfree(cb); return ret; } EXPORT_SYMBOL(dma_fence_wait_any_timeout); /** * DOC: deadline hints * * In an ideal world, it would be possible to pipeline a workload sufficiently * that a utilization based device frequency governor could arrive at a minimum * frequency that meets the requirements of the use-case, in order to minimize * power consumption. But in the real world there are many workloads which * defy this ideal. For example, but not limited to: * * * Workloads that ping-pong between device and CPU, with alternating periods * of CPU waiting for device, and device waiting on CPU. This can result in * devfreq and cpufreq seeing idle time in their respective domains and in * result reduce frequency. * * * Workloads that interact with a periodic time based deadline, such as double * buffered GPU rendering vs vblank sync'd page flipping. In this scenario, * missing a vblank deadline results in an *increase* in idle time on the GPU * (since it has to wait an additional vblank period), sending a signal to * the GPU's devfreq to reduce frequency, when in fact the opposite is what is * needed. * * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline * (or indirectly via userspace facing ioctls like &sync_set_deadline). * The deadline hint provides a way for the waiting driver, or userspace, to * convey an appropriate sense of urgency to the signaling driver. * * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace * facing APIs). The time could either be some point in the future (such as * the vblank based deadline for page-flipping, or the start of a compositor's * composition cycle), or the current time to indicate an immediate deadline * hint (Ie. forward progress cannot be made until this fence is signaled). * * Multiple deadlines may be set on a given fence, even in parallel. See the * documentation for &dma_fence_ops.set_deadline. * * The deadline hint is just that, a hint. The driver that created the fence * may react by increasing frequency, making different scheduling choices, etc. * Or doing nothing at all. 
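 *
 * A hedged illustration of both kinds of hint (the values are arbitrary and
 * not taken from any real driver):
 *
 *	// immediate hint: forward progress is blocked on this fence
 *	dma_fence_set_deadline(fence, ktime_get());
 *
 *	// future hint: the fence is wanted by roughly the next vblank
 *	dma_fence_set_deadline(fence, ktime_add_ms(ktime_get(), 16));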
*/ /** * dma_fence_set_deadline - set desired fence-wait deadline hint * @fence: the fence that is to be waited on * @deadline: the time by which the waiter hopes for the fence to be * signaled * * Give the fence signaler a hint about an upcoming deadline, such as * vblank, by which point the waiter would prefer the fence to be * signaled. This is intended to give feedback to the fence signaler * to aid in power management decisions, such as boosting GPU frequency * if a periodic vblank deadline is approaching but the fence is not * yet signaled. */ void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) { if (fence->ops->set_deadline && !dma_fence_is_signaled(fence)) fence->ops->set_deadline(fence, deadline); } EXPORT_SYMBOL(dma_fence_set_deadline); /** * dma_fence_describe - Dump fence description into seq_file * @fence: the fence to describe * @seq: the seq_file to put the textual description into * * Dump a textual description of the fence and its state into the seq_file. */ void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq) { seq_printf(seq, "%s %s seq %llu %ssignalled\n", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->seqno, dma_fence_is_signaled(fence) ? "" : "un"); } EXPORT_SYMBOL(dma_fence_describe); /** * dma_fence_init - Initialize a custom fence. * @fence: the fence to initialize * @ops: the dma_fence_ops for operations on this fence * @lock: the irqsafe spinlock to use for locking this fence * @context: the execution context this fence is run on * @seqno: a linearly increasing sequence number for this context * * Initializes an allocated fence; the caller doesn't have to keep its * refcount after committing with this fence, but it will need to hold a * refcount again if &dma_fence_ops.enable_signaling gets called. * * context and seqno are used for easy comparison between fences, allowing * callers to check which fence is later by simply using dma_fence_later(). */ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, spinlock_t *lock, u64 context, u64 seqno) { BUG_ON(!lock); BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name); kref_init(&fence->refcount); fence->ops = ops; INIT_LIST_HEAD(&fence->cb_list); fence->lock = lock; fence->context = context; fence->seqno = seqno; fence->flags = 0UL; fence->error = 0; trace_dma_fence_init(fence); } EXPORT_SYMBOL(dma_fence_init);
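/*
 * A hedged, self-contained sketch of how a driver might tie the exported
 * helpers above together. It is not part of dma-fence.c; my_fence_ops,
 * my_submit_job() and my_job_done() are illustrative names only, and real
 * drivers embed the fence in their job structure and handle references and
 * submission locking properly. The default &dma_fence_ops.release
 * (dma_fence_free()) suffices here because the fence is its own allocation.
 */
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_timeline_lock);
static u64 my_timeline_context;
static u64 my_timeline_seqno;

static const char *my_fence_get_driver_name(struct dma_fence *f)
{
	return "my-driver";
}

static const char *my_fence_get_timeline_name(struct dma_fence *f)
{
	return "my-timeline";
}

/* get_driver_name and get_timeline_name are the only mandatory callbacks */
static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name	= my_fence_get_driver_name,
	.get_timeline_name	= my_fence_get_timeline_name,
};

/* Allocate and initialize a fence for a newly submitted (hypothetical) job. */
static struct dma_fence *my_submit_job(void)
{
	struct dma_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	if (!my_timeline_context)
		my_timeline_context = dma_fence_context_alloc(1);

	/* seqno increment is unserialized here purely for brevity */
	dma_fence_init(fence, &my_fence_ops, &my_timeline_lock,
		       my_timeline_context, ++my_timeline_seqno);
	return fence;
}

/* Completion path, e.g. called from the driver's interrupt handler. */
static void my_job_done(struct dma_fence *fence)
{
	bool cookie;

	/* nothing inside this section may wait on a dma_fence */
	cookie = dma_fence_begin_signalling();
	dma_fence_signal(fence);
	dma_fence_end_signalling(cookie);
}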
/* * linux/drivers/video/fbcon.c -- Low level frame buffer based console driver * * Copyright (C) 1995 Geert Uytterhoeven * * * This file is based on the original Amiga console driver (amicon.c): * * Copyright (C) 1993 Hamish Macdonald * Greg Harp * Copyright (C) 1994 David Carter [carter@compsci.bristol.ac.uk] * * with work by William Rucklidge (wjr@cs.cornell.edu) * Geert Uytterhoeven * Jes Sorensen (jds@kom.auc.dk) * Martin Apel * * and on the original Atari console driver (atacon.c): * * Copyright (C) 1993 Bjoern Brauel * Roman Hodek * * with work by Guenther Kelleter * Martin Schaller * Andreas Schwab * * Hardware cursor support added by Emmanuel Marty (core@ggi-project.org) * Smart redraw scrolling, arbitrary font width support, 512char font support * and software scrollback added by * Jakub Jelinek (jj@ultra.linux.cz) * * Random hacking by Martin Mares <mj@ucw.cz> * *
2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * The low level operations for the various display memory organizations are * now in separate source files. * * Currently the following organizations are supported: * * o afb Amiga bitplanes * o cfb{2,4,8,16,24,32} Packed pixels * o ilbm Amiga interleaved bitplanes * o iplan2p[248] Atari interleaved bitplanes * o mfb Monochrome * o vga VGA characters/attributes * * To do: * * - Implement 16 plane mode (iplan2p16) * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/delay.h> /* MSch: for IRQ probe */ #include <linux/console.h> #include <linux/string.h> #include <linux/kd.h> #include <linux/panic.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/fb.h> #include <linux/fbcon.h> #include <linux/vt_kern.h> #include <linux/selection.h> #include <linux/font.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/crc32.h> /* For counting font checksums */ #include <linux/uaccess.h> #include <asm/irq.h> #include "fbcon.h" #include "fb_internal.h" /* * FIXME: Locking * * - fbcon state itself is protected by the console_lock, and the code does a * pretty good job at making sure that lock is held everywhere it's needed. * * - fbcon doesn't bother with fb_lock/unlock at all. This is buggy, since it * means concurrent access to the same fbdev from both fbcon and userspace * will blow up. To fix this all fbcon calls from fbmem.c need to be moved out * of fb_lock/unlock protected sections, since otherwise we'll recurse and * deadlock eventually. Aside: Due to these deadlock issues the fbdev code in * fbmem.c cannot use locking asserts, and there's lots of callers which get * the rules wrong, e.g. fbsysfs.c entirely missed fb_lock/unlock calls too. */ enum { FBCON_LOGO_CANSHOW = -1, /* the logo can be shown */ FBCON_LOGO_DRAW = -2, /* draw the logo to a console */ FBCON_LOGO_DONTSHOW = -3 /* do not show the logo */ }; static struct fbcon_display fb_display[MAX_NR_CONSOLES]; static struct fb_info *fbcon_registered_fb[FB_MAX]; static int fbcon_num_registered_fb; #define fbcon_for_each_registered_fb(i) \ for (i = 0; WARN_CONSOLE_UNLOCKED(), i < FB_MAX; i++) \ if (!fbcon_registered_fb[i]) {} else static signed char con2fb_map[MAX_NR_CONSOLES]; static signed char con2fb_map_boot[MAX_NR_CONSOLES]; static struct fb_info *fbcon_info_from_console(int console) { WARN_CONSOLE_UNLOCKED(); return fbcon_registered_fb[con2fb_map[console]]; } static int logo_lines; /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO enums. 
*/ static int logo_shown = FBCON_LOGO_CANSHOW; /* console mappings */ static unsigned int first_fb_vc; static unsigned int last_fb_vc = MAX_NR_CONSOLES - 1; static int fbcon_is_default = 1; static int primary_device = -1; static int fbcon_has_console_bind; #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY static int map_override; static inline void fbcon_map_override(void) { map_override = 1; } #else static inline void fbcon_map_override(void) { } #endif /* CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY */ #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER static bool deferred_takeover = true; #else #define deferred_takeover false #endif /* font data */ static char fontname[40]; /* current fb_info */ static int info_idx = -1; /* console rotation */ static int initial_rotation = -1; static int fbcon_has_sysfs; static int margin_color; static const struct consw fb_con; #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row) static int fbcon_cursor_noblink; #define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1) /* * Interface used by the world */ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only); static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); /* * Internal routines */ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, int unit); static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, int line, int count, int dy); static void fbcon_modechanged(struct fb_info *info); static void fbcon_set_all_vcs(struct fb_info *info); static struct device *fbcon_device; #ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION static inline void fbcon_set_rotation(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; if (!(info->flags & FBINFO_MISC_TILEBLITTING) && ops->p->con_rotate < 4) ops->rotate = ops->p->con_rotate; else ops->rotate = 0; } static void fbcon_rotate(struct fb_info *info, u32 rotate) { struct fbcon_ops *ops= info->fbcon_par; struct fb_info *fb_info; if (!ops || ops->currcon == -1) return; fb_info = fbcon_info_from_console(ops->currcon); if (info == fb_info) { struct fbcon_display *p = &fb_display[ops->currcon]; if (rotate < 4) p->con_rotate = rotate; else p->con_rotate = 0; fbcon_modechanged(info); } } static void fbcon_rotate_all(struct fb_info *info, u32 rotate) { struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc; struct fbcon_display *p; int i; if (!ops || ops->currcon < 0 || rotate > 3) return; for (i = first_fb_vc; i <= last_fb_vc; i++) { vc = vc_cons[i].d; if (!vc || vc->vc_mode != KD_TEXT || fbcon_info_from_console(i) != info) continue; p = &fb_display[vc->vc_num]; p->con_rotate = rotate; } fbcon_set_all_vcs(info); } #else static inline void fbcon_set_rotation(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; ops->rotate = FB_ROTATE_UR; } static void fbcon_rotate(struct fb_info *info, u32 rotate) { return; } static void fbcon_rotate_all(struct fb_info *info, u32 rotate) { return; } #endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */ static int fbcon_get_rotate(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; return (ops) ? ops->rotate : 0; } static bool fbcon_skip_panic(struct fb_info *info) { /* panic_cpu is not exported, and can't be used if built as module. Use * oops_in_progress instead, but non-fatal oops won't be printed. 
*/ #if defined(MODULE) return (info->skip_panic && unlikely(oops_in_progress)); #else return (info->skip_panic && unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID)); #endif } static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; return (info->state != FBINFO_STATE_RUNNING || vc->vc_mode != KD_TEXT || ops->graphics || fbcon_skip_panic(info)); } static int get_color(struct vc_data *vc, struct fb_info *info, u16 c, int is_fg) { int depth = fb_get_color_depth(&info->var, &info->fix); int color = 0; if (console_blanked) { unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; c = vc->vc_video_erase_char & charmask; } if (depth != 1) color = (is_fg) ? attr_fgcol((vc->vc_hi_font_mask) ? 9 : 8, c) : attr_bgcol((vc->vc_hi_font_mask) ? 13 : 12, c); switch (depth) { case 1: { int col = mono_col(info); /* 0 or 1 */ int fg = (info->fix.visual != FB_VISUAL_MONO01) ? col : 0; int bg = (info->fix.visual != FB_VISUAL_MONO01) ? 0 : col; if (console_blanked) fg = bg; color = (is_fg) ? fg : bg; break; } case 2: /* * Scale down 16-colors to 4 colors. Default 4-color palette * is grayscale. However, simply dividing the values by 4 * will not work, as colors 1, 2 and 3 will be scaled-down * to zero rendering them invisible. So empirically convert * colors to a sane 4-level grayscale. */ switch (color) { case 0: color = 0; /* black */ break; case 1 ... 6: color = 2; /* white */ break; case 7 ... 8: color = 1; /* gray */ break; default: color = 3; /* intense white */ break; } break; case 3: /* * Last 8 entries of default 16-color palette is a more intense * version of the first 8 (i.e., same chrominance, different * luminance). */ color &= 7; break; } return color; } static void fb_flashcursor(struct work_struct *work) { struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work); struct fb_info *info; struct vc_data *vc = NULL; int c; bool enable; int ret; /* FIXME: we should sort out the unbind locking instead */ /* instead we just fail to flash the cursor if we can't get * the lock instead of blocking fbcon deinit */ ret = console_trylock(); if (ret == 0) return; /* protected by console_lock */ info = ops->info; if (ops->currcon != -1) vc = vc_cons[ops->currcon].d; if (!vc || !con_is_visible(vc) || fbcon_info_from_console(vc->vc_num) != info || vc->vc_deccm != 1) { console_unlock(); return; } c = scr_readw((u16 *) vc->vc_pos); enable = ops->cursor_flash && !ops->cursor_state.enable; ops->cursor(vc, info, enable, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); console_unlock(); queue_delayed_work(system_power_efficient_wq, &ops->cursor_work, ops->cur_blink_jiffies); } static void fbcon_add_cursor_work(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; if (!fbcon_cursor_noblink) queue_delayed_work(system_power_efficient_wq, &ops->cursor_work, ops->cur_blink_jiffies); } static void fbcon_del_cursor_work(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; cancel_delayed_work_sync(&ops->cursor_work); } #ifndef MODULE static int __init fb_console_setup(char *this_opt) { char *options; int i, j; if (!this_opt || !*this_opt) return 1; while ((options = strsep(&this_opt, ",")) != NULL) { if (!strncmp(options, "font:", 5)) { strscpy(fontname, options + 5, sizeof(fontname)); continue; } if (!strncmp(options, "scrollback:", 11)) { pr_warn("Ignoring scrollback size option\n"); continue; } if (!strncmp(options, "map:", 4)) { options += 4; if (*options) { for (i = 0, j = 0; i < MAX_NR_CONSOLES; 
i++) { if (!options[j]) j = 0; con2fb_map_boot[i] = (options[j++]-'0') % FB_MAX; } fbcon_map_override(); } continue; } if (!strncmp(options, "vc:", 3)) { options += 3; if (*options) first_fb_vc = simple_strtoul(options, &options, 10) - 1; if (first_fb_vc >= MAX_NR_CONSOLES) first_fb_vc = 0; if (*options++ == '-') last_fb_vc = simple_strtoul(options, &options, 10) - 1; if (last_fb_vc < first_fb_vc || last_fb_vc >= MAX_NR_CONSOLES) last_fb_vc = MAX_NR_CONSOLES - 1; fbcon_is_default = 0; continue; } if (!strncmp(options, "rotate:", 7)) { options += 7; if (*options) initial_rotation = simple_strtoul(options, &options, 0); if (initial_rotation > 3) initial_rotation = 0; continue; } if (!strncmp(options, "margin:", 7)) { options += 7; if (*options) margin_color = simple_strtoul(options, &options, 0); continue; } #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER if (!strcmp(options, "nodefer")) { deferred_takeover = false; continue; } #endif #ifdef CONFIG_LOGO if (!strncmp(options, "logo-pos:", 9)) { options += 9; if (!strcmp(options, "center")) fb_center_logo = true; continue; } if (!strncmp(options, "logo-count:", 11)) { options += 11; if (*options) fb_logo_count = simple_strtol(options, &options, 0); continue; } #endif } return 1; } __setup("fbcon=", fb_console_setup); #endif static int search_fb_in_map(int idx) { int i, retval = 0; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == idx) { retval = 1; break; } } return retval; } static int search_for_mapped_con(void) { int i, retval = 0; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] != -1) { retval = 1; break; } } return retval; } static int do_fbcon_takeover(int show_logo) { int err, i; if (!fbcon_num_registered_fb) return -ENODEV; if (!show_logo) logo_shown = FBCON_LOGO_DONTSHOW; for (i = first_fb_vc; i <= last_fb_vc; i++) con2fb_map[i] = info_idx; err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc, fbcon_is_default); if (err) { for (i = first_fb_vc; i <= last_fb_vc; i++) con2fb_map[i] = -1; info_idx = -1; } else { fbcon_has_console_bind = 1; } return err; } #ifdef MODULE static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, int cols, int rows, int new_cols, int new_rows) { logo_shown = FBCON_LOGO_DONTSHOW; } #else static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, int cols, int rows, int new_cols, int new_rows) { /* Need to make room for the logo */ struct fbcon_ops *ops = info->fbcon_par; int cnt, erase = vc->vc_video_erase_char, step; unsigned short *save = NULL, *r, *q; int logo_height; if (info->fbops->owner) { logo_shown = FBCON_LOGO_DONTSHOW; return; } /* * remove underline attribute from erase character * if black and white framebuffer. 
*/ if (fb_get_color_depth(&info->var, &info->fix) == 1) erase &= ~0x400; logo_height = fb_prepare_logo(info, ops->rotate); logo_lines = DIV_ROUND_UP(logo_height, vc->vc_font.height); q = (unsigned short *) (vc->vc_origin + vc->vc_size_row * rows); step = logo_lines * cols; for (r = q - logo_lines * cols; r < q; r++) if (scr_readw(r) != vc->vc_video_erase_char) break; if (r != q && new_rows >= rows + logo_lines) { save = kmalloc(array3_size(logo_lines, new_cols, 2), GFP_KERNEL); if (save) { int i = min(cols, new_cols); scr_memsetw(save, erase, array3_size(logo_lines, new_cols, 2)); r = q - step; for (cnt = 0; cnt < logo_lines; cnt++, r += i) scr_memcpyw(save + cnt * new_cols, r, 2 * i); r = q; } } if (r == q) { /* We can scroll screen down */ r = q - step - cols; for (cnt = rows - logo_lines; cnt > 0; cnt--) { scr_memcpyw(r + step, r, vc->vc_size_row); r -= cols; } if (!save) { int lines; if (vc->state.y + logo_lines >= rows) lines = rows - vc->state.y - 1; else lines = logo_lines; vc->state.y += lines; vc->vc_pos += lines * vc->vc_size_row; } } scr_memsetw((unsigned short *) vc->vc_origin, erase, vc->vc_size_row * logo_lines); if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { fbcon_clear_margins(vc, 0); update_screen(vc); } if (save) { q = (unsigned short *) (vc->vc_origin + vc->vc_size_row * rows); scr_memcpyw(q, save, array3_size(logo_lines, new_cols, 2)); vc->state.y += logo_lines; vc->vc_pos += logo_lines * vc->vc_size_row; kfree(save); } if (logo_shown == FBCON_LOGO_DONTSHOW) return; if (logo_lines > vc->vc_bottom) { logo_shown = FBCON_LOGO_CANSHOW; pr_info("fbcon: disable boot-logo (boot-logo bigger than screen).\n"); } else { logo_shown = FBCON_LOGO_DRAW; vc->vc_top = logo_lines; } } #endif /* MODULE */ #ifdef CONFIG_FB_TILEBLITTING static void set_blitting_type(struct vc_data *vc, struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; ops->p = &fb_display[vc->vc_num]; if ((info->flags & FBINFO_MISC_TILEBLITTING)) fbcon_set_tileops(vc, info); else { fbcon_set_rotation(info); fbcon_set_bitops(ops); } } static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) { int err = 0; if (info->flags & FBINFO_MISC_TILEBLITTING && info->tileops->fb_get_tilemax(info) < charcount) err = 1; return err; } #else static void set_blitting_type(struct vc_data *vc, struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; info->flags &= ~FBINFO_MISC_TILEBLITTING; ops->p = &fb_display[vc->vc_num]; fbcon_set_rotation(info); fbcon_set_bitops(ops); } static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) { return 0; } #endif /* CONFIG_MISC_TILEBLITTING */ static void fbcon_release(struct fb_info *info) { lock_fb_info(info); if (info->fbops->fb_release) info->fbops->fb_release(info, 0); unlock_fb_info(info); module_put(info->fbops->owner); if (info->fbcon_par) { struct fbcon_ops *ops = info->fbcon_par; fbcon_del_cursor_work(info); kfree(ops->cursor_state.mask); kfree(ops->cursor_data); kfree(ops->cursor_src); kfree(ops->fontbuffer); kfree(info->fbcon_par); info->fbcon_par = NULL; } } static int fbcon_open(struct fb_info *info) { struct fbcon_ops *ops; if (!try_module_get(info->fbops->owner)) return -ENODEV; lock_fb_info(info); if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) { unlock_fb_info(info); module_put(info->fbops->owner); return -ENODEV; } unlock_fb_info(info); ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); if (!ops) { fbcon_release(info); return -ENOMEM; } INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor); ops->info = 
info; info->fbcon_par = ops; ops->cur_blink_jiffies = HZ / 5; return 0; } static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info, int unit) { int err; err = fbcon_open(info); if (err) return err; if (vc) set_blitting_type(vc, info); return err; } static void con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo, struct fb_info *newinfo) { int ret; fbcon_release(oldinfo); /* If oldinfo and newinfo are driving the same hardware, the fb_release() method of oldinfo may attempt to restore the hardware state. This will leave the newinfo in an undefined state. Thus, a call to fb_set_par() may be needed for the newinfo. */ if (newinfo && newinfo->fbops->fb_set_par) { ret = newinfo->fbops->fb_set_par(newinfo); if (ret) printk(KERN_ERR "con2fb_release_oldinfo: " "detected unhandled fb_set_par error, " "error code %d\n", ret); } } static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, int unit, int show_logo) { struct fbcon_ops *ops = info->fbcon_par; int ret; ops->currcon = fg_console; if (info->fbops->fb_set_par && !ops->initialized) { ret = info->fbops->fb_set_par(info); if (ret) printk(KERN_ERR "con2fb_init_display: detected " "unhandled fb_set_par error, " "error code %d\n", ret); } ops->initialized = true; ops->graphics = 0; fbcon_set_disp(info, &info->var, unit); if (show_logo) { struct vc_data *fg_vc = vc_cons[fg_console].d; struct fb_info *fg_info = fbcon_info_from_console(fg_console); fbcon_prepare_logo(fg_vc, fg_info, fg_vc->vc_cols, fg_vc->vc_rows, fg_vc->vc_cols, fg_vc->vc_rows); } update_screen(vc_cons[fg_console].d); } /** * set_con2fb_map - map console to frame buffer device * @unit: virtual console number to map * @newidx: frame buffer index to map virtual console to * @user: user request * * Maps a virtual console @unit to a frame buffer device * @newidx. * * This should be called with the console lock held. */ static int set_con2fb_map(int unit, int newidx, int user) { struct vc_data *vc = vc_cons[unit].d; int oldidx = con2fb_map[unit]; struct fb_info *info = fbcon_registered_fb[newidx]; struct fb_info *oldinfo = NULL; int err = 0, show_logo; WARN_CONSOLE_UNLOCKED(); if (oldidx == newidx) return 0; if (!info) return -EINVAL; if (!search_for_mapped_con() || !con_is_bound(&fb_con)) { info_idx = newidx; return do_fbcon_takeover(0); } if (oldidx != -1) oldinfo = fbcon_registered_fb[oldidx]; if (!search_fb_in_map(newidx)) { err = con2fb_acquire_newinfo(vc, info, unit); if (err) return err; fbcon_add_cursor_work(info); } else if (vc) { set_blitting_type(vc, info); } con2fb_map[unit] = newidx; /* * If old fb is not mapped to any of the consoles, * fbcon should release it. 
*/ if (oldinfo && !search_fb_in_map(oldidx)) con2fb_release_oldinfo(vc, oldinfo, info); show_logo = (fg_console == 0 && !user && logo_shown != FBCON_LOGO_DONTSHOW); con2fb_map_boot[unit] = newidx; con2fb_init_display(vc, info, unit, show_logo); if (!search_fb_in_map(info_idx)) info_idx = newidx; return err; } /* * Low Level Operations */ /* NOTE: fbcon cannot be __init: it may be called from do_take_over_console later */ static int var_to_display(struct fbcon_display *disp, struct fb_var_screeninfo *var, struct fb_info *info) { disp->xres_virtual = var->xres_virtual; disp->yres_virtual = var->yres_virtual; disp->bits_per_pixel = var->bits_per_pixel; disp->grayscale = var->grayscale; disp->nonstd = var->nonstd; disp->accel_flags = var->accel_flags; disp->height = var->height; disp->width = var->width; disp->red = var->red; disp->green = var->green; disp->blue = var->blue; disp->transp = var->transp; disp->rotate = var->rotate; disp->mode = fb_match_mode(var, &info->modelist); if (disp->mode == NULL) /* This should not happen */ return -EINVAL; return 0; } static void display_to_var(struct fb_var_screeninfo *var, struct fbcon_display *disp) { fb_videomode_to_var(var, disp->mode); var->xres_virtual = disp->xres_virtual; var->yres_virtual = disp->yres_virtual; var->bits_per_pixel = disp->bits_per_pixel; var->grayscale = disp->grayscale; var->nonstd = disp->nonstd; var->accel_flags = disp->accel_flags; var->height = disp->height; var->width = disp->width; var->red = disp->red; var->green = disp->green; var->blue = disp->blue; var->transp = disp->transp; var->rotate = disp->rotate; } static const char *fbcon_startup(void) { static const char display_desc[] = "frame buffer device"; struct fbcon_display *p = &fb_display[fg_console]; struct vc_data *vc = vc_cons[fg_console].d; const struct font_desc *font = NULL; struct fb_info *info = NULL; struct fbcon_ops *ops; int rows, cols; /* * If num_registered_fb is zero, this is a call for the dummy part. * The frame buffer devices weren't initialized yet. 
*/ if (!fbcon_num_registered_fb || info_idx == -1) return display_desc; /* * Instead of blindly using registered_fb[0], we use info_idx, set by * fbcon_fb_registered(); */ info = fbcon_registered_fb[info_idx]; if (!info) return NULL; if (fbcon_open(info)) return NULL; ops = info->fbcon_par; ops->currcon = -1; ops->graphics = 1; ops->cur_rotate = -1; p->con_rotate = initial_rotation; if (p->con_rotate == -1) p->con_rotate = info->fbcon_rotate_hint; if (p->con_rotate == -1) p->con_rotate = FB_ROTATE_UR; set_blitting_type(vc, info); /* Setup default font */ if (!p->fontdata) { if (!fontname[0] || !(font = find_font(fontname))) font = get_default_font(info->var.xres, info->var.yres, info->pixmap.blit_x, info->pixmap.blit_y); vc->vc_font.width = font->width; vc->vc_font.height = font->height; vc->vc_font.data = (void *)(p->fontdata = font->data); vc->vc_font.charcount = font->charcount; } cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); pr_debug("mode: %s\n", info->fix.id); pr_debug("visual: %d\n", info->fix.visual); pr_debug("res: %dx%d-%d\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); fbcon_add_cursor_work(info); return display_desc; } static void fbcon_init(struct vc_data *vc, bool init) { struct fb_info *info; struct fbcon_ops *ops; struct vc_data **default_mode = vc->vc_display_fg; struct vc_data *svc = *default_mode; struct fbcon_display *t, *p = &fb_display[vc->vc_num]; int logo = 1, new_rows, new_cols, rows, cols; int ret; if (WARN_ON(info_idx == -1)) return; if (con2fb_map[vc->vc_num] == -1) con2fb_map[vc->vc_num] = info_idx; info = fbcon_info_from_console(vc->vc_num); if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET) logo_shown = FBCON_LOGO_DONTSHOW; if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW || (info->fix.type == FB_TYPE_TEXT)) logo = 0; if (var_to_display(p, &info->var, info)) return; if (!info->fbcon_par) con2fb_acquire_newinfo(vc, info, vc->vc_num); /* If we are not the first console on this fb, copy the font from that console */ t = &fb_display[fg_console]; if (!p->fontdata) { if (t->fontdata) { struct vc_data *fvc = vc_cons[fg_console].d; vc->vc_font.data = (void *)(p->fontdata = fvc->vc_font.data); vc->vc_font.width = fvc->vc_font.width; vc->vc_font.height = fvc->vc_font.height; vc->vc_font.charcount = fvc->vc_font.charcount; p->userfont = t->userfont; if (p->userfont) REFCOUNT(p->fontdata)++; } else { const struct font_desc *font = NULL; if (!fontname[0] || !(font = find_font(fontname))) font = get_default_font(info->var.xres, info->var.yres, info->pixmap.blit_x, info->pixmap.blit_y); vc->vc_font.width = font->width; vc->vc_font.height = font->height; vc->vc_font.data = (void *)(p->fontdata = font->data); vc->vc_font.charcount = font->charcount; } } vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1); vc->vc_complement_mask = vc->vc_can_do_color ? 
0x7700 : 0x0800; if (vc->vc_font.charcount == 256) { vc->vc_hi_font_mask = 0; } else { vc->vc_hi_font_mask = 0x100; if (vc->vc_can_do_color) vc->vc_complement_mask <<= 1; } if (!*svc->uni_pagedict_loc) con_set_default_unimap(svc); if (!*vc->uni_pagedict_loc) con_copy_unimap(vc, svc); ops = info->fbcon_par; ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); p->con_rotate = initial_rotation; if (p->con_rotate == -1) p->con_rotate = info->fbcon_rotate_hint; if (p->con_rotate == -1) p->con_rotate = FB_ROTATE_UR; set_blitting_type(vc, info); cols = vc->vc_cols; rows = vc->vc_rows; new_cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); new_cols /= vc->vc_font.width; new_rows /= vc->vc_font.height; /* * We must always set the mode. The mode of the previous console * driver could be in the same resolution but we are using different * hardware so we have to initialize the hardware. * * We need to do it in fbcon_init() to prevent screen corruption. */ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { if (info->fbops->fb_set_par && !ops->initialized) { ret = info->fbops->fb_set_par(info); if (ret) printk(KERN_ERR "fbcon_init: detected " "unhandled fb_set_par error, " "error code %d\n", ret); } ops->initialized = true; } ops->graphics = 0; #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION if ((info->flags & FBINFO_HWACCEL_COPYAREA) && !(info->flags & FBINFO_HWACCEL_DISABLED)) p->scrollmode = SCROLL_MOVE; else /* default to something safe */ p->scrollmode = SCROLL_REDRAW; #endif /* * ++guenther: console.c:vc_allocate() relies on initializing * vc_{cols,rows}, but we must not set those if we are only * resizing the console. */ if (init) { vc->vc_cols = new_cols; vc->vc_rows = new_rows; } else vc_resize(vc, new_cols, new_rows); if (logo) fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); if (ops->rotate_font && ops->rotate_font(info, vc)) { ops->rotate = FB_ROTATE_UR; set_blitting_type(vc, info); } ops->p = &fb_display[fg_console]; } static void fbcon_free_font(struct fbcon_display *p) { if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int)); p->fontdata = NULL; p->userfont = 0; } static void set_vc_hi_font(struct vc_data *vc, bool set); static void fbcon_release_all(void) { struct fb_info *info; int i, j, mapped; fbcon_for_each_registered_fb(i) { mapped = 0; info = fbcon_registered_fb[i]; for (j = first_fb_vc; j <= last_fb_vc; j++) { if (con2fb_map[j] == i) { mapped = 1; con2fb_map[j] = -1; } } if (mapped) fbcon_release(info); } } static void fbcon_deinit(struct vc_data *vc) { struct fbcon_display *p = &fb_display[vc->vc_num]; struct fb_info *info; struct fbcon_ops *ops; int idx; fbcon_free_font(p); idx = con2fb_map[vc->vc_num]; if (idx == -1) goto finished; info = fbcon_registered_fb[idx]; if (!info) goto finished; ops = info->fbcon_par; if (!ops) goto finished; if (con_is_visible(vc)) fbcon_del_cursor_work(info); ops->initialized = false; finished: fbcon_free_font(p); vc->vc_font.data = NULL; if (vc->vc_hi_font_mask && vc->vc_screenbuf) set_vc_hi_font(vc, false); if (!con_is_bound(&fb_con)) fbcon_release_all(); if (vc->vc_num == logo_shown) logo_shown = FBCON_LOGO_CANSHOW; return; } /* ====================================================================== */ /* fbcon_XXX routines - interface used by the world * * This system is now divided into two levels because of complications * caused by hardware scrolling. 
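 *
 * (An illustrative example of the wrap-around splitting described below,
 * with made-up numbers: if p->vrows = 48 and p->yscroll = 40, then
 * y_break = p->vrows - p->yscroll = 8, so a blit covering display rows
 * 5..10 is split at row 8; rows 5..7 map to real rows 45..47 while rows
 * 8..10 wrap around to real rows 0..2.)
 *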
Top level functions: * * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins() * * handles y values in range [0, scr_height-1] that correspond to real * screen positions. y_wrap shift means that first line of bitmap may be * anywhere on this display. These functions convert lineoffsets to * bitmap offsets and deal with the wrap-around case by splitting blits. * * fbcon_bmove_physical_8() -- These functions fast implementations * fbcon_clear_physical_8() -- of original fbcon_XXX fns. * fbcon_putc_physical_8() -- (font width != 8) may be added later * * WARNING: * * At the moment fbcon_putc() cannot blit across vertical wrap boundary * Implies should only really hardware scroll in rows. Only reason for * restriction is simplicity & efficiency at the moment. */ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, unsigned int height, unsigned int width) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; u_int y_break; if (fbcon_is_inactive(vc, info)) return; if (!height || !width) return; if (sy < vc->vc_top && vc->vc_top == logo_lines) { vc->vc_top = 0; /* * If the font dimensions are not an integral of the display * dimensions then the ops->clear below won't end up clearing * the margins. Call clear_margins here in case the logo * bitmap stretched into the margin area. */ fbcon_clear_margins(vc, 0); } /* Split blits that cross physical y_wrap boundary */ y_break = p->vrows - p->yscroll; if (sy < y_break && sy + height - 1 >= y_break) { u_int b = y_break - sy; ops->clear(vc, info, real_y(p, sy), sx, b, width); ops->clear(vc, info, real_y(p, sy + b), sx, height - b, width); } else ops->clear(vc, info, real_y(p, sy), sx, height, width); } static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, unsigned int width) { __fbcon_clear(vc, sy, sx, 1, width); } static void fbcon_putcs(struct vc_data *vc, const u16 *s, unsigned int count, unsigned int ypos, unsigned int xpos) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; if (!fbcon_is_inactive(vc, info)) ops->putcs(vc, info, s, count, real_y(p, ypos), xpos, get_color(vc, info, scr_readw(s), 1), get_color(vc, info, scr_readw(s), 0)); } static void fbcon_clear_margins(struct vc_data *vc, int bottom_only) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; if (!fbcon_is_inactive(vc, info)) ops->clear_margins(vc, info, margin_color, bottom_only); } static void fbcon_cursor(struct vc_data *vc, bool enable) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; int c = scr_readw((u16 *) vc->vc_pos); ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) return; if (vc->vc_cursor_type & CUR_SW) fbcon_del_cursor_work(info); else fbcon_add_cursor_work(info); ops->cursor_flash = enable; if (!ops->cursor) return; ops->cursor(vc, info, enable, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); } static int scrollback_phys_max = 0; static int scrollback_max = 0; static int scrollback_current = 0; static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, int unit) { struct fbcon_display *p, *t; struct vc_data **default_mode, *vc; struct vc_data *svc; struct fbcon_ops *ops = info->fbcon_par; int rows, cols; p = 
&fb_display[unit]; if (var_to_display(p, var, info)) return; vc = vc_cons[unit].d; if (!vc) return; default_mode = vc->vc_display_fg; svc = *default_mode; t = &fb_display[svc->vc_num]; if (!vc->vc_font.data) { vc->vc_font.data = (void *)(p->fontdata = t->fontdata); vc->vc_font.width = (*default_mode)->vc_font.width; vc->vc_font.height = (*default_mode)->vc_font.height; vc->vc_font.charcount = (*default_mode)->vc_font.charcount; p->userfont = t->userfont; if (p->userfont) REFCOUNT(p->fontdata)++; } var->activate = FB_ACTIVATE_NOW; info->var.activate = var->activate; var->yoffset = info->var.yoffset; var->xoffset = info->var.xoffset; fb_set_var(info, var); ops->var = info->var; vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1); vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; if (vc->vc_font.charcount == 256) { vc->vc_hi_font_mask = 0; } else { vc->vc_hi_font_mask = 0x100; if (vc->vc_can_do_color) vc->vc_complement_mask <<= 1; } if (!*svc->uni_pagedict_loc) con_set_default_unimap(svc); if (!*vc->uni_pagedict_loc) con_copy_unimap(vc, svc); cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); if (con_is_visible(vc)) { update_screen(vc); } } static __inline__ void ywrap_up(struct vc_data *vc, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; p->yscroll += count; if (p->yscroll >= p->vrows) /* Deal with wrap */ p->yscroll -= p->vrows; ops->var.xoffset = 0; ops->var.yoffset = p->yscroll * vc->vc_font.height; ops->var.vmode |= FB_VMODE_YWRAP; ops->update_start(info); scrollback_max += count; if (scrollback_max > scrollback_phys_max) scrollback_max = scrollback_phys_max; scrollback_current = 0; } static __inline__ void ywrap_down(struct vc_data *vc, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; p->yscroll -= count; if (p->yscroll < 0) /* Deal with wrap */ p->yscroll += p->vrows; ops->var.xoffset = 0; ops->var.yoffset = p->yscroll * vc->vc_font.height; ops->var.vmode |= FB_VMODE_YWRAP; ops->update_start(info); scrollback_max -= count; if (scrollback_max < 0) scrollback_max = 0; scrollback_current = 0; } static __inline__ void ypan_up(struct vc_data *vc, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; p->yscroll += count; if (p->yscroll > p->vrows - vc->vc_rows) { ops->bmove(vc, info, p->vrows - vc->vc_rows, 0, 0, 0, vc->vc_rows, vc->vc_cols); p->yscroll -= p->vrows - vc->vc_rows; } ops->var.xoffset = 0; ops->var.yoffset = p->yscroll * vc->vc_font.height; ops->var.vmode &= ~FB_VMODE_YWRAP; ops->update_start(info); fbcon_clear_margins(vc, 1); scrollback_max += count; if (scrollback_max > scrollback_phys_max) scrollback_max = scrollback_phys_max; scrollback_current = 0; } static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; p->yscroll += count; if (p->yscroll > p->vrows - vc->vc_rows) { p->yscroll -= p->vrows - vc->vc_rows; fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t); } 
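	/*
	 * Illustrative note, not from the original source: the statements
	 * below pan the display by whole character rows, i.e. the visible
	 * start is moved to p->yscroll * vc->vc_font.height pixels into the
	 * virtual framebuffer (for an 8x16 font and p->yscroll == 3 that is
	 * 48 pixel rows) before ops->update_start() programs the new offset.
	 */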
ops->var.xoffset = 0; ops->var.yoffset = p->yscroll * vc->vc_font.height; ops->var.vmode &= ~FB_VMODE_YWRAP; ops->update_start(info); fbcon_clear_margins(vc, 1); scrollback_max += count; if (scrollback_max > scrollback_phys_max) scrollback_max = scrollback_phys_max; scrollback_current = 0; } static __inline__ void ypan_down(struct vc_data *vc, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; p->yscroll -= count; if (p->yscroll < 0) { ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows, 0, vc->vc_rows, vc->vc_cols); p->yscroll += p->vrows - vc->vc_rows; } ops->var.xoffset = 0; ops->var.yoffset = p->yscroll * vc->vc_font.height; ops->var.vmode &= ~FB_VMODE_YWRAP; ops->update_start(info); fbcon_clear_margins(vc, 1); scrollback_max -= count; if (scrollback_max < 0) scrollback_max = 0; scrollback_current = 0; } static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; p->yscroll -= count; if (p->yscroll < 0) { p->yscroll += p->vrows - vc->vc_rows; fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count); } ops->var.xoffset = 0; ops->var.yoffset = p->yscroll * vc->vc_font.height; ops->var.vmode &= ~FB_VMODE_YWRAP; ops->update_start(info); fbcon_clear_margins(vc, 1); scrollback_max -= count; if (scrollback_max < 0) scrollback_max = 0; scrollback_current = 0; } static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, int line, int count, int dy) { unsigned short *s = (unsigned short *) (vc->vc_origin + vc->vc_size_row * line); while (count--) { unsigned short *start = s; unsigned short *le = advance_row(s, 1); unsigned short c; int x = 0; unsigned short attr = 1; do { c = scr_readw(s); if (attr != (c & 0xff00)) { attr = c & 0xff00; if (s > start) { fbcon_putcs(vc, start, s - start, dy, x); x += s - start; start = s; } } console_conditional_schedule(); s++; } while (s < le); if (s > start) fbcon_putcs(vc, start, s - start, dy, x); console_conditional_schedule(); dy++; } } static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info, struct fbcon_display *p, int line, int count, int ycount) { int offset = ycount * vc->vc_cols; unsigned short *d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * line); unsigned short *s = d + offset; struct fbcon_ops *ops = info->fbcon_par; while (count--) { unsigned short *start = s; unsigned short *le = advance_row(s, 1); unsigned short c; int x = 0; do { c = scr_readw(s); if (c == scr_readw(d)) { if (s > start) { ops->bmove(vc, info, line + ycount, x, line, x, 1, s-start); x += s - start + 1; start = s + 1; } else { x++; start++; } } scr_writew(c, d); console_conditional_schedule(); s++; d++; } while (s < le); if (s > start) ops->bmove(vc, info, line + ycount, x, line, x, 1, s-start); console_conditional_schedule(); if (ycount > 0) line++; else { line--; /* NOTE: We subtract two lines from these pointers */ s -= vc->vc_size_row; d -= vc->vc_size_row; } } } static void fbcon_redraw(struct vc_data *vc, int line, int count, int offset) { unsigned short *d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * line); unsigned short *s = d + offset; while (count--) { unsigned short *start = s; unsigned short *le = advance_row(s, 1); unsigned short c; int x = 0; unsigned short attr = 1; do { c = scr_readw(s); if (attr != (c & 0xff00)) { attr = c & 
0xff00; if (s > start) { fbcon_putcs(vc, start, s - start, line, x); x += s - start; start = s; } } if (c == scr_readw(d)) { if (s > start) { fbcon_putcs(vc, start, s - start, line, x); x += s - start + 1; start = s + 1; } else { x++; start++; } } scr_writew(c, d); console_conditional_schedule(); s++; d++; } while (s < le); if (s > start) fbcon_putcs(vc, start, s - start, line, x); console_conditional_schedule(); if (offset > 0) line++; else { line--; /* NOTE: We subtract two lines from these pointers */ s -= vc->vc_size_row; d -= vc->vc_size_row; } } } static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, int dy, int dx, int height, int width, u_int y_break) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; u_int b; if (sy < y_break && sy + height > y_break) { b = y_break - sy; if (dy < sy) { /* Avoid trashing self */ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, y_break); fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, height - b, width, y_break); } else { fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, height - b, width, y_break); fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, y_break); } return; } if (dy < y_break && dy + height > y_break) { b = y_break - dy; if (dy < sy) { /* Avoid trashing self */ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, y_break); fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, height - b, width, y_break); } else { fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, height - b, width, y_break); fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, y_break); } return; } ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, height, width); } static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, int height, int width) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; if (fbcon_is_inactive(vc, info)) return; if (!width || !height) return; /* Split blits that cross physical y_wrap case. * Pathological case involves 4 blits, better to use recursive * code rather than unrolled case * * Recursive invocations don't need to erase the cursor over and * over again, so we use fbcon_bmove_rec() */ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width, p->vrows - p->yscroll); } static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, enum con_scroll dir, unsigned int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK; if (fbcon_is_inactive(vc, info)) return true; fbcon_cursor(vc, false); /* * ++Geert: Only use ywrap/ypan if the console is in text mode * ++Andrew: Only use ypan on hardware text mode when scrolling the * whole screen (prevents flicker). 
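 * (Illustrative summary of the switch below, not from the original source:
 * in the SCROLL_WRAP_MOVE case the ywrap path is taken only when the moved
 * region, b - t - count rows, is larger than three quarters of the screen;
 * otherwise the code falls back to fbcon_bmove() when FBINFO_READS_FAST is
 * set, or to a full redraw.)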
*/ switch (dir) { case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, t, b - t - count, count); __fbcon_clear(vc, b - count, 0, count, vc->vc_cols); scr_memsetw((unsigned short *) (vc->vc_origin + vc->vc_size_row * (b - count)), vc->vc_video_erase_char, vc->vc_size_row * count); return true; case SCROLL_WRAP_MOVE: if (b - t - count > 3 * vc->vc_rows >> 2) { if (t > 0) fbcon_bmove(vc, 0, 0, count, 0, t, vc->vc_cols); ywrap_up(vc, count); if (vc->vc_rows - b > 0) fbcon_bmove(vc, b - count, 0, b, 0, vc->vc_rows - b, vc->vc_cols); } else if (info->flags & FBINFO_READS_FAST) fbcon_bmove(vc, t + count, 0, t, 0, b - t - count, vc->vc_cols); else goto redraw_up; __fbcon_clear(vc, b - count, 0, count, vc->vc_cols); break; case SCROLL_PAN_REDRAW: if ((p->yscroll + count <= 2 * (p->vrows - vc->vc_rows)) && ((!scroll_partial && (b - t == vc->vc_rows)) || (scroll_partial && (b - t - count > 3 * vc->vc_rows >> 2)))) { if (t > 0) fbcon_redraw_move(vc, p, 0, t, count); ypan_up_redraw(vc, t, count); if (vc->vc_rows - b > 0) fbcon_redraw_move(vc, p, b, vc->vc_rows - b, b); } else fbcon_redraw_move(vc, p, t + count, b - t - count, t); __fbcon_clear(vc, b - count, 0, count, vc->vc_cols); break; case SCROLL_PAN_MOVE: if ((p->yscroll + count <= 2 * (p->vrows - vc->vc_rows)) && ((!scroll_partial && (b - t == vc->vc_rows)) || (scroll_partial && (b - t - count > 3 * vc->vc_rows >> 2)))) { if (t > 0) fbcon_bmove(vc, 0, 0, count, 0, t, vc->vc_cols); ypan_up(vc, count); if (vc->vc_rows - b > 0) fbcon_bmove(vc, b - count, 0, b, 0, vc->vc_rows - b, vc->vc_cols); } else if (info->flags & FBINFO_READS_FAST) fbcon_bmove(vc, t + count, 0, t, 0, b - t - count, vc->vc_cols); else goto redraw_up; __fbcon_clear(vc, b - count, 0, count, vc->vc_cols); break; case SCROLL_REDRAW: redraw_up: fbcon_redraw(vc, t, b - t - count, count * vc->vc_cols); __fbcon_clear(vc, b - count, 0, count, vc->vc_cols); scr_memsetw((unsigned short *) (vc->vc_origin + vc->vc_size_row * (b - count)), vc->vc_video_erase_char, vc->vc_size_row * count); return true; } break; case SM_DOWN: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, -count); __fbcon_clear(vc, t, 0, count, vc->vc_cols); scr_memsetw((unsigned short *) (vc->vc_origin + vc->vc_size_row * t), vc->vc_video_erase_char, vc->vc_size_row * count); return true; case SCROLL_WRAP_MOVE: if (b - t - count > 3 * vc->vc_rows >> 2) { if (vc->vc_rows - b > 0) fbcon_bmove(vc, b, 0, b - count, 0, vc->vc_rows - b, vc->vc_cols); ywrap_down(vc, count); if (t > 0) fbcon_bmove(vc, count, 0, 0, 0, t, vc->vc_cols); } else if (info->flags & FBINFO_READS_FAST) fbcon_bmove(vc, t, 0, t + count, 0, b - t - count, vc->vc_cols); else goto redraw_down; __fbcon_clear(vc, t, 0, count, vc->vc_cols); break; case SCROLL_PAN_MOVE: if ((count - p->yscroll <= p->vrows - vc->vc_rows) && ((!scroll_partial && (b - t == vc->vc_rows)) || (scroll_partial && (b - t - count > 3 * vc->vc_rows >> 2)))) { if (vc->vc_rows - b > 0) fbcon_bmove(vc, b, 0, b - count, 0, vc->vc_rows - b, vc->vc_cols); ypan_down(vc, count); if (t > 0) fbcon_bmove(vc, count, 0, 0, 0, t, vc->vc_cols); } else if (info->flags & FBINFO_READS_FAST) fbcon_bmove(vc, t, 0, t + count, 0, b - t - count, vc->vc_cols); else goto redraw_down; __fbcon_clear(vc, t, 0, count, vc->vc_cols); break; case SCROLL_PAN_REDRAW: if ((count - p->yscroll <= 
p->vrows - vc->vc_rows) && ((!scroll_partial && (b - t == vc->vc_rows)) || (scroll_partial && (b - t - count > 3 * vc->vc_rows >> 2)))) { if (vc->vc_rows - b > 0) fbcon_redraw_move(vc, p, b, vc->vc_rows - b, b - count); ypan_down_redraw(vc, t, count); if (t > 0) fbcon_redraw_move(vc, p, count, t, 0); } else fbcon_redraw_move(vc, p, t, b - t - count, t + count); __fbcon_clear(vc, t, 0, count, vc->vc_cols); break; case SCROLL_REDRAW: redraw_down: fbcon_redraw(vc, b - 1, b - t - count, -count * vc->vc_cols); __fbcon_clear(vc, t, 0, count, vc->vc_cols); scr_memsetw((unsigned short *) (vc->vc_origin + vc->vc_size_row * t), vc->vc_video_erase_char, vc->vc_size_row * count); return true; } } return false; } static void updatescrollmode_accel(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) { #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION struct fbcon_ops *ops = info->fbcon_par; int cap = info->flags; u16 t = 0; int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep, info->fix.xpanstep); int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t); int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, info->var.xres_virtual); int good_pan = (cap & FBINFO_HWACCEL_YPAN) && divides(ypan, vc->vc_font.height) && vyres > yres; int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) && divides(ywrap, vc->vc_font.height) && divides(vc->vc_font.height, vyres) && divides(vc->vc_font.height, yres); int reading_fast = cap & FBINFO_READS_FAST; int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) && !(cap & FBINFO_HWACCEL_DISABLED); int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) && !(cap & FBINFO_HWACCEL_DISABLED); if (good_wrap || good_pan) { if (reading_fast || fast_copyarea) p->scrollmode = good_wrap ? SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE; else p->scrollmode = good_wrap ? SCROLL_REDRAW : SCROLL_PAN_REDRAW; } else { if (reading_fast || (fast_copyarea && !fast_imageblit)) p->scrollmode = SCROLL_MOVE; else p->scrollmode = SCROLL_REDRAW; } #endif } static void updatescrollmode(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) { struct fbcon_ops *ops = info->fbcon_par; int fh = vc->vc_font.height; int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, info->var.xres_virtual); p->vrows = vyres/fh; if (yres > (fh * (vc->vc_rows + 1))) p->vrows -= (yres - (fh * vc->vc_rows)) / fh; if ((yres % fh) && (vyres % fh < yres % fh)) p->vrows--; /* update scrollmode in case hardware acceleration is used */ updatescrollmode_accel(p, info, vc); } #define PITCH(w) (((w) + 7) >> 3) #define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */ static int fbcon_resize(struct vc_data *vc, unsigned int width, unsigned int height, bool from_user) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; struct fb_var_screeninfo var = info->var; int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; if (p->userfont && FNTSIZE(vc->vc_font.data)) { int size; int pitch = PITCH(vc->vc_font.width); /* * If user font, ensure that a possible change to user font * height or width will not allow a font data out-of-bounds access. * NOTE: must use original charcount in calculation as font * charcount can change and cannot be used to determine the * font data allocated size. 
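 *
 * (Worked example, illustrative only: an 8x16 font with 256 glyphs has
 * pitch = PITCH(8) = 1 byte per scanline and
 * size = CALC_FONTSZ(16, 1, 256) = 4096 bytes; whenever the computed size
 * exceeds FNTSIZE(vc->vc_font.data) the resize is rejected with -EINVAL
 * below.)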
*/ if (pitch <= 0) return -EINVAL; size = CALC_FONTSZ(vc->vc_font.height, pitch, vc->vc_font.charcount); if (size > FNTSIZE(vc->vc_font.data)) return -EINVAL; } virt_w = FBCON_SWAP(ops->rotate, width, height); virt_h = FBCON_SWAP(ops->rotate, height, width); virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width, vc->vc_font.height); virt_fh = FBCON_SWAP(ops->rotate, vc->vc_font.height, vc->vc_font.width); var.xres = virt_w * virt_fw; var.yres = virt_h * virt_fh; x_diff = info->var.xres - var.xres; y_diff = info->var.yres - var.yres; if (x_diff < 0 || x_diff > virt_fw || y_diff < 0 || y_diff > virt_fh) { const struct fb_videomode *mode; pr_debug("attempting resize %ix%i\n", var.xres, var.yres); mode = fb_find_best_mode(&var, &info->modelist); if (mode == NULL) return -EINVAL; display_to_var(&var, p); fb_videomode_to_var(&var, mode); if (virt_w > var.xres/virt_fw || virt_h > var.yres/virt_fh) return -EINVAL; pr_debug("resize now %ix%i\n", var.xres, var.yres); if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE; fb_set_var(info, &var); } var_to_display(p, &info->var, info); ops->var = info->var; } updatescrollmode(p, info, vc); return 0; } static bool fbcon_switch(struct vc_data *vc) { struct fb_info *info, *old_info = NULL; struct fbcon_ops *ops; struct fbcon_display *p = &fb_display[vc->vc_num]; struct fb_var_screeninfo var; int i, ret, prev_console; info = fbcon_info_from_console(vc->vc_num); ops = info->fbcon_par; if (logo_shown >= 0) { struct vc_data *conp2 = vc_cons[logo_shown].d; if (conp2->vc_top == logo_lines && conp2->vc_bottom == conp2->vc_rows) conp2->vc_top = 0; logo_shown = FBCON_LOGO_CANSHOW; } prev_console = ops->currcon; if (prev_console != -1) old_info = fbcon_info_from_console(prev_console); /* * FIXME: If we have multiple fbdev's loaded, we need to * update all info->currcon. Perhaps, we can place this * in a centralized structure, but this might break some * drivers. * * info->currcon = vc->vc_num; */ fbcon_for_each_registered_fb(i) { if (fbcon_registered_fb[i]->fbcon_par) { struct fbcon_ops *o = fbcon_registered_fb[i]->fbcon_par; o->currcon = vc->vc_num; } } memset(&var, 0, sizeof(struct fb_var_screeninfo)); display_to_var(&var, p); var.activate = FB_ACTIVATE_NOW; /* * make sure we don't unnecessarily trip the memcmp() * in fb_set_var() */ info->var.activate = var.activate; var.vmode |= info->var.vmode & ~FB_VMODE_MASK; fb_set_var(info, &var); ops->var = info->var; if (old_info != NULL && (old_info != info || info->flags & FBINFO_MISC_ALWAYS_SETPAR)) { if (info->fbops->fb_set_par) { ret = info->fbops->fb_set_par(info); if (ret) printk(KERN_ERR "fbcon_switch: detected " "unhandled fb_set_par error, " "error code %d\n", ret); } if (old_info != info) fbcon_del_cursor_work(old_info); } if (fbcon_is_inactive(vc, info) || ops->blank_state != FB_BLANK_UNBLANK) fbcon_del_cursor_work(info); else fbcon_add_cursor_work(info); set_blitting_type(vc, info); ops->cursor_reset = 1; if (ops->rotate_font && ops->rotate_font(info, vc)) { ops->rotate = FB_ROTATE_UR; set_blitting_type(vc, info); } vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1); vc->vc_complement_mask = vc->vc_can_do_color ? 
0x7700 : 0x0800; if (vc->vc_font.charcount > 256) vc->vc_complement_mask <<= 1; updatescrollmode(p, info, vc); switch (fb_scrollmode(p)) { case SCROLL_WRAP_MOVE: scrollback_phys_max = p->vrows - vc->vc_rows; break; case SCROLL_PAN_MOVE: case SCROLL_PAN_REDRAW: scrollback_phys_max = p->vrows - 2 * vc->vc_rows; if (scrollback_phys_max < 0) scrollback_phys_max = 0; break; default: scrollback_phys_max = 0; break; } scrollback_max = 0; scrollback_current = 0; if (!fbcon_is_inactive(vc, info)) { ops->var.xoffset = ops->var.yoffset = p->yscroll = 0; ops->update_start(info); } fbcon_set_palette(vc, color_table); fbcon_clear_margins(vc, 0); if (logo_shown == FBCON_LOGO_DRAW) { logo_shown = fg_console; fb_show_logo(info, ops->rotate); update_region(vc, vc->vc_origin + vc->vc_size_row * vc->vc_top, vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2); return false; } return true; } static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info, int blank) { if (blank) { unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; unsigned short oldc; oldc = vc->vc_video_erase_char; vc->vc_video_erase_char &= charmask; __fbcon_clear(vc, 0, 0, vc->vc_rows, vc->vc_cols); vc->vc_video_erase_char = oldc; } } static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank, bool mode_switch) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; if (mode_switch) { struct fb_var_screeninfo var = info->var; ops->graphics = 1; if (!blank) { var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE | FB_ACTIVATE_KD_TEXT; fb_set_var(info, &var); ops->graphics = 0; ops->var = info->var; } } if (!fbcon_is_inactive(vc, info)) { if (ops->blank_state != blank) { ops->blank_state = blank; fbcon_cursor(vc, !blank); ops->cursor_flash = (!blank); if (fb_blank(info, blank)) fbcon_generic_blank(vc, info, blank); } if (!blank) update_screen(vc); } if (mode_switch || fbcon_is_inactive(vc, info) || ops->blank_state != FB_BLANK_UNBLANK) fbcon_del_cursor_work(info); else fbcon_add_cursor_work(info); return false; } static void fbcon_debug_enter(struct vc_data *vc) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; ops->save_graphics = ops->graphics; ops->graphics = 0; if (info->fbops->fb_debug_enter) info->fbops->fb_debug_enter(info); fbcon_set_palette(vc, color_table); } static void fbcon_debug_leave(struct vc_data *vc) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; ops->graphics = ops->save_graphics; if (info->fbops->fb_debug_leave) info->fbops->fb_debug_leave(info); } static int fbcon_get_font(struct vc_data *vc, struct console_font *font, unsigned int vpitch) { u8 *fontdata = vc->vc_font.data; u8 *data = font->data; int i, j; font->width = vc->vc_font.width; font->height = vc->vc_font.height; if (font->height > vpitch) return -ENOSPC; font->charcount = vc->vc_hi_font_mask ? 
512 : 256; if (!font->data) return 0; if (font->width <= 8) { j = vc->vc_font.height; if (font->charcount * j > FNTSIZE(fontdata)) return -EINVAL; for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, vpitch - j); data += vpitch; fontdata += j; } } else if (font->width <= 16) { j = vc->vc_font.height * 2; if (font->charcount * j > FNTSIZE(fontdata)) return -EINVAL; for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 2*vpitch - j); data += 2*vpitch; fontdata += j; } } else if (font->width <= 24) { if (font->charcount * (vc->vc_font.height * sizeof(u32)) > FNTSIZE(fontdata)) return -EINVAL; for (i = 0; i < font->charcount; i++) { for (j = 0; j < vc->vc_font.height; j++) { *data++ = fontdata[0]; *data++ = fontdata[1]; *data++ = fontdata[2]; fontdata += sizeof(u32); } memset(data, 0, 3 * (vpitch - j)); data += 3 * (vpitch - j); } } else { j = vc->vc_font.height * 4; if (font->charcount * j > FNTSIZE(fontdata)) return -EINVAL; for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 4 * vpitch - j); data += 4 * vpitch; fontdata += j; } } return 0; } /* set/clear vc_hi_font_mask and update vc attrs accordingly */ static void set_vc_hi_font(struct vc_data *vc, bool set) { if (!set) { vc->vc_hi_font_mask = 0; if (vc->vc_can_do_color) { vc->vc_complement_mask >>= 1; vc->vc_s_complement_mask >>= 1; } /* ++Edmund: reorder the attribute bits */ if (vc->vc_can_do_color) { unsigned short *cp = (unsigned short *) vc->vc_origin; int count = vc->vc_screenbuf_size / 2; unsigned short c; for (; count > 0; count--, cp++) { c = scr_readw(cp); scr_writew(((c & 0xfe00) >> 1) | (c & 0xff), cp); } c = vc->vc_video_erase_char; vc->vc_video_erase_char = ((c & 0xfe00) >> 1) | (c & 0xff); vc->vc_attr >>= 1; } } else { vc->vc_hi_font_mask = 0x100; if (vc->vc_can_do_color) { vc->vc_complement_mask <<= 1; vc->vc_s_complement_mask <<= 1; } /* ++Edmund: reorder the attribute bits */ { unsigned short *cp = (unsigned short *) vc->vc_origin; int count = vc->vc_screenbuf_size / 2; unsigned short c; for (; count > 0; count--, cp++) { unsigned short newc; c = scr_readw(cp); if (vc->vc_can_do_color) newc = ((c & 0xff00) << 1) | (c & 0xff); else newc = c & ~0x100; scr_writew(newc, cp); } c = vc->vc_video_erase_char; if (vc->vc_can_do_color) { vc->vc_video_erase_char = ((c & 0xff00) << 1) | (c & 0xff); vc->vc_attr <<= 1; } else vc->vc_video_erase_char = c & ~0x100; } } } static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount, const u8 * data, int userfont) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; int resize, ret, old_userfont, old_width, old_height, old_charcount; u8 *old_data = vc->vc_font.data; resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); vc->vc_font.data = (void *)(p->fontdata = data); old_userfont = p->userfont; if ((p->userfont = userfont)) REFCOUNT(data)++; old_width = vc->vc_font.width; old_height = vc->vc_font.height; old_charcount = vc->vc_font.charcount; vc->vc_font.width = w; vc->vc_font.height = h; vc->vc_font.charcount = charcount; if (vc->vc_hi_font_mask && charcount == 256) set_vc_hi_font(vc, false); else if (!vc->vc_hi_font_mask && charcount == 512) set_vc_hi_font(vc, true); if (resize) { int cols, rows; cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= w; rows /= h; ret = 
vc_resize(vc, cols, rows); if (ret) goto err_out; } else if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { fbcon_clear_margins(vc, 0); update_screen(vc); } if (old_userfont && (--REFCOUNT(old_data) == 0)) kfree(old_data - FONT_EXTRA_WORDS * sizeof(int)); return 0; err_out: p->fontdata = old_data; vc->vc_font.data = old_data; if (userfont) { p->userfont = old_userfont; if (--REFCOUNT(data) == 0) kfree(data - FONT_EXTRA_WORDS * sizeof(int)); } vc->vc_font.width = old_width; vc->vc_font.height = old_height; vc->vc_font.charcount = old_charcount; return ret; } /* * User asked to set font; we are guaranteed that charcount does not exceed 512 * but lets not assume that, since charcount of 512 is small for unicode support. */ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font, unsigned int vpitch, unsigned int flags) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); unsigned charcount = font->charcount; int w = font->width; int h = font->height; int size; int i, csum; u8 *new_data, *data = font->data; int pitch = PITCH(font->width); /* Is there a reason why fbconsole couldn't handle any charcount >256? * If not this check should be changed to charcount < 256 */ if (charcount != 256 && charcount != 512) return -EINVAL; /* font bigger than screen resolution ? */ if (w > FBCON_SWAP(info->var.rotate, info->var.xres, info->var.yres) || h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres)) return -EINVAL; if (font->width > FB_MAX_BLIT_WIDTH || font->height > FB_MAX_BLIT_HEIGHT) return -EINVAL; /* Make sure drawing engine can handle the font */ if (!test_bit(font->width - 1, info->pixmap.blit_x) || !test_bit(font->height - 1, info->pixmap.blit_y)) return -EINVAL; /* Make sure driver can handle the font length */ if (fbcon_invalid_charcount(info, charcount)) return -EINVAL; size = CALC_FONTSZ(h, pitch, charcount); new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); if (!new_data) return -ENOMEM; memset(new_data, 0, FONT_EXTRA_WORDS * sizeof(int)); new_data += FONT_EXTRA_WORDS * sizeof(int); FNTSIZE(new_data) = size; REFCOUNT(new_data) = 0; /* usage counter */ for (i=0; i< charcount; i++) { memcpy(new_data + i*h*pitch, data + i*vpitch*pitch, h*pitch); } /* Since linux has a nice crc32 function use it for counting font * checksums. 
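 * The checksum computed below is stored via FNTSUM() and used to detect
 * that an identical user font (same FNTSUM, FNTSIZE, width and
 * byte-for-byte identical data) is already installed on another console;
 * in that case the freshly allocated copy is freed and the existing
 * buffer is shared instead.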
*/ csum = crc32(0, new_data, size); FNTSUM(new_data) = csum; /* Check if the same font is on some other console already */ for (i = first_fb_vc; i <= last_fb_vc; i++) { struct vc_data *tmp = vc_cons[i].d; if (fb_display[i].userfont && fb_display[i].fontdata && FNTSUM(fb_display[i].fontdata) == csum && FNTSIZE(fb_display[i].fontdata) == size && tmp->vc_font.width == w && !memcmp(fb_display[i].fontdata, new_data, size)) { kfree(new_data - FONT_EXTRA_WORDS * sizeof(int)); new_data = (u8 *)fb_display[i].fontdata; break; } } return fbcon_do_set_font(vc, font->width, font->height, charcount, new_data, 1); } static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, const char *name) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); const struct font_desc *f; if (!name) f = get_default_font(info->var.xres, info->var.yres, info->pixmap.blit_x, info->pixmap.blit_y); else if (!(f = find_font(name))) return -ENOENT; font->width = f->width; font->height = f->height; return fbcon_do_set_font(vc, f->width, f->height, f->charcount, f->data, 0); } static u16 palette_red[16]; static u16 palette_green[16]; static u16 palette_blue[16]; static struct fb_cmap palette_cmap = { 0, 16, palette_red, palette_green, palette_blue, NULL }; static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); int i, j, k, depth; u8 val; if (fbcon_is_inactive(vc, info)) return; if (!con_is_visible(vc)) return; depth = fb_get_color_depth(&info->var, &info->fix); if (depth > 3) { for (i = j = 0; i < 16; i++) { k = table[i]; val = vc->vc_palette[j++]; palette_red[k] = (val << 8) | val; val = vc->vc_palette[j++]; palette_green[k] = (val << 8) | val; val = vc->vc_palette[j++]; palette_blue[k] = (val << 8) | val; } palette_cmap.len = 16; palette_cmap.start = 0; /* * If framebuffer is capable of less than 16 colors, * use default palette of fbcon. */ } else fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap); fb_set_cmap(&palette_cmap, info); } /* As we might be inside of softback, we may work with non-contiguous buffer, that's why we have to use a separate routine. 
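   Functionally (an illustrative description, not from the original source),
   the routine below toggles reverse video one cell at a time: monochrome
   consoles simply flip the 0x0800 attribute bit, while colour consoles swap
   the foreground and background colour fields (0x0e00/0xe000 when
   vc_hi_font_mask is 0x100, 0x0700/0x7000 otherwise).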
*/ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt) { while (cnt--) { u16 a = scr_readw(p); if (!vc->vc_can_do_color) a ^= 0x0800; else if (vc->vc_hi_font_mask == 0x100) a = ((a) & 0x11ff) | (((a) & 0xe000) >> 4) | (((a) & 0x0e00) << 4); else a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4); scr_writew(a, p++); } } void fbcon_suspended(struct fb_info *info) { struct vc_data *vc = NULL; struct fbcon_ops *ops = info->fbcon_par; if (!ops || ops->currcon < 0) return; vc = vc_cons[ops->currcon].d; /* Clear cursor, restore saved data */ fbcon_cursor(vc, false); } void fbcon_resumed(struct fb_info *info) { struct vc_data *vc; struct fbcon_ops *ops = info->fbcon_par; if (!ops || ops->currcon < 0) return; vc = vc_cons[ops->currcon].d; update_screen(vc); } static void fbcon_modechanged(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc; struct fbcon_display *p; int rows, cols; if (!ops || ops->currcon < 0) return; vc = vc_cons[ops->currcon].d; if (vc->vc_mode != KD_TEXT || fbcon_info_from_console(ops->currcon) != info) return; p = &fb_display[vc->vc_num]; set_blitting_type(vc, info); if (con_is_visible(vc)) { var_to_display(p, &info->var, info); cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); updatescrollmode(p, info, vc); scrollback_max = 0; scrollback_current = 0; if (!fbcon_is_inactive(vc, info)) { ops->var.xoffset = ops->var.yoffset = p->yscroll = 0; ops->update_start(info); } fbcon_set_palette(vc, color_table); update_screen(vc); } } static void fbcon_set_all_vcs(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc; struct fbcon_display *p; int i, rows, cols, fg = -1; if (!ops || ops->currcon < 0) return; for (i = first_fb_vc; i <= last_fb_vc; i++) { vc = vc_cons[i].d; if (!vc || vc->vc_mode != KD_TEXT || fbcon_info_from_console(i) != info) continue; if (con_is_visible(vc)) { fg = i; continue; } p = &fb_display[vc->vc_num]; set_blitting_type(vc, info); var_to_display(p, &info->var, info); cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); } if (fg != -1) fbcon_modechanged(info); } void fbcon_update_vcs(struct fb_info *info, bool all) { if (all) fbcon_set_all_vcs(info); else fbcon_modechanged(info); } EXPORT_SYMBOL(fbcon_update_vcs); /* let fbcon check if it supports a new screen resolution */ int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var) { struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc; unsigned int i; WARN_CONSOLE_UNLOCKED(); if (!ops) return 0; /* prevent setting a screen size which is smaller than font size */ for (i = first_fb_vc; i <= last_fb_vc; i++) { vc = vc_cons[i].d; if (!vc || vc->vc_mode != KD_TEXT || fbcon_info_from_console(i) != info) continue; if (vc->vc_font.width > FBCON_SWAP(var->rotate, var->xres, var->yres) || vc->vc_font.height > FBCON_SWAP(var->rotate, var->yres, var->xres)) return -EINVAL; } return 0; } EXPORT_SYMBOL_GPL(fbcon_modechange_possible); int fbcon_mode_deleted(struct fb_info *info, struct fb_videomode *mode) { struct fb_info *fb_info; struct fbcon_display *p; int i, j, found = 0; /* before deletion, ensure that mode is not in use */ for (i = first_fb_vc; i <= last_fb_vc; i++) { j = con2fb_map[i]; 
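		/* con2fb_map[i] == -1 means console i is not bound to any framebuffer */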
if (j == -1) continue; fb_info = fbcon_registered_fb[j]; if (fb_info != info) continue; p = &fb_display[i]; if (!p || !p->mode) continue; if (fb_mode_is_equal(p->mode, mode)) { found = 1; break; } } return found; } #ifdef CONFIG_VT_HW_CONSOLE_BINDING static void fbcon_unbind(void) { int ret; ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, fbcon_is_default); if (!ret) fbcon_has_console_bind = 0; } #else static inline void fbcon_unbind(void) {} #endif /* CONFIG_VT_HW_CONSOLE_BINDING */ void fbcon_fb_unbind(struct fb_info *info) { int i, new_idx = -1; int idx = info->node; console_lock(); if (!fbcon_has_console_bind) { console_unlock(); return; } for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] != idx && con2fb_map[i] != -1) { new_idx = con2fb_map[i]; break; } } if (new_idx != -1) { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == idx) set_con2fb_map(i, new_idx, 0); } } else { struct fb_info *info = fbcon_registered_fb[idx]; /* This is sort of like set_con2fb_map, except it maps * the consoles to no device and then releases the * oldinfo to free memory and cancel the cursor blink * timer. I can imagine this just becoming part of * set_con2fb_map where new_idx is -1 */ for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == idx) { con2fb_map[i] = -1; if (!search_fb_in_map(idx)) { con2fb_release_oldinfo(vc_cons[i].d, info, NULL); } } } fbcon_unbind(); } console_unlock(); } void fbcon_fb_unregistered(struct fb_info *info) { int i, idx; console_lock(); fbcon_registered_fb[info->node] = NULL; fbcon_num_registered_fb--; if (deferred_takeover) { console_unlock(); return; } idx = info->node; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == idx) con2fb_map[i] = -1; } if (idx == info_idx) { info_idx = -1; fbcon_for_each_registered_fb(i) { info_idx = i; break; } } if (info_idx != -1) { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == -1) con2fb_map[i] = info_idx; } } if (primary_device == idx) primary_device = -1; if (!fbcon_num_registered_fb) do_unregister_con_driver(&fb_con); console_unlock(); } void fbcon_remap_all(struct fb_info *info) { int i, idx = info->node; console_lock(); if (deferred_takeover) { for (i = first_fb_vc; i <= last_fb_vc; i++) con2fb_map_boot[i] = idx; fbcon_map_override(); console_unlock(); return; } for (i = first_fb_vc; i <= last_fb_vc; i++) set_con2fb_map(i, idx, 0); if (con_is_bound(&fb_con)) { printk(KERN_INFO "fbcon: Remapping primary device, " "fb%i, to tty %i-%i\n", idx, first_fb_vc + 1, last_fb_vc + 1); info_idx = idx; } console_unlock(); } #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY static void fbcon_select_primary(struct fb_info *info) { if (!map_override && primary_device == -1 && video_is_primary_device(info->device)) { int i; printk(KERN_INFO "fbcon: %s (fb%i) is primary device\n", info->fix.id, info->node); primary_device = info->node; for (i = first_fb_vc; i <= last_fb_vc; i++) con2fb_map_boot[i] = primary_device; if (con_is_bound(&fb_con)) { printk(KERN_INFO "fbcon: Remapping primary device, " "fb%i, to tty %i-%i\n", info->node, first_fb_vc + 1, last_fb_vc + 1); info_idx = primary_device; } } } #else static inline void fbcon_select_primary(struct fb_info *info) { return; } #endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */ static bool lockless_register_fb; module_param_named_unsafe(lockless_register_fb, lockless_register_fb, bool, 0400); MODULE_PARM_DESC(lockless_register_fb, "Lockless framebuffer registration for debugging [default=off]"); /* called with console_lock 
held */ static int do_fb_registered(struct fb_info *info) { int ret = 0, i, idx; WARN_CONSOLE_UNLOCKED(); fbcon_registered_fb[info->node] = info; fbcon_num_registered_fb++; idx = info->node; fbcon_select_primary(info); if (deferred_takeover) { pr_info("fbcon: Deferring console take-over\n"); return 0; } if (info_idx == -1) { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map_boot[i] == idx) { info_idx = idx; break; } } if (info_idx != -1) ret = do_fbcon_takeover(1); } else { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map_boot[i] == idx) set_con2fb_map(i, idx, 0); } } return ret; } int fbcon_fb_registered(struct fb_info *info) { int ret; if (!lockless_register_fb) console_lock(); else atomic_inc(&ignore_console_lock_warning); ret = do_fb_registered(info); if (!lockless_register_fb) console_unlock(); else atomic_dec(&ignore_console_lock_warning); return ret; } void fbcon_fb_blanked(struct fb_info *info, int blank) { struct fbcon_ops *ops = info->fbcon_par; struct vc_data *vc; if (!ops || ops->currcon < 0) return; vc = vc_cons[ops->currcon].d; if (vc->vc_mode != KD_TEXT || fbcon_info_from_console(ops->currcon) != info) return; if (con_is_visible(vc)) { if (blank) do_blank_screen(0); else do_unblank_screen(0); } ops->blank_state = blank; } void fbcon_new_modelist(struct fb_info *info) { int i; struct vc_data *vc; struct fb_var_screeninfo var; const struct fb_videomode *mode; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (fbcon_info_from_console(i) != info) continue; if (!fb_display[i].mode) continue; vc = vc_cons[i].d; display_to_var(&var, &fb_display[i]); mode = fb_find_nearest_mode(fb_display[i].mode, &info->modelist); fb_videomode_to_var(&var, mode); fbcon_set_disp(info, &var, vc->vc_num); } } void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps) { struct vc_data *vc; if (caps->flags) { int i, charcnt; for (i = first_fb_vc; i <= last_fb_vc; i++) { vc = vc_cons[i].d; if (vc && vc->vc_mode == KD_TEXT && info->node == con2fb_map[i]) { set_bit(vc->vc_font.width - 1, caps->x); set_bit(vc->vc_font.height - 1, caps->y); charcnt = vc->vc_font.charcount; if (caps->len < charcnt) caps->len = charcnt; } } } else { vc = vc_cons[fg_console].d; if (vc && vc->vc_mode == KD_TEXT && info->node == con2fb_map[fg_console]) { bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH); set_bit(vc->vc_font.width - 1, caps->x); bitmap_zero(caps->y, FB_MAX_BLIT_HEIGHT); set_bit(vc->vc_font.height - 1, caps->y); caps->len = vc->vc_font.charcount; } } } int fbcon_set_con2fb_map_ioctl(void __user *argp) { struct fb_con2fbmap con2fb; int ret; if (copy_from_user(&con2fb, argp, sizeof(con2fb))) return -EFAULT; if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) return -EINVAL; if (con2fb.framebuffer >= FB_MAX) return -EINVAL; if (!fbcon_registered_fb[con2fb.framebuffer]) request_module("fb%d", con2fb.framebuffer); if (!fbcon_registered_fb[con2fb.framebuffer]) { return -EINVAL; } console_lock(); ret = set_con2fb_map(con2fb.console - 1, con2fb.framebuffer, 1); console_unlock(); return ret; } int fbcon_get_con2fb_map_ioctl(void __user *argp) { struct fb_con2fbmap con2fb; if (copy_from_user(&con2fb, argp, sizeof(con2fb))) return -EFAULT; if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) return -EINVAL; console_lock(); con2fb.framebuffer = con2fb_map[con2fb.console - 1]; console_unlock(); return copy_to_user(argp, &con2fb, sizeof(con2fb)) ? 
-EFAULT : 0; } /* * The console `switch' structure for the frame buffer based console */ static const struct consw fb_con = { .owner = THIS_MODULE, .con_startup = fbcon_startup, .con_init = fbcon_init, .con_deinit = fbcon_deinit, .con_clear = fbcon_clear, .con_putcs = fbcon_putcs, .con_cursor = fbcon_cursor, .con_scroll = fbcon_scroll, .con_switch = fbcon_switch, .con_blank = fbcon_blank, .con_font_set = fbcon_set_font, .con_font_get = fbcon_get_font, .con_font_default = fbcon_set_def_font, .con_set_palette = fbcon_set_palette, .con_invert_region = fbcon_invert_region, .con_resize = fbcon_resize, .con_debug_enter = fbcon_debug_enter, .con_debug_leave = fbcon_debug_leave, }; static ssize_t store_rotate(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *info; int rotate, idx; char **last = NULL; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || fbcon_registered_fb[idx] == NULL) goto err; info = fbcon_registered_fb[idx]; rotate = simple_strtoul(buf, last, 0); fbcon_rotate(info, rotate); err: console_unlock(); return count; } static ssize_t store_rotate_all(struct device *device, struct device_attribute *attr,const char *buf, size_t count) { struct fb_info *info; int rotate, idx; char **last = NULL; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || fbcon_registered_fb[idx] == NULL) goto err; info = fbcon_registered_fb[idx]; rotate = simple_strtoul(buf, last, 0); fbcon_rotate_all(info, rotate); err: console_unlock(); return count; } static ssize_t show_rotate(struct device *device, struct device_attribute *attr,char *buf) { struct fb_info *info; int rotate = 0, idx; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || fbcon_registered_fb[idx] == NULL) goto err; info = fbcon_registered_fb[idx]; rotate = fbcon_get_rotate(info); err: console_unlock(); return sysfs_emit(buf, "%d\n", rotate); } static ssize_t show_cursor_blink(struct device *device, struct device_attribute *attr, char *buf) { struct fb_info *info; struct fbcon_ops *ops; int idx, blink = -1; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || fbcon_registered_fb[idx] == NULL) goto err; info = fbcon_registered_fb[idx]; ops = info->fbcon_par; if (!ops) goto err; blink = delayed_work_pending(&ops->cursor_work); err: console_unlock(); return sysfs_emit(buf, "%d\n", blink); } static ssize_t store_cursor_blink(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *info; int blink, idx; char **last = NULL; console_lock(); idx = con2fb_map[fg_console]; if (idx == -1 || fbcon_registered_fb[idx] == NULL) goto err; info = fbcon_registered_fb[idx]; if (!info->fbcon_par) goto err; blink = simple_strtoul(buf, last, 0); if (blink) { fbcon_cursor_noblink = 0; fbcon_add_cursor_work(info); } else { fbcon_cursor_noblink = 1; fbcon_del_cursor_work(info); } err: console_unlock(); return count; } static struct device_attribute device_attrs[] = { __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate), __ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all), __ATTR(cursor_blink, S_IRUGO|S_IWUSR, show_cursor_blink, store_cursor_blink), }; static int fbcon_init_device(void) { int i, error = 0; fbcon_has_sysfs = 1; for (i = 0; i < ARRAY_SIZE(device_attrs); i++) { error = device_create_file(fbcon_device, &device_attrs[i]); if (error) break; } if (error) { while (--i >= 0) device_remove_file(fbcon_device, &device_attrs[i]); fbcon_has_sysfs = 0; } return 0; } #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER static 
void fbcon_register_existing_fbs(struct work_struct *work) { int i; console_lock(); deferred_takeover = false; logo_shown = FBCON_LOGO_DONTSHOW; fbcon_for_each_registered_fb(i) do_fb_registered(fbcon_registered_fb[i]); console_unlock(); } static struct notifier_block fbcon_output_nb; static DECLARE_WORK(fbcon_deferred_takeover_work, fbcon_register_existing_fbs); static int fbcon_output_notifier(struct notifier_block *nb, unsigned long action, void *data) { WARN_CONSOLE_UNLOCKED(); pr_info("fbcon: Taking over console\n"); dummycon_unregister_output_notifier(&fbcon_output_nb); /* We may get called in atomic context */ schedule_work(&fbcon_deferred_takeover_work); return NOTIFY_OK; } #endif static void fbcon_start(void) { WARN_CONSOLE_UNLOCKED(); #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER if (conswitchp != &dummy_con) deferred_takeover = false; if (deferred_takeover) { fbcon_output_nb.notifier_call = fbcon_output_notifier; dummycon_register_output_notifier(&fbcon_output_nb); return; } #endif } void __init fb_console_init(void) { int i; console_lock(); fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL, "fbcon"); if (IS_ERR(fbcon_device)) { printk(KERN_WARNING "Unable to create device " "for fbcon; errno = %ld\n", PTR_ERR(fbcon_device)); fbcon_device = NULL; } else fbcon_init_device(); for (i = 0; i < MAX_NR_CONSOLES; i++) con2fb_map[i] = -1; fbcon_start(); console_unlock(); } #ifdef MODULE static void __exit fbcon_deinit_device(void) { int i; if (fbcon_has_sysfs) { for (i = 0; i < ARRAY_SIZE(device_attrs); i++) device_remove_file(fbcon_device, &device_attrs[i]); fbcon_has_sysfs = 0; } } void __exit fb_console_exit(void) { #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER console_lock(); if (deferred_takeover) dummycon_unregister_output_notifier(&fbcon_output_nb); console_unlock(); cancel_work_sync(&fbcon_deferred_takeover_work); #endif console_lock(); fbcon_deinit_device(); device_destroy(fb_class, MKDEV(0, 0)); do_unregister_con_driver(&fb_con); console_unlock(); } #endif
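/*
 * Illustrative user-space sketch (an assumption added for illustration, not
 * part of fbcon.c): the fbcon_set_con2fb_map_ioctl() and
 * fbcon_get_con2fb_map_ioctl() handlers above are normally reached through
 * the FBIOPUT_CON2FBMAP / FBIOGET_CON2FBMAP ioctls on a framebuffer device
 * node, roughly as follows.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

static int map_console_to_fb(unsigned int console, unsigned int fb)
{
	/* console is 1-based here; the handler uses con2fb.console - 1 */
	struct fb_con2fbmap map = { .console = console, .framebuffer = fb };
	int fd = open("/dev/fb0", O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, FBIOPUT_CON2FBMAP, &map);
	close(fd);
	return ret;
}
#endif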
4 4 1 3 4 4 4 4 4 3 3 2 2 2 1 1 4 4 5 5 3 3 3 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 3 2 2 2 2 2 1 1 2 2 2 2 1 1 1 1 1 1 1 1 1 7 5 15 13 17 13 16 15 13 13 1 12 12 12 10 2 10 10 11 12 1 12 12 11 1 12 2 2 10 12 5 12 8 7 5 5 1 7 4 7 7 8 17 9 9 9 1 9 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
 * usbtouchscreen.c
 * Driver for USB Touchscreens, supporting those devices:
 *  - eGalax Touchkit
 *    includes eTurboTouch CT-410/510/700
 *  - 3M/Microtouch EX II series
 *  - ITM
 *  - PanJit TouchSet
 *  - eTurboTouch
 *  - Gunze AHL61
 *  - DMC TSC-10/25
 *  - IRTOUCHSYSTEMS/UNITOP
 *  - IdealTEK URTC1000
 *  - General Touch
 *  - GoTop Super_Q2/GogoPen/PenPower tablets
 *  - JASTEC USB touch controller/DigiTech DTR-02U
 *  - Zytronic capacitive touchscreen
 *  - NEXIO/iNexio
 *  - Elo TouchSystems 2700 IntelliTouch
 *  - EasyTouch USB Dual/Multi touch controller from Data Modul
 *
 * Copyright (C) 2004-2007 by Daniel Ritz <daniel.ritz@gmx.ch>
 * Copyright (C) by Todd E. Johnson (mtouchusb.c)
 *
 * Driver is based on touchkitusb.c
 * - ITM parts are from itmtouch.c
 * - 3M parts are from mtouchusb.c
 * - PanJit parts are from an unmerged driver by Lanslott Gish
 * - DMC TSC 10/25 are from Holger Schurig, with ideas from an unmerged
 *   driver from Marius Vollmer
 *
 *****************************************************************************/

//#define DEBUG

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/hid.h>
#include <linux/mutex.h>

static bool swap_xy;
module_param(swap_xy, bool, 0644);
MODULE_PARM_DESC(swap_xy, "If set X and Y axes are swapped.");

static bool hwcalib_xy;
module_param(hwcalib_xy, bool, 0644);
MODULE_PARM_DESC(hwcalib_xy, "If set hw-calibrated X/Y are used if available");

/* device specific data/functions */
struct usbtouch_usb;
struct usbtouch_device_info {
	int min_xc, max_xc;
	int min_yc, max_yc;
	int min_press, max_press;
	int rept_size;

	/*
	 * Always service the USB devices irq not just when the input device is
	 * open. This is useful when devices have a watchdog which prevents us
	 * from periodically polling the device. Leave this unset unless your
	 * touchscreen device requires it, as it does consume more of the USB
	 * bandwidth.
	 */
	bool irq_always;

	/*
	 * used to get the packet len.
possible return values: * > 0: packet len * = 0: skip one byte * < 0: -return value more bytes needed */ int (*get_pkt_len) (unsigned char *pkt, int len); int (*read_data) (struct usbtouch_usb *usbtouch, unsigned char *pkt); int (*alloc) (struct usbtouch_usb *usbtouch); int (*init) (struct usbtouch_usb *usbtouch); void (*exit) (struct usbtouch_usb *usbtouch); }; /* a usbtouch device */ struct usbtouch_usb { unsigned char *data; dma_addr_t data_dma; int data_size; unsigned char *buffer; int buf_len; struct urb *irq; struct usb_interface *interface; struct input_dev *input; const struct usbtouch_device_info *type; struct mutex pm_mutex; /* serialize access to open/suspend */ bool is_open; char name[128]; char phys[64]; void *priv; int x, y; int touch, press; void (*process_pkt)(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len); }; /***************************************************************************** * e2i Part */ #ifdef CONFIG_TOUCHSCREEN_USB_E2I static int e2i_init(struct usbtouch_usb *usbtouch) { int ret; struct usb_device *udev = interface_to_usbdev(usbtouch->interface); ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01, 0x02, 0x0000, 0x0081, NULL, 0, USB_CTRL_SET_TIMEOUT); dev_dbg(&usbtouch->interface->dev, "%s - usb_control_msg - E2I_RESET - bytes|err: %d\n", __func__, ret); return ret; } static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { int tmp = (pkt[0] << 8) | pkt[1]; dev->x = (pkt[2] << 8) | pkt[3]; dev->y = (pkt[4] << 8) | pkt[5]; tmp = tmp - 0xA000; dev->touch = (tmp > 0); dev->press = (tmp > 0 ? tmp : 0); return 1; } static const struct usbtouch_device_info e2i_dev_info = { .min_xc = 0x0, .max_xc = 0x7fff, .min_yc = 0x0, .max_yc = 0x7fff, .rept_size = 6, .init = e2i_init, .read_data = e2i_read_data, }; #endif /***************************************************************************** * eGalax part */ #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX #ifndef MULTI_PACKET #define MULTI_PACKET #endif #define EGALAX_PKT_TYPE_MASK 0xFE #define EGALAX_PKT_TYPE_REPT 0x80 #define EGALAX_PKT_TYPE_DIAG 0x0A static int egalax_init(struct usbtouch_usb *usbtouch) { struct usb_device *udev = interface_to_usbdev(usbtouch->interface); int ret, i; /* * An eGalax diagnostic packet kicks the device into using the right * protocol. We send a "check active" packet. The response will be * read later and ignored. */ u8 *buf __free(kfree) = kmalloc(3, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = EGALAX_PKT_TYPE_DIAG; buf[1] = 1; /* length */ buf[2] = 'A'; /* command - check active */ for (i = 0; i < 3; i++) { ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, buf, 3, USB_CTRL_SET_TIMEOUT); if (ret != -EPIPE) break; } return ret < 0 ? 
ret : 0; } static int egalax_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { if ((pkt[0] & EGALAX_PKT_TYPE_MASK) != EGALAX_PKT_TYPE_REPT) return 0; dev->x = ((pkt[3] & 0x0F) << 7) | (pkt[4] & 0x7F); dev->y = ((pkt[1] & 0x0F) << 7) | (pkt[2] & 0x7F); dev->touch = pkt[0] & 0x01; return 1; } static int egalax_get_pkt_len(unsigned char *buf, int len) { switch (buf[0] & EGALAX_PKT_TYPE_MASK) { case EGALAX_PKT_TYPE_REPT: return 5; case EGALAX_PKT_TYPE_DIAG: if (len < 2) return -1; return buf[1] + 2; } return 0; } static const struct usbtouch_device_info egalax_dev_info = { .min_xc = 0x0, .max_xc = 0x07ff, .min_yc = 0x0, .max_yc = 0x07ff, .rept_size = 16, .get_pkt_len = egalax_get_pkt_len, .read_data = egalax_read_data, .init = egalax_init, }; #endif /***************************************************************************** * EasyTouch part */ #ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH #ifndef MULTI_PACKET #define MULTI_PACKET #endif #define ETOUCH_PKT_TYPE_MASK 0xFE #define ETOUCH_PKT_TYPE_REPT 0x80 #define ETOUCH_PKT_TYPE_REPT2 0xB0 #define ETOUCH_PKT_TYPE_DIAG 0x0A static int etouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { if ((pkt[0] & ETOUCH_PKT_TYPE_MASK) != ETOUCH_PKT_TYPE_REPT && (pkt[0] & ETOUCH_PKT_TYPE_MASK) != ETOUCH_PKT_TYPE_REPT2) return 0; dev->x = ((pkt[1] & 0x1F) << 7) | (pkt[2] & 0x7F); dev->y = ((pkt[3] & 0x1F) << 7) | (pkt[4] & 0x7F); dev->touch = pkt[0] & 0x01; return 1; } static int etouch_get_pkt_len(unsigned char *buf, int len) { switch (buf[0] & ETOUCH_PKT_TYPE_MASK) { case ETOUCH_PKT_TYPE_REPT: case ETOUCH_PKT_TYPE_REPT2: return 5; case ETOUCH_PKT_TYPE_DIAG: if (len < 2) return -1; return buf[1] + 2; } return 0; } static const struct usbtouch_device_info etouch_dev_info = { .min_xc = 0x0, .max_xc = 0x07ff, .min_yc = 0x0, .max_yc = 0x07ff, .rept_size = 16, .get_pkt_len = etouch_get_pkt_len, .read_data = etouch_read_data, }; #endif /***************************************************************************** * PanJit Part */ #ifdef CONFIG_TOUCHSCREEN_USB_PANJIT static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1]; dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3]; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info panjit_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 8, .read_data = panjit_read_data, }; #endif /***************************************************************************** * 3M/Microtouch Part */ #ifdef CONFIG_TOUCHSCREEN_USB_3M #define MTOUCHUSB_ASYNC_REPORT 1 #define MTOUCHUSB_RESET 7 #define MTOUCHUSB_REQ_CTRLLR_ID 10 #define MTOUCHUSB_REQ_CTRLLR_ID_LEN 16 static int mtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { if (hwcalib_xy) { dev->x = (pkt[4] << 8) | pkt[3]; dev->y = 0xffff - ((pkt[6] << 8) | pkt[5]); } else { dev->x = (pkt[8] << 8) | pkt[7]; dev->y = (pkt[10] << 8) | pkt[9]; } dev->touch = (pkt[2] & 0x40) ? 
1 : 0; return 1; } struct mtouch_priv { u8 fw_rev_major; u8 fw_rev_minor; }; static int mtouch_get_fw_revision(struct usbtouch_usb *usbtouch) { struct usb_device *udev = interface_to_usbdev(usbtouch->interface); struct mtouch_priv *priv = usbtouch->priv; int ret; u8 *buf __free(kfree) = kzalloc(MTOUCHUSB_REQ_CTRLLR_ID_LEN, GFP_NOIO); if (!buf) return -ENOMEM; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), MTOUCHUSB_REQ_CTRLLR_ID, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, buf, MTOUCHUSB_REQ_CTRLLR_ID_LEN, USB_CTRL_SET_TIMEOUT); if (ret != MTOUCHUSB_REQ_CTRLLR_ID_LEN) { dev_warn(&usbtouch->interface->dev, "Failed to read FW rev: %d\n", ret); return ret < 0 ? ret : -EIO; } priv->fw_rev_major = buf[3]; priv->fw_rev_minor = buf[4]; return 0; } static int mtouch_alloc(struct usbtouch_usb *usbtouch) { struct mtouch_priv *priv; priv = kmalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; usbtouch->priv = priv; return 0; } static int mtouch_init(struct usbtouch_usb *usbtouch) { int ret, i; struct usb_device *udev = interface_to_usbdev(usbtouch->interface); ret = mtouch_get_fw_revision(usbtouch); if (ret) return ret; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), MTOUCHUSB_RESET, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); dev_dbg(&usbtouch->interface->dev, "%s - usb_control_msg - MTOUCHUSB_RESET - bytes|err: %d\n", __func__, ret); if (ret < 0) return ret; msleep(150); for (i = 0; i < 3; i++) { ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), MTOUCHUSB_ASYNC_REPORT, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1, 1, NULL, 0, USB_CTRL_SET_TIMEOUT); dev_dbg(&usbtouch->interface->dev, "%s - usb_control_msg - MTOUCHUSB_ASYNC_REPORT - bytes|err: %d\n", __func__, ret); if (ret >= 0) break; if (ret != -EPIPE) return ret; } /* Default min/max xy are the raw values, override if using hw-calib */ if (hwcalib_xy) { input_set_abs_params(usbtouch->input, ABS_X, 0, 0xffff, 0, 0); input_set_abs_params(usbtouch->input, ABS_Y, 0, 0xffff, 0, 0); } return 0; } static void mtouch_exit(struct usbtouch_usb *usbtouch) { struct mtouch_priv *priv = usbtouch->priv; kfree(priv); } static struct usbtouch_device_info mtouch_dev_info = { .min_xc = 0x0, .max_xc = 0x4000, .min_yc = 0x0, .max_yc = 0x4000, .rept_size = 11, .read_data = mtouch_read_data, .alloc = mtouch_alloc, .init = mtouch_init, .exit = mtouch_exit, }; static ssize_t mtouch_firmware_rev_show(struct device *dev, struct device_attribute *attr, char *output) { struct usb_interface *intf = to_usb_interface(dev); struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); struct mtouch_priv *priv = usbtouch->priv; return sysfs_emit(output, "%1x.%1x\n", priv->fw_rev_major, priv->fw_rev_minor); } static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL); static struct attribute *mtouch_attrs[] = { &dev_attr_firmware_rev.attr, NULL }; static bool mtouch_group_visible(struct kobject *kobj) { struct device *dev = kobj_to_dev(kobj); struct usb_interface *intf = to_usb_interface(dev); struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); return usbtouch->type == &mtouch_dev_info; } DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(mtouch); static const struct attribute_group mtouch_attr_group = { .is_visible = SYSFS_GROUP_VISIBLE(mtouch), .attrs = mtouch_attrs, }; #endif /***************************************************************************** * ITM Part */ #ifdef CONFIG_TOUCHSCREEN_USB_ITM static int itm_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { int touch; /* * ITM 
devices report invalid x/y data if not touched. * if the screen was touched before but is not touched any more * report touch as 0 with the last valid x/y data once. then stop * reporting data until touched again. */ dev->press = ((pkt[2] & 0x01) << 7) | (pkt[5] & 0x7F); touch = ~pkt[7] & 0x20; if (!touch) { if (dev->touch) { dev->touch = 0; return 1; } return 0; } dev->x = ((pkt[0] & 0x1F) << 7) | (pkt[3] & 0x7F); dev->y = ((pkt[1] & 0x1F) << 7) | (pkt[4] & 0x7F); dev->touch = touch; return 1; } static const struct usbtouch_device_info itm_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .max_press = 0xff, .rept_size = 8, .read_data = itm_read_data, }; #endif /***************************************************************************** * eTurboTouch part */ #ifdef CONFIG_TOUCHSCREEN_USB_ETURBO #ifndef MULTI_PACKET #define MULTI_PACKET #endif static int eturbo_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { unsigned int shift; /* packets should start with sync */ if (!(pkt[0] & 0x80)) return 0; shift = (6 - (pkt[0] & 0x03)); dev->x = ((pkt[3] << 7) | pkt[4]) >> shift; dev->y = ((pkt[1] << 7) | pkt[2]) >> shift; dev->touch = (pkt[0] & 0x10) ? 1 : 0; return 1; } static int eturbo_get_pkt_len(unsigned char *buf, int len) { if (buf[0] & 0x80) return 5; if (buf[0] == 0x01) return 3; return 0; } static const struct usbtouch_device_info eturbo_dev_info = { .min_xc = 0x0, .max_xc = 0x07ff, .min_yc = 0x0, .max_yc = 0x07ff, .rept_size = 8, .get_pkt_len = eturbo_get_pkt_len, .read_data = eturbo_read_data, }; #endif /***************************************************************************** * Gunze part */ #ifdef CONFIG_TOUCHSCREEN_USB_GUNZE static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { if (!(pkt[0] & 0x80) || ((pkt[1] | pkt[2] | pkt[3]) & 0x80)) return 0; dev->x = ((pkt[0] & 0x1F) << 7) | (pkt[2] & 0x7F); dev->y = ((pkt[1] & 0x1F) << 7) | (pkt[3] & 0x7F); dev->touch = pkt[0] & 0x20; return 1; } static const struct usbtouch_device_info gunze_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 4, .read_data = gunze_read_data, }; #endif /***************************************************************************** * DMC TSC-10/25 Part * * Documentation about the controller and it's protocol can be found at * http://www.dmccoltd.com/files/controler/tsc10usb_pi_e.pdf * http://www.dmccoltd.com/files/controler/tsc25_usb_e.pdf */ #ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10 /* supported data rates. 
currently using 130 */ #define TSC10_RATE_POINT 0x50 #define TSC10_RATE_30 0x40 #define TSC10_RATE_50 0x41 #define TSC10_RATE_80 0x42 #define TSC10_RATE_100 0x43 #define TSC10_RATE_130 0x44 #define TSC10_RATE_150 0x45 /* commands */ #define TSC10_CMD_RESET 0x55 #define TSC10_CMD_RATE 0x05 #define TSC10_CMD_DATA1 0x01 static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) { struct usb_device *dev = interface_to_usbdev(usbtouch->interface); int ret; u8 *buf __free(kfree) = kmalloc(2, GFP_NOIO); if (!buf) return -ENOMEM; /* reset */ buf[0] = buf[1] = 0xFF; ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), TSC10_CMD_RESET, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, buf, 2, USB_CTRL_SET_TIMEOUT); if (ret < 0) return ret; if (buf[0] != 0x06) return -ENODEV; /* TSC-25 data sheet specifies a delay after the RESET command */ msleep(150); /* set coordinate output rate */ buf[0] = buf[1] = 0xFF; ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), TSC10_CMD_RATE, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT); if (ret < 0) return ret; if (buf[0] != 0x06 && (buf[0] != 0x15 || buf[1] != 0x01)) return -ENODEV; /* start sending data */ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), TSC10_CMD_DATA1, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[2] & 0x03) << 8) | pkt[1]; dev->y = ((pkt[4] & 0x03) << 8) | pkt[3]; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info dmc_tsc10_dev_info = { .min_xc = 0x0, .max_xc = 0x03ff, .min_yc = 0x0, .max_yc = 0x03ff, .rept_size = 5, .init = dmc_tsc10_init, .read_data = dmc_tsc10_read_data, }; #endif /***************************************************************************** * IRTOUCH Part */ #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH static int irtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = (pkt[3] << 8) | pkt[2]; dev->y = (pkt[5] << 8) | pkt[4]; dev->touch = (pkt[1] & 0x03) ? 
1 : 0; return 1; } static const struct usbtouch_device_info irtouch_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 8, .read_data = irtouch_read_data, }; static const struct usbtouch_device_info irtouch_hires_dev_info = { .min_xc = 0x0, .max_xc = 0x7fff, .min_yc = 0x0, .max_yc = 0x7fff, .rept_size = 8, .read_data = irtouch_read_data, }; #endif /***************************************************************************** * ET&T TC5UH/TC4UM part */ #ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB static int tc45usb_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1]; dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3]; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info tc45usb_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 5, .read_data = tc45usb_read_data, }; #endif /***************************************************************************** * IdealTEK URTC1000 Part */ #ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK #ifndef MULTI_PACKET #define MULTI_PACKET #endif static int idealtek_get_pkt_len(unsigned char *buf, int len) { if (buf[0] & 0x80) return 5; if (buf[0] == 0x01) return len; return 0; } static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { switch (pkt[0] & 0x98) { case 0x88: /* touch data in IdealTEK mode */ dev->x = (pkt[1] << 5) | (pkt[2] >> 2); dev->y = (pkt[3] << 5) | (pkt[4] >> 2); dev->touch = (pkt[0] & 0x40) ? 1 : 0; return 1; case 0x98: /* touch data in MT emulation mode */ dev->x = (pkt[2] << 5) | (pkt[1] >> 2); dev->y = (pkt[4] << 5) | (pkt[3] >> 2); dev->touch = (pkt[0] & 0x40) ? 1 : 0; return 1; default: return 0; } } static const struct usbtouch_device_info idealtek_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 8, .get_pkt_len = idealtek_get_pkt_len, .read_data = idealtek_read_data, }; #endif /***************************************************************************** * General Touch Part */ #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = (pkt[2] << 8) | pkt[1]; dev->y = (pkt[4] << 8) | pkt[3]; dev->press = pkt[5] & 0xff; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info general_touch_dev_info = { .min_xc = 0x0, .max_xc = 0x7fff, .min_yc = 0x0, .max_yc = 0x7fff, .rept_size = 7, .read_data = general_touch_read_data, }; #endif /***************************************************************************** * GoTop Part */ #ifdef CONFIG_TOUCHSCREEN_USB_GOTOP static int gotop_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[1] & 0x38) << 4) | pkt[2]; dev->y = ((pkt[1] & 0x07) << 7) | pkt[3]; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info gotop_dev_info = { .min_xc = 0x0, .max_xc = 0x03ff, .min_yc = 0x0, .max_yc = 0x03ff, .rept_size = 4, .read_data = gotop_read_data, }; #endif /***************************************************************************** * JASTEC Part */ #ifdef CONFIG_TOUCHSCREEN_USB_JASTEC static int jastec_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[0] & 0x3f) << 6) | (pkt[2] & 0x3f); dev->y = ((pkt[1] & 0x3f) << 6) | (pkt[3] & 0x3f); dev->touch = (pkt[0] & 0x40) >> 6; return 1; } static const struct usbtouch_device_info jastec_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 4, .read_data = 
jastec_read_data, }; #endif /***************************************************************************** * Zytronic Part */ #ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC static int zytronic_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { struct usb_interface *intf = dev->interface; switch (pkt[0]) { case 0x3A: /* command response */ dev_dbg(&intf->dev, "%s: Command response %d\n", __func__, pkt[1]); break; case 0xC0: /* down */ dev->x = (pkt[1] & 0x7f) | ((pkt[2] & 0x07) << 7); dev->y = (pkt[3] & 0x7f) | ((pkt[4] & 0x07) << 7); dev->touch = 1; dev_dbg(&intf->dev, "%s: down %d,%d\n", __func__, dev->x, dev->y); return 1; case 0x80: /* up */ dev->x = (pkt[1] & 0x7f) | ((pkt[2] & 0x07) << 7); dev->y = (pkt[3] & 0x7f) | ((pkt[4] & 0x07) << 7); dev->touch = 0; dev_dbg(&intf->dev, "%s: up %d,%d\n", __func__, dev->x, dev->y); return 1; default: dev_dbg(&intf->dev, "%s: Unknown return %d\n", __func__, pkt[0]); break; } return 0; } static const struct usbtouch_device_info zytronic_dev_info = { .min_xc = 0x0, .max_xc = 0x03ff, .min_yc = 0x0, .max_yc = 0x03ff, .rept_size = 5, .read_data = zytronic_read_data, .irq_always = true, }; #endif /***************************************************************************** * NEXIO Part */ #ifdef CONFIG_TOUCHSCREEN_USB_NEXIO #define NEXIO_TIMEOUT 5000 #define NEXIO_BUFSIZE 1024 #define NEXIO_THRESHOLD 50 struct nexio_priv { struct urb *ack; unsigned char *ack_buf; }; struct nexio_touch_packet { u8 flags; /* 0xe1 = touch, 0xe1 = release */ __be16 data_len; /* total bytes of touch data */ __be16 x_len; /* bytes for X axis */ __be16 y_len; /* bytes for Y axis */ u8 data[]; } __attribute__ ((packed)); static unsigned char nexio_ack_pkt[2] = { 0xaa, 0x02 }; static unsigned char nexio_init_pkt[4] = { 0x82, 0x04, 0x0a, 0x0f }; static void nexio_ack_complete(struct urb *urb) { } static int nexio_alloc(struct usbtouch_usb *usbtouch) { struct nexio_priv *priv; int ret = -ENOMEM; priv = kmalloc(sizeof(*priv), GFP_KERNEL); if (!priv) goto out_buf; usbtouch->priv = priv; priv->ack_buf = kmemdup(nexio_ack_pkt, sizeof(nexio_ack_pkt), GFP_KERNEL); if (!priv->ack_buf) goto err_priv; priv->ack = usb_alloc_urb(0, GFP_KERNEL); if (!priv->ack) { dev_dbg(&usbtouch->interface->dev, "%s - usb_alloc_urb failed: usbtouch->ack\n", __func__); goto err_ack_buf; } return 0; err_ack_buf: kfree(priv->ack_buf); err_priv: kfree(priv); out_buf: return ret; } static int nexio_init(struct usbtouch_usb *usbtouch) { struct usb_device *dev = interface_to_usbdev(usbtouch->interface); struct usb_host_interface *interface = usbtouch->interface->cur_altsetting; struct nexio_priv *priv = usbtouch->priv; int ret = -ENOMEM; int actual_len, i; char *firmware_ver = NULL, *device_name = NULL; int input_ep = 0, output_ep = 0; /* find first input and output endpoint */ for (i = 0; i < interface->desc.bNumEndpoints; i++) { if (!input_ep && usb_endpoint_dir_in(&interface->endpoint[i].desc)) input_ep = interface->endpoint[i].desc.bEndpointAddress; if (!output_ep && usb_endpoint_dir_out(&interface->endpoint[i].desc)) output_ep = interface->endpoint[i].desc.bEndpointAddress; } if (!input_ep || !output_ep) return -ENXIO; u8 *buf __free(kfree) = kmalloc(NEXIO_BUFSIZE, GFP_NOIO); if (!buf) return -ENOMEM; /* two empty reads */ for (i = 0; i < 2; i++) { ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, input_ep), buf, NEXIO_BUFSIZE, &actual_len, NEXIO_TIMEOUT); if (ret < 0) return ret; } /* send init command */ memcpy(buf, nexio_init_pkt, sizeof(nexio_init_pkt)); ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, output_ep), 
buf, sizeof(nexio_init_pkt), &actual_len, NEXIO_TIMEOUT); if (ret < 0) return ret; /* read replies */ for (i = 0; i < 3; i++) { memset(buf, 0, NEXIO_BUFSIZE); ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, input_ep), buf, NEXIO_BUFSIZE, &actual_len, NEXIO_TIMEOUT); if (ret < 0 || actual_len < 1 || buf[1] != actual_len) continue; switch (buf[0]) { case 0x83: /* firmware version */ if (!firmware_ver) firmware_ver = kstrdup(&buf[2], GFP_NOIO); break; case 0x84: /* device name */ if (!device_name) device_name = kstrdup(&buf[2], GFP_NOIO); break; } } printk(KERN_INFO "Nexio device: %s, firmware version: %s\n", device_name, firmware_ver); kfree(firmware_ver); kfree(device_name); usb_fill_bulk_urb(priv->ack, dev, usb_sndbulkpipe(dev, output_ep), priv->ack_buf, sizeof(nexio_ack_pkt), nexio_ack_complete, usbtouch); return 0; } static void nexio_exit(struct usbtouch_usb *usbtouch) { struct nexio_priv *priv = usbtouch->priv; usb_kill_urb(priv->ack); usb_free_urb(priv->ack); kfree(priv->ack_buf); kfree(priv); } static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt) { struct device *dev = &usbtouch->interface->dev; struct nexio_touch_packet *packet = (void *) pkt; struct nexio_priv *priv = usbtouch->priv; unsigned int data_len = be16_to_cpu(packet->data_len); unsigned int x_len = be16_to_cpu(packet->x_len); unsigned int y_len = be16_to_cpu(packet->y_len); int x, y, begin_x, begin_y, end_x, end_y, w, h, ret; /* got touch data? */ if ((pkt[0] & 0xe0) != 0xe0) return 0; if (data_len > 0xff) data_len -= 0x100; if (x_len > 0xff) x_len -= 0x80; /* send ACK */ ret = usb_submit_urb(priv->ack, GFP_ATOMIC); if (ret) dev_warn(dev, "Failed to submit ACK URB: %d\n", ret); if (!input_abs_get_max(usbtouch->input, ABS_X)) { input_set_abs_params(usbtouch->input, ABS_X, 0, 2 * x_len, 0, 0); input_set_abs_params(usbtouch->input, ABS_Y, 0, 2 * y_len, 0, 0); } /* * The device reports state of IR sensors on X and Y axes. * Each byte represents "darkness" percentage (0-100) of one element. * 17" touchscreen reports only 64 x 52 bytes so the resolution is low. * This also means that there's a limited multi-touch capability but * it's disabled (and untested) here as there's no X driver for that. 
*/ begin_x = end_x = begin_y = end_y = -1; for (x = 0; x < x_len; x++) { if (begin_x == -1 && packet->data[x] > NEXIO_THRESHOLD) { begin_x = x; continue; } if (end_x == -1 && begin_x != -1 && packet->data[x] < NEXIO_THRESHOLD) { end_x = x - 1; for (y = x_len; y < data_len; y++) { if (begin_y == -1 && packet->data[y] > NEXIO_THRESHOLD) { begin_y = y - x_len; continue; } if (end_y == -1 && begin_y != -1 && packet->data[y] < NEXIO_THRESHOLD) { end_y = y - 1 - x_len; w = end_x - begin_x; h = end_y - begin_y; #if 0 /* multi-touch */ input_report_abs(usbtouch->input, ABS_MT_TOUCH_MAJOR, max(w,h)); input_report_abs(usbtouch->input, ABS_MT_TOUCH_MINOR, min(x,h)); input_report_abs(usbtouch->input, ABS_MT_POSITION_X, 2*begin_x+w); input_report_abs(usbtouch->input, ABS_MT_POSITION_Y, 2*begin_y+h); input_report_abs(usbtouch->input, ABS_MT_ORIENTATION, w > h); input_mt_sync(usbtouch->input); #endif /* single touch */ usbtouch->x = 2 * begin_x + w; usbtouch->y = 2 * begin_y + h; usbtouch->touch = packet->flags & 0x01; begin_y = end_y = -1; return 1; } } begin_x = end_x = -1; } } return 0; } static const struct usbtouch_device_info nexio_dev_info = { .rept_size = 1024, .irq_always = true, .read_data = nexio_read_data, .alloc = nexio_alloc, .init = nexio_init, .exit = nexio_exit, }; #endif /***************************************************************************** * ELO part */ #ifdef CONFIG_TOUCHSCREEN_USB_ELO static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = (pkt[3] << 8) | pkt[2]; dev->y = (pkt[5] << 8) | pkt[4]; dev->touch = pkt[6] > 0; dev->press = pkt[6]; return 1; } static const struct usbtouch_device_info elo_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .max_press = 0xff, .rept_size = 8, .read_data = elo_read_data, }; #endif /***************************************************************************** * Generic Part */ static void usbtouch_process_pkt(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len) { const struct usbtouch_device_info *type = usbtouch->type; if (!type->read_data(usbtouch, pkt)) return; input_report_key(usbtouch->input, BTN_TOUCH, usbtouch->touch); if (swap_xy) { input_report_abs(usbtouch->input, ABS_X, usbtouch->y); input_report_abs(usbtouch->input, ABS_Y, usbtouch->x); } else { input_report_abs(usbtouch->input, ABS_X, usbtouch->x); input_report_abs(usbtouch->input, ABS_Y, usbtouch->y); } if (type->max_press) input_report_abs(usbtouch->input, ABS_PRESSURE, usbtouch->press); input_sync(usbtouch->input); } #ifdef MULTI_PACKET static void usbtouch_process_multi(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len) { unsigned char *buffer; int pkt_len, pos, buf_len, tmp; /* process buffer */ if (unlikely(usbtouch->buf_len)) { /* try to get size */ pkt_len = usbtouch->type->get_pkt_len( usbtouch->buffer, usbtouch->buf_len); /* drop? 
*/ if (unlikely(!pkt_len)) goto out_flush_buf; /* need to append -pkt_len bytes before able to get size */ if (unlikely(pkt_len < 0)) { int append = -pkt_len; if (unlikely(append > len)) append = len; if (usbtouch->buf_len + append >= usbtouch->type->rept_size) goto out_flush_buf; memcpy(usbtouch->buffer + usbtouch->buf_len, pkt, append); usbtouch->buf_len += append; pkt_len = usbtouch->type->get_pkt_len( usbtouch->buffer, usbtouch->buf_len); if (pkt_len < 0) return; } /* append */ tmp = pkt_len - usbtouch->buf_len; if (usbtouch->buf_len + tmp >= usbtouch->type->rept_size) goto out_flush_buf; memcpy(usbtouch->buffer + usbtouch->buf_len, pkt, tmp); usbtouch_process_pkt(usbtouch, usbtouch->buffer, pkt_len); buffer = pkt + tmp; buf_len = len - tmp; } else { buffer = pkt; buf_len = len; } /* loop over the received packet, process */ pos = 0; while (pos < buf_len) { /* get packet len */ pkt_len = usbtouch->type->get_pkt_len(buffer + pos, buf_len - pos); /* unknown packet: skip one byte */ if (unlikely(!pkt_len)) { pos++; continue; } /* full packet: process */ if (likely((pkt_len > 0) && (pkt_len <= buf_len - pos))) { usbtouch_process_pkt(usbtouch, buffer + pos, pkt_len); } else { /* incomplete packet: save in buffer */ memcpy(usbtouch->buffer, buffer + pos, buf_len - pos); usbtouch->buf_len = buf_len - pos; return; } pos += pkt_len; } out_flush_buf: usbtouch->buf_len = 0; return; } #else static void usbtouch_process_multi(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len) { dev_WARN_ONCE(&usbtouch->interface->dev, 1, "Protocol has ->get_pkt_len() without #define MULTI_PACKET"); } #endif static void usbtouch_irq(struct urb *urb) { struct usbtouch_usb *usbtouch = urb->context; struct device *dev = &usbtouch->interface->dev; int retval; switch (urb->status) { case 0: /* success */ break; case -ETIME: /* this urb is timing out */ dev_dbg(dev, "%s - urb timed out - was the device unplugged?\n", __func__); return; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -EPIPE: /* this urb is terminated, clean up */ dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, urb->status); return; default: dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, urb->status); goto exit; } usbtouch->process_pkt(usbtouch, usbtouch->data, urb->actual_length); exit: usb_mark_last_busy(interface_to_usbdev(usbtouch->interface)); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "%s - usb_submit_urb failed with result: %d\n", __func__, retval); } static int usbtouch_start_io(struct usbtouch_usb *usbtouch) { guard(mutex)(&usbtouch->pm_mutex); if (!usbtouch->type->irq_always) if (usb_submit_urb(usbtouch->irq, GFP_KERNEL)) return -EIO; usbtouch->interface->needs_remote_wakeup = 1; usbtouch->is_open = true; return 0; } static int usbtouch_open(struct input_dev *input) { struct usbtouch_usb *usbtouch = input_get_drvdata(input); int r; usbtouch->irq->dev = interface_to_usbdev(usbtouch->interface); r = usb_autopm_get_interface(usbtouch->interface) ? 
-EIO : 0; if (r) return r; r = usbtouch_start_io(usbtouch); usb_autopm_put_interface(usbtouch->interface); return r; } static void usbtouch_close(struct input_dev *input) { struct usbtouch_usb *usbtouch = input_get_drvdata(input); int r; scoped_guard(mutex, &usbtouch->pm_mutex) { if (!usbtouch->type->irq_always) usb_kill_urb(usbtouch->irq); usbtouch->is_open = false; } r = usb_autopm_get_interface(usbtouch->interface); usbtouch->interface->needs_remote_wakeup = 0; if (!r) usb_autopm_put_interface(usbtouch->interface); } static int usbtouch_suspend(struct usb_interface *intf, pm_message_t message) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); usb_kill_urb(usbtouch->irq); return 0; } static int usbtouch_resume(struct usb_interface *intf) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); guard(mutex)(&usbtouch->pm_mutex); if (usbtouch->is_open || usbtouch->type->irq_always) return usb_submit_urb(usbtouch->irq, GFP_NOIO); return 0; } static int usbtouch_reset_resume(struct usb_interface *intf) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); int err; /* reinit the device */ if (usbtouch->type->init) { err = usbtouch->type->init(usbtouch); if (err) { dev_dbg(&intf->dev, "%s - type->init() failed, err: %d\n", __func__, err); return err; } } /* restart IO if needed */ guard(mutex)(&usbtouch->pm_mutex); if (usbtouch->is_open) return usb_submit_urb(usbtouch->irq, GFP_NOIO); return 0; } static void usbtouch_free_buffers(struct usb_device *udev, struct usbtouch_usb *usbtouch) { usb_free_coherent(udev, usbtouch->data_size, usbtouch->data, usbtouch->data_dma); kfree(usbtouch->buffer); } static struct usb_endpoint_descriptor * usbtouch_get_input_endpoint(struct usb_host_interface *interface) { int i; for (i = 0; i < interface->desc.bNumEndpoints; i++) if (usb_endpoint_dir_in(&interface->endpoint[i].desc)) return &interface->endpoint[i].desc; return NULL; } static int usbtouch_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usbtouch_usb *usbtouch; struct input_dev *input_dev; struct usb_endpoint_descriptor *endpoint; struct usb_device *udev = interface_to_usbdev(intf); const struct usbtouch_device_info *type; int err = -ENOMEM; /* some devices are ignored */ type = (const struct usbtouch_device_info *)id->driver_info; if (!type) return -ENODEV; endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting); if (!endpoint) return -ENXIO; usbtouch = kzalloc(sizeof(*usbtouch), GFP_KERNEL); input_dev = input_allocate_device(); if (!usbtouch || !input_dev) goto out_free; mutex_init(&usbtouch->pm_mutex); usbtouch->type = type; usbtouch->data_size = type->rept_size; if (type->get_pkt_len) { /* * When dealing with variable-length packets we should * not request more than wMaxPacketSize bytes at once * as we do not know if there is more data coming or * we filled exactly wMaxPacketSize bytes and there is * nothing else. 
*/ usbtouch->data_size = min(usbtouch->data_size, usb_endpoint_maxp(endpoint)); } usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size, GFP_KERNEL, &usbtouch->data_dma); if (!usbtouch->data) goto out_free; if (type->get_pkt_len) { usbtouch->buffer = kmalloc(type->rept_size, GFP_KERNEL); if (!usbtouch->buffer) goto out_free_buffers; usbtouch->process_pkt = usbtouch_process_multi; } else { usbtouch->process_pkt = usbtouch_process_pkt; } usbtouch->irq = usb_alloc_urb(0, GFP_KERNEL); if (!usbtouch->irq) { dev_dbg(&intf->dev, "%s - usb_alloc_urb failed: usbtouch->irq\n", __func__); goto out_free_buffers; } usbtouch->interface = intf; usbtouch->input = input_dev; if (udev->manufacturer) strscpy(usbtouch->name, udev->manufacturer, sizeof(usbtouch->name)); if (udev->product) { if (udev->manufacturer) strlcat(usbtouch->name, " ", sizeof(usbtouch->name)); strlcat(usbtouch->name, udev->product, sizeof(usbtouch->name)); } if (!strlen(usbtouch->name)) snprintf(usbtouch->name, sizeof(usbtouch->name), "USB Touchscreen %04x:%04x", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); usb_make_path(udev, usbtouch->phys, sizeof(usbtouch->phys)); strlcat(usbtouch->phys, "/input0", sizeof(usbtouch->phys)); input_dev->name = usbtouch->name; input_dev->phys = usbtouch->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, usbtouch); input_dev->open = usbtouch_open; input_dev->close = usbtouch_close; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, type->min_xc, type->max_xc, 0, 0); input_set_abs_params(input_dev, ABS_Y, type->min_yc, type->max_yc, 0, 0); if (type->max_press) input_set_abs_params(input_dev, ABS_PRESSURE, type->min_press, type->max_press, 0, 0); if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT) usb_fill_int_urb(usbtouch->irq, udev, usb_rcvintpipe(udev, endpoint->bEndpointAddress), usbtouch->data, usbtouch->data_size, usbtouch_irq, usbtouch, endpoint->bInterval); else usb_fill_bulk_urb(usbtouch->irq, udev, usb_rcvbulkpipe(udev, endpoint->bEndpointAddress), usbtouch->data, usbtouch->data_size, usbtouch_irq, usbtouch); usbtouch->irq->dev = udev; usbtouch->irq->transfer_dma = usbtouch->data_dma; usbtouch->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* device specific allocations */ if (type->alloc) { err = type->alloc(usbtouch); if (err) { dev_dbg(&intf->dev, "%s - type->alloc() failed, err: %d\n", __func__, err); goto out_free_urb; } } /* device specific initialisation*/ if (type->init) { err = type->init(usbtouch); if (err) { dev_dbg(&intf->dev, "%s - type->init() failed, err: %d\n", __func__, err); goto out_do_exit; } } err = input_register_device(usbtouch->input); if (err) { dev_dbg(&intf->dev, "%s - input_register_device failed, err: %d\n", __func__, err); goto out_do_exit; } usb_set_intfdata(intf, usbtouch); if (usbtouch->type->irq_always) { /* this can't fail */ usb_autopm_get_interface(intf); err = usb_submit_urb(usbtouch->irq, GFP_KERNEL); if (err) { usb_autopm_put_interface(intf); dev_err(&intf->dev, "%s - usb_submit_urb failed with result: %d\n", __func__, err); goto out_unregister_input; } } return 0; out_unregister_input: input_unregister_device(input_dev); input_dev = NULL; out_do_exit: if (type->exit) type->exit(usbtouch); out_free_urb: usb_free_urb(usbtouch->irq); out_free_buffers: usbtouch_free_buffers(udev, usbtouch); out_free: input_free_device(input_dev); 
kfree(usbtouch); return err; } static void usbtouch_disconnect(struct usb_interface *intf) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); if (!usbtouch) return; dev_dbg(&intf->dev, "%s - usbtouch is initialized, cleaning up\n", __func__); usb_set_intfdata(intf, NULL); /* this will stop IO via close */ input_unregister_device(usbtouch->input); usb_free_urb(usbtouch->irq); if (usbtouch->type->exit) usbtouch->type->exit(usbtouch); usbtouch_free_buffers(interface_to_usbdev(intf), usbtouch); kfree(usbtouch); } static const struct attribute_group *usbtouch_groups[] = { #ifdef CONFIG_TOUCHSCREEN_USB_3M &mtouch_attr_group, #endif NULL }; static const struct usb_device_id usbtouch_devices[] = { #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX /* ignore the HID capable devices, handled by usbhid */ { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0001, USB_INTERFACE_CLASS_HID), .driver_info = 0 }, { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0002, USB_INTERFACE_CLASS_HID), .driver_info = 0 }, /* normal device IDs */ { USB_DEVICE(0x3823, 0x0001), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x3823, 0x0002), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x0123, 0x0001), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x0eef, 0x0001), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x0eef, 0x0002), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x1234, 0x0001), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x1234, 0x0002), .driver_info = (kernel_ulong_t)&egalax_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_PANJIT { USB_DEVICE(0x134c, 0x0001), .driver_info = (kernel_ulong_t)&panjit_dev_info }, { USB_DEVICE(0x134c, 0x0002), .driver_info = (kernel_ulong_t)&panjit_dev_info }, { USB_DEVICE(0x134c, 0x0003), .driver_info = (kernel_ulong_t)&panjit_dev_info }, { USB_DEVICE(0x134c, 0x0004), .driver_info = (kernel_ulong_t)&panjit_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_3M { USB_DEVICE(0x0596, 0x0001), .driver_info = (kernel_ulong_t)&mtouch_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ITM { USB_DEVICE(0x0403, 0xf9e9), .driver_info = (kernel_ulong_t)&itm_dev_info }, { USB_DEVICE(0x16e3, 0xf9e9), .driver_info = (kernel_ulong_t)&itm_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ETURBO { USB_DEVICE(0x1234, 0x5678), .driver_info = (kernel_ulong_t)&eturbo_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_GUNZE { USB_DEVICE(0x0637, 0x0001), .driver_info = (kernel_ulong_t)&gunze_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10 { USB_DEVICE(0x0afa, 0x03e8), .driver_info = (kernel_ulong_t)&dmc_tsc10_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH { USB_DEVICE(0x255e, 0x0001), .driver_info = (kernel_ulong_t)&irtouch_dev_info }, { USB_DEVICE(0x595a, 0x0001), .driver_info = (kernel_ulong_t)&irtouch_dev_info }, { USB_DEVICE(0x6615, 0x0001), .driver_info = (kernel_ulong_t)&irtouch_dev_info }, { USB_DEVICE(0x6615, 0x0012), .driver_info = (kernel_ulong_t)&irtouch_hires_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK { USB_DEVICE(0x1391, 0x1000), .driver_info = (kernel_ulong_t)&idealtek_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH { USB_DEVICE(0x0dfc, 0x0001), .driver_info = (kernel_ulong_t)&general_touch_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_GOTOP { USB_DEVICE(0x08f2, 0x007f), .driver_info = (kernel_ulong_t)&gotop_dev_info }, { USB_DEVICE(0x08f2, 0x00ce), .driver_info = (kernel_ulong_t)&gotop_dev_info }, { USB_DEVICE(0x08f2, 0x00f4), .driver_info = 
(kernel_ulong_t)&gotop_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_JASTEC { USB_DEVICE(0x0f92, 0x0001), .driver_info = (kernel_ulong_t)&jastec_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_E2I { USB_DEVICE(0x1ac7, 0x0001), .driver_info = (kernel_ulong_t)&e2i_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC { USB_DEVICE(0x14c8, 0x0003), .driver_info = (kernel_ulong_t)&zytronic_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB /* TC5UH */ { USB_DEVICE(0x0664, 0x0309), .driver_info = (kernel_ulong_t)&tc45usb_dev_info }, /* TC4UM */ { USB_DEVICE(0x0664, 0x0306), .driver_info = (kernel_ulong_t)&tc45usb_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_NEXIO /* data interface only */ { USB_DEVICE_AND_INTERFACE_INFO(0x10f0, 0x2002, 0x0a, 0x00, 0x00), .driver_info = (kernel_ulong_t)&nexio_dev_info }, { USB_DEVICE_AND_INTERFACE_INFO(0x1870, 0x0001, 0x0a, 0x00, 0x00), .driver_info = (kernel_ulong_t)&nexio_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ELO { USB_DEVICE(0x04e7, 0x0020), .driver_info = (kernel_ulong_t)&elo_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH { USB_DEVICE(0x7374, 0x0001), .driver_info = (kernel_ulong_t)&etouch_dev_info }, #endif { } }; MODULE_DEVICE_TABLE(usb, usbtouch_devices); static struct usb_driver usbtouch_driver = { .name = "usbtouchscreen", .probe = usbtouch_probe, .disconnect = usbtouch_disconnect, .suspend = usbtouch_suspend, .resume = usbtouch_resume, .reset_resume = usbtouch_reset_resume, .id_table = usbtouch_devices, .dev_groups = usbtouch_groups, .supports_autosuspend = 1, }; module_usb_driver(usbtouch_driver); MODULE_AUTHOR("Daniel Ritz <daniel.ritz@gmx.ch>"); MODULE_DESCRIPTION("USB Touchscreen Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("touchkitusb"); MODULE_ALIAS("itmtouch"); MODULE_ALIAS("mtouchusb");
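The MULTI_PACKET protocols above rely on the per-device get_pkt_len() callback, whose contract is documented in usbtouch_device_info: a positive return value is the packet length, 0 means "skip one byte", and a negative value -n means n more bytes are needed before the length is known. A minimal sketch for a hypothetical framing (sync bit in byte 0, total length in byte 1) is shown below; it is illustrative only and not one of the device handlers above.

/*
 * Hypothetical framing: byte 0 has its top bit set on a packet start,
 * byte 1 carries the total packet length including the header.
 */
static int demo_get_pkt_len(unsigned char *buf, int len)
{
	if (!(buf[0] & 0x80))
		return 0;	/* not a packet start: skip one byte */

	if (len < 2)
		return -1;	/* length byte not yet received: need 1 more */

	return buf[1];		/* full packet length */
}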
// SPDX-License-Identifier: GPL-2.0-only
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/string.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>

/* IPVS pe list */
static LIST_HEAD(ip_vs_pe);

/* semaphore for IPVS PEs. */
static DEFINE_MUTEX(ip_vs_pe_mutex);

/* Get pe in the pe list by name */
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
{
	struct ip_vs_pe *pe;

	IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__, pe_name);

	rcu_read_lock();
	list_for_each_entry_rcu(pe, &ip_vs_pe, n_list) {
		/* Test and get the modules atomically */
		if (pe->module && !try_module_get(pe->module)) {
			/* This pe is just deleted */
			continue;
		}
		if (strcmp(pe_name, pe->name) == 0) {
			/* HIT */
			rcu_read_unlock();
			return pe;
		}
		module_put(pe->module);
	}
	rcu_read_unlock();

	return NULL;
}

/* Lookup pe and try to load it if it doesn't exist */
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name)
{
	struct ip_vs_pe *pe;

	/* Search for the pe by name */
	pe = __ip_vs_pe_getbyname(name);

	/* If pe not found, load the module and search again */
	if (!pe) {
		request_module("ip_vs_pe_%s", name);
		pe = __ip_vs_pe_getbyname(name);
	}

	return pe;
}

/* Register a pe in the pe list */
int register_ip_vs_pe(struct ip_vs_pe *pe)
{
	struct ip_vs_pe *tmp;

	/* increase the module use count */
	if (!ip_vs_use_count_inc())
		return -ENOENT;

	mutex_lock(&ip_vs_pe_mutex);
	/* Make sure that the pe with this name doesn't exist
	 * in the pe list.
	 */
	list_for_each_entry(tmp, &ip_vs_pe, n_list) {
		if (strcmp(tmp->name, pe->name) == 0) {
			mutex_unlock(&ip_vs_pe_mutex);
			ip_vs_use_count_dec();
			pr_err("%s(): [%s] pe already existed "
			       "in the system\n", __func__, pe->name);
			return -EINVAL;
		}
	}
	/* Add it into the d-linked pe list */
	list_add_rcu(&pe->n_list, &ip_vs_pe);
	mutex_unlock(&ip_vs_pe_mutex);

	pr_info("[%s] pe registered.\n", pe->name);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ip_vs_pe);

/* Unregister a pe from the pe list */
int unregister_ip_vs_pe(struct ip_vs_pe *pe)
{
	mutex_lock(&ip_vs_pe_mutex);
	/* Remove it from the d-linked pe list */
	list_del_rcu(&pe->n_list);
	mutex_unlock(&ip_vs_pe_mutex);

	/* decrease the module use count */
	ip_vs_use_count_dec();

	pr_info("[%s] pe unregistered.\n", pe->name);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ip_vs_pe);
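register_ip_vs_pe() and unregister_ip_vs_pe() are the whole registration API for IPVS persistence engines. A hypothetical engine module would use them roughly as sketched below; only the name and module fields of struct ip_vs_pe are filled in here, and a real engine also sets its protocol callbacks (an assumption about the rest of the structure, which is not shown in this listing).

#include <linux/module.h>
#include <net/ip_vs.h>

/* Hypothetical persistence engine; a real one also fills in its callbacks. */
static struct ip_vs_pe demo_pe = {
	.name	= "demo",
	.module	= THIS_MODULE,
};

static int __init demo_pe_init(void)
{
	return register_ip_vs_pe(&demo_pe);
}

static void __exit demo_pe_exit(void)
{
	unregister_ip_vs_pe(&demo_pe);
}

module_init(demo_pe_init);
module_exit(demo_pe_exit);
MODULE_LICENSE("GPL");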
// SPDX-License-Identifier: GPL-2.0-only
/* L2TP core.
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This file contains some code of the original L2TPv2 pppol2tp
 * driver, which has the following copyright:
 *
 * Authors:	Martijn van Oosterhout <kleptog@svana.org>
 *		James Chapman (jchapman@katalix.com)
 * Contributors:
 *		Michal Ostrowski <mostrows@speakeasy.net>
 *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
 *		David S.
Miller (davem@redhat.com) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/string.h> #include <linux/list.h> #include <linux/rculist.h> #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/jiffies.h> #include <linux/netdevice.h> #include <linux/net.h> #include <linux/inetdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/udp.h> #include <linux/l2tp.h> #include <linux/sort.h> #include <linux/file.h> #include <linux/nsproxy.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/dst.h> #include <net/ip.h> #include <net/udp.h> #include <net/udp_tunnel.h> #include <net/inet_common.h> #include <net/xfrm.h> #include <net/protocol.h> #include <net/inet6_connection_sock.h> #include <net/inet_ecn.h> #include <net/ip6_route.h> #include <net/ip6_checksum.h> #include <asm/byteorder.h> #include <linux/atomic.h> #include "l2tp_core.h" #define CREATE_TRACE_POINTS #include "trace.h" #define L2TP_DRV_VERSION "V2.0" /* L2TP header constants */ #define L2TP_HDRFLAG_T 0x8000 #define L2TP_HDRFLAG_L 0x4000 #define L2TP_HDRFLAG_S 0x0800 #define L2TP_HDRFLAG_O 0x0200 #define L2TP_HDRFLAG_P 0x0100 #define L2TP_HDR_VER_MASK 0x000F #define L2TP_HDR_VER_2 0x0002 #define L2TP_HDR_VER_3 0x0003 /* L2TPv3 default L2-specific sublayer */ #define L2TP_SLFLAG_S 0x40000000 #define L2TP_SL_SEQ_MASK 0x00ffffff #define L2TP_HDR_SIZE_MAX 14 /* Default trace flags */ #define L2TP_DEFAULT_DEBUG_FLAGS 0 #define L2TP_DEPTH_NESTING 2 #if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING #error "L2TP requires its own lockdep subclass" #endif /* Private data stored for received packets in the skb. */ struct l2tp_skb_cb { u32 ns; u16 has_seq; u16 length; unsigned long expires; }; #define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)]) static struct workqueue_struct *l2tp_wq; /* per-net private data for this module */ static unsigned int l2tp_net_id; struct l2tp_net { /* Lock for write access to l2tp_tunnel_idr */ spinlock_t l2tp_tunnel_idr_lock; struct idr l2tp_tunnel_idr; /* Lock for write access to l2tp_v[23]_session_idr/htable */ spinlock_t l2tp_session_idr_lock; struct idr l2tp_v2_session_idr; struct idr l2tp_v3_session_idr; struct hlist_head l2tp_v3_session_htable[16]; }; static u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id) { return ((u32)tunnel_id) << 16 | session_id; } static unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id) { return ((unsigned long)sk) + session_id; } #if IS_ENABLED(CONFIG_IPV6) static bool l2tp_sk_is_v6(struct sock *sk) { return sk->sk_family == PF_INET6 && !ipv6_addr_v4mapped(&sk->sk_v6_daddr); } #endif static struct l2tp_net *l2tp_pernet(const struct net *net) { return net_generic(net, l2tp_net_id); } static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) { struct sock *sk = tunnel->sock; trace_free_tunnel(tunnel); if (sk) { /* Disable udp encapsulation */ switch (tunnel->encap) { case L2TP_ENCAPTYPE_UDP: /* No longer an encapsulation socket. 
See net/ipv4/udp.c */ WRITE_ONCE(udp_sk(sk)->encap_type, 0); udp_sk(sk)->encap_rcv = NULL; udp_sk(sk)->encap_destroy = NULL; break; case L2TP_ENCAPTYPE_IP: break; } tunnel->sock = NULL; sock_put(sk); } kfree_rcu(tunnel, rcu); } static void l2tp_session_free(struct l2tp_session *session) { trace_free_session(session); if (session->tunnel) l2tp_tunnel_put(session->tunnel); kfree_rcu(session, rcu); } struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk) { const struct net *net = sock_net(sk); unsigned long tunnel_id, tmp; struct l2tp_tunnel *tunnel; struct l2tp_net *pn; rcu_read_lock_bh(); pn = l2tp_pernet(net); idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { if (tunnel && tunnel->sock == sk && refcount_inc_not_zero(&tunnel->ref_count)) { rcu_read_unlock_bh(); return tunnel; } } rcu_read_unlock_bh(); return NULL; } EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel); void l2tp_tunnel_put(struct l2tp_tunnel *tunnel) { if (refcount_dec_and_test(&tunnel->ref_count)) l2tp_tunnel_free(tunnel); } EXPORT_SYMBOL_GPL(l2tp_tunnel_put); void l2tp_session_put(struct l2tp_session *session) { if (refcount_dec_and_test(&session->ref_count)) l2tp_session_free(session); } EXPORT_SYMBOL_GPL(l2tp_session_put); /* Lookup a tunnel. A new reference is held on the returned tunnel. */ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) { const struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_tunnel *tunnel; rcu_read_lock_bh(); tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id); if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) { rcu_read_unlock_bh(); return tunnel; } rcu_read_unlock_bh(); return NULL; } EXPORT_SYMBOL_GPL(l2tp_tunnel_get); struct l2tp_tunnel *l2tp_tunnel_get_next(const struct net *net, unsigned long *key) { struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_tunnel *tunnel = NULL; rcu_read_lock_bh(); again: tunnel = idr_get_next_ul(&pn->l2tp_tunnel_idr, key); if (tunnel) { if (refcount_inc_not_zero(&tunnel->ref_count)) { rcu_read_unlock_bh(); return tunnel; } (*key)++; goto again; } rcu_read_unlock_bh(); return NULL; } EXPORT_SYMBOL_GPL(l2tp_tunnel_get_next); struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id) { const struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_session *session; rcu_read_lock_bh(); session = idr_find(&pn->l2tp_v3_session_idr, session_id); if (session && !hash_hashed(&session->hlist) && refcount_inc_not_zero(&session->ref_count)) { rcu_read_unlock_bh(); return session; } /* If we get here and session is non-NULL, the session_id * collides with one in another tunnel. If sk is non-NULL, * find the session matching sk. */ if (session && sk) { unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id); hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session, hlist, key) { /* session->tunnel may be NULL if another thread is in * l2tp_session_register and has added an item to * l2tp_v3_session_htable but hasn't yet added the * session to its tunnel's session_list. 
*/ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel); if (session->session_id == session_id && tunnel && tunnel->sock == sk && refcount_inc_not_zero(&session->ref_count)) { rcu_read_unlock_bh(); return session; } } } rcu_read_unlock_bh(); return NULL; } EXPORT_SYMBOL_GPL(l2tp_v3_session_get); struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id) { u32 session_key = l2tp_v2_session_key(tunnel_id, session_id); const struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_session *session; rcu_read_lock_bh(); session = idr_find(&pn->l2tp_v2_session_idr, session_key); if (session && refcount_inc_not_zero(&session->ref_count)) { rcu_read_unlock_bh(); return session; } rcu_read_unlock_bh(); return NULL; } EXPORT_SYMBOL_GPL(l2tp_v2_session_get); struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver, u32 tunnel_id, u32 session_id) { if (pver == L2TP_HDR_VER_2) return l2tp_v2_session_get(net, tunnel_id, session_id); else return l2tp_v3_session_get(net, sk, session_id); } EXPORT_SYMBOL_GPL(l2tp_session_get); static struct l2tp_session *l2tp_v2_session_get_next(const struct net *net, u16 tid, unsigned long *key) { struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_session *session = NULL; /* Start searching within the range of the tid */ if (*key == 0) *key = l2tp_v2_session_key(tid, 0); rcu_read_lock_bh(); again: session = idr_get_next_ul(&pn->l2tp_v2_session_idr, key); if (session) { struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel); /* ignore sessions with id 0 as they are internal for pppol2tp */ if (session->session_id == 0) { (*key)++; goto again; } if (tunnel->tunnel_id == tid && refcount_inc_not_zero(&session->ref_count)) { rcu_read_unlock_bh(); return session; } (*key)++; if (tunnel->tunnel_id == tid) goto again; } rcu_read_unlock_bh(); return NULL; } static struct l2tp_session *l2tp_v3_session_get_next(const struct net *net, u32 tid, struct sock *sk, unsigned long *key) { struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_session *session = NULL; rcu_read_lock_bh(); again: session = idr_get_next_ul(&pn->l2tp_v3_session_idr, key); if (session && !hash_hashed(&session->hlist)) { struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel); if (tunnel && tunnel->tunnel_id == tid && refcount_inc_not_zero(&session->ref_count)) { rcu_read_unlock_bh(); return session; } (*key)++; goto again; } /* If we get here and session is non-NULL, the IDR entry may be one * where the session_id collides with one in another tunnel. Check * session_htable for a match. There can only be one session of a given * ID per tunnel so we can return as soon as a match is found. */ if (session && hash_hashed(&session->hlist)) { unsigned long hkey = l2tp_v3_session_hashkey(sk, session->session_id); u32 sid = session->session_id; hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session, hlist, hkey) { struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel); if (session->session_id == sid && tunnel && tunnel->tunnel_id == tid && refcount_inc_not_zero(&session->ref_count)) { rcu_read_unlock_bh(); return session; } } /* If no match found, the colliding session ID isn't in our * tunnel so try the next session ID. 
*/ (*key)++; goto again; } rcu_read_unlock_bh(); return NULL; } struct l2tp_session *l2tp_session_get_next(const struct net *net, struct sock *sk, int pver, u32 tunnel_id, unsigned long *key) { if (pver == L2TP_HDR_VER_2) return l2tp_v2_session_get_next(net, tunnel_id, key); else return l2tp_v3_session_get_next(net, tunnel_id, sk, key); } EXPORT_SYMBOL_GPL(l2tp_session_get_next); /* Lookup a session by interface name. * This is very inefficient but is only used by management interfaces. */ struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, const char *ifname) { struct l2tp_net *pn = l2tp_pernet(net); unsigned long tunnel_id, tmp; struct l2tp_session *session; struct l2tp_tunnel *tunnel; rcu_read_lock_bh(); idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { if (tunnel) { list_for_each_entry_rcu(session, &tunnel->session_list, list) { if (!strcmp(session->ifname, ifname)) { refcount_inc(&session->ref_count); rcu_read_unlock_bh(); return session; } } } } rcu_read_unlock_bh(); return NULL; } EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname); static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist, struct l2tp_session *session) { refcount_inc(&session->ref_count); WARN_ON_ONCE(session->coll_list); session->coll_list = clist; spin_lock(&clist->lock); list_add(&session->clist, &clist->list); spin_unlock(&clist->lock); } static int l2tp_session_collision_add(struct l2tp_net *pn, struct l2tp_session *session1, struct l2tp_session *session2) { struct l2tp_session_coll_list *clist; lockdep_assert_held(&pn->l2tp_session_idr_lock); if (!session2) return -EEXIST; /* If existing session is in IP-encap tunnel, refuse new session */ if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP) return -EEXIST; clist = session2->coll_list; if (!clist) { /* First collision. Allocate list to manage the collided sessions * and add the existing session to the list. */ clist = kmalloc(sizeof(*clist), GFP_ATOMIC); if (!clist) return -ENOMEM; spin_lock_init(&clist->lock); INIT_LIST_HEAD(&clist->list); refcount_set(&clist->ref_count, 1); l2tp_session_coll_list_add(clist, session2); } /* If existing session isn't already in the session hlist, add it. */ if (!hash_hashed(&session2->hlist)) hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist, session2->hlist_key); /* Add new session to the hlist and collision list */ hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist, session1->hlist_key); refcount_inc(&clist->ref_count); l2tp_session_coll_list_add(clist, session1); return 0; } static void l2tp_session_collision_del(struct l2tp_net *pn, struct l2tp_session *session) { struct l2tp_session_coll_list *clist = session->coll_list; unsigned long session_key = session->session_id; struct l2tp_session *session2; lockdep_assert_held(&pn->l2tp_session_idr_lock); hash_del_rcu(&session->hlist); if (clist) { /* Remove session from its collision list. If there * are other sessions with the same ID, replace this * session's IDR entry with that session, otherwise * remove the IDR entry. If this is the last session, * the collision list data is freed. 
*/ spin_lock(&clist->lock); list_del_init(&session->clist); session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist); if (session2) { void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key); WARN_ON_ONCE(IS_ERR_VALUE(old)); } else { void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key); WARN_ON_ONCE(removed != session); } session->coll_list = NULL; spin_unlock(&clist->lock); if (refcount_dec_and_test(&clist->ref_count)) kfree(clist); l2tp_session_put(session); } } int l2tp_session_register(struct l2tp_session *session, struct l2tp_tunnel *tunnel) { struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); struct l2tp_session *other_session = NULL; void *old = NULL; u32 session_key; int err; spin_lock_bh(&tunnel->list_lock); spin_lock_bh(&pn->l2tp_session_idr_lock); if (!tunnel->acpt_newsess) { err = -ENODEV; goto out; } if (tunnel->version == L2TP_HDR_VER_3) { session_key = session->session_id; err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL, &session_key, session_key, GFP_ATOMIC); /* IP encap expects session IDs to be globally unique, while * UDP encap doesn't. This isn't per the RFC, which says that * sessions are identified only by the session ID, but is to * support existing userspace which depends on it. */ if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) { other_session = idr_find(&pn->l2tp_v3_session_idr, session_key); err = l2tp_session_collision_add(pn, session, other_session); } } else { session_key = l2tp_v2_session_key(tunnel->tunnel_id, session->session_id); err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL, &session_key, session_key, GFP_ATOMIC); } if (err) { if (err == -ENOSPC) err = -EEXIST; goto out; } refcount_inc(&tunnel->ref_count); WRITE_ONCE(session->tunnel, tunnel); list_add_rcu(&session->list, &tunnel->session_list); /* this makes session available to lockless getters */ if (tunnel->version == L2TP_HDR_VER_3) { if (!other_session) old = idr_replace(&pn->l2tp_v3_session_idr, session, session_key); } else { old = idr_replace(&pn->l2tp_v2_session_idr, session, session_key); } /* old should be NULL, unless something removed or modified * the IDR entry after our idr_alloc_32 above (which shouldn't * happen). */ WARN_ON_ONCE(old); out: spin_unlock_bh(&pn->l2tp_session_idr_lock); spin_unlock_bh(&tunnel->list_lock); if (!err) trace_register_session(session); return err; } EXPORT_SYMBOL_GPL(l2tp_session_register); /***************************************************************************** * Receive data handling *****************************************************************************/ /* Queue a skb in order. We come here only if the skb has an L2TP sequence * number. */ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb) { struct sk_buff *skbp; struct sk_buff *tmp; u32 ns = L2TP_SKB_CB(skb)->ns; spin_lock_bh(&session->reorder_q.lock); skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { if (L2TP_SKB_CB(skbp)->ns > ns) { __skb_queue_before(&session->reorder_q, skbp, skb); atomic_long_inc(&session->stats.rx_oos_packets); goto out; } } __skb_queue_tail(&session->reorder_q, skb); out: spin_unlock_bh(&session->reorder_q.lock); } /* Dequeue a single skb. */ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb) { struct l2tp_tunnel *tunnel = session->tunnel; int length = L2TP_SKB_CB(skb)->length; /* We're about to requeue the skb, so return resources * to its current owner (a socket receive buffer). 
*/ skb_orphan(skb); atomic_long_inc(&tunnel->stats.rx_packets); atomic_long_add(length, &tunnel->stats.rx_bytes); atomic_long_inc(&session->stats.rx_packets); atomic_long_add(length, &session->stats.rx_bytes); if (L2TP_SKB_CB(skb)->has_seq) { /* Bump our Nr */ session->nr++; session->nr &= session->nr_max; trace_session_seqnum_update(session); } /* call private receive handler */ if (session->recv_skb) (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length); else kfree_skb(skb); } /* Dequeue skbs from the session's reorder_q, subject to packet order. * Skbs that have been in the queue for too long are simply discarded. */ static void l2tp_recv_dequeue(struct l2tp_session *session) { struct sk_buff *skb; struct sk_buff *tmp; /* If the pkt at the head of the queue has the nr that we * expect to send up next, dequeue it and any other * in-sequence packets behind it. */ start: spin_lock_bh(&session->reorder_q.lock); skb_queue_walk_safe(&session->reorder_q, skb, tmp) { struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb); /* If the packet has been pending on the queue for too long, discard it */ if (time_after(jiffies, cb->expires)) { atomic_long_inc(&session->stats.rx_seq_discards); atomic_long_inc(&session->stats.rx_errors); trace_session_pkt_expired(session, cb->ns); session->reorder_skip = 1; __skb_unlink(skb, &session->reorder_q); kfree_skb(skb); continue; } if (cb->has_seq) { if (session->reorder_skip) { session->reorder_skip = 0; session->nr = cb->ns; trace_session_seqnum_reset(session); } if (cb->ns != session->nr) goto out; } __skb_unlink(skb, &session->reorder_q); /* Process the skb. We release the queue lock while we * do so to let other contexts process the queue. */ spin_unlock_bh(&session->reorder_q.lock); l2tp_recv_dequeue_skb(session, skb); goto start; } out: spin_unlock_bh(&session->reorder_q.lock); } static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr) { u32 nws; if (nr >= session->nr) nws = nr - session->nr; else nws = (session->nr_max + 1) - (session->nr - nr); return nws < session->nr_window_size; } /* If packet has sequence numbers, queue it if acceptable. Returns 0 if * acceptable, else non-zero. */ static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb) { struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb); if (!l2tp_seq_check_rx_window(session, cb->ns)) { /* Packet sequence number is outside allowed window. * Discard it. */ trace_session_pkt_outside_rx_window(session, cb->ns); goto discard; } if (session->reorder_timeout != 0) { /* Packet reordering enabled. Add skb to session's * reorder queue, in order of ns. */ l2tp_recv_queue_skb(session, skb); goto out; } /* Packet reordering disabled. Discard out-of-sequence packets, while * tracking the number if in-sequence packets after the first OOS packet * is seen. After nr_oos_count_max in-sequence packets, reset the * sequence number to re-enable packet reception. */ if (cb->ns == session->nr) { skb_queue_tail(&session->reorder_q, skb); } else { u32 nr_oos = cb->ns; u32 nr_next = (session->nr_oos + 1) & session->nr_max; if (nr_oos == nr_next) session->nr_oos_count++; else session->nr_oos_count = 0; session->nr_oos = nr_oos; if (session->nr_oos_count > session->nr_oos_count_max) { session->reorder_skip = 1; } if (!session->reorder_skip) { atomic_long_inc(&session->stats.rx_seq_discards); trace_session_pkt_oos(session, cb->ns); goto discard; } skb_queue_tail(&session->reorder_q, skb); } out: return 0; discard: return 1; } /* Do receive processing of L2TP data frames. 
We handle both L2TPv2 * and L2TPv3 data frames here. * * L2TPv2 Data Message Header * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Tunnel ID | Session ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Ns (opt) | Nr (opt) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Offset Size (opt) | Offset pad... (opt) * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Data frames are marked by T=0. All other fields are the same as * those in L2TP control frames. * * L2TPv3 Data Message Header * * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | L2TP Session Header | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | L2-Specific Sublayer | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Tunnel Payload ... * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * L2TPv3 Session Header Over IP * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Session ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Cookie (optional, maximum 64 bits)... * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * L2TPv3 L2-Specific Sublayer Format * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |x|S|x|x|x|x|x|x| Sequence Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Cookie value and sublayer format are negotiated with the peer when * the session is set up. Unlike L2TPv2, we do not need to parse the * packet header to determine if optional fields are present. * * Caller must already have parsed the frame and determined that it is * a data (not control) frame before coming here. Fields up to the * session-id have already been parsed and ptr points to the data * after the session-id. */ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length) { struct l2tp_tunnel *tunnel = session->tunnel; int offset; /* Parse and check optional cookie */ if (session->peer_cookie_len > 0) { if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n", tunnel->name, tunnel->tunnel_id, session->session_id); atomic_long_inc(&session->stats.rx_cookie_discards); goto discard; } ptr += session->peer_cookie_len; } /* Handle the optional sequence numbers. Sequence numbers are * in different places for L2TPv2 and L2TPv3. * * If we are the LAC, enable/disable sequence numbers under * the control of the LNS. If no sequence numbers present but * we were expecting them, discard frame. 
*/ L2TP_SKB_CB(skb)->has_seq = 0; if (tunnel->version == L2TP_HDR_VER_2) { if (hdrflags & L2TP_HDRFLAG_S) { /* Store L2TP info in the skb */ L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr); L2TP_SKB_CB(skb)->has_seq = 1; ptr += 2; /* Skip past nr in the header */ ptr += 2; } } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { u32 l2h = ntohl(*(__be32 *)ptr); if (l2h & 0x40000000) { /* Store L2TP info in the skb */ L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff; L2TP_SKB_CB(skb)->has_seq = 1; } ptr += 4; } if (L2TP_SKB_CB(skb)->has_seq) { /* Received a packet with sequence numbers. If we're the LAC, * check if we sre sending sequence numbers and if not, * configure it so. */ if (!session->lns_mode && !session->send_seq) { trace_session_seqnum_lns_enable(session); session->send_seq = 1; l2tp_session_set_header_len(session, tunnel->version, tunnel->encap); } } else { /* No sequence numbers. * If user has configured mandatory sequence numbers, discard. */ if (session->recv_seq) { pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n", session->name); atomic_long_inc(&session->stats.rx_seq_discards); goto discard; } /* If we're the LAC and we're sending sequence numbers, the * LNS has requested that we no longer send sequence numbers. * If we're the LNS and we're sending sequence numbers, the * LAC is broken. Discard the frame. */ if (!session->lns_mode && session->send_seq) { trace_session_seqnum_lns_disable(session); session->send_seq = 0; l2tp_session_set_header_len(session, tunnel->version, tunnel->encap); } else if (session->send_seq) { pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n", session->name); atomic_long_inc(&session->stats.rx_seq_discards); goto discard; } } /* Session data offset is defined only for L2TPv2 and is * indicated by an optional 16-bit value in the header. */ if (tunnel->version == L2TP_HDR_VER_2) { /* If offset bit set, skip it. */ if (hdrflags & L2TP_HDRFLAG_O) { offset = ntohs(*(__be16 *)ptr); ptr += 2 + offset; } } offset = ptr - optr; if (!pskb_may_pull(skb, offset)) goto discard; __skb_pull(skb, offset); /* Prepare skb for adding to the session's reorder_q. Hold * packets for max reorder_timeout or 1 second if not * reordering. */ L2TP_SKB_CB(skb)->length = length; L2TP_SKB_CB(skb)->expires = jiffies + (session->reorder_timeout ? session->reorder_timeout : HZ); /* Add packet to the session's receive queue. Reordering is done here, if * enabled. Saved L2TP protocol info is stored in skb->sb[]. */ if (L2TP_SKB_CB(skb)->has_seq) { if (l2tp_recv_data_seq(session, skb)) goto discard; } else { /* No sequence numbers. Add the skb to the tail of the * reorder queue. This ensures that it will be * delivered after all previous sequenced skbs. */ skb_queue_tail(&session->reorder_q, skb); } /* Try to dequeue as many skbs from reorder_q as we can. */ l2tp_recv_dequeue(session); return; discard: atomic_long_inc(&session->stats.rx_errors); kfree_skb(skb); } EXPORT_SYMBOL_GPL(l2tp_recv_common); /* Drop skbs from the session's reorder_q */ static void l2tp_session_queue_purge(struct l2tp_session *session) { struct sk_buff *skb = NULL; while ((skb = skb_dequeue(&session->reorder_q))) { atomic_long_inc(&session->stats.rx_errors); kfree_skb(skb); } } /* UDP encapsulation receive handler. See net/ipv4/udp.c for details. 
*/ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct l2tp_session *session = NULL; struct l2tp_tunnel *tunnel = NULL; struct net *net = sock_net(sk); unsigned char *ptr, *optr; u16 hdrflags; u16 version; int length; /* UDP has verified checksum */ /* UDP always verifies the packet length. */ __skb_pull(skb, sizeof(struct udphdr)); /* Short packet? */ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) goto pass; /* Point to L2TP header */ optr = skb->data; ptr = skb->data; /* Get L2TP header flags */ hdrflags = ntohs(*(__be16 *)ptr); /* Get protocol version */ version = hdrflags & L2TP_HDR_VER_MASK; /* Get length of L2TP packet */ length = skb->len; /* If type is control packet, it is handled by userspace. */ if (hdrflags & L2TP_HDRFLAG_T) goto pass; /* Skip flags */ ptr += 2; if (version == L2TP_HDR_VER_2) { u16 tunnel_id, session_id; /* If length is present, skip it */ if (hdrflags & L2TP_HDRFLAG_L) ptr += 2; /* Extract tunnel and session ID */ tunnel_id = ntohs(*(__be16 *)ptr); ptr += 2; session_id = ntohs(*(__be16 *)ptr); ptr += 2; session = l2tp_v2_session_get(net, tunnel_id, session_id); } else { u32 session_id; ptr += 2; /* skip reserved bits */ session_id = ntohl(*(__be32 *)ptr); ptr += 4; session = l2tp_v3_session_get(net, sk, session_id); } if (!session || !session->recv_skb) { if (session) l2tp_session_put(session); /* Not found? Pass to userspace to deal with */ goto pass; } tunnel = session->tunnel; /* Check protocol version */ if (version != tunnel->version) goto invalid; if (version == L2TP_HDR_VER_3 && l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) { l2tp_session_put(session); goto invalid; } l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); l2tp_session_put(session); return 0; invalid: atomic_long_inc(&tunnel->stats.rx_invalid); pass: /* Put UDP header back */ __skb_push(skb, sizeof(struct udphdr)); return 1; } EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv); /* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */ static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { sk->sk_err = err; sk_error_report(sk); if (ip_hdr(skb)->version == IPVERSION) { if (inet_test_bit(RECVERR, sk)) return ip_icmp_error(sk, skb, err, port, info, payload); #if IS_ENABLED(CONFIG_IPV6) } else { if (inet6_test_bit(RECVERR6, sk)) return ipv6_icmp_error(sk, skb, err, port, info, payload); #endif } } /************************************************************************ * Transmit handling ***********************************************************************/ /* Build an L2TP header for the session into the buffer provided. */ static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf) { struct l2tp_tunnel *tunnel = session->tunnel; __be16 *bufp = buf; __be16 *optr = buf; u16 flags = L2TP_HDR_VER_2; u32 tunnel_id = tunnel->peer_tunnel_id; u32 session_id = session->peer_session_id; if (session->send_seq) flags |= L2TP_HDRFLAG_S; /* Setup L2TP header. */ *bufp++ = htons(flags); *bufp++ = htons(tunnel_id); *bufp++ = htons(session_id); if (session->send_seq) { *bufp++ = htons(session->ns); *bufp++ = 0; session->ns++; session->ns &= 0xffff; trace_session_seqnum_update(session); } return bufp - optr; } static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) { struct l2tp_tunnel *tunnel = session->tunnel; char *bufp = buf; char *optr = bufp; /* Setup L2TP header. The header differs slightly for UDP and * IP encapsulations. 
For UDP, there is 4 bytes of flags. */ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { u16 flags = L2TP_HDR_VER_3; *((__be16 *)bufp) = htons(flags); bufp += 2; *((__be16 *)bufp) = 0; bufp += 2; } *((__be32 *)bufp) = htonl(session->peer_session_id); bufp += 4; if (session->cookie_len) { memcpy(bufp, &session->cookie[0], session->cookie_len); bufp += session->cookie_len; } if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { u32 l2h = 0; if (session->send_seq) { l2h = 0x40000000 | session->ns; session->ns++; session->ns &= 0xffffff; trace_session_seqnum_update(session); } *((__be32 *)bufp) = htonl(l2h); bufp += 4; } return bufp - optr; } /* Queue the packet to IP for output: tunnel socket lock must be held */ static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl) { int err; skb->ignore_df = 1; skb_dst_drop(skb); #if IS_ENABLED(CONFIG_IPV6) if (l2tp_sk_is_v6(tunnel->sock)) err = inet6_csk_xmit(tunnel->sock, skb, NULL); else #endif err = ip_queue_xmit(tunnel->sock, skb, fl); return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; } static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len) { struct l2tp_tunnel *tunnel = session->tunnel; unsigned int data_len = skb->len; struct sock *sk = tunnel->sock; int headroom, uhlen, udp_len; int ret = NET_XMIT_SUCCESS; struct inet_sock *inet; struct udphdr *uh; /* Check that there's enough headroom in the skb to insert IP, * UDP and L2TP headers. If not enough, expand it to * make room. Adjust truesize. */ uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0; headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len; if (skb_cow_head(skb, headroom)) { kfree_skb(skb); return NET_XMIT_DROP; } /* Setup L2TP header */ if (tunnel->version == L2TP_HDR_VER_2) l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len)); else l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len)); /* Reset skb netfilter state */ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); nf_reset_ct(skb); /* L2TP uses its own lockdep subclass to avoid lockdep splats caused by * nested socket calls on the same lockdep socket class. This can * happen when data from a user socket is routed over l2tp, which uses * another userspace socket. */ spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING); if (sock_owned_by_user(sk)) { kfree_skb(skb); ret = NET_XMIT_DROP; goto out_unlock; } /* The user-space may change the connection status for the user-space * provided socket at run time: we must check it under the socket lock */ if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); ret = NET_XMIT_DROP; goto out_unlock; } /* Report transmitted length before we add encap header, which keeps * statistics consistent for both UDP and IP encap tx/rx paths. 
*/ *len = skb->len; inet = inet_sk(sk); switch (tunnel->encap) { case L2TP_ENCAPTYPE_UDP: /* Setup UDP header */ __skb_push(skb, sizeof(*uh)); skb_reset_transport_header(skb); uh = udp_hdr(skb); uh->source = inet->inet_sport; uh->dest = inet->inet_dport; udp_len = uhlen + session->hdr_len + data_len; uh->len = htons(udp_len); /* Calculate UDP checksum if configured to do so */ #if IS_ENABLED(CONFIG_IPV6) if (l2tp_sk_is_v6(sk)) udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, &sk->sk_v6_daddr, udp_len); else #endif udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr, inet->inet_daddr, udp_len); break; case L2TP_ENCAPTYPE_IP: break; } ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl); out_unlock: spin_unlock(&sk->sk_lock.slock); return ret; } /* If caller requires the skb to have a ppp header, the header must be * inserted in the skb data before calling this function. */ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb) { unsigned int len = 0; int ret; ret = l2tp_xmit_core(session, skb, &len); if (ret == NET_XMIT_SUCCESS) { atomic_long_inc(&session->tunnel->stats.tx_packets); atomic_long_add(len, &session->tunnel->stats.tx_bytes); atomic_long_inc(&session->stats.tx_packets); atomic_long_add(len, &session->stats.tx_bytes); } else { atomic_long_inc(&session->tunnel->stats.tx_errors); atomic_long_inc(&session->stats.tx_errors); } return ret; } EXPORT_SYMBOL_GPL(l2tp_xmit_skb); /***************************************************************************** * Tinnel and session create/destroy. *****************************************************************************/ /* Remove an l2tp session from l2tp_core's lists. */ static void l2tp_session_unhash(struct l2tp_session *session) { struct l2tp_tunnel *tunnel = session->tunnel; if (tunnel) { struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); struct l2tp_session *removed = session; spin_lock_bh(&tunnel->list_lock); spin_lock_bh(&pn->l2tp_session_idr_lock); /* Remove from the per-tunnel list */ list_del_init(&session->list); /* Remove from per-net IDR */ if (tunnel->version == L2TP_HDR_VER_3) { if (hash_hashed(&session->hlist)) l2tp_session_collision_del(pn, session); else removed = idr_remove(&pn->l2tp_v3_session_idr, session->session_id); } else { u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id, session->session_id); removed = idr_remove(&pn->l2tp_v2_session_idr, session_key); } WARN_ON_ONCE(removed && removed != session); spin_unlock_bh(&pn->l2tp_session_idr_lock); spin_unlock_bh(&tunnel->list_lock); } } /* When the tunnel is closed, all the attached sessions need to go too. 
*/ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) { struct l2tp_session *session; spin_lock_bh(&tunnel->list_lock); tunnel->acpt_newsess = false; list_for_each_entry(session, &tunnel->session_list, list) l2tp_session_delete(session); spin_unlock_bh(&tunnel->list_lock); } /* Tunnel socket destroy hook for UDP encapsulation */ static void l2tp_udp_encap_destroy(struct sock *sk) { struct l2tp_tunnel *tunnel; tunnel = l2tp_sk_to_tunnel(sk); if (tunnel) { l2tp_tunnel_delete(tunnel); l2tp_tunnel_put(tunnel); } } static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel) { struct l2tp_net *pn = l2tp_pernet(net); spin_lock_bh(&pn->l2tp_tunnel_idr_lock); idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id); spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); } /* Workqueue tunnel deletion function */ static void l2tp_tunnel_del_work(struct work_struct *work) { struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel, del_work); struct sock *sk = tunnel->sock; struct socket *sock = sk->sk_socket; l2tp_tunnel_closeall(tunnel); /* If the tunnel socket was created within the kernel, use * the sk API to release it here. */ if (tunnel->fd < 0) { if (sock) { kernel_sock_shutdown(sock, SHUT_RDWR); sock_release(sock); } } l2tp_tunnel_remove(tunnel->l2tp_net, tunnel); /* drop initial ref */ l2tp_tunnel_put(tunnel); /* drop workqueue ref */ l2tp_tunnel_put(tunnel); } /* Create a socket for the tunnel, if one isn't set up by * userspace. This is used for static tunnels where there is no * managing L2TP daemon. * * Since we don't want these sockets to keep a namespace alive by * themselves, we drop the socket's namespace refcount after creation. * These sockets are freed when the namespace exits using the pernet * exit hook. */ static int l2tp_tunnel_sock_create(struct net *net, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp) { int err = -EINVAL; struct socket *sock = NULL; struct udp_port_cfg udp_conf; switch (cfg->encap) { case L2TP_ENCAPTYPE_UDP: memset(&udp_conf, 0, sizeof(udp_conf)); #if IS_ENABLED(CONFIG_IPV6) if (cfg->local_ip6 && cfg->peer_ip6) { udp_conf.family = AF_INET6; memcpy(&udp_conf.local_ip6, cfg->local_ip6, sizeof(udp_conf.local_ip6)); memcpy(&udp_conf.peer_ip6, cfg->peer_ip6, sizeof(udp_conf.peer_ip6)); udp_conf.use_udp6_tx_checksums = !cfg->udp6_zero_tx_checksums; udp_conf.use_udp6_rx_checksums = !cfg->udp6_zero_rx_checksums; } else #endif { udp_conf.family = AF_INET; udp_conf.local_ip = cfg->local_ip; udp_conf.peer_ip = cfg->peer_ip; udp_conf.use_udp_checksums = cfg->use_udp_checksums; } udp_conf.local_udp_port = htons(cfg->local_udp_port); udp_conf.peer_udp_port = htons(cfg->peer_udp_port); err = udp_sock_create(net, &udp_conf, &sock); if (err < 0) goto out; break; case L2TP_ENCAPTYPE_IP: #if IS_ENABLED(CONFIG_IPV6) if (cfg->local_ip6 && cfg->peer_ip6) { struct sockaddr_l2tpip6 ip6_addr = {0}; err = sock_create_kern(net, AF_INET6, SOCK_DGRAM, IPPROTO_L2TP, &sock); if (err < 0) goto out; ip6_addr.l2tp_family = AF_INET6; memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6, sizeof(ip6_addr.l2tp_addr)); ip6_addr.l2tp_conn_id = tunnel_id; err = kernel_bind(sock, (struct sockaddr *)&ip6_addr, sizeof(ip6_addr)); if (err < 0) goto out; ip6_addr.l2tp_family = AF_INET6; memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6, sizeof(ip6_addr.l2tp_addr)); ip6_addr.l2tp_conn_id = peer_tunnel_id; err = kernel_connect(sock, (struct sockaddr *)&ip6_addr, sizeof(ip6_addr), 0); if (err < 0) goto out; } else #endif { struct sockaddr_l2tpip ip_addr = {0}; err = 
sock_create_kern(net, AF_INET, SOCK_DGRAM, IPPROTO_L2TP, &sock); if (err < 0) goto out; ip_addr.l2tp_family = AF_INET; ip_addr.l2tp_addr = cfg->local_ip; ip_addr.l2tp_conn_id = tunnel_id; err = kernel_bind(sock, (struct sockaddr *)&ip_addr, sizeof(ip_addr)); if (err < 0) goto out; ip_addr.l2tp_family = AF_INET; ip_addr.l2tp_addr = cfg->peer_ip; ip_addr.l2tp_conn_id = peer_tunnel_id; err = kernel_connect(sock, (struct sockaddr *)&ip_addr, sizeof(ip_addr), 0); if (err < 0) goto out; } break; default: goto out; } out: *sockp = sock; if (err < 0 && sock) { kernel_sock_shutdown(sock, SHUT_RDWR); sock_release(sock); *sockp = NULL; } return err; } int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) { struct l2tp_tunnel *tunnel = NULL; int err; enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP; if (cfg) encap = cfg->encap; tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL); if (!tunnel) { err = -ENOMEM; goto err; } tunnel->version = version; tunnel->tunnel_id = tunnel_id; tunnel->peer_tunnel_id = peer_tunnel_id; sprintf(&tunnel->name[0], "tunl %u", tunnel_id); spin_lock_init(&tunnel->list_lock); tunnel->acpt_newsess = true; INIT_LIST_HEAD(&tunnel->session_list); tunnel->encap = encap; refcount_set(&tunnel->ref_count, 1); tunnel->fd = fd; /* Init delete workqueue struct */ INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work); err = 0; err: if (tunnelp) *tunnelp = tunnel; return err; } EXPORT_SYMBOL_GPL(l2tp_tunnel_create); static int l2tp_validate_socket(const struct sock *sk, const struct net *net, enum l2tp_encap_type encap) { struct l2tp_tunnel *tunnel; if (!net_eq(sock_net(sk), net)) return -EINVAL; if (sk->sk_type != SOCK_DGRAM) return -EPROTONOSUPPORT; if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) return -EPROTONOSUPPORT; if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) || (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP)) return -EPROTONOSUPPORT; if (encap == L2TP_ENCAPTYPE_UDP && sk->sk_user_data) return -EBUSY; tunnel = l2tp_sk_to_tunnel(sk); if (tunnel) { l2tp_tunnel_put(tunnel); return -EBUSY; } return 0; } int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, struct l2tp_tunnel_cfg *cfg) { struct l2tp_net *pn = l2tp_pernet(net); u32 tunnel_id = tunnel->tunnel_id; struct socket *sock; struct sock *sk; int ret; spin_lock_bh(&pn->l2tp_tunnel_idr_lock); ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id, GFP_ATOMIC); spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); if (ret) return ret == -ENOSPC ? 
-EEXIST : ret; if (tunnel->fd < 0) { ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id, tunnel->peer_tunnel_id, cfg, &sock); if (ret < 0) goto err; } else { sock = sockfd_lookup(tunnel->fd, &ret); if (!sock) goto err; } sk = sock->sk; lock_sock(sk); write_lock_bh(&sk->sk_callback_lock); ret = l2tp_validate_socket(sk, net, tunnel->encap); if (ret < 0) goto err_inval_sock; write_unlock_bh(&sk->sk_callback_lock); if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { struct udp_tunnel_sock_cfg udp_cfg = { .encap_type = UDP_ENCAP_L2TPINUDP, .encap_rcv = l2tp_udp_encap_recv, .encap_err_rcv = l2tp_udp_encap_err_recv, .encap_destroy = l2tp_udp_encap_destroy, }; setup_udp_tunnel_sock(net, sock, &udp_cfg); } sk->sk_allocation = GFP_ATOMIC; release_sock(sk); sock_hold(sk); tunnel->sock = sk; tunnel->l2tp_net = net; spin_lock_bh(&pn->l2tp_tunnel_idr_lock); idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id); spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); trace_register_tunnel(tunnel); if (tunnel->fd >= 0) sockfd_put(sock); return 0; err_inval_sock: write_unlock_bh(&sk->sk_callback_lock); release_sock(sk); if (tunnel->fd < 0) sock_release(sock); else sockfd_put(sock); err: l2tp_tunnel_remove(net, tunnel); return ret; } EXPORT_SYMBOL_GPL(l2tp_tunnel_register); /* This function is used by the netlink TUNNEL_DELETE command. */ void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) { if (!test_and_set_bit(0, &tunnel->dead)) { trace_delete_tunnel(tunnel); refcount_inc(&tunnel->ref_count); queue_work(l2tp_wq, &tunnel->del_work); } } EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); void l2tp_session_delete(struct l2tp_session *session) { if (!test_and_set_bit(0, &session->dead)) { trace_delete_session(session); refcount_inc(&session->ref_count); queue_work(l2tp_wq, &session->del_work); } } EXPORT_SYMBOL_GPL(l2tp_session_delete); /* Workqueue session deletion function */ static void l2tp_session_del_work(struct work_struct *work) { struct l2tp_session *session = container_of(work, struct l2tp_session, del_work); l2tp_session_unhash(session); l2tp_session_queue_purge(session); if (session->session_close) (*session->session_close)(session); /* drop initial ref */ l2tp_session_put(session); /* drop workqueue ref */ l2tp_session_put(session); } /* We come here whenever a session's send_seq, cookie_len or * l2specific_type parameters are set. 
*/ void l2tp_session_set_header_len(struct l2tp_session *session, int version, enum l2tp_encap_type encap) { if (version == L2TP_HDR_VER_2) { session->hdr_len = 6; if (session->send_seq) session->hdr_len += 4; } else { session->hdr_len = 4 + session->cookie_len; session->hdr_len += l2tp_get_l2specific_len(session); if (encap == L2TP_ENCAPTYPE_UDP) session->hdr_len += 4; } } EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct l2tp_session *session; session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL); if (session) { session->magic = L2TP_SESSION_MAGIC; session->session_id = session_id; session->peer_session_id = peer_session_id; session->nr = 0; if (tunnel->version == L2TP_HDR_VER_2) session->nr_max = 0xffff; else session->nr_max = 0xffffff; session->nr_window_size = session->nr_max / 2; session->nr_oos_count_max = 4; /* Use NR of first received packet */ session->reorder_skip = 1; sprintf(&session->name[0], "sess %u/%u", tunnel->tunnel_id, session->session_id); skb_queue_head_init(&session->reorder_q); session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id); INIT_HLIST_NODE(&session->hlist); INIT_LIST_HEAD(&session->clist); INIT_LIST_HEAD(&session->list); INIT_WORK(&session->del_work, l2tp_session_del_work); if (cfg) { session->pwtype = cfg->pw_type; session->send_seq = cfg->send_seq; session->recv_seq = cfg->recv_seq; session->lns_mode = cfg->lns_mode; session->reorder_timeout = cfg->reorder_timeout; session->l2specific_type = cfg->l2specific_type; session->cookie_len = cfg->cookie_len; memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len); session->peer_cookie_len = cfg->peer_cookie_len; memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len); } l2tp_session_set_header_len(session, tunnel->version, tunnel->encap); refcount_set(&session->ref_count, 1); return session; } return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(l2tp_session_create); /***************************************************************************** * Init and cleanup *****************************************************************************/ static __net_init int l2tp_init_net(struct net *net) { struct l2tp_net *pn = net_generic(net, l2tp_net_id); idr_init(&pn->l2tp_tunnel_idr); spin_lock_init(&pn->l2tp_tunnel_idr_lock); idr_init(&pn->l2tp_v2_session_idr); idr_init(&pn->l2tp_v3_session_idr); spin_lock_init(&pn->l2tp_session_idr_lock); return 0; } static __net_exit void l2tp_pre_exit_net(struct net *net) { struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_tunnel *tunnel = NULL; unsigned long tunnel_id, tmp; rcu_read_lock_bh(); idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { if (tunnel) l2tp_tunnel_delete(tunnel); } rcu_read_unlock_bh(); if (l2tp_wq) { /* Run all TUNNEL_DELETE work items just queued. */ __flush_workqueue(l2tp_wq); /* Each TUNNEL_DELETE work item will queue a SESSION_DELETE * work item for each session in the tunnel. Flush the * workqueue again to process these. */ __flush_workqueue(l2tp_wq); } } static int l2tp_idr_item_unexpected(int id, void *p, void *data) { const char *idr_name = data; pr_err("l2tp: %s IDR not empty at net %d exit\n", idr_name, id); WARN_ON_ONCE(1); return 1; } static __net_exit void l2tp_exit_net(struct net *net) { struct l2tp_net *pn = l2tp_pernet(net); /* Our per-net IDRs should be empty. 
Check that is so, to * help catch cleanup races or refcnt leaks. */ idr_for_each(&pn->l2tp_v2_session_idr, l2tp_idr_item_unexpected, "v2_session"); idr_for_each(&pn->l2tp_v3_session_idr, l2tp_idr_item_unexpected, "v3_session"); idr_for_each(&pn->l2tp_tunnel_idr, l2tp_idr_item_unexpected, "tunnel"); idr_destroy(&pn->l2tp_v2_session_idr); idr_destroy(&pn->l2tp_v3_session_idr); idr_destroy(&pn->l2tp_tunnel_idr); } static struct pernet_operations l2tp_net_ops = { .init = l2tp_init_net, .exit = l2tp_exit_net, .pre_exit = l2tp_pre_exit_net, .id = &l2tp_net_id, .size = sizeof(struct l2tp_net), }; static int __init l2tp_init(void) { int rc = 0; rc = register_pernet_device(&l2tp_net_ops); if (rc) goto out; l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0); if (!l2tp_wq) { pr_err("alloc_workqueue failed\n"); unregister_pernet_device(&l2tp_net_ops); rc = -ENOMEM; goto out; } pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION); out: return rc; } static void __exit l2tp_exit(void) { unregister_pernet_device(&l2tp_net_ops); if (l2tp_wq) { destroy_workqueue(l2tp_wq); l2tp_wq = NULL; } } module_init(l2tp_init); module_exit(l2tp_exit); MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); MODULE_DESCRIPTION("L2TP core"); MODULE_LICENSE("GPL"); MODULE_VERSION(L2TP_DRV_VERSION);
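/*
 * Illustrative, standalone sketch (not part of l2tp_core.c above): a minimal
 * userspace program that walks an L2TPv2 data message header in the same
 * field order used by l2tp_udp_encap_recv() and shown in the header diagram
 * earlier in the file. The flag constants mirror the L2TP_HDRFLAG_*
 * definitions near the top of l2tp_core.c; the helper get16() and the sample
 * packet bytes are invented here purely for demonstration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohs() */

#define L2TP_HDRFLAG_T		0x8000	/* control message */
#define L2TP_HDRFLAG_L		0x4000	/* length field present */
#define L2TP_HDRFLAG_S		0x0800	/* Ns/Nr fields present */
#define L2TP_HDRFLAG_O		0x0200	/* offset field present */
#define L2TP_HDR_VER_MASK	0x000F

/* Read a 16-bit big-endian field and advance the cursor. */
static uint16_t get16(const uint8_t **p)
{
	uint16_t v;

	memcpy(&v, *p, sizeof(v));
	*p += sizeof(v);
	return ntohs(v);
}

int main(void)
{
	/* Data message (T=0), S=1, Ver=2; tunnel 5, session 7, Ns=1, Nr=2. */
	static const uint8_t pkt[] = {
		0x08, 0x02,		/* flags and version */
		0x00, 0x05,		/* tunnel id */
		0x00, 0x07,		/* session id */
		0x00, 0x01,		/* Ns (present because S=1) */
		0x00, 0x02,		/* Nr (present because S=1) */
	};
	const uint8_t *ptr = pkt;
	uint16_t hdrflags, tunnel_id, session_id;

	hdrflags = get16(&ptr);
	if (hdrflags & L2TP_HDRFLAG_T) {
		puts("control frame: handled by userspace, not the data path");
		return 0;
	}
	if ((hdrflags & L2TP_HDR_VER_MASK) != 2) {
		puts("not an L2TPv2 header");
		return 1;
	}
	if (hdrflags & L2TP_HDRFLAG_L)
		(void)get16(&ptr);	/* skip optional length field */

	tunnel_id = get16(&ptr);
	session_id = get16(&ptr);
	printf("tunnel %u, session %u\n", tunnel_id, session_id);

	if (hdrflags & L2TP_HDRFLAG_S) {
		uint16_t ns = get16(&ptr);
		uint16_t nr = get16(&ptr);

		printf("Ns %u, Nr %u\n", ns, nr);
	}
	if (hdrflags & L2TP_HDRFLAG_O) {
		uint16_t offset = get16(&ptr);

		ptr += offset;		/* skip optional offset padding */
	}
	/* ptr now points at the tunnelled payload. */
	return 0;
}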
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <linux/atomic.h>
#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/capability.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/sockptr.h>
#include <crypto/sha1.h>
#include <linux/u64_stats_sync.h>

#include <net/sch_generic.h>

#include <asm/byteorder.h>
#include <uapi/linux/filter.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;
struct ctl_table;
struct ctl_table_header;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1 BPF_REG_1
#define BPF_REG_ARG2 BPF_REG_2
#define BPF_REG_ARG3 BPF_REG_3
#define BPF_REG_ARG4 BPF_REG_4
#define BPF_REG_ARG5 BPF_REG_5
#define BPF_REG_CTX BPF_REG_6
#define BPF_REG_FP BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A BPF_REG_0
#define BPF_REG_X BPF_REG_7
#define BPF_REG_TMP BPF_REG_2	/* scratch reg */
#define BPF_REG_D BPF_REG_8	/* data, callee-saved */
#define BPF_REG_H BPF_REG_9	/* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX MAX_BPF_REG
#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0

/* unused opcode to mark special load instruction. Same as BPF_ABS */
#define BPF_PROBE_MEM 0x20

/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
#define BPF_PROBE_MEMSX 0x40

/* unused opcode to mark special load instruction. Same as BPF_MSH */
#define BPF_PROBE_MEM32 0xa0

/* unused opcode to mark special atomic instruction */
#define BPF_PROBE_ATOMIC 0xe0

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0

/* unused opcode to mark speculation barrier for mitigating
 * Speculative Store Bypass
 */
#define BPF_NOSPEC 0xc0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
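 *
 * An illustrative /proc/kallsyms line for such a JITed image (the address
 * and program tag below are invented for this sketch, not real output):
 *
 *	ffffffffc0a81000 t bpf_prog_6deef7357e7b4530_foo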
*/ #define BPF_SYM_ELF_TYPE 't' /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 /* Helper macros for filter block array initializers. */ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ #define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) #define BPF_ALU64_REG(OP, DST, SRC) \ BPF_ALU64_REG_OFF(OP, DST, SRC, 0) #define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) #define BPF_ALU32_REG(OP, DST, SRC) \ BPF_ALU32_REG_OFF(OP, DST, SRC, 0) /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ #define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) #define BPF_ALU64_IMM(OP, DST, IMM) \ BPF_ALU64_IMM_OFF(OP, DST, IMM, 0) #define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) #define BPF_ALU32_IMM(OP, DST, IMM) \ BPF_ALU32_IMM_OFF(OP, DST, IMM, 0) /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ #define BPF_ENDIAN(TYPE, DST, LEN) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = LEN }) /* Byte Swap, bswap16/32/64 */ #define BPF_BSWAP(DST, LEN) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = LEN }) /* Short form of mov, dst_reg = src_reg */ #define BPF_MOV64_REG(DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = 0 }) #define BPF_MOV32_REG(DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = 0 }) /* Special (internal-only) form of mov, used to resolve per-CPU addrs: * dst_reg = src_reg + <percpu_base_off> * BPF_ADDR_PERCPU is used as a special insn->off value. */ #define BPF_ADDR_PERCPU (-1) #define BPF_MOV64_PERCPU_REG(DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = BPF_ADDR_PERCPU, \ .imm = 0 }) static inline bool insn_is_mov_percpu_addr(const struct bpf_insn *insn) { return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_PERCPU; } /* Short form of mov, dst_reg = imm32 */ #define BPF_MOV64_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_MOV32_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) /* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */ #define BPF_MOVSX64_REG(DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) #define BPF_MOVSX32_REG(DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Special form of mov32, used for doing explicit zero extension on dst. 
*/ #define BPF_ZEXT_REG(DST) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = DST, \ .off = 0, \ .imm = 1 }) static inline bool insn_is_zext(const struct bpf_insn *insn) { return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1; } /* addr_space_cast from as(0) to as(1) is for converting bpf arena pointers * to pointers in user vma. */ static inline bool insn_is_cast_user(const struct bpf_insn *insn) { return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1U << 16; } /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ #define BPF_LD_IMM64(DST, IMM) \ BPF_LD_IMM64_RAW(DST, 0, IMM) #define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ ((struct bpf_insn) { \ .code = BPF_LD | BPF_DW | BPF_IMM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = (__u32) (IMM) }), \ ((struct bpf_insn) { \ .code = 0, /* zero is reserved opcode */ \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = ((__u64) (IMM)) >> 32 }) /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ #define BPF_LD_MAP_FD(DST, MAP_FD) \ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = IMM }) #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = IMM }) /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ #define BPF_LD_ABS(SIZE, IMM) \ ((struct bpf_insn) { \ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */ #define BPF_LD_IND(SIZE, SRC, IMM) \ ((struct bpf_insn) { \ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \ .dst_reg = 0, \ .src_reg = SRC, \ .off = 0, \ .imm = IMM }) /* Memory load, dst_reg = *(uint *) (src_reg + off16) */ #define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Memory load, dst_reg = *(signed size *) (src_reg + off16) */ #define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Memory store, *(uint *) (dst_reg + off16) = src_reg */ #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* * Atomic operations: * * BPF_ADD *(uint *) (dst_reg + off16) += src_reg * BPF_AND *(uint *) (dst_reg + off16) &= src_reg * BPF_OR *(uint *) (dst_reg + off16) |= src_reg * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg); * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg); * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg); * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg); * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg) * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg) */ #define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \ 
.dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = OP }) /* Legacy alias */ #define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF) /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ ((struct bpf_insn) { \ .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ #define BPF_JMP_REG(OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) /* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ #define BPF_JMP32_REG(OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */ #define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) /* Unconditional jumps, goto pc + off16 */ #define BPF_JMP_A(OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_JA, \ .dst_reg = 0, \ .src_reg = 0, \ .off = OFF, \ .imm = 0 }) /* Unconditional jumps, gotol pc + imm32 */ #define BPF_JMP32_A(IMM) \ ((struct bpf_insn) { \ .code = BPF_JMP32 | BPF_JA, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) /* Relative call */ #define BPF_CALL_REL(TGT) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ .dst_reg = 0, \ .src_reg = BPF_PSEUDO_CALL, \ .off = 0, \ .imm = TGT }) /* Convert function address to BPF immediate */ #define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base) #define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = BPF_CALL_IMM(FUNC) }) /* Raw code statement block */ #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ ((struct bpf_insn) { \ .code = CODE, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = IMM }) /* Program exit */ #define BPF_EXIT_INSN() \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_EXIT, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = 0 }) /* Speculation barrier */ #define BPF_ST_NOSPEC() \ ((struct bpf_insn) { \ .code = BPF_ST | BPF_NOSPEC, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = 0 }) /* Internal classic blocks for direct assignment */ #define __BPF_STMT(CODE, K) \ ((struct sock_filter) BPF_STMT(CODE, K)) #define __BPF_JUMP(CODE, K, JT, JF) \ ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF)) #define bytes_to_bpf_size(bytes) \ ({ \ int bpf_size = -EINVAL; \ \ if (bytes == sizeof(u8)) \ bpf_size = BPF_B; \ else if (bytes == sizeof(u16)) \ bpf_size = BPF_H; \ else if (bytes == sizeof(u32)) \ bpf_size = BPF_W; \ else if (bytes == sizeof(u64)) \ bpf_size = BPF_DW; \ \ bpf_size; \ }) #define bpf_size_to_bytes(bpf_size) \ ({ \ int bytes = -EINVAL; \ \ if (bpf_size == BPF_B) \ bytes = sizeof(u8); \ else if (bpf_size == BPF_H) \ bytes = sizeof(u16); \ else if (bpf_size == BPF_W) \ bytes = sizeof(u32); \ else if (bpf_size == BPF_DW) \ bytes = sizeof(u64); \ \ bytes; \ }) #define BPF_SIZEOF(type) \ ({ \ const int __size = bytes_to_bpf_size(sizeof(type)); \ 
BUILD_BUG_ON(__size < 0); \ __size; \ }) #define BPF_FIELD_SIZEOF(type, field) \ ({ \ const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \ BUILD_BUG_ON(__size < 0); \ __size; \ }) #define BPF_LDST_BYTES(insn) \ ({ \ const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \ WARN_ON(__size < 0); \ __size; \ }) #define __BPF_MAP_0(m, v, ...) v #define __BPF_MAP_1(m, v, t, a, ...) m(t, a) #define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__) #define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__) #define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__) #define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__) #define __BPF_REG_0(...) __BPF_PAD(5) #define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4) #define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3) #define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2) #define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1) #define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__) #define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__) #define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__) #define __BPF_CAST(t, a) \ (__force t) \ (__force \ typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \ (unsigned long)0, (t)0))) a #define __BPF_V void #define __BPF_N #define __BPF_DECL_ARGS(t, a) t a #define __BPF_DECL_REGS(t, a) u64 a #define __BPF_PAD(n) \ __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \ u64, __ur_3, u64, __ur_4, u64, __ur_5) #define BPF_CALL_x(x, attr, name, ...) \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ { \ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\ } \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) #define __NOATTR #define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__) #define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__) #define bpf_ctx_range(TYPE, MEMBER) \ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 #if BITS_PER_LONG == 64 # define bpf_ctx_range_ptr(TYPE, MEMBER) \ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 #else # define bpf_ctx_range_ptr(TYPE, MEMBER) \ offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1 #endif /* BITS_PER_LONG == 64 */ #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ ({ \ BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \ *(PTR_SIZE) = (SIZE); \ offsetof(TYPE, MEMBER); \ }) /* A struct sock_filter is architecture independent. 
*/ struct compat_sock_fprog { u16 len; compat_uptr_t filter; /* struct sock_filter * */ }; struct sock_fprog_kern { u16 len; struct sock_filter *filter; }; /* Some arches need doubleword alignment for their instructions and/or data */ #define BPF_IMAGE_ALIGNMENT 8 struct bpf_binary_header { u32 size; u8 image[] __aligned(BPF_IMAGE_ALIGNMENT); }; struct bpf_prog_stats { u64_stats_t cnt; u64_stats_t nsecs; u64_stats_t misses; struct u64_stats_sync syncp; } __aligned(2 * sizeof(u64)); struct sk_filter { refcount_t refcnt; struct rcu_head rcu; struct bpf_prog *prog; }; DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); extern struct mutex nf_conn_btf_access_lock; extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log, const struct bpf_reg_state *reg, int off, int size); typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx, const struct bpf_insn *insnsi, unsigned int (*bpf_func)(const void *, const struct bpf_insn *)); static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog, const void *ctx, bpf_dispatcher_fn dfunc) { u32 ret; cant_migrate(); if (static_branch_unlikely(&bpf_stats_enabled_key)) { struct bpf_prog_stats *stats; u64 duration, start = sched_clock(); unsigned long flags; ret = dfunc(ctx, prog->insnsi, prog->bpf_func); duration = sched_clock() - start; stats = this_cpu_ptr(prog->stats); flags = u64_stats_update_begin_irqsave(&stats->syncp); u64_stats_inc(&stats->cnt); u64_stats_add(&stats->nsecs, duration); u64_stats_update_end_irqrestore(&stats->syncp, flags); } else { ret = dfunc(ctx, prog->insnsi, prog->bpf_func); } return ret; } static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx) { return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func); } /* * Use in preemptible and therefore migratable context to make sure that * the execution of the BPF program runs on one CPU. * * This uses migrate_disable/enable() explicitly to document that the * invocation of a BPF program does not require reentrancy protection * against a BPF program which is invoked from a preempting task. 
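 *
 * Minimal call sketch (illustrative only; @prog and @ctx stand for an
 * already attached program and its matching context, neither is defined
 * here):
 *
 *	u32 act = bpf_prog_run_pin_on_cpu(prog, ctx);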
*/ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, const void *ctx) { u32 ret; migrate_disable(); ret = bpf_prog_run(prog, ctx); migrate_enable(); return ret; } #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN struct bpf_skb_data_end { struct qdisc_skb_cb qdisc_cb; void *data_meta; void *data_end; }; struct bpf_nh_params { u32 nh_family; union { u32 ipv4_nh; struct in6_addr ipv6_nh; }; }; /* flags for bpf_redirect_info kern_flags */ #define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ #define BPF_RI_F_RI_INIT BIT(1) #define BPF_RI_F_CPU_MAP_INIT BIT(2) #define BPF_RI_F_DEV_MAP_INIT BIT(3) #define BPF_RI_F_XSK_MAP_INIT BIT(4) struct bpf_redirect_info { u64 tgt_index; void *tgt_value; struct bpf_map *map; u32 flags; u32 map_id; enum bpf_map_type map_type; struct bpf_nh_params nh; u32 kern_flags; }; struct bpf_net_context { struct bpf_redirect_info ri; struct list_head cpu_map_flush_list; struct list_head dev_map_flush_list; struct list_head xskmap_map_flush_list; }; static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx) { struct task_struct *tsk = current; if (tsk->bpf_net_context != NULL) return NULL; bpf_net_ctx->ri.kern_flags = 0; tsk->bpf_net_context = bpf_net_ctx; return bpf_net_ctx; } static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx) { if (bpf_net_ctx) current->bpf_net_context = NULL; } static inline struct bpf_net_context *bpf_net_ctx_get(void) { return current->bpf_net_context; } static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void) { struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) { memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh)); bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT; } return &bpf_net_ctx->ri; } static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void) { struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) { INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list); bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT; } return &bpf_net_ctx->cpu_map_flush_list; } static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void) { struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) { INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list); bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT; } return &bpf_net_ctx->dev_map_flush_list; } static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void) { struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) { INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list); bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT; } return &bpf_net_ctx->xskmap_map_flush_list; } static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map, struct list_head **lh_dev, struct list_head **lh_xsk) { struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); u32 kern_flags = bpf_net_ctx->ri.kern_flags; struct list_head *lh; *lh_map = *lh_dev = *lh_xsk = NULL; if (!IS_ENABLED(CONFIG_BPF_SYSCALL)) return; lh = &bpf_net_ctx->dev_map_flush_list; if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh)) *lh_dev = lh; lh = &bpf_net_ctx->cpu_map_flush_list; if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh)) *lh_map = lh; lh = &bpf_net_ctx->xskmap_map_flush_list; if (IS_ENABLED(CONFIG_XDP_SOCKETS) && kern_flags & BPF_RI_F_XSK_MAP_INIT && 
!list_empty(lh)) *lh_xsk = lh; } /* Compute the linear packet data range [data, data_end) which * will be accessed by various program types (cls_bpf, act_bpf, * lwt, ...). Subsystems allowing direct data access must (!) * ensure that cb[] area can be written to when BPF program is * invoked (otherwise cb[] save/restore is necessary). */ static inline void bpf_compute_data_pointers(struct sk_buff *skb) { struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb)); cb->data_meta = skb->data - skb_metadata_len(skb); cb->data_end = skb->data + skb_headlen(skb); } /* Similar to bpf_compute_data_pointers(), except that save orginal * data in cb->data and cb->meta_data for restore. */ static inline void bpf_compute_and_save_data_end( struct sk_buff *skb, void **saved_data_end) { struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; *saved_data_end = cb->data_end; cb->data_end = skb->data + skb_headlen(skb); } /* Restore data saved by bpf_compute_and_save_data_end(). */ static inline void bpf_restore_data_end( struct sk_buff *skb, void *saved_data_end) { struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; cb->data_end = saved_data_end; } static inline u8 *bpf_skb_cb(const struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta * data between tail calls. Since this also needs to work with * tc, that scratch memory is mapped to qdisc_skb_cb's data area. * * In some socket filter cases, the cb unfortunately needs to be * saved/restored so that protocol specific skb->cb[] data won't * be lost. In any case, due to unpriviledged eBPF programs * attached to sockets, we need to clear the bpf_skb_cb() area * to not leak previous contents to user space. 
*/ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN); BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != sizeof_field(struct qdisc_skb_cb, data)); return qdisc_skb_cb(skb)->data; } /* Must be invoked with migration disabled */ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, const void *ctx) { const struct sk_buff *skb = ctx; u8 *cb_data = bpf_skb_cb(skb); u8 cb_saved[BPF_SKB_CB_LEN]; u32 res; if (unlikely(prog->cb_access)) { memcpy(cb_saved, cb_data, sizeof(cb_saved)); memset(cb_data, 0, sizeof(cb_saved)); } res = bpf_prog_run(prog, skb); if (unlikely(prog->cb_access)) memcpy(cb_data, cb_saved, sizeof(cb_saved)); return res; } static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, struct sk_buff *skb) { u32 res; migrate_disable(); res = __bpf_prog_run_save_cb(prog, skb); migrate_enable(); return res; } static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, struct sk_buff *skb) { u8 *cb_data = bpf_skb_cb(skb); u32 res; if (unlikely(prog->cb_access)) memset(cb_data, 0, BPF_SKB_CB_LEN); res = bpf_prog_run_pin_on_cpu(prog, skb); return res; } DECLARE_BPF_DISPATCHER(xdp) DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); u32 xdp_master_redirect(struct xdp_buff *xdp); void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) { return prog->len * sizeof(struct bpf_insn); } static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) { return round_up(bpf_prog_insn_size(prog) + sizeof(__be64) + 1, SHA1_BLOCK_SIZE); } static inline unsigned int bpf_prog_size(unsigned int proglen) { return max(sizeof(struct bpf_prog), offsetof(struct bpf_prog, insns[proglen])); } static inline bool bpf_prog_was_classic(const struct bpf_prog *prog) { /* When classic BPF programs have been loaded and the arch * does not have a classic BPF JIT (anymore), they have been * converted via bpf_migrate_filter() to eBPF and thus always * have an unspec program type. 
*/ return prog->type == BPF_PROG_TYPE_UNSPEC; } static inline u32 bpf_ctx_off_adjust_machine(u32 size) { const u32 size_machine = sizeof(unsigned long); if (size > size_machine && size % size_machine == 0) size = size_machine; return size; } static inline bool bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) { return size <= size_default && (size & (size - 1)) == 0; } static inline u8 bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) { u8 access_off = off & (size_default - 1); #ifdef __LITTLE_ENDIAN return access_off; #else return size_default - (access_off + size); #endif } #define bpf_ctx_wide_access_ok(off, size, type, field) \ (size == sizeof(__u64) && \ off >= offsetof(type, field) && \ off + sizeof(__u64) <= offsetofend(type, field) && \ off % sizeof(__u64) == 0) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON if (!fp->jited) { set_vm_flush_reset_perms(fp); return set_memory_ro((unsigned long)fp, fp->pages); } #endif return 0; } static inline int __must_check bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { set_vm_flush_reset_perms(hdr); return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT); } int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); static inline int sk_filter(struct sock *sk, struct sk_buff *skb) { return sk_filter_trim_cap(sk, skb, 1); } struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); bool bpf_opcode_in_insntable(u8 code); void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, const u32 *insn_to_jit_off); int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog); void bpf_prog_jit_attempt_done(struct bpf_prog *prog); struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, gfp_t gfp_extra_flags); void __bpf_prog_free(struct bpf_prog *fp); static inline void bpf_prog_unlock_free(struct bpf_prog *fp) { __bpf_prog_free(fp); } typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter, unsigned int flen); int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, bpf_aux_classic_check_t trans, bool save_orig); void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); void sk_reuseport_prog_free(struct bpf_prog *prog); int sk_detach_filter(struct sock *sk); int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len); bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); #define __bpf_call_base_args \ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ (void *)__bpf_call_base) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_jit_inlines_helper_call(s32 imm); bool bpf_jit_supports_subprog_tailcalls(void); bool bpf_jit_supports_percpu_insn(void); bool 
bpf_jit_supports_kfunc_call(void); bool bpf_jit_supports_far_kfunc_call(void); bool bpf_jit_supports_exceptions(void); bool bpf_jit_supports_ptr_xchg(void); bool bpf_jit_supports_arena(void); bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena); bool bpf_jit_supports_private_stack(void); u64 bpf_arch_uaddress_limit(void); void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); bool bpf_helper_changes_pkt_data(void *func); static inline bool bpf_dump_raw_ok(const struct cred *cred) { /* Reconstruction of call-sites is dependent on kallsyms, * thus make dump the same restriction. */ return kallsyms_show_value(cred); } struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); static inline bool xdp_return_frame_no_direct(void) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT; } static inline void xdp_set_return_frame_no_direct(void) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT; } static inline void xdp_clear_return_frame_no_direct(void) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT; } static inline int xdp_ok_fwd_dev(const struct net_device *fwd, unsigned int pktlen) { unsigned int len; if (unlikely(!(fwd->flags & IFF_UP))) return -ENETDOWN; len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; if (pktlen > len) return -EMSGSIZE; return 0; } /* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the * same cpu context. Further for best results no more than a single map * for the do_redirect/do_flush pair should be used. This limitation is * because we only track one map and force a flush when the map changes. * This does not appear to be a real limitation for existing software. 
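 *
 * Rough driver-side sketch of that contract (an illustration, not part of
 * this API): within one NAPI poll invocation, on one CPU,
 *
 *	while (budget--) {
 *		act = bpf_prog_run_xdp(prog, &xdp);
 *		if (act == XDP_REDIRECT)
 *			xdp_do_redirect(dev, &xdp, prog);
 *	}
 *	xdp_do_flush();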
*/ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, struct bpf_prog *prog); int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *prog); int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp, struct xdp_frame *xdpf, struct bpf_prog *prog); void xdp_do_flush(void); void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act); #ifdef CONFIG_INET struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, struct sock *migrating_sk, u32 hash); #else static inline struct sock * bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, struct sock *migrating_sk, u32 hash) { return NULL; } #endif #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; extern int bpf_jit_harden; extern int bpf_jit_kallsyms; extern long bpf_jit_limit; extern long bpf_jit_limit_max; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); void bpf_jit_fill_hole_with_zero(void *area, unsigned int size); struct bpf_binary_header * bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, unsigned int alignment, bpf_jit_fill_hole_t bpf_fill_ill_insns); void bpf_jit_binary_free(struct bpf_binary_header *hdr); u64 bpf_jit_alloc_exec_limit(void); void *bpf_jit_alloc_exec(unsigned long size); void bpf_jit_free_exec(void *addr); void bpf_jit_free(struct bpf_prog *fp); struct bpf_binary_header * bpf_jit_binary_pack_hdr(const struct bpf_prog *fp); void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns); void bpf_prog_pack_free(void *ptr, u32 size); static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) { return list_empty(&fp->aux->ksym.lnode) || fp->aux->ksym.lnode.prev == LIST_POISON2; } struct bpf_binary_header * bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image, unsigned int alignment, struct bpf_binary_header **rw_hdr, u8 **rw_image, bpf_jit_fill_hole_t bpf_fill_ill_insns); int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header); void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header); int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, struct bpf_jit_poke_descriptor *poke); int bpf_jit_get_func_addr(const struct bpf_prog *prog, const struct bpf_insn *insn, bool extra_pass, u64 *func_addr, bool *func_addr_fixed); struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen, proglen, pass, image, current->comm, task_pid_nr(current)); if (image) print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 16, 1, image, proglen, false); } static inline bool bpf_jit_is_ebpf(void) { # ifdef CONFIG_HAVE_EBPF_JIT return true; # else return false; # endif } static inline bool ebpf_jit_enabled(void) { return bpf_jit_enable && bpf_jit_is_ebpf(); } static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) { return fp->jited && bpf_jit_is_ebpf(); } static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) { /* These are the prerequisites, should someone ever have the * idea to call blinding outside of them, we make sure to * bail out. 
*/ if (!bpf_jit_is_ebpf()) return false; if (!prog->jit_requested) return false; if (!bpf_jit_harden) return false; if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF)) return false; return true; } static inline bool bpf_jit_kallsyms_enabled(void) { /* There are a couple of corner cases where kallsyms should * not be enabled f.e. on hardening. */ if (bpf_jit_harden) return false; if (!bpf_jit_kallsyms) return false; if (bpf_jit_kallsyms == 1) return true; return false; } int __bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char *sym); bool is_bpf_text_address(unsigned long addr); int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym); struct bpf_prog *bpf_prog_ksym_find(unsigned long addr); static inline int bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym) { int ret = __bpf_address_lookup(addr, size, off, sym); if (ret && modname) *modname = NULL; return ret; } void bpf_prog_kallsyms_add(struct bpf_prog *fp); void bpf_prog_kallsyms_del(struct bpf_prog *fp); #else /* CONFIG_BPF_JIT */ static inline bool ebpf_jit_enabled(void) { return false; } static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) { return false; } static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) { return false; } static inline int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, struct bpf_jit_poke_descriptor *poke) { return -ENOTSUPP; } static inline void bpf_jit_free(struct bpf_prog *fp) { bpf_prog_unlock_free(fp); } static inline bool bpf_jit_kallsyms_enabled(void) { return false; } static inline int __bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char *sym) { return 0; } static inline bool is_bpf_text_address(unsigned long addr) { return false; } static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym) { return -ERANGE; } static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) { return NULL; } static inline int bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym) { return 0; } static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) { } static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) { } #endif /* CONFIG_BPF_JIT */ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); #define BPF_ANC BIT(15) static inline bool bpf_needs_clear_a(const struct sock_filter *first) { switch (first->code) { case BPF_RET | BPF_K: case BPF_LD | BPF_W | BPF_LEN: return false; case BPF_LD | BPF_W | BPF_ABS: case BPF_LD | BPF_H | BPF_ABS: case BPF_LD | BPF_B | BPF_ABS: if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X) return true; return false; default: return true; } } static inline u16 bpf_anc_helper(const struct sock_filter *ftest) { BUG_ON(ftest->code & BPF_ANC); switch (ftest->code) { case BPF_LD | BPF_W | BPF_ABS: case BPF_LD | BPF_H | BPF_ABS: case BPF_LD | BPF_B | BPF_ABS: #define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ return BPF_ANC | SKF_AD_##CODE switch (ftest->k) { BPF_ANCILLARY(PROTOCOL); BPF_ANCILLARY(PKTTYPE); BPF_ANCILLARY(IFINDEX); BPF_ANCILLARY(NLATTR); BPF_ANCILLARY(NLATTR_NEST); BPF_ANCILLARY(MARK); BPF_ANCILLARY(QUEUE); BPF_ANCILLARY(HATYPE); BPF_ANCILLARY(RXHASH); BPF_ANCILLARY(CPU); BPF_ANCILLARY(ALU_XOR_X); BPF_ANCILLARY(VLAN_TAG); BPF_ANCILLARY(VLAN_TAG_PRESENT); BPF_ANCILLARY(PAY_OFFSET); BPF_ANCILLARY(RANDOM); BPF_ANCILLARY(VLAN_TPID); } fallthrough; default: return 
ftest->code; } } void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size); static inline int bpf_tell_extensions(void) { return SKF_AD_MAX; } struct bpf_sock_addr_kern { struct sock *sk; struct sockaddr *uaddr; /* Temporary "register" to make indirect stores to nested structures * defined above. We need three registers to make such a store, but * only two (src and dst) are available at convert_ctx_access time */ u64 tmp_reg; void *t_ctx; /* Attach type specific context. */ u32 uaddrlen; }; struct bpf_sock_ops_kern { struct sock *sk; union { u32 args[4]; u32 reply; u32 replylong[4]; }; struct sk_buff *syn_skb; struct sk_buff *skb; void *skb_data_end; u8 op; u8 is_fullsock; u8 remaining_opt_len; u64 temp; /* temp and everything after is not * initialized to 0 before calling * the BPF program. New fields that * should be initialized to 0 should * be inserted before temp. * temp is scratch storage used by * sock_ops_convert_ctx_access * as temporary storage of a register. */ }; struct bpf_sysctl_kern { struct ctl_table_header *head; const struct ctl_table *table; void *cur_val; size_t cur_len; void *new_val; size_t new_len; int new_updated; int write; loff_t *ppos; /* Temporary "register" for indirect stores to ppos. */ u64 tmp_reg; }; #define BPF_SOCKOPT_KERN_BUF_SIZE 32 struct bpf_sockopt_buf { u8 data[BPF_SOCKOPT_KERN_BUF_SIZE]; }; struct bpf_sockopt_kern { struct sock *sk; u8 *optval; u8 *optval_end; s32 level; s32 optname; s32 optlen; /* for retval in struct bpf_cg_run_ctx */ struct task_struct *current_task; /* Temporary "register" for indirect stores to ppos. */ u64 tmp_reg; }; int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len); struct bpf_sk_lookup_kern { u16 family; u16 protocol; __be16 sport; u16 dport; struct { __be32 saddr; __be32 daddr; } v4; struct { const struct in6_addr *saddr; const struct in6_addr *daddr; } v6; struct sock *selected_sk; u32 ingress_ifindex; bool no_reuseport; }; extern struct static_key_false bpf_sk_lookup_enabled; /* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup. * * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and * SK_DROP. Their meaning is as follows: * * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup * SK_DROP : terminate lookup with -ECONNREFUSED * * This macro aggregates return values and selected sockets from * multiple BPF programs according to following rules in order: * * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk, * macro result is SK_PASS and last ctx.selected_sk is used. * 2. If any program returned SK_DROP return value, * macro result is SK_DROP. * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL. * * Caller must ensure that the prog array is non-NULL, and that the * array as well as the programs it contains remain valid. 
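 *
 * Worked example of those rules (no additional semantics implied): programs
 * returning SK_PASS/sk=NULL, SK_DROP and SK_PASS/sk=B yield SK_PASS with B
 * selected (rule 1); the same array without the last program yields SK_DROP
 * (rule 2); if every program returns SK_PASS without selecting a socket,
 * the result is SK_PASS with ctx.selected_sk == NULL (rule 3).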
*/ #define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \ ({ \ struct bpf_sk_lookup_kern *_ctx = &(ctx); \ struct bpf_prog_array_item *_item; \ struct sock *_selected_sk = NULL; \ bool _no_reuseport = false; \ struct bpf_prog *_prog; \ bool _all_pass = true; \ u32 _ret; \ \ migrate_disable(); \ _item = &(array)->items[0]; \ while ((_prog = READ_ONCE(_item->prog))) { \ /* restore most recent selection */ \ _ctx->selected_sk = _selected_sk; \ _ctx->no_reuseport = _no_reuseport; \ \ _ret = func(_prog, _ctx); \ if (_ret == SK_PASS && _ctx->selected_sk) { \ /* remember last non-NULL socket */ \ _selected_sk = _ctx->selected_sk; \ _no_reuseport = _ctx->no_reuseport; \ } else if (_ret == SK_DROP && _all_pass) { \ _all_pass = false; \ } \ _item++; \ } \ _ctx->selected_sk = _selected_sk; \ _ctx->no_reuseport = _no_reuseport; \ migrate_enable(); \ _all_pass || _selected_sk ? SK_PASS : SK_DROP; \ }) static inline bool bpf_sk_lookup_run_v4(const struct net *net, int protocol, const __be32 saddr, const __be16 sport, const __be32 daddr, const u16 dport, const int ifindex, struct sock **psk) { struct bpf_prog_array *run_array; struct sock *selected_sk = NULL; bool no_reuseport = false; rcu_read_lock(); run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); if (run_array) { struct bpf_sk_lookup_kern ctx = { .family = AF_INET, .protocol = protocol, .v4.saddr = saddr, .v4.daddr = daddr, .sport = sport, .dport = dport, .ingress_ifindex = ifindex, }; u32 act; act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); if (act == SK_PASS) { selected_sk = ctx.selected_sk; no_reuseport = ctx.no_reuseport; } else { selected_sk = ERR_PTR(-ECONNREFUSED); } } rcu_read_unlock(); *psk = selected_sk; return no_reuseport; } #if IS_ENABLED(CONFIG_IPV6) static inline bool bpf_sk_lookup_run_v6(const struct net *net, int protocol, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const u16 dport, const int ifindex, struct sock **psk) { struct bpf_prog_array *run_array; struct sock *selected_sk = NULL; bool no_reuseport = false; rcu_read_lock(); run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); if (run_array) { struct bpf_sk_lookup_kern ctx = { .family = AF_INET6, .protocol = protocol, .v6.saddr = saddr, .v6.daddr = daddr, .sport = sport, .dport = dport, .ingress_ifindex = ifindex, }; u32 act; act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); if (act == SK_PASS) { selected_sk = ctx.selected_sk; no_reuseport = ctx.no_reuseport; } else { selected_sk = ERR_PTR(-ECONNREFUSED); } } rcu_read_unlock(); *psk = selected_sk; return no_reuseport; } #endif /* IS_ENABLED(CONFIG_IPV6) */ static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index, u64 flags, const u64 flag_mask, void *lookup_elem(struct bpf_map *map, u32 key)) { struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX; /* Lower bits of the flags are used as return code on lookup failure */ if (unlikely(flags & ~(action_mask | flag_mask))) return XDP_ABORTED; ri->tgt_value = lookup_elem(map, index); if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) { /* If the lookup fails we want to clear out the state in the * redirect_info struct completely, so that if an eBPF program * performs multiple lookups, the last one always takes * precedence. 
*/ ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */ ri->map_type = BPF_MAP_TYPE_UNSPEC; return flags & action_mask; } ri->tgt_index = index; ri->map_id = map->id; ri->map_type = map->map_type; if (flags & BPF_F_BROADCAST) { WRITE_ONCE(ri->map, map); ri->flags = flags; } else { WRITE_ONCE(ri->map, NULL); ri->flags = 0; } return XDP_REDIRECT; } #ifdef CONFIG_NET int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len); int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags); int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len); int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len); void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len); void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf, unsigned long len, bool flush); #else /* CONFIG_NET */ static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len) { return -EOPNOTSUPP; } static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) { return -EOPNOTSUPP; } static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len) { return -EOPNOTSUPP; } static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len) { return -EOPNOTSUPP; } static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) { return NULL; } static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf, unsigned long len, bool flush) { } #endif /* CONFIG_NET */ #endif /* __LINUX_FILTER_H__ */
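/*
 * Illustrative sketch, not part of filter.h: how the insn macros defined in
 * the header above are composed into a small eBPF program image. The
 * demo_prog array and its contents are assumptions made for this example
 * only.
 */
#include <linux/filter.h>

static const struct bpf_insn demo_prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 1),		/* r0 = 1 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),	/* r0 += 2 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 1),	/* if r0 == 3, skip next insn */
	BPF_MOV64_IMM(BPF_REG_0, 0),		/* r0 = 0 */
	BPF_EXIT_INSN(),			/* return r0 */
};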
/* * Copyright (c) 2017 Mellanox Technologies. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <linux/module.h> #include <linux/pid.h> #include <linux/pid_namespace.h> #include <linux/mutex.h> #include <net/netlink.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_netlink.h> #include "core_priv.h" #include "cma_priv.h" #include "restrack.h" #include "uverbs.h" /* * This determines whether a non-privileged user is allowed to specify a * controlled QKEY or not, when true non-privileged user is allowed to specify * a controlled QKEY. 
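* When it is false (the default for this static flag), rdma_nl_get_privileged_qkey() below still permits tasks with CAP_NET_RAW to use a controlled QKEY.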
*/ static bool privileged_qkey; typedef int (*res_fill_func_t)(struct sk_buff*, bool, struct rdma_restrack_entry*, uint32_t); /* * Sort array elements by the netlink attribute name */ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_CHARDEV] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_CHARDEV_ABI] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_CHARDEV_NAME] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_CHARDEV_TYPE] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE }, [RDMA_NLDEV_ATTR_DEV_DIM] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, .len = IB_DEVICE_NAME_MAX }, [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 }, [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 }, [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ }, [RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ }, [RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CTX] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CTX_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_DST_ADDR] = { .len = sizeof(struct __kernel_sockaddr_storage) }, [RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type 
= NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY }, [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_SRC_ADDR] = { .len = sizeof(struct __kernel_sockaddr_storage) }, [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_SUBTYPE] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SRQN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_SRQ_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_MIN_RANGE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_MAX_RANGE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_MODE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_RES] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_COUNTER] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_COUNTER_ID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTERS] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 }, [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 }, [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 }, [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 }, [RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DRIVER_DETAILS] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DEV_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_PARENT_NAME] = { .type = NLA_NUL_STRING }, [RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_EVENT_TYPE] = { .type = NLA_U8 }, }; static int put_driver_name_print_type(struct sk_buff *msg, const char *name, enum rdma_nldev_print_type print_type) { if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) return -EMSGSIZE; if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC && nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) return -EMSGSIZE; return 0; } static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, enum rdma_nldev_print_type print_type, u32 value) { if (put_driver_name_print_type(msg, name, print_type)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) return -EMSGSIZE; return 0; } static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, enum rdma_nldev_print_type print_type, u64 value) { if (put_driver_name_print_type(msg, name, print_type)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, 
RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; return 0; } int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name, const char *str) { if (put_driver_name_print_type(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC)) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str)) return -EMSGSIZE; return 0; } EXPORT_SYMBOL(rdma_nl_put_driver_string); int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) { return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u32); int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, u32 value) { return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex); int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) { return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u64); int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) { return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex); bool rdma_nl_get_privileged_qkey(void) { return privileged_qkey || capable(CAP_NET_RAW); } EXPORT_SYMBOL(rdma_nl_get_privileged_qkey); static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, dev_name(&device->dev))) return -EMSGSIZE; return 0; } static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) { char fw[IB_FW_VERSION_NAME_MAX]; int ret = 0; u32 port; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) return -EMSGSIZE; BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64)); if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, device->attrs.device_cap_flags, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; ib_get_device_fw_str(device, fw); /* Device without FW has strlen(fw) = 0 */ if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, be64_to_cpu(device->node_guid), RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, be64_to_cpu(device->attrs.sys_image_guid), RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim)) return -EMSGSIZE; if (device->type && nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_TYPE, device->type)) return -EMSGSIZE; if (device->parent && nla_put_string(msg, RDMA_NLDEV_ATTR_PARENT_NAME, dev_name(&device->parent->dev))) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE, device->name_assign_type)) return -EMSGSIZE; /* * Link type is determined on first port and mlx4 device * which can potentially have two different link type for the same * IB device is considered as better to be avoided in the future, */ port = rdma_start_port(device); if (rdma_cap_opa_mad(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); else if (rdma_protocol_ib(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); else if (rdma_protocol_iwarp(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); else if (rdma_protocol_roce(device, port)) ret = 
nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); else if (rdma_protocol_usnic(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "usnic"); return ret; } static int fill_port_info(struct sk_buff *msg, struct ib_device *device, u32 port, const struct net *net) { struct net_device *netdev = NULL; struct ib_port_attr attr; int ret; u64 cap_flags = 0; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) return -EMSGSIZE; ret = ib_query_port(device, port, &attr); if (ret) return ret; if (rdma_protocol_ib(device, port)) { BUILD_BUG_ON((sizeof(attr.port_cap_flags) + sizeof(attr.port_cap_flags2)) > sizeof(u64)); cap_flags = attr.port_cap_flags | ((u64)attr.port_cap_flags2 << 32); if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, cap_flags, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) return -EMSGSIZE; } if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) return -EMSGSIZE; netdev = ib_device_get_netdev(device, port); if (netdev && net_eq(dev_net(netdev), net)) { ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); if (ret) goto out; ret = nla_put_string(msg, RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); } out: dev_put(netdev); return ret; } static int fill_res_info_entry(struct sk_buff *msg, const char *name, u64 curr) { struct nlattr *entry_attr; entry_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); if (!entry_attr) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) goto err; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, RDMA_NLDEV_ATTR_PAD)) goto err; nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } static int fill_res_info(struct sk_buff *msg, struct ib_device *device, bool show_details) { static const char * const names[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_PD] = "pd", [RDMA_RESTRACK_CQ] = "cq", [RDMA_RESTRACK_QP] = "qp", [RDMA_RESTRACK_CM_ID] = "cm_id", [RDMA_RESTRACK_MR] = "mr", [RDMA_RESTRACK_CTX] = "ctx", [RDMA_RESTRACK_SRQ] = "srq", }; struct nlattr *table_attr; int ret, i, curr; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); if (!table_attr) return -EMSGSIZE; for (i = 0; i < RDMA_RESTRACK_MAX; i++) { if (!names[i]) continue; curr = rdma_restrack_count(device, i, show_details); ret = fill_res_info_entry(msg, names[i], curr); if (ret) goto err; } nla_nest_end(msg, table_attr); return 0; err: nla_nest_cancel(msg, table_attr); return ret; } static int fill_res_name_pid(struct sk_buff *msg, struct rdma_restrack_entry *res) { int err = 0; /* * For user resources, user is should read /proc/PID/comm to get the * name of the task file. */ if (rdma_is_kernel_res(res)) { err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, res->kern_name); } else { pid_t pid; pid = task_pid_vnr(res->task); /* * Task is dead and in zombie state. * There is no need to print PID anymore. 
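* (task_pid_vnr() returns 0 once the task's PID has been detached, so the attribute is simply omitted in that case.)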
*/ if (pid) /* * This part is racy, task can be killed and PID will * be zero right here but it is ok, next query won't * return PID. We don't promise real-time reflection * of SW objects. */ err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid); } return err ? -EMSGSIZE : 0; } static int fill_res_qp_entry_query(struct sk_buff *msg, struct rdma_restrack_entry *res, struct ib_device *dev, struct ib_qp *qp) { struct ib_qp_init_attr qp_init_attr; struct ib_qp_attr qp_attr; int ret; ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr); if (ret) return ret; if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, qp_attr.dest_qp_num)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, qp_attr.rq_psn)) goto err; } if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) goto err; if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC || qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) { if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, qp_attr.path_mig_state)) goto err; } if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) goto err; if (dev->ops.fill_res_qp_entry) return dev->ops.fill_res_qp_entry(msg, qp); return 0; err: return -EMSGSIZE; } static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_qp *qp = container_of(res, struct ib_qp, res); struct ib_device *dev = qp->device; int ret; if (port && port != qp->port) return -EAGAIN; /* In create_qp() port is not set yet */ if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) return -EMSGSIZE; ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); if (ret) return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) return -EMSGSIZE; ret = fill_res_name_pid(msg, res); if (ret) return -EMSGSIZE; return fill_res_qp_entry_query(msg, res, dev, qp); } static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_qp *qp = container_of(res, struct ib_qp, res); struct ib_device *dev = qp->device; if (port && port != qp->port) return -EAGAIN; if (!dev->ops.fill_res_qp_entry_raw) return -EINVAL; return dev->ops.fill_res_qp_entry_raw(msg, qp); } static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct rdma_id_private *id_priv = container_of(res, struct rdma_id_private, res); struct ib_device *dev = id_priv->id.device; struct rdma_cm_id *cm_id = &id_priv->id; if (port && port != cm_id->port_num) return -EAGAIN; if (cm_id->port_num && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) goto err; if (id_priv->qp_num) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) goto err; } if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) goto err; if (cm_id->route.addr.src_addr.ss_family && nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, sizeof(cm_id->route.addr.src_addr), &cm_id->route.addr.src_addr)) goto err; if (cm_id->route.addr.dst_addr.ss_family && nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, sizeof(cm_id->route.addr.dst_addr), &cm_id->route.addr.dst_addr)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) 
goto err; if (fill_res_name_pid(msg, res)) goto err; if (dev->ops.fill_res_cm_id_entry) return dev->ops.fill_res_cm_id_entry(msg, cm_id); return 0; err: return -EMSGSIZE; } static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_cq *cq = container_of(res, struct ib_cq, res); struct ib_device *dev = cq->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; /* Poll context is only valid for kernel CQs */ if (rdma_is_kernel_res(res) && nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, cq->uobject->uevent.uobject.context->res.id)) return -EMSGSIZE; if (fill_res_name_pid(msg, res)) return -EMSGSIZE; return (dev->ops.fill_res_cq_entry) ? dev->ops.fill_res_cq_entry(msg, cq) : 0; } static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_cq *cq = container_of(res, struct ib_cq, res); struct ib_device *dev = cq->device; if (!dev->ops.fill_res_cq_entry_raw) return -EINVAL; return dev->ops.fill_res_cq_entry_raw(msg, cq); } static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); struct ib_device *dev = mr->pd->device; if (has_cap_net_admin) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) return -EMSGSIZE; } if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) return -EMSGSIZE; if (fill_res_name_pid(msg, res)) return -EMSGSIZE; return (dev->ops.fill_res_mr_entry) ? 
dev->ops.fill_res_mr_entry(msg, mr) : 0; } static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); struct ib_device *dev = mr->pd->device; if (!dev->ops.fill_res_mr_entry_raw) return -EINVAL; return dev->ops.fill_res_mr_entry_raw(msg, mr); } static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_pd *pd = container_of(res, struct ib_pd, res); if (has_cap_net_admin) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, pd->local_dma_lkey)) goto err; if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, pd->unsafe_global_rkey)) goto err; } if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) goto err; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, pd->uobject->context->res.id)) goto err; return fill_res_name_pid(msg, res); err: return -EMSGSIZE; } static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res); if (rdma_is_kernel_res(res)) return 0; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) return -EMSGSIZE; return fill_res_name_pid(msg, res); } static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, uint32_t max_range) { struct nlattr *entry_attr; if (!min_range) return 0; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); if (!entry_attr) return -EMSGSIZE; if (min_range == max_range) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) goto err; } else { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) goto err; } nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) { uint32_t min_range = 0, prev = 0; struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; struct nlattr *table_attr; struct ib_qp *qp = NULL; unsigned long id = 0; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); if (!table_attr) return -EMSGSIZE; rt = &srq->device->res[RDMA_RESTRACK_QP]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { if (!rdma_restrack_get(res)) continue; qp = container_of(res, struct ib_qp, res); if (!qp->srq || (qp->srq->res.id != srq->res.id)) { rdma_restrack_put(res); continue; } if (qp->qp_num < prev) /* qp_num should be ascending */ goto err_loop; if (min_range == 0) { min_range = qp->qp_num; } else if (qp->qp_num > (prev + 1)) { if (fill_res_range_qp_entry(msg, min_range, prev)) goto err_loop; min_range = qp->qp_num; } prev = qp->qp_num; rdma_restrack_put(res); } xa_unlock(&rt->xa); if (fill_res_range_qp_entry(msg, min_range, prev)) goto err; nla_nest_end(msg, table_attr); return 0; err_loop: rdma_restrack_put(res); xa_unlock(&rt->xa); err: nla_nest_cancel(msg, table_attr); return -EMSGSIZE; } static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_srq *srq = container_of(res, struct ib_srq, res); struct ib_device *dev = srq->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) goto err; if (nla_put_u8(msg, 
RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) goto err; if (ib_srq_has_cq(srq->srq_type)) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, srq->ext.cq->res.id)) goto err; } if (fill_res_srq_qps(msg, srq)) goto err; if (fill_res_name_pid(msg, res)) goto err; if (dev->ops.fill_res_srq_entry) return dev->ops.fill_res_srq_entry(msg, srq); return 0; err: return -EMSGSIZE; } static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_srq *srq = container_of(res, struct ib_srq, res); struct ib_device *dev = srq->device; if (!dev->ops.fill_res_srq_entry_raw) return -EINVAL; return dev->ops.fill_res_srq_entry_raw(msg, srq); } static int fill_stat_counter_mode(struct sk_buff *msg, struct rdma_counter *counter) { struct rdma_counter_mode *m = &counter->mode; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) return -EMSGSIZE; if (m->mode == RDMA_COUNTER_MODE_AUTO) { if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) && nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) return -EMSGSIZE; if ((m->mask & RDMA_COUNTER_MASK_PID) && fill_res_name_pid(msg, &counter->res)) return -EMSGSIZE; } return 0; } static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) { struct nlattr *entry_attr; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); if (!entry_attr) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) goto err; nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } static int fill_stat_counter_qps(struct sk_buff *msg, struct rdma_counter *counter) { struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; struct nlattr *table_attr; struct ib_qp *qp = NULL; unsigned long id = 0; int ret = 0; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); if (!table_attr) return -EMSGSIZE; rt = &counter->device->res[RDMA_RESTRACK_QP]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { qp = container_of(res, struct ib_qp, res); if (!qp->counter || (qp->counter->id != counter->id)) continue; ret = fill_stat_counter_qp_entry(msg, qp->qp_num); if (ret) goto err; } xa_unlock(&rt->xa); nla_nest_end(msg, table_attr); return 0; err: xa_unlock(&rt->xa); nla_nest_cancel(msg, table_attr); return ret; } int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, u64 value) { struct nlattr *entry_attr; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); if (!entry_attr) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, name)) goto err; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, value, RDMA_NLDEV_ATTR_PAD)) goto err; nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry); static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); struct ib_device *dev = mr->pd->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) goto err; if (dev->ops.fill_stat_mr_entry) return dev->ops.fill_stat_mr_entry(msg, mr); return 0; err: return -EMSGSIZE; } static int fill_stat_counter_hwcounters(struct sk_buff *msg, struct rdma_counter *counter) { struct rdma_hw_stats *st = counter->stats; struct nlattr *table_attr; int i; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); 
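/* Each enabled counter below is emitted through rdma_nl_stat_hwcounter_entry() above, which nests a STAT_HWCOUNTER_ENTRY attribute carrying the counter name (string) and its 64-bit value; disabled counters are skipped. */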
if (!table_attr) return -EMSGSIZE; mutex_lock(&st->lock); for (i = 0; i < st->num_counters; i++) { if (test_bit(i, st->is_disabled)) continue; if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, st->value[i])) goto err; } mutex_unlock(&st->lock); nla_nest_end(msg, table_attr); return 0; err: mutex_unlock(&st->lock); nla_nest_cancel(msg, table_attr); return -EMSGSIZE; } static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct rdma_counter *counter = container_of(res, struct rdma_counter, res); if (port && port != counter->port) return -EAGAIN; /* Dump it even query failed */ rdma_counter_query_stats(counter); if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || fill_stat_counter_mode(msg, counter) || fill_stat_counter_qps(msg, counter) || fill_stat_counter_hwcounters(msg, counter)) return -EMSGSIZE; return 0; } static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index; int err; err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); if (!nlh) { err = -EMSGSIZE; goto err_free; } err = fill_dev_info(msg, device); if (err) goto err_free; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: ib_device_put(device); return err; } static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; u32 index; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) { char name[IB_DEVICE_NAME_MAX] = {}; nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], IB_DEVICE_NAME_MAX); if (strlen(name) == 0) { err = -EINVAL; goto done; } err = ib_device_rename(device, name); goto done; } if (tb[RDMA_NLDEV_NET_NS_FD]) { u32 ns_fd; ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]); err = ib_device_set_netns_put(skb, device, ns_fd); goto put_done; } if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) { u8 use_dim; use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]); err = ib_device_set_dim(device, use_dim); goto done; } done: ib_device_put(device); put_done: return err; } static int _nldev_get_dumpit(struct ib_device *device, struct sk_buff *skb, struct netlink_callback *cb, unsigned int idx) { int start = cb->args[0]; struct nlmsghdr *nlh; if (idx < start) return 0; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, NLM_F_MULTI); if (!nlh || fill_dev_info(skb, device)) { nlmsg_cancel(skb, nlh); goto out; } nlmsg_end(skb, nlh); idx++; out: cb->args[0] = idx; return skb->len; 
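/* Returning skb->len (non-zero) asks the netlink core to invoke this dump callback again for the next message; the index saved in cb->args[0] lets the next pass skip the devices that were already dumped. */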
} static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { /* * There is no need to take lock, because * we are relying on ib_core's locking. */ return ib_enum_all_devs(_nldev_get_dumpit, skb, cb); } static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index; u32 port; int err; err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { err = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); if (!nlh) { err = -EMSGSIZE; goto err_free; } err = fill_port_info(msg, device, port, sock_net(skb->sk)); if (err) goto err_free; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: ib_device_put(device); return err; } static int nldev_port_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; int start = cb->args[0]; struct nlmsghdr *nlh; u32 idx = 0; u32 ifindex; int err; unsigned int p; err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, NULL); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), ifindex); if (!device) return -EINVAL; rdma_for_each_port (device, p) { /* * The dumpit function returns all information from specific * index. This specific index is taken from the netlink * messages request sent by user and it is available * in cb->args[0]. * * Usually, the user doesn't fill this field and it causes * to return everything. 
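* cb->args[0] is also how this function records its own progress, so a dump that spans several netlink messages resumes from the last emitted port.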
* */ if (idx < start) { idx++; continue; } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET), 0, NLM_F_MULTI); if (!nlh || fill_port_info(skb, device, p, sock_net(skb->sk))) { nlmsg_cancel(skb, nlh); goto out; } idx++; nlmsg_end(skb, nlh); } out: ib_device_put(device); cb->args[0] = idx; return skb->len; } static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; bool show_details = false; struct ib_device *device; struct sk_buff *msg; u32 index; int ret; ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]) show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 0, 0); if (!nlh) { ret = -EMSGSIZE; goto err_free; } ret = fill_res_info(msg, device, show_details); if (ret) goto err_free; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int _nldev_res_get_dumpit(struct ib_device *device, struct sk_buff *skb, struct netlink_callback *cb, unsigned int idx) { int start = cb->args[0]; struct nlmsghdr *nlh; if (idx < start) return 0; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 0, NLM_F_MULTI); if (!nlh || fill_res_info(skb, device, false)) { nlmsg_cancel(skb, nlh); goto out; } nlmsg_end(skb, nlh); idx++; out: cb->args[0] = idx; return skb->len; } static int nldev_res_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb); } struct nldev_fill_res_entry { enum rdma_nldev_attr nldev_attr; u8 flags; u32 entry; u32 id; }; enum nldev_res_flags { NLDEV_PER_DEV = 1 << 0, }; static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_QP] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_QP, .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY, .id = RDMA_NLDEV_ATTR_RES_LQPN, }, [RDMA_RESTRACK_CM_ID] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID, .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CM_IDN, }, [RDMA_RESTRACK_CQ] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CQN, }, [RDMA_RESTRACK_MR] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_MR, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY, .id = RDMA_NLDEV_ATTR_RES_MRN, }, [RDMA_RESTRACK_PD] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_PD, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY, .id = RDMA_NLDEV_ATTR_RES_PDN, }, [RDMA_RESTRACK_COUNTER] = { .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER, .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID, }, [RDMA_RESTRACK_CTX] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CTXN, }, [RDMA_RESTRACK_SRQ] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ, 
.flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY, .id = RDMA_NLDEV_ATTR_RES_SRQN, }, }; static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack, enum rdma_restrack_type res_type, res_fill_func_t fill_func) { const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct rdma_restrack_entry *res; struct ib_device *device; u32 index, id, port = 0; bool has_cap_net_admin; struct sk_buff *msg; int ret; ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } } if ((port && fe->flags & NLDEV_PER_DEV) || (!port && ~fe->flags & NLDEV_PER_DEV)) { ret = -EINVAL; goto err; } id = nla_get_u32(tb[fe->id]); res = rdma_restrack_get_byid(device, res_type, id); if (IS_ERR(res)) { ret = PTR_ERR(res); goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err_get; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NL_GET_OP(nlh->nlmsg_type)), 0, 0); if (!nlh || fill_nldev_handle(msg, device)) { ret = -EMSGSIZE; goto err_free; } has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); ret = fill_func(msg, has_cap_net_admin, res, port); if (ret) goto err_free; rdma_restrack_put(res); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err_get: rdma_restrack_put(res); err: ib_device_put(device); return ret; } static int res_get_common_dumpit(struct sk_buff *skb, struct netlink_callback *cb, enum rdma_restrack_type res_type, res_fill_func_t fill_func) { const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; int err, ret = 0, idx = 0; bool show_details = false; struct nlattr *table_attr; struct nlattr *entry_attr; struct ib_device *device; int start = cb->args[0]; bool has_cap_net_admin; struct nlmsghdr *nlh; unsigned long id; u32 index, port = 0; bool filled = false; err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, NULL); /* * Right now, we are expecting the device index to get res information, * but it is possible to extend this code to return all devices in * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX. * if it doesn't exist, we will iterate over all devices. * * But it is not needed for now. 
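* The fill_entries table above provides the per-type attributes used here: fe->nldev_attr for the surrounding nest, fe->entry for each object, and fe->id for the doit lookup path.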
*/ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]) show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]); /* * If no PORT_INDEX is supplied, we will return all QPs from that device */ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err_index; } } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NL_GET_OP(cb->nlh->nlmsg_type)), 0, NLM_F_MULTI); if (!nlh || fill_nldev_handle(skb, device)) { ret = -EMSGSIZE; goto err; } table_attr = nla_nest_start_noflag(skb, fe->nldev_attr); if (!table_attr) { ret = -EMSGSIZE; goto err; } has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN); rt = &device->res[res_type]; xa_lock(&rt->xa); /* * FIXME: if the skip ahead is something common this loop should * use xas_for_each & xas_pause to optimize, we can have a lot of * objects. */ xa_for_each(&rt->xa, id, res) { if (xa_get_mark(&rt->xa, res->id, RESTRACK_DD) && !show_details) goto next; if (idx < start || !rdma_restrack_get(res)) goto next; xa_unlock(&rt->xa); filled = true; entry_attr = nla_nest_start_noflag(skb, fe->entry); if (!entry_attr) { ret = -EMSGSIZE; rdma_restrack_put(res); goto msg_full; } ret = fill_func(skb, has_cap_net_admin, res, port); rdma_restrack_put(res); if (ret) { nla_nest_cancel(skb, entry_attr); if (ret == -EMSGSIZE) goto msg_full; if (ret == -EAGAIN) goto again; goto res_err; } nla_nest_end(skb, entry_attr); again: xa_lock(&rt->xa); next: idx++; } xa_unlock(&rt->xa); msg_full: nla_nest_end(skb, table_attr); nlmsg_end(skb, nlh); cb->args[0] = idx; /* * No more entries to fill, cancel the message and * return 0 to mark end of dumpit. 
*/ if (!filled) goto err; ib_device_put(device); return skb->len; res_err: nla_nest_cancel(skb, table_attr); err: nlmsg_cancel(skb, nlh); err_index: ib_device_put(device); return ret; } #define RES_GET_FUNCS(name, type) \ static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ struct netlink_callback *cb) \ { \ return res_get_common_dumpit(skb, cb, type, \ fill_res_##name##_entry); \ } \ static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ struct nlmsghdr *nlh, \ struct netlink_ext_ack *extack) \ { \ return res_get_common_doit(skb, nlh, extack, type, \ fill_res_##name##_entry); \ } RES_GET_FUNCS(qp, RDMA_RESTRACK_QP); RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP); RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID); RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ); RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ); RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR); RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER); RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX); RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ); RES_GET_FUNCS(srq_raw, RDMA_RESTRACK_SRQ); static LIST_HEAD(link_ops); static DECLARE_RWSEM(link_ops_rwsem); static const struct rdma_link_ops *link_ops_get(const char *type) { const struct rdma_link_ops *ops; list_for_each_entry(ops, &link_ops, list) { if (!strcmp(ops->type, type)) goto out; } ops = NULL; out: return ops; } void rdma_link_register(struct rdma_link_ops *ops) { down_write(&link_ops_rwsem); if (WARN_ON_ONCE(link_ops_get(ops->type))) goto out; list_add(&ops->list, &link_ops); out: up_write(&link_ops_rwsem); } EXPORT_SYMBOL(rdma_link_register); void rdma_link_unregister(struct rdma_link_ops *ops) { down_write(&link_ops_rwsem); list_del(&ops->list); up_write(&link_ops_rwsem); } EXPORT_SYMBOL(rdma_link_unregister); static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; char ibdev_name[IB_DEVICE_NAME_MAX]; const struct rdma_link_ops *ops; char ndev_name[IFNAMSIZ]; struct net_device *ndev; char type[IFNAMSIZ]; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME]) return -EINVAL; nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME], sizeof(ibdev_name)); if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0) return -EINVAL; nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type)); nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME], sizeof(ndev_name)); ndev = dev_get_by_name(sock_net(skb->sk), ndev_name); if (!ndev) return -ENODEV; down_read(&link_ops_rwsem); ops = link_ops_get(type); #ifdef CONFIG_MODULES if (!ops) { up_read(&link_ops_rwsem); request_module("rdma-link-%s", type); down_read(&link_ops_rwsem); ops = link_ops_get(type); } #endif err = ops ? 
ops->newlink(ibdev_name, ndev) : -EINVAL; up_read(&link_ops_rwsem); dev_put(ndev); return err; } static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; u32 index; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) { ib_device_put(device); return -EINVAL; } ib_unregister_device_and_put(device); return 0; } static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE]; struct ib_client_nl_info data = {}; struct ib_device *ibdev = NULL; struct sk_buff *msg; u32 index; int err; err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, extack); if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE]) return -EINVAL; nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE], sizeof(client_name)); if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) { index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); ibdev = ib_device_get_by_index(sock_net(skb->sk), index); if (!ibdev) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(ibdev, data.port)) { err = -EINVAL; goto out_put; } } else { data.port = -1; } } else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { return -EINVAL; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto out_put; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET_CHARDEV), 0, 0); if (!nlh) { err = -EMSGSIZE; goto out_nlmsg; } data.nl_msg = msg; err = ib_get_client_nl_info(ibdev, client_name, &data); if (err) goto out_nlmsg; err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, huge_encode_dev(data.cdev->devt), RDMA_NLDEV_ATTR_PAD); if (err) goto out_data; err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, RDMA_NLDEV_ATTR_PAD); if (err) goto out_data; if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, dev_name(data.cdev))) { err = -EMSGSIZE; goto out_data; } nlmsg_end(msg, nlh); put_device(data.cdev); if (ibdev) ib_device_put(ibdev); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); out_data: put_device(data.cdev); out_nlmsg: nlmsg_free(msg); out_put: if (ibdev) ib_device_put(ibdev); return err; } static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct sk_buff *msg; int err; err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, extack); if (err) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_SYS_GET), 0, 0); if (!nlh) { nlmsg_free(msg); return -EMSGSIZE; } err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, (u8)ib_devices_shared_netns); if (err) { nlmsg_free(msg); return err; } err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, (u8)privileged_qkey); if (err) { nlmsg_free(msg); return err; } err = nla_put_u8(msg, 
RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, 1); if (err) { nlmsg_free(msg); return err; } /* * Copy-on-fork is supported. * See commits: * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes") * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm") * for more details. Don't backport this without them. * * Return value ignored on purpose, assume copy-on-fork is not * supported in case of failure. */ nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1); nlmsg_end(msg, nlh); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); } static int nldev_set_sys_set_netns_doit(struct nlattr *tb[]) { u8 enable; int err; enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]); /* Only 0 and 1 are supported */ if (enable > 1) return -EINVAL; err = rdma_compatdev_set(enable); return err; } static int nldev_set_sys_set_pqkey_doit(struct nlattr *tb[]) { u8 enable; enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]); /* Only 0 and 1 are supported */ if (enable > 1) return -EINVAL; privileged_qkey = enable; return 0; } static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err) return -EINVAL; if (tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]) return nldev_set_sys_set_netns_doit(tb); if (tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]) return nldev_set_sys_set_pqkey_doit(tb); return -EINVAL; } static int nldev_stat_set_mode_doit(struct sk_buff *msg, struct netlink_ext_ack *extack, struct nlattr *tb[], struct ib_device *device, u32 port) { u32 mode, mask = 0, qpn, cntn = 0; int ret; /* Currently only counter for QP is supported */ if (!tb[RDMA_NLDEV_ATTR_STAT_RES] || nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) return -EINVAL; mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); if (mode == RDMA_COUNTER_MODE_AUTO) { if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]) mask = nla_get_u32( tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]); return rdma_counter_set_auto_mode(device, port, mask, extack); } if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) return -EINVAL; qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) { cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); ret = rdma_counter_bind_qpn(device, port, qpn, cntn); if (ret) return ret; } else { ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn); if (ret) return ret; } if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { ret = -EMSGSIZE; goto err_fill; } return 0; err_fill: rdma_counter_unbind_qpn(device, port, qpn, cntn); return ret; } static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[], struct ib_device *device, u32 port) { struct rdma_hw_stats *stats; struct nlattr *entry_attr; unsigned long *target; int rem, i, ret = 0; u32 index; stats = ib_get_hw_stats_port(device, port); if (!stats) return -EINVAL; target = kcalloc(BITS_TO_LONGS(stats->num_counters), sizeof(*stats->is_disabled), GFP_KERNEL); if (!target) return -ENOMEM; nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS], rem) { index = nla_get_u32(entry_attr); if ((index >= stats->num_counters) || !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) { ret = -EINVAL; goto out; } set_bit(index, target); } for (i = 0; i < stats->num_counters; i++) { if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL)) continue; ret = 
rdma_counter_modify(device, port, i, test_bit(i, target)); if (ret) goto out; } out: kfree(target); return ret; } static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index, port; int ret; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err_put_device; } if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] && !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { ret = -EINVAL; goto err_put_device; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err_put_device; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_SET), 0, 0); if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { ret = -EMSGSIZE; goto err_free_msg; } if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) { ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port); if (ret) goto err_free_msg; } if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { ret = nldev_stat_set_counter_dynamic_doit(tb, device, port); if (ret) goto err_free_msg; } nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free_msg: nlmsg_free(msg); err_put_device: ib_device_put(device); return ret; } static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index, port, qpn, cntn; int ret; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] || !tb[RDMA_NLDEV_ATTR_RES_LQPN]) return -EINVAL; if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_SET), 0, 0); if (!nlh) { ret = -EMSGSIZE; goto err_fill; } cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); if (fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { ret = -EMSGSIZE; goto err_fill; } ret = rdma_counter_unbind_qpn(device, port, qpn, cntn); if (ret) goto err_fill; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_fill: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int stat_get_doit_default_counter(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack 
*extack, struct nlattr *tb[]) { struct rdma_hw_stats *stats; struct nlattr *table_attr; struct ib_device *device; int ret, num_cnts, i; struct sk_buff *msg; u32 index, port; u64 v; if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (!device->ops.alloc_hw_port_stats || !device->ops.get_hw_stats) { ret = -EINVAL; goto err; } port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); stats = ib_get_hw_stats_port(device, port); if (!stats) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET), 0, 0); if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { ret = -EMSGSIZE; goto err_msg; } mutex_lock(&stats->lock); num_cnts = device->ops.get_hw_stats(device, stats, port, 0); if (num_cnts < 0) { ret = -EINVAL; goto err_stats; } table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); if (!table_attr) { ret = -EMSGSIZE; goto err_stats; } for (i = 0; i < num_cnts; i++) { if (test_bit(i, stats->is_disabled)) continue; v = stats->value[i] + rdma_counter_get_hwstat_value(device, port, i); if (rdma_nl_stat_hwcounter_entry(msg, stats->descs[i].name, v)) { ret = -EMSGSIZE; goto err_table; } } nla_nest_end(msg, table_attr); mutex_unlock(&stats->lock); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_table: nla_nest_cancel(msg, table_attr); err_stats: mutex_unlock(&stats->lock); err_msg: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack, struct nlattr *tb[]) { static enum rdma_nl_counter_mode mode; static enum rdma_nl_counter_mask mask; struct ib_device *device; struct sk_buff *msg; u32 index, port; int ret; if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) return nldev_res_get_counter_doit(skb, nlh, extack); if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET), 0, 0); if (!nlh) { ret = -EMSGSIZE; goto err_msg; } ret = rdma_counter_get_mode(device, port, &mode, &mask); if (ret) goto err_msg; if (fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { ret = -EMSGSIZE; goto err_msg; } if ((mode == RDMA_COUNTER_MODE_AUTO) && nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { ret = -EMSGSIZE; goto err_msg; } nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_msg: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr 
*tb[RDMA_NLDEV_ATTR_MAX]; int ret; ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, extack); if (ret) return -EINVAL; if (!tb[RDMA_NLDEV_ATTR_STAT_RES]) return stat_get_doit_default_counter(skb, nlh, extack, tb); switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) { case RDMA_NLDEV_ATTR_RES_QP: ret = stat_get_doit_qp(skb, nlh, extack, tb); break; case RDMA_NLDEV_ATTR_RES_MR: ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR, fill_stat_mr_entry); break; default: ret = -EINVAL; break; } return ret; } static int nldev_stat_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; int ret; ret = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, NULL); if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES]) return -EINVAL; switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) { case RDMA_NLDEV_ATTR_RES_QP: ret = nldev_res_get_counter_dumpit(skb, cb); break; case RDMA_NLDEV_ATTR_RES_MR: ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR, fill_stat_mr_entry); break; default: ret = -EINVAL; break; } return ret; } static int nldev_stat_get_counter_status_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry; struct rdma_hw_stats *stats; struct ib_device *device; struct sk_buff *msg; u32 devid, port; int ret, i; ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NL_VALIDATE_LIBERAL, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), devid); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } stats = ib_get_hw_stats_port(device, port); if (!stats) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put( msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS), 0, 0); ret = -EMSGSIZE; if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) goto err_msg; table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); if (!table) goto err_msg; mutex_lock(&stats->lock); for (i = 0; i < stats->num_counters; i++) { entry = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); if (!entry) goto err_msg_table; if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, stats->descs[i].name) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i)) goto err_msg_entry; if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) && (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, !test_bit(i, stats->is_disabled)))) goto err_msg_entry; nla_nest_end(msg, entry); } mutex_unlock(&stats->lock); nla_nest_end(msg, table); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_msg_entry: nla_nest_cancel(msg, entry); err_msg_table: mutex_unlock(&stats->lock); nla_nest_cancel(msg, table); err_msg: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int nldev_newdev(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; enum rdma_nl_dev_type type; struct ib_device *parent; char name[IFNAMSIZ] = {}; u32 parentid; int ret; ret = 
nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || !tb[RDMA_NLDEV_ATTR_DEV_TYPE]) return -EINVAL; nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], sizeof(name)); type = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_TYPE]); parentid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); parent = ib_device_get_by_index(sock_net(skb->sk), parentid); if (!parent) return -EINVAL; ret = ib_add_sub_device(parent, type, name); ib_device_put(parent); return ret; } static int nldev_deldev(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; u32 devid; int ret; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), devid); if (!device) return -EINVAL; return ib_del_sub_device_and_put(device); } static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { [RDMA_NLDEV_CMD_GET] = { .doit = nldev_get_doit, .dump = nldev_get_dumpit, }, [RDMA_NLDEV_CMD_GET_CHARDEV] = { .doit = nldev_get_chardev, }, [RDMA_NLDEV_CMD_SET] = { .doit = nldev_set_doit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_NEWLINK] = { .doit = nldev_newlink, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_DELLINK] = { .doit = nldev_dellink, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_PORT_GET] = { .doit = nldev_port_get_doit, .dump = nldev_port_get_dumpit, }, [RDMA_NLDEV_CMD_RES_GET] = { .doit = nldev_res_get_doit, .dump = nldev_res_get_dumpit, }, [RDMA_NLDEV_CMD_RES_QP_GET] = { .doit = nldev_res_get_qp_doit, .dump = nldev_res_get_qp_dumpit, }, [RDMA_NLDEV_CMD_RES_CM_ID_GET] = { .doit = nldev_res_get_cm_id_doit, .dump = nldev_res_get_cm_id_dumpit, }, [RDMA_NLDEV_CMD_RES_CQ_GET] = { .doit = nldev_res_get_cq_doit, .dump = nldev_res_get_cq_dumpit, }, [RDMA_NLDEV_CMD_RES_MR_GET] = { .doit = nldev_res_get_mr_doit, .dump = nldev_res_get_mr_dumpit, }, [RDMA_NLDEV_CMD_RES_PD_GET] = { .doit = nldev_res_get_pd_doit, .dump = nldev_res_get_pd_dumpit, }, [RDMA_NLDEV_CMD_RES_CTX_GET] = { .doit = nldev_res_get_ctx_doit, .dump = nldev_res_get_ctx_dumpit, }, [RDMA_NLDEV_CMD_RES_SRQ_GET] = { .doit = nldev_res_get_srq_doit, .dump = nldev_res_get_srq_dumpit, }, [RDMA_NLDEV_CMD_SYS_GET] = { .doit = nldev_sys_get_doit, }, [RDMA_NLDEV_CMD_SYS_SET] = { .doit = nldev_set_sys_set_doit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_STAT_SET] = { .doit = nldev_stat_set_doit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_STAT_GET] = { .doit = nldev_stat_get_doit, .dump = nldev_stat_get_dumpit, }, [RDMA_NLDEV_CMD_STAT_DEL] = { .doit = nldev_stat_del_doit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_RES_QP_GET_RAW] = { .doit = nldev_res_get_qp_raw_doit, .dump = nldev_res_get_qp_raw_dumpit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = { .doit = nldev_res_get_cq_raw_doit, .dump = nldev_res_get_cq_raw_dumpit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_RES_MR_GET_RAW] = { .doit = nldev_res_get_mr_raw_doit, .dump = nldev_res_get_mr_raw_dumpit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_RES_SRQ_GET_RAW] = { .doit = nldev_res_get_srq_raw_doit, .dump = nldev_res_get_srq_raw_dumpit, .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_STAT_GET_STATUS] = { .doit = nldev_stat_get_counter_status_doit, }, [RDMA_NLDEV_CMD_NEWDEV] = { .doit = nldev_newdev, .flags = RDMA_NL_ADMIN_PERM, }, 
[RDMA_NLDEV_CMD_DELDEV] = { .doit = nldev_deldev, .flags = RDMA_NL_ADMIN_PERM, }, }; static int fill_mon_netdev_rename(struct sk_buff *msg, struct ib_device *device, u32 port, const struct net *net) { struct net_device *netdev = ib_device_get_netdev(device, port); int ret = 0; if (!netdev || !net_eq(dev_net(netdev), net)) goto out; ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); if (ret) goto out; ret = nla_put_string(msg, RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); out: dev_put(netdev); return ret; } static int fill_mon_netdev_association(struct sk_buff *msg, struct ib_device *device, u32 port, const struct net *net) { struct net_device *netdev = ib_device_get_netdev(device, port); int ret = 0; if (netdev && !net_eq(dev_net(netdev), net)) goto out; ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index); if (ret) goto out; ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, dev_name(&device->dev)); if (ret) goto out; ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port); if (ret) goto out; if (netdev) { ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); if (ret) goto out; ret = nla_put_string(msg, RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); } out: dev_put(netdev); return ret; } static void rdma_nl_notify_err_msg(struct ib_device *device, u32 port_num, enum rdma_nl_notify_event_type type) { struct net_device *netdev; switch (type) { case RDMA_REGISTER_EVENT: dev_warn_ratelimited(&device->dev, "Failed to send RDMA monitor register device event\n"); break; case RDMA_UNREGISTER_EVENT: dev_warn_ratelimited(&device->dev, "Failed to send RDMA monitor unregister device event\n"); break; case RDMA_NETDEV_ATTACH_EVENT: netdev = ib_device_get_netdev(device, port_num); dev_warn_ratelimited(&device->dev, "Failed to send RDMA monitor netdev attach event: port %d netdev %d\n", port_num, netdev->ifindex); dev_put(netdev); break; case RDMA_NETDEV_DETACH_EVENT: dev_warn_ratelimited(&device->dev, "Failed to send RDMA monitor netdev detach event: port %d\n", port_num); break; case RDMA_RENAME_EVENT: dev_warn_ratelimited(&device->dev, "Failed to send RDMA monitor rename device event\n"); break; case RDMA_NETDEV_RENAME_EVENT: netdev = ib_device_get_netdev(device, port_num); dev_warn_ratelimited(&device->dev, "Failed to send RDMA monitor netdev rename event: port %d netdev %d\n", port_num, netdev->ifindex); dev_put(netdev); break; default: break; } } int rdma_nl_notify_event(struct ib_device *device, u32 port_num, enum rdma_nl_notify_event_type type) { struct sk_buff *skb; struct net *net; int ret = 0; void *nlh; net = read_pnet(&device->coredev.rdma_net); if (!net) return -EINVAL; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOMEM; nlh = nlmsg_put(skb, 0, 0, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_MONITOR), 0, 0); if (!nlh) goto err_free; switch (type) { case RDMA_REGISTER_EVENT: case RDMA_UNREGISTER_EVENT: case RDMA_RENAME_EVENT: ret = fill_nldev_handle(skb, device); if (ret) goto err_free; break; case RDMA_NETDEV_ATTACH_EVENT: case RDMA_NETDEV_DETACH_EVENT: ret = fill_mon_netdev_association(skb, device, port_num, net); if (ret) goto err_free; break; case RDMA_NETDEV_RENAME_EVENT: ret = fill_mon_netdev_rename(skb, device, port_num, net); if (ret) goto err_free; break; default: break; } ret = nla_put_u8(skb, RDMA_NLDEV_ATTR_EVENT_TYPE, type); if (ret) goto err_free; nlmsg_end(skb, nlh); ret = rdma_nl_multicast(net, skb, RDMA_NL_GROUP_NOTIFY, GFP_KERNEL); if (ret && ret != -ESRCH) { skb = NULL; /* skb is freed in the netlink send-op 
handling */ goto err_free; } return 0; err_free: rdma_nl_notify_err_msg(device, port_num, type); nlmsg_free(skb); return ret; } void __init nldev_init(void) { rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table); } void nldev_exit(void) { rdma_nl_unregister(RDMA_NL_NLDEV); } MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
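/*
 * Usage sketch (not part of the kernel tree): the RDMA_NLDEV_CMD_STAT_GET
 * request handled by stat_get_doit_default_counter() above can be issued from
 * userspace over a NETLINK_RDMA socket.  This minimal example assumes libnl-3
 * and the uapi <rdma/rdma_netlink.h> header; device index 0 and port 1 are
 * placeholders (real values can be discovered with RDMA_NLDEV_CMD_GET or the
 * iproute2 "rdma" tool).
 */
#include <stdio.h>
#include <linux/netlink.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <rdma/rdma_netlink.h>

static int dump_reply(struct nl_msg *msg, void *arg)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];

	/* nldev replies carry netlink attributes right after the nlmsghdr */
	if (nlmsg_parse(nlmsg_hdr(msg), 0, tb, RDMA_NLDEV_ATTR_MAX - 1, NULL) < 0)
		return NL_SKIP;
	if (tb[RDMA_NLDEV_ATTR_DEV_NAME])
		printf("device: %s\n", nla_get_string(tb[RDMA_NLDEV_ATTR_DEV_NAME]));
	if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS])
		printf("hw counter table: %d bytes of nested attributes\n",
		       nla_len(tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]));
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;

	if (!sk || nl_connect(sk, NETLINK_RDMA))
		return 1;
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, dump_reply, NULL);

	msg = nlmsg_alloc();
	nlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ,
		  RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET),
		  0, NLM_F_REQUEST);
	nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, 0);	/* placeholder */
	nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, 1);	/* placeholder */
	nl_send_auto(sk, msg);
	nlmsg_free(msg);

	nl_recvmsgs_default(sk);
	nl_socket_free(sk);
	return 0;
}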
// SPDX-License-Identifier: GPL-2.0-only /*************************************************************************** * Copyright (C) 2010-2012 by Bruno Prémont <bonbons@linux-vserver.org> * * * * Based on Logitech G13 driver (v0.4) * * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> * * * ***************************************************************************/ #include <linux/hid.h> #include <linux/hid-debug.h> #include <linux/input.h> #include "hid-ids.h" #include <linux/fb.h> #include <linux/vmalloc.h> #include <linux/backlight.h> #include <linux/lcd.h> #include <linux/leds.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/completion.h> #include <linux/uaccess.h> #include <linux/module.h> #include <media/rc-core.h> #include "hid-picolcd.h" int picolcd_raw_cir(struct picolcd_data *data, struct hid_report *report, u8 *raw_data, int size) { unsigned long flags; int i, w, sz; struct ir_raw_event rawir = {}; /* ignore if rc_dev is NULL or status is shunned */ spin_lock_irqsave(&data->lock, flags); if (!data->rc_dev || (data->status & PICOLCD_CIR_SHUN)) { spin_unlock_irqrestore(&data->lock, flags); return 1; } spin_unlock_irqrestore(&data->lock, flags); /* PicoLCD USB packets contain 16-bit intervals in network order, * with value negated for pulse. Intervals are in microseconds. * * Note: some userspace LIRC code for PicoLCD says negated values * for space - is it a matter of IR chip? (pulse for my TSOP2236) * * In addition, the first interval seems to be around 15000 + base * interval for non-first report of IR data - thus the quirk below * to get RC_CODE to understand Sony and JVC remotes I have at hand */ sz = size > 0 ? min((int)raw_data[0], size-1) : 0; for (i = 0; i+1 < sz; i += 2) { w = (raw_data[i] << 8) | (raw_data[i+1]); rawir.pulse = !!(w & 0x8000); rawir.duration = rawir.pulse ? (65536 - w) : w; /* Quirk!!
- see above */ if (i == 0 && rawir.duration > 15000) rawir.duration -= 15000; ir_raw_event_store(data->rc_dev, &rawir); } ir_raw_event_handle(data->rc_dev); return 1; } static int picolcd_cir_open(struct rc_dev *dev) { struct picolcd_data *data = dev->priv; unsigned long flags; spin_lock_irqsave(&data->lock, flags); data->status &= ~PICOLCD_CIR_SHUN; spin_unlock_irqrestore(&data->lock, flags); return 0; } static void picolcd_cir_close(struct rc_dev *dev) { struct picolcd_data *data = dev->priv; unsigned long flags; spin_lock_irqsave(&data->lock, flags); data->status |= PICOLCD_CIR_SHUN; spin_unlock_irqrestore(&data->lock, flags); } /* initialize CIR input device */ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report) { struct rc_dev *rdev; int ret = 0; rdev = rc_allocate_device(RC_DRIVER_IR_RAW); if (!rdev) return -ENOMEM; rdev->priv = data; rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rdev->open = picolcd_cir_open; rdev->close = picolcd_cir_close; rdev->device_name = data->hdev->name; rdev->input_phys = data->hdev->phys; rdev->input_id.bustype = data->hdev->bus; rdev->input_id.vendor = data->hdev->vendor; rdev->input_id.product = data->hdev->product; rdev->input_id.version = data->hdev->version; rdev->dev.parent = &data->hdev->dev; rdev->driver_name = PICOLCD_NAME; rdev->map_name = RC_MAP_RC6_MCE; rdev->timeout = MS_TO_US(100); rdev->rx_resolution = 1; ret = rc_register_device(rdev); if (ret) goto err; data->rc_dev = rdev; return 0; err: rc_free_device(rdev); return ret; } void picolcd_exit_cir(struct picolcd_data *data) { struct rc_dev *rdev = data->rc_dev; data->rc_dev = NULL; rc_unregister_device(rdev); }
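/*
 * Standalone sketch (not part of the driver) of the interval decoding done by
 * picolcd_raw_cir() above: each 16-bit big-endian word is a duration in
 * microseconds, with the value negated (bit 15 set) for a pulse.  The sample
 * bytes are made up: 0xfc18 decodes to a 1000 us pulse, 0x03e8 to a 1000 us
 * space.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t sample[] = { 0xfc, 0x18, 0x03, 0xe8 };	/* made-up data */

	for (size_t i = 0; i + 1 < sizeof(sample); i += 2) {
		unsigned int w = (sample[i] << 8) | sample[i + 1];
		int pulse = !!(w & 0x8000);
		unsigned int usec = pulse ? 65536 - w : w;

		printf("%s %u us\n", pulse ? "pulse" : "space", usec);
	}
	return 0;
}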
/* * net/tipc/eth_media.c: Ethernet bearer support for TIPC * * Copyright (c) 2001-2007, 2013-2014, Ericsson AB * Copyright (c) 2005-2008, 2011-2013, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.
*/ #include "core.h" #include "bearer.h" /* Convert Ethernet address (media address format) to string */ static int tipc_eth_addr2str(struct tipc_media_addr *addr, char *strbuf, int bufsz) { if (bufsz < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */ return 1; sprintf(strbuf, "%pM", addr->value); return 0; } /* Convert from media address format to discovery message addr format */ static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr) { memset(msg, 0, TIPC_MEDIA_INFO_SIZE); msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, addr->value, ETH_ALEN); return 0; } /* Convert raw mac address format to media addr format */ static int tipc_eth_raw2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, const char *msg) { memset(addr, 0, sizeof(*addr)); ether_addr_copy(addr->value, msg); addr->media_id = TIPC_MEDIA_TYPE_ETH; addr->broadcast = is_broadcast_ether_addr(addr->value); return 0; } /* Convert discovery msg addr format to Ethernet media addr format */ static int tipc_eth_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, char *msg) { /* Skip past preamble: */ msg += TIPC_MEDIA_ADDR_OFFSET; return tipc_eth_raw2addr(b, addr, msg); } /* Ethernet media registration info */ struct tipc_media eth_media_info = { .send_msg = tipc_l2_send_msg, .enable_media = tipc_enable_l2_media, .disable_media = tipc_disable_l2_media, .addr2str = tipc_eth_addr2str, .addr2msg = tipc_eth_addr2msg, .msg2addr = tipc_eth_msg2addr, .raw2addr = tipc_eth_raw2addr, .priority = TIPC_DEF_LINK_PRI, .tolerance = TIPC_DEF_LINK_TOL, .min_win = TIPC_DEF_LINK_WIN, .max_win = TIPC_MAX_LINK_WIN, .type_id = TIPC_MEDIA_TYPE_ETH, .hwaddr_len = ETH_ALEN, .name = "eth" };
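/*
 * Layout sketch (userspace, not kernel code) of the discovery-message address
 * buffer that tipc_eth_addr2msg() fills: a zeroed media-info block with the
 * media type byte at the type offset and the six-byte MAC at the address
 * offset.  The numeric constants below are placeholders for illustration;
 * the real values come from net/tipc/bearer.h.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MEDIA_INFO_SIZE   32	/* placeholder for TIPC_MEDIA_INFO_SIZE */
#define MEDIA_TYPE_OFFSET  3	/* placeholder for TIPC_MEDIA_TYPE_OFFSET */
#define MEDIA_ADDR_OFFSET  4	/* placeholder for TIPC_MEDIA_ADDR_OFFSET */
#define MEDIA_TYPE_ETH     1	/* placeholder for TIPC_MEDIA_TYPE_ETH */

int main(void)
{
	const uint8_t mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	uint8_t msg[MEDIA_INFO_SIZE] = { 0 };

	msg[MEDIA_TYPE_OFFSET] = MEDIA_TYPE_ETH;
	memcpy(msg + MEDIA_ADDR_OFFSET, mac, sizeof(mac));

	for (size_t i = 0; i < sizeof(msg); i++)
		printf("%02x%c", msg[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}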
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz> * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon */ #include <linux/module.h> #include "../../dccp.h" #include "tfrc.h" #define TFRC_CALC_X_ARRSIZE 500 #define TFRC_CALC_X_SPLIT 50000 /* 0.05 * 1000000, details below */ #define TFRC_SMALLEST_P (TFRC_CALC_X_SPLIT/TFRC_CALC_X_ARRSIZE) /* TFRC TCP Reno Throughput Equation Lookup Table for f(p) The following two-column lookup table implements a part of the TCP throughput equation from [RFC 3448, sec.
3.1]: s X_calc = -------------------------------------------------------------- R * sqrt(2*b*p/3) + (3 * t_RTO * sqrt(3*b*p/8) * (p + 32*p^3)) Where: X is the transmit rate in bytes/second s is the packet size in bytes R is the round trip time in seconds p is the loss event rate, between 0 and 1.0, of the number of loss events as a fraction of the number of packets transmitted t_RTO is the TCP retransmission timeout value in seconds b is the number of packets acknowledged by a single TCP ACK We can assume that b = 1 and t_RTO is 4 * R. The equation now becomes: s X_calc = ------------------------------------------------------- R * sqrt(p*2/3) + (12 * R * sqrt(p*3/8) * (p + 32*p^3)) which we can break down into: s X_calc = --------- R * f(p) where f(p) is given for 0 < p <= 1 by: f(p) = sqrt(2*p/3) + 12 * sqrt(3*p/8) * (p + 32*p^3) Since this is kernel code, floating-point arithmetic is avoided in favour of integer arithmetic. This means that nearly all fractional parameters are scaled by 1000000: * the parameters p and R * the return result f(p) The lookup table therefore actually tabulates the following function g(q): g(q) = 1000000 * f(q/1000000) Hence, when p <= 1, q must be less than or equal to 1000000. To achieve finer granularity for the practically more relevant case of small values of p (up to 5%), the second column is used; the first one ranges up to 100%. This split corresponds to the value of q = TFRC_CALC_X_SPLIT. At the same time this also determines the smallest resolution possible with this lookup table: TFRC_SMALLEST_P = TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE The entire table is generated by: for(i=0; i < TFRC_CALC_X_ARRSIZE; i++) { lookup[i][0] = g((i+1) * 1000000/TFRC_CALC_X_ARRSIZE); lookup[i][1] = g((i+1) * TFRC_CALC_X_SPLIT/TFRC_CALC_X_ARRSIZE); } With the given configuration, we have, with M = TFRC_CALC_X_ARRSIZE-1, lookup[0][0] = g(1000000/(M+1)) = 1000000 * f(0.2%) lookup[M][0] = g(1000000) = 1000000 * f(100%) lookup[0][1] = g(TFRC_SMALLEST_P) = 1000000 * f(0.01%) lookup[M][1] = g(TFRC_CALC_X_SPLIT) = 1000000 * f(5%) In summary, the two columns represent f(p) for the following ranges: * The first column is for 0.002 <= p <= 1.0 * The second column is for 0.0001 <= p <= 0.05 Where the columns overlap, the second (finer-grained) is given preference, i.e. the first column is used only for p >= 0.05. 
*/ static const u32 tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE][2] = { { 37172, 8172 }, { 53499, 11567 }, { 66664, 14180 }, { 78298, 16388 }, { 89021, 18339 }, { 99147, 20108 }, { 108858, 21738 }, { 118273, 23260 }, { 127474, 24693 }, { 136520, 26052 }, { 145456, 27348 }, { 154316, 28589 }, { 163130, 29783 }, { 171919, 30935 }, { 180704, 32049 }, { 189502, 33130 }, { 198328, 34180 }, { 207194, 35202 }, { 216114, 36198 }, { 225097, 37172 }, { 234153, 38123 }, { 243294, 39055 }, { 252527, 39968 }, { 261861, 40864 }, { 271305, 41743 }, { 280866, 42607 }, { 290553, 43457 }, { 300372, 44293 }, { 310333, 45117 }, { 320441, 45929 }, { 330705, 46729 }, { 341131, 47518 }, { 351728, 48297 }, { 362501, 49066 }, { 373460, 49826 }, { 384609, 50577 }, { 395958, 51320 }, { 407513, 52054 }, { 419281, 52780 }, { 431270, 53499 }, { 443487, 54211 }, { 455940, 54916 }, { 468635, 55614 }, { 481581, 56306 }, { 494785, 56991 }, { 508254, 57671 }, { 521996, 58345 }, { 536019, 59014 }, { 550331, 59677 }, { 564939, 60335 }, { 579851, 60988 }, { 595075, 61636 }, { 610619, 62279 }, { 626491, 62918 }, { 642700, 63553 }, { 659253, 64183 }, { 676158, 64809 }, { 693424, 65431 }, { 711060, 66050 }, { 729073, 66664 }, { 747472, 67275 }, { 766266, 67882 }, { 785464, 68486 }, { 805073, 69087 }, { 825103, 69684 }, { 845562, 70278 }, { 866460, 70868 }, { 887805, 71456 }, { 909606, 72041 }, { 931873, 72623 }, { 954614, 73202 }, { 977839, 73778 }, { 1001557, 74352 }, { 1025777, 74923 }, { 1050508, 75492 }, { 1075761, 76058 }, { 1101544, 76621 }, { 1127867, 77183 }, { 1154739, 77741 }, { 1182172, 78298 }, { 1210173, 78852 }, { 1238753, 79405 }, { 1267922, 79955 }, { 1297689, 80503 }, { 1328066, 81049 }, { 1359060, 81593 }, { 1390684, 82135 }, { 1422947, 82675 }, { 1455859, 83213 }, { 1489430, 83750 }, { 1523671, 84284 }, { 1558593, 84817 }, { 1594205, 85348 }, { 1630518, 85878 }, { 1667543, 86406 }, { 1705290, 86932 }, { 1743770, 87457 }, { 1782994, 87980 }, { 1822973, 88501 }, { 1863717, 89021 }, { 1905237, 89540 }, { 1947545, 90057 }, { 1990650, 90573 }, { 2034566, 91087 }, { 2079301, 91600 }, { 2124869, 92111 }, { 2171279, 92622 }, { 2218543, 93131 }, { 2266673, 93639 }, { 2315680, 94145 }, { 2365575, 94650 }, { 2416371, 95154 }, { 2468077, 95657 }, { 2520707, 96159 }, { 2574271, 96660 }, { 2628782, 97159 }, { 2684250, 97658 }, { 2740689, 98155 }, { 2798110, 98651 }, { 2856524, 99147 }, { 2915944, 99641 }, { 2976382, 100134 }, { 3037850, 100626 }, { 3100360, 101117 }, { 3163924, 101608 }, { 3228554, 102097 }, { 3294263, 102586 }, { 3361063, 103073 }, { 3428966, 103560 }, { 3497984, 104045 }, { 3568131, 104530 }, { 3639419, 105014 }, { 3711860, 105498 }, { 3785467, 105980 }, { 3860253, 106462 }, { 3936229, 106942 }, { 4013410, 107422 }, { 4091808, 107902 }, { 4171435, 108380 }, { 4252306, 108858 }, { 4334431, 109335 }, { 4417825, 109811 }, { 4502501, 110287 }, { 4588472, 110762 }, { 4675750, 111236 }, { 4764349, 111709 }, { 4854283, 112182 }, { 4945564, 112654 }, { 5038206, 113126 }, { 5132223, 113597 }, { 5227627, 114067 }, { 5324432, 114537 }, { 5422652, 115006 }, { 5522299, 115474 }, { 5623389, 115942 }, { 5725934, 116409 }, { 5829948, 116876 }, { 5935446, 117342 }, { 6042439, 117808 }, { 6150943, 118273 }, { 6260972, 118738 }, { 6372538, 119202 }, { 6485657, 119665 }, { 6600342, 120128 }, { 6716607, 120591 }, { 6834467, 121053 }, { 6953935, 121514 }, { 7075025, 121976 }, { 7197752, 122436 }, { 7322131, 122896 }, { 7448175, 123356 }, { 7575898, 123815 }, { 7705316, 124274 }, { 7836442, 124733 }, { 7969291, 125191 }, { 8103877, 
125648 }, { 8240216, 126105 }, { 8378321, 126562 }, { 8518208, 127018 }, { 8659890, 127474 }, { 8803384, 127930 }, { 8948702, 128385 }, { 9095861, 128840 }, { 9244875, 129294 }, { 9395760, 129748 }, { 9548529, 130202 }, { 9703198, 130655 }, { 9859782, 131108 }, { 10018296, 131561 }, { 10178755, 132014 }, { 10341174, 132466 }, { 10505569, 132917 }, { 10671954, 133369 }, { 10840345, 133820 }, { 11010757, 134271 }, { 11183206, 134721 }, { 11357706, 135171 }, { 11534274, 135621 }, { 11712924, 136071 }, { 11893673, 136520 }, { 12076536, 136969 }, { 12261527, 137418 }, { 12448664, 137867 }, { 12637961, 138315 }, { 12829435, 138763 }, { 13023101, 139211 }, { 13218974, 139658 }, { 13417071, 140106 }, { 13617407, 140553 }, { 13819999, 140999 }, { 14024862, 141446 }, { 14232012, 141892 }, { 14441465, 142339 }, { 14653238, 142785 }, { 14867346, 143230 }, { 15083805, 143676 }, { 15302632, 144121 }, { 15523842, 144566 }, { 15747453, 145011 }, { 15973479, 145456 }, { 16201939, 145900 }, { 16432847, 146345 }, { 16666221, 146789 }, { 16902076, 147233 }, { 17140429, 147677 }, { 17381297, 148121 }, { 17624696, 148564 }, { 17870643, 149007 }, { 18119154, 149451 }, { 18370247, 149894 }, { 18623936, 150336 }, { 18880241, 150779 }, { 19139176, 151222 }, { 19400759, 151664 }, { 19665007, 152107 }, { 19931936, 152549 }, { 20201564, 152991 }, { 20473907, 153433 }, { 20748982, 153875 }, { 21026807, 154316 }, { 21307399, 154758 }, { 21590773, 155199 }, { 21876949, 155641 }, { 22165941, 156082 }, { 22457769, 156523 }, { 22752449, 156964 }, { 23049999, 157405 }, { 23350435, 157846 }, { 23653774, 158287 }, { 23960036, 158727 }, { 24269236, 159168 }, { 24581392, 159608 }, { 24896521, 160049 }, { 25214642, 160489 }, { 25535772, 160929 }, { 25859927, 161370 }, { 26187127, 161810 }, { 26517388, 162250 }, { 26850728, 162690 }, { 27187165, 163130 }, { 27526716, 163569 }, { 27869400, 164009 }, { 28215234, 164449 }, { 28564236, 164889 }, { 28916423, 165328 }, { 29271815, 165768 }, { 29630428, 166208 }, { 29992281, 166647 }, { 30357392, 167087 }, { 30725779, 167526 }, { 31097459, 167965 }, { 31472452, 168405 }, { 31850774, 168844 }, { 32232445, 169283 }, { 32617482, 169723 }, { 33005904, 170162 }, { 33397730, 170601 }, { 33792976, 171041 }, { 34191663, 171480 }, { 34593807, 171919 }, { 34999428, 172358 }, { 35408544, 172797 }, { 35821174, 173237 }, { 36237335, 173676 }, { 36657047, 174115 }, { 37080329, 174554 }, { 37507197, 174993 }, { 37937673, 175433 }, { 38371773, 175872 }, { 38809517, 176311 }, { 39250924, 176750 }, { 39696012, 177190 }, { 40144800, 177629 }, { 40597308, 178068 }, { 41053553, 178507 }, { 41513554, 178947 }, { 41977332, 179386 }, { 42444904, 179825 }, { 42916290, 180265 }, { 43391509, 180704 }, { 43870579, 181144 }, { 44353520, 181583 }, { 44840352, 182023 }, { 45331092, 182462 }, { 45825761, 182902 }, { 46324378, 183342 }, { 46826961, 183781 }, { 47333531, 184221 }, { 47844106, 184661 }, { 48358706, 185101 }, { 48877350, 185541 }, { 49400058, 185981 }, { 49926849, 186421 }, { 50457743, 186861 }, { 50992759, 187301 }, { 51531916, 187741 }, { 52075235, 188181 }, { 52622735, 188622 }, { 53174435, 189062 }, { 53730355, 189502 }, { 54290515, 189943 }, { 54854935, 190383 }, { 55423634, 190824 }, { 55996633, 191265 }, { 56573950, 191706 }, { 57155606, 192146 }, { 57741621, 192587 }, { 58332014, 193028 }, { 58926806, 193470 }, { 59526017, 193911 }, { 60129666, 194352 }, { 60737774, 194793 }, { 61350361, 195235 }, { 61967446, 195677 }, { 62589050, 196118 }, { 63215194, 196560 }, { 63845897, 197002 }, { 64481179, 
197444 }, { 65121061, 197886 }, { 65765563, 198328 }, { 66414705, 198770 }, { 67068508, 199213 }, { 67726992, 199655 }, { 68390177, 200098 }, { 69058085, 200540 }, { 69730735, 200983 }, { 70408147, 201426 }, { 71090343, 201869 }, { 71777343, 202312 }, { 72469168, 202755 }, { 73165837, 203199 }, { 73867373, 203642 }, { 74573795, 204086 }, { 75285124, 204529 }, { 76001380, 204973 }, { 76722586, 205417 }, { 77448761, 205861 }, { 78179926, 206306 }, { 78916102, 206750 }, { 79657310, 207194 }, { 80403571, 207639 }, { 81154906, 208084 }, { 81911335, 208529 }, { 82672880, 208974 }, { 83439562, 209419 }, { 84211402, 209864 }, { 84988421, 210309 }, { 85770640, 210755 }, { 86558080, 211201 }, { 87350762, 211647 }, { 88148708, 212093 }, { 88951938, 212539 }, { 89760475, 212985 }, { 90574339, 213432 }, { 91393551, 213878 }, { 92218133, 214325 }, { 93048107, 214772 }, { 93883493, 215219 }, { 94724314, 215666 }, { 95570590, 216114 }, { 96422343, 216561 }, { 97279594, 217009 }, { 98142366, 217457 }, { 99010679, 217905 }, { 99884556, 218353 }, { 100764018, 218801 }, { 101649086, 219250 }, { 102539782, 219698 }, { 103436128, 220147 }, { 104338146, 220596 }, { 105245857, 221046 }, { 106159284, 221495 }, { 107078448, 221945 }, { 108003370, 222394 }, { 108934074, 222844 }, { 109870580, 223294 }, { 110812910, 223745 }, { 111761087, 224195 }, { 112715133, 224646 }, { 113675069, 225097 }, { 114640918, 225548 }, { 115612702, 225999 }, { 116590442, 226450 }, { 117574162, 226902 }, { 118563882, 227353 }, { 119559626, 227805 }, { 120561415, 228258 }, { 121569272, 228710 }, { 122583219, 229162 }, { 123603278, 229615 }, { 124629471, 230068 }, { 125661822, 230521 }, { 126700352, 230974 }, { 127745083, 231428 }, { 128796039, 231882 }, { 129853241, 232336 }, { 130916713, 232790 }, { 131986475, 233244 }, { 133062553, 233699 }, { 134144966, 234153 }, { 135233739, 234608 }, { 136328894, 235064 }, { 137430453, 235519 }, { 138538440, 235975 }, { 139652876, 236430 }, { 140773786, 236886 }, { 141901190, 237343 }, { 143035113, 237799 }, { 144175576, 238256 }, { 145322604, 238713 }, { 146476218, 239170 }, { 147636442, 239627 }, { 148803298, 240085 }, { 149976809, 240542 }, { 151156999, 241000 }, { 152343890, 241459 }, { 153537506, 241917 }, { 154737869, 242376 }, { 155945002, 242835 }, { 157158929, 243294 }, { 158379673, 243753 }, { 159607257, 244213 }, { 160841704, 244673 }, { 162083037, 245133 }, { 163331279, 245593 }, { 164586455, 246054 }, { 165848586, 246514 }, { 167117696, 246975 }, { 168393810, 247437 }, { 169676949, 247898 }, { 170967138, 248360 }, { 172264399, 248822 }, { 173568757, 249284 }, { 174880235, 249747 }, { 176198856, 250209 }, { 177524643, 250672 }, { 178857621, 251136 }, { 180197813, 251599 }, { 181545242, 252063 }, { 182899933, 252527 }, { 184261908, 252991 }, { 185631191, 253456 }, { 187007807, 253920 }, { 188391778, 254385 }, { 189783129, 254851 }, { 191181884, 255316 }, { 192588065, 255782 }, { 194001698, 256248 }, { 195422805, 256714 }, { 196851411, 257181 }, { 198287540, 257648 }, { 199731215, 258115 }, { 201182461, 258582 }, { 202641302, 259050 }, { 204107760, 259518 }, { 205581862, 259986 }, { 207063630, 260454 }, { 208553088, 260923 }, { 210050262, 261392 }, { 211555174, 261861 }, { 213067849, 262331 }, { 214588312, 262800 }, { 216116586, 263270 }, { 217652696, 263741 }, { 219196666, 264211 }, { 220748520, 264682 }, { 222308282, 265153 }, { 223875978, 265625 }, { 225451630, 266097 }, { 227035265, 266569 }, { 228626905, 267041 }, { 230226576, 267514 }, { 231834302, 267986 }, { 233450107, 268460 }, { 
235074016, 268933 }, { 236706054, 269407 }, { 238346244, 269881 }, { 239994613, 270355 }, { 241651183, 270830 }, { 243315981, 271305 } }; /* return largest index i such that fval <= lookup[i][small] */ static inline u32 tfrc_binsearch(u32 fval, u8 small) { u32 try, low = 0, high = TFRC_CALC_X_ARRSIZE - 1; while (low < high) { try = (low + high) / 2; if (fval <= tfrc_calc_x_lookup[try][small]) high = try; else low = try + 1; } return high; } /** * tfrc_calc_x - Calculate the send rate as per section 3.1 of RFC3448 * @s: packet size in bytes * @R: RTT scaled by 1000000 (i.e., microseconds) * @p: loss ratio estimate scaled by 1000000 * * Returns X_calc in bytes per second (not scaled). */ u32 tfrc_calc_x(u16 s, u32 R, u32 p) { u16 index; u32 f; u64 result; /* check against invalid parameters and divide-by-zero */ BUG_ON(p > 1000000); /* p must not exceed 100% */ BUG_ON(p == 0); /* f(0) = 0, divide by zero */ if (R == 0) { /* possible divide by zero */ DCCP_CRIT("WARNING: RTT is 0, returning maximum X_calc."); return ~0U; } if (p <= TFRC_CALC_X_SPLIT) { /* 0.0000 < p <= 0.05 */ if (p < TFRC_SMALLEST_P) { /* 0.0000 < p < 0.0001 */ DCCP_WARN("Value of p (%d) below resolution. " "Substituting %d\n", p, TFRC_SMALLEST_P); index = 0; } else /* 0.0001 <= p <= 0.05 */ index = p/TFRC_SMALLEST_P - 1; f = tfrc_calc_x_lookup[index][1]; } else { /* 0.05 < p <= 1.00 */ index = p/(1000000/TFRC_CALC_X_ARRSIZE) - 1; f = tfrc_calc_x_lookup[index][0]; } /* * Compute X = s/(R*f(p)) in bytes per second. * Since f(p) and R are both scaled by 1000000, we need to multiply by * 1000000^2. To avoid overflow, the result is computed in two stages. * This works under almost all reasonable operational conditions, for a * wide range of parameters. Yet, should some strange combination of * parameters result in overflow, the use of scaled_div32 will catch * this and return UINT_MAX - which is a logically adequate consequence. */ result = scaled_div(s, R); return scaled_div32(result, f); } /** * tfrc_calc_x_reverse_lookup - try to find p given f(p) * @fvalue: function value to match, scaled by 1000000 * * Returns closest match for p, also scaled by 1000000 */ u32 tfrc_calc_x_reverse_lookup(u32 fvalue) { int index; if (fvalue == 0) /* f(p) = 0 whenever p = 0 */ return 0; /* Error cases. */ if (fvalue < tfrc_calc_x_lookup[0][1]) { DCCP_WARN("fvalue %u smaller than resolution\n", fvalue); return TFRC_SMALLEST_P; } if (fvalue > tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][0]) { DCCP_WARN("fvalue %u exceeds bounds!\n", fvalue); return 1000000; } if (fvalue <= tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][1]) { index = tfrc_binsearch(fvalue, 1); return (index + 1) * TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE; } /* else ... it must be in the coarse-grained column */ index = tfrc_binsearch(fvalue, 0); return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; } /** * tfrc_invert_loss_event_rate - Compute p so that 10^6 corresponds to 100% * @loss_event_rate: loss event rate to invert * When @loss_event_rate is large, there is a chance that p is truncated to 0. * To avoid re-entering slow-start in that case, we set p = TFRC_SMALLEST_P > 0. */ u32 tfrc_invert_loss_event_rate(u32 loss_event_rate) { if (loss_event_rate == UINT_MAX) /* see RFC 4342, 8.5 */ return 0; if (unlikely(loss_event_rate == 0)) /* map 1/0 into 100% */ return 1000000; return max_t(u32, scaled_div(1, loss_event_rate), TFRC_SMALLEST_P); }
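/*
 * Userspace sketch of how the two lookup columns above are generated,
 * following the documented loop lookup[i][0] = g((i+1) * 1000000/ARRSIZE),
 * lookup[i][1] = g((i+1) * SPLIT/ARRSIZE) with g(q) = 1000000 * f(q/1000000).
 * This is not kernel code, and the exact rounding may differ from the
 * committed table by a count or two.  Build with: cc tfrc_gen.c -lm
 */
#include <stdio.h>
#include <math.h>

#define ARRSIZE 500
#define SPLIT   50000		/* 0.05 scaled by 1000000 */

static double f(double p)
{
	return sqrt(2.0 * p / 3.0) +
	       12.0 * sqrt(3.0 * p / 8.0) * (p + 32.0 * p * p * p);
}

static unsigned int g(double q)
{
	return (unsigned int)(1000000.0 * f(q / 1000000.0) + 0.5);
}

int main(void)
{
	for (int i = 0; i < ARRSIZE; i++)
		printf("{ %u, %u },\n",
		       g((i + 1) * 1000000.0 / ARRSIZE),
		       g((i + 1) * (double)SPLIT / ARRSIZE));
	return 0;
}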
// SPDX-License-Identifier: GPL-2.0-or-later /* * User-space I/O driver support for HID subsystem * Copyright (c) 2012 David Herrmann */ /* */ #include <linux/atomic.h> #include
<linux/compat.h> #include <linux/cred.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/hid.h> #include <linux/input.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/uhid.h> #include <linux/wait.h> #define UHID_NAME "uhid" #define UHID_BUFSIZE 32 struct uhid_device { struct mutex devlock; /* This flag tracks whether the HID device is usable for commands from * userspace. The flag is already set before hid_add_device(), which * runs in workqueue context, to allow hid_add_device() to communicate * with userspace. * However, if hid_add_device() fails, the flag is cleared without * holding devlock. * We guarantee that if @running changes from true to false while you're * holding @devlock, it's still fine to access @hid. */ bool running; __u8 *rd_data; uint rd_size; /* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */ struct hid_device *hid; struct uhid_event input_buf; wait_queue_head_t waitq; spinlock_t qlock; __u8 head; __u8 tail; struct uhid_event *outq[UHID_BUFSIZE]; /* blocking GET_REPORT support; state changes protected by qlock */ struct mutex report_lock; wait_queue_head_t report_wait; bool report_running; u32 report_id; u32 report_type; struct uhid_event report_buf; struct work_struct worker; }; static struct miscdevice uhid_misc; static void uhid_device_add_worker(struct work_struct *work) { struct uhid_device *uhid = container_of(work, struct uhid_device, worker); int ret; ret = hid_add_device(uhid->hid); if (ret) { hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); /* We used to call hid_destroy_device() here, but that's really * messy to get right because we have to coordinate with * concurrent writes from userspace that might be in the middle * of using uhid->hid. * Just leave uhid->hid as-is for now, and clean it up when * userspace tries to close or reinitialize the uhid instance. * * However, we do have to clear the ->running flag and do a * wakeup to make sure userspace knows that the device is gone. 
*/ WRITE_ONCE(uhid->running, false); wake_up_interruptible(&uhid->report_wait); } } static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) { __u8 newhead; newhead = (uhid->head + 1) % UHID_BUFSIZE; if (newhead != uhid->tail) { uhid->outq[uhid->head] = ev; uhid->head = newhead; wake_up_interruptible(&uhid->waitq); } else { hid_warn(uhid->hid, "Output queue is full\n"); kfree(ev); } } static int uhid_queue_event(struct uhid_device *uhid, __u32 event) { unsigned long flags; struct uhid_event *ev; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = event; spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return 0; } static int uhid_hid_start(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; struct uhid_event *ev; unsigned long flags; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_START; if (hid->report_enum[HID_FEATURE_REPORT].numbered) ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS; if (hid->report_enum[HID_OUTPUT_REPORT].numbered) ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS; if (hid->report_enum[HID_INPUT_REPORT].numbered) ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS; spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return 0; } static void uhid_hid_stop(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; hid->claimed = 0; uhid_queue_event(uhid, UHID_STOP); } static int uhid_hid_open(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; return uhid_queue_event(uhid, UHID_OPEN); } static void uhid_hid_close(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; uhid_queue_event(uhid, UHID_CLOSE); } static int uhid_hid_parse(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; return hid_parse_report(hid, uhid->rd_data, uhid->rd_size); } /* must be called with report_lock held */ static int __uhid_report_queue_and_wait(struct uhid_device *uhid, struct uhid_event *ev, __u32 *report_id) { unsigned long flags; int ret; spin_lock_irqsave(&uhid->qlock, flags); *report_id = ++uhid->report_id; uhid->report_type = ev->type + 1; uhid->report_running = true; uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); ret = wait_event_interruptible_timeout(uhid->report_wait, !uhid->report_running || !READ_ONCE(uhid->running), 5 * HZ); if (!ret || !READ_ONCE(uhid->running) || uhid->report_running) ret = -EIO; else if (ret < 0) ret = -ERESTARTSYS; else ret = 0; uhid->report_running = false; return ret; } static void uhid_report_wake_up(struct uhid_device *uhid, u32 id, const struct uhid_event *ev) { unsigned long flags; spin_lock_irqsave(&uhid->qlock, flags); /* id for old report; drop it silently */ if (uhid->report_type != ev->type || uhid->report_id != id) goto unlock; if (!uhid->report_running) goto unlock; memcpy(&uhid->report_buf, ev, sizeof(*ev)); uhid->report_running = false; wake_up_interruptible(&uhid->report_wait); unlock: spin_unlock_irqrestore(&uhid->qlock, flags); } static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum, u8 *buf, size_t count, u8 rtype) { struct uhid_device *uhid = hid->driver_data; struct uhid_get_report_reply_req *req; struct uhid_event *ev; int ret; if (!READ_ONCE(uhid->running)) return -EIO; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_GET_REPORT; ev->u.get_report.rnum = rnum; ev->u.get_report.rtype = 
rtype; ret = mutex_lock_interruptible(&uhid->report_lock); if (ret) { kfree(ev); return ret; } /* this _always_ takes ownership of @ev */ ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id); if (ret) goto unlock; req = &uhid->report_buf.u.get_report_reply; if (req->err) { ret = -EIO; } else { ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX); memcpy(buf, req->data, ret); } unlock: mutex_unlock(&uhid->report_lock); return ret; } static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum, const u8 *buf, size_t count, u8 rtype) { struct uhid_device *uhid = hid->driver_data; struct uhid_event *ev; int ret; if (!READ_ONCE(uhid->running) || count > UHID_DATA_MAX) return -EIO; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_SET_REPORT; ev->u.set_report.rnum = rnum; ev->u.set_report.rtype = rtype; ev->u.set_report.size = count; memcpy(ev->u.set_report.data, buf, count); ret = mutex_lock_interruptible(&uhid->report_lock); if (ret) { kfree(ev); return ret; } /* this _always_ takes ownership of @ev */ ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id); if (ret) goto unlock; if (uhid->report_buf.u.set_report_reply.err) ret = -EIO; else ret = count; unlock: mutex_unlock(&uhid->report_lock); return ret; } static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) { u8 u_rtype; switch (rtype) { case HID_FEATURE_REPORT: u_rtype = UHID_FEATURE_REPORT; break; case HID_OUTPUT_REPORT: u_rtype = UHID_OUTPUT_REPORT; break; case HID_INPUT_REPORT: u_rtype = UHID_INPUT_REPORT; break; default: return -EINVAL; } switch (reqtype) { case HID_REQ_GET_REPORT: return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype); case HID_REQ_SET_REPORT: return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype); default: return -EIO; } } static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count, unsigned char report_type) { struct uhid_device *uhid = hid->driver_data; __u8 rtype; unsigned long flags; struct uhid_event *ev; switch (report_type) { case HID_FEATURE_REPORT: rtype = UHID_FEATURE_REPORT; break; case HID_OUTPUT_REPORT: rtype = UHID_OUTPUT_REPORT; break; default: return -EINVAL; } if (count < 1 || count > UHID_DATA_MAX) return -EINVAL; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_OUTPUT; ev->u.output.size = count; ev->u.output.rtype = rtype; memcpy(ev->u.output.data, buf, count); spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return count; } static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf, size_t count) { return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT); } static const struct hid_ll_driver uhid_hid_driver = { .start = uhid_hid_start, .stop = uhid_hid_stop, .open = uhid_hid_open, .close = uhid_hid_close, .parse = uhid_hid_parse, .raw_request = uhid_hid_raw_request, .output_report = uhid_hid_output_report, .max_buffer_size = UHID_DATA_MAX, }; #ifdef CONFIG_COMPAT /* Apparently we haven't stepped on these rakes enough times yet. 
*/ struct uhid_create_req_compat { __u8 name[128]; __u8 phys[64]; __u8 uniq[64]; compat_uptr_t rd_data; __u16 rd_size; __u16 bus; __u32 vendor; __u32 product; __u32 version; __u32 country; } __attribute__((__packed__)); static int uhid_event_from_user(const char __user *buffer, size_t len, struct uhid_event *event) { if (in_compat_syscall()) { u32 type; if (get_user(type, buffer)) return -EFAULT; if (type == UHID_CREATE) { /* * This is our messed up request with compat pointer. * It is largish (more than 256 bytes) so we better * allocate it from the heap. */ struct uhid_create_req_compat *compat; compat = kzalloc(sizeof(*compat), GFP_KERNEL); if (!compat) return -ENOMEM; buffer += sizeof(type); len -= sizeof(type); if (copy_from_user(compat, buffer, min(len, sizeof(*compat)))) { kfree(compat); return -EFAULT; } /* Shuffle the data over to proper structure */ event->type = type; memcpy(event->u.create.name, compat->name, sizeof(compat->name)); memcpy(event->u.create.phys, compat->phys, sizeof(compat->phys)); memcpy(event->u.create.uniq, compat->uniq, sizeof(compat->uniq)); event->u.create.rd_data = compat_ptr(compat->rd_data); event->u.create.rd_size = compat->rd_size; event->u.create.bus = compat->bus; event->u.create.vendor = compat->vendor; event->u.create.product = compat->product; event->u.create.version = compat->version; event->u.create.country = compat->country; kfree(compat); return 0; } /* All others can be copied directly */ } if (copy_from_user(event, buffer, min(len, sizeof(*event)))) return -EFAULT; return 0; } #else static int uhid_event_from_user(const char __user *buffer, size_t len, struct uhid_event *event) { if (copy_from_user(event, buffer, min(len, sizeof(*event)))) return -EFAULT; return 0; } #endif static int uhid_dev_create2(struct uhid_device *uhid, const struct uhid_event *ev) { struct hid_device *hid; size_t rd_size; void *rd_data; int ret; if (uhid->hid) return -EALREADY; rd_size = ev->u.create2.rd_size; if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE) return -EINVAL; rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL); if (!rd_data) return -ENOMEM; uhid->rd_size = rd_size; uhid->rd_data = rd_data; hid = hid_allocate_device(); if (IS_ERR(hid)) { ret = PTR_ERR(hid); goto err_free; } BUILD_BUG_ON(sizeof(hid->name) != sizeof(ev->u.create2.name)); strscpy(hid->name, ev->u.create2.name, sizeof(hid->name)); BUILD_BUG_ON(sizeof(hid->phys) != sizeof(ev->u.create2.phys)); strscpy(hid->phys, ev->u.create2.phys, sizeof(hid->phys)); BUILD_BUG_ON(sizeof(hid->uniq) != sizeof(ev->u.create2.uniq)); strscpy(hid->uniq, ev->u.create2.uniq, sizeof(hid->uniq)); hid->ll_driver = &uhid_hid_driver; hid->bus = ev->u.create2.bus; hid->vendor = ev->u.create2.vendor; hid->product = ev->u.create2.product; hid->version = ev->u.create2.version; hid->country = ev->u.create2.country; hid->driver_data = uhid; hid->dev.parent = uhid_misc.this_device; uhid->hid = hid; uhid->running = true; /* Adding of a HID device is done through a worker, to allow HID drivers * which use feature requests during .probe to work, without they would * be blocked on devlock, which is held by uhid_char_write. 
	 */
	schedule_work(&uhid->worker);

	return 0;

err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}

static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}

static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->hid)
		return -EINVAL;

	WRITE_ONCE(uhid->running, false);
	wake_up_interruptible(&uhid->report_wait);

	cancel_work_sync(&uhid->worker);

	hid_destroy_device(uhid->hid);
	uhid->hid = NULL;
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
			 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
	return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!READ_ONCE(uhid->running))
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
	return 0;
}

static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	INIT_WORK(&uhid->worker, uhid_device_add_worker);

	file->private_data = uhid;
	stream_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

static ssize_t uhid_char_read(struct file *file, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
					       uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		/*
		 * 'struct uhid_create_req' contains a __user pointer which is
		 * copied from, so it's unsafe to allow this with elevated
		 * privileges (e.g. from a setuid binary) or via kernel_write().
		 */
		if (file->f_cred != current_cred()) {
			pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
				    task_tgid_vnr(current), current->comm);
			ret = -EACCES;
			goto unlock;
		}
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;
	__poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= UHID_MINOR,
	.name		= UHID_NAME,
};
module_misc_device(uhid_misc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);
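For orientation, here is a minimal user-space sketch of driving the character device registered above: it opens /dev/uhid, submits a UHID_CREATE2 event (which carries the report descriptor inline, so it avoids the __user pointer that makes legacy UHID_CREATE awkward), injects one UHID_INPUT2 report, and tears the device down again. This is illustrative only, not part of the driver: the device name, vendor/product IDs and the one-byte vendor report descriptor are made-up placeholders, and error handling is trimmed for brevity.

/* Minimal sketch, assuming the kernel UAPI header <linux/uhid.h>. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/uhid.h>

static int uhid_write(int fd, const struct uhid_event *ev)
{
	/* uhid accepts one full event per write() */
	ssize_t ret = write(fd, ev, sizeof(*ev));
	return ret == (ssize_t)sizeof(*ev) ? 0 : -1;
}

int main(void)
{
	/* Placeholder vendor-defined descriptor with a single 1-byte input report. */
	static const unsigned char rdesc[] = {
		0x06, 0x00, 0xff,	/* Usage Page (Vendor Defined)	*/
		0x09, 0x01,		/* Usage (Vendor Usage 1)	*/
		0xa1, 0x01,		/* Collection (Application)	*/
		0x15, 0x00,		/*   Logical Minimum (0)	*/
		0x26, 0xff, 0x00,	/*   Logical Maximum (255)	*/
		0x75, 0x08,		/*   Report Size (8)		*/
		0x95, 0x01,		/*   Report Count (1)		*/
		0x09, 0x01,		/*   Usage (Vendor Usage 1)	*/
		0x81, 0x02,		/*   Input (Data,Var,Abs)	*/
		0xc0,			/* End Collection		*/
	};
	struct uhid_event ev;
	int fd;

	fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return 1;

	/* UHID_CREATE2: descriptor is embedded in the event itself. */
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_CREATE2;
	strcpy((char *)ev.u.create2.name, "example-uhid-device");
	memcpy(ev.u.create2.rd_data, rdesc, sizeof(rdesc));
	ev.u.create2.rd_size = sizeof(rdesc);
	ev.u.create2.bus = BUS_USB;
	ev.u.create2.vendor = 0x1234;	/* made-up IDs */
	ev.u.create2.product = 0x5678;
	if (uhid_write(fd, &ev))
		return 1;

	/* Inject one input report through UHID_INPUT2. */
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_INPUT2;
	ev.u.input2.size = 1;
	ev.u.input2.data[0] = 0x42;
	if (uhid_write(fd, &ev))
		return 1;

	/* UHID_DESTROY unregisters the HID device. */
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_DESTROY;
	uhid_write(fd, &ev);

	close(fd);
	return 0;
}

A real client would also poll() the fd and read() UHID_START/UHID_OPEN and get/set-report requests coming back from the kernel, as the read/poll paths above imply.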
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IP Payload Compression Protocol (IPComp) - RFC3173.
 *
 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Todo:
 *   - Tunable compression parameters.
 *   - Compression stats.
 *   - Adaptive compression.
 */

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/ipcomp.h>
#include <net/xfrm.h>

struct ipcomp_tfms {
	struct list_head list;
	struct crypto_comp * __percpu *tfms;
	int users;
};

static DEFINE_MUTEX(ipcomp_resource_mutex);
static void * __percpu *ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);

static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	u8 *scratch = *this_cpu_ptr(ipcomp_scratches);
	struct crypto_comp *tfm = *this_cpu_ptr(ipcd->tfms);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		return err;

	if (dlen < (plen + sizeof(struct ip_comp_hdr)))
		return -EINVAL;

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			return -EMSGSIZE;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		if (!page)
			return -ENOMEM;

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		skb_frag_fill_page_desc(frag, page, 0, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	return 0;
}

int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int nexthdr;
	int err = -ENOMEM;
	struct ip_comp_hdr *ipch;

	if (skb_linearize_cow(skb))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	/* Remove ipcomp header and decompress original payload */
	ipch = (void *)skb->data;
	nexthdr = ipch->nexthdr;

	skb->transport_header = skb->network_header + sizeof(*ipch);
	__skb_pull(skb, sizeof(*ipch));
	err = ipcomp_decompress(x, skb);
	if (err)
		goto out;

	err = nexthdr;

out:
	return err;
}
EXPORT_SYMBOL_GPL(ipcomp_input);

static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	u8 *start = skb->data;
	struct crypto_comp *tfm;
	u8 *scratch;
	int err;

	local_bh_disable();
	scratch = *this_cpu_ptr(ipcomp_scratches);
	tfm = *this_cpu_ptr(ipcd->tfms);
	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
	if (err)
		goto out;

	if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
		err = -EMSGSIZE;
		goto out;
	}

	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
	local_bh_enable();

	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
	return 0;

out:
	local_bh_enable();
	return err;
}

int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;

	if (skb->len < ipcd->threshold) {
		/* Don't bother compressing */
		goto out_ok;
	}

	if (skb_linearize_cow(skb))
		goto out_ok;

	err = ipcomp_compress(x, skb);
	if (err)
		goto out_ok;

	/* Install ipcomp header, convert into ipcomp datagram.
	 */
	ipch = ip_comp_hdr(skb);
	ipch->nexthdr = *skb_mac_header(skb);
	ipch->flags = 0;
	ipch->cpi = htons((u16)ntohl(x->id.spi));
	*skb_mac_header(skb) = IPPROTO_COMP;
out_ok:
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
EXPORT_SYMBOL_GPL(ipcomp_output);

static void ipcomp_free_scratches(void)
{
	int i;
	void * __percpu *scratches;

	if (--ipcomp_scratch_users)
		return;

	scratches = ipcomp_scratches;
	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
	ipcomp_scratches = NULL;
}

static void * __percpu *ipcomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	if (ipcomp_scratch_users++)
		return ipcomp_scratches;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	ipcomp_scratches = scratches;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			return NULL;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;
}

static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
{
	struct ipcomp_tfms *pos;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		if (pos->tfms == tfms)
			break;
	}

	WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));

	if (--pos->users)
		return;

	list_del(&pos->list);
	kfree(pos);

	if (!tfms)
		return;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
		crypto_free_comp(tfm);
	}
	free_percpu(tfms);
}

static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
{
	struct ipcomp_tfms *pos;
	struct crypto_comp * __percpu *tfms;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		struct crypto_comp *tfm;

		/* This can be any valid CPU ID so we don't need locking. */
		tfm = this_cpu_read(*pos->tfms);

		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
			pos->users++;
			return pos->tfms;
		}
	}

	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
	if (!pos)
		return NULL;

	pos->users = 1;
	INIT_LIST_HEAD(&pos->list);
	list_add(&pos->list, &ipcomp_tfms_list);

	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
	if (!tfms)
		goto error;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
							    CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			goto error;
		*per_cpu_ptr(tfms, cpu) = tfm;
	}

	return tfms;

error:
	ipcomp_free_tfms(tfms);
	return NULL;
}

static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
	if (ipcd->tfms)
		ipcomp_free_tfms(ipcd->tfms);
	ipcomp_free_scratches();
}

void ipcomp_destroy(struct xfrm_state *x)
{
	struct ipcomp_data *ipcd = x->data;

	if (!ipcd)
		return;
	xfrm_state_delete_tunnel(x);
	mutex_lock(&ipcomp_resource_mutex);
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
}
EXPORT_SYMBOL_GPL(ipcomp_destroy);

int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	int err;
	struct ipcomp_data *ipcd;
	struct xfrm_algo_desc *calg_desc;

	err = -EINVAL;
	if (!x->calg) {
		NL_SET_ERR_MSG(extack, "Missing required compression algorithm");
		goto out;
	}

	if (x->encap) {
		NL_SET_ERR_MSG(extack, "IPComp is not compatible with encapsulation");
		goto out;
	}

	err = -ENOMEM;
	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
	if (!ipcd)
		goto out;

	mutex_lock(&ipcomp_resource_mutex);
	if (!ipcomp_alloc_scratches())
		goto error;

	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
	if (!ipcd->tfms)
		goto error;
	mutex_unlock(&ipcomp_resource_mutex);

	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
	BUG_ON(!calg_desc);
	ipcd->threshold = calg_desc->uinfo.comp.threshold;
	x->data = ipcd;
	err = 0;

out:
	return err;

error:
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
	goto out;
}
EXPORT_SYMBOL_GPL(ipcomp_init_state);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
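As a brief aside on the header manipulation in ipcomp_output() above: RFC 3173 defines a 4-byte IPComp header, and the CPI installed there is simply the low 16 bits of the xfrm SPI. The small user-space snippet below is illustrative only and not part of the kernel code; the struct merely mirrors the layout of the kernel's struct ip_comp_hdr, and the SPI and next-header values are made up.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htons/ntohl/ntohs */

/* Same layout as struct ip_comp_hdr (RFC 3173, section 3). */
struct ipcomp_hdr {
	uint8_t  nexthdr;	/* protocol of the compressed payload */
	uint8_t  flags;		/* must be zero */
	uint16_t cpi;		/* Compression Parameter Index, network order */
};

int main(void)
{
	/* Made-up SA whose SPI/CPI is 2 (DEFLATE's well-known CPI). */
	uint32_t spi = htonl(0x00000002);
	struct ipcomp_hdr h = {
		.nexthdr = 6,	/* e.g. the original payload was TCP */
		.flags = 0,
		/* Mirrors ipcomp_output(): CPI is the low 16 bits of the SPI. */
		.cpi = htons((uint16_t)ntohl(spi)),
	};

	printf("nexthdr=%u flags=%u cpi=%u\n", h.nexthdr, h.flags, ntohs(h.cpi));
	return 0;
}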
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *
 * 1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 * 2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
 * 2000-2002   x86-64 support by Andi Kleen
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/syscalls.h>
#include <linux/rseq.h>

#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/xstate.h>
#include <asm/vdso.h>
#include <asm/mce.h>
#include <asm/sighandling.h>
#include <asm/vm86.h>

#include <asm/syscall.h>
#include <asm/sigframe.h>
#include <asm/signal.h>
#include <asm/shstk.h>

static inline int is_ia32_compat_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
		ksig->ka.sa.sa_flags & SA_IA32_ABI;
}

static inline int is_ia32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
}

static inline int is_x32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
		ksig->ka.sa.sa_flags & SA_X32_ABI;
}

/*
 * Enable all pkeys temporarily, so as to ensure that both the current
 * execution stack as well as the alternate signal stack are writeable.
 * The application can use any of the available pkeys to protect the
 * alternate signal stack, and we don't know which one it is, so enable
 * all. The PKRU register will be reset to init_pkru later in the flow,
 * in fpu__clear_user_states(), and it is the application's responsibility
 * to enable the appropriate pkey as the first step in the signal handler
 * so that the handler does not segfault.
 */
static inline u32 sig_prepare_pkru(void)
{
	u32 orig_pkru = read_pkru();

	write_pkru(0);
	return orig_pkru;
}

/*
 * Set up a signal frame.
 */

/* x86 ABI requires 16-byte alignment */
#define FRAME_ALIGNMENT	16UL

#define MAX_FRAME_PADDING	(FRAME_ALIGNMENT - 1)

/*
 * Determine which stack to use..
 */
void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size,
	     void __user **fpstate)
{
	struct k_sigaction *ka = &ksig->ka;
	int ia32_frame = is_ia32_frame(ksig);
	/* Default to using normal stack */
	bool nested_altstack = on_sig_stack(regs->sp);
	bool entering_altstack = false;
	unsigned long math_size = 0;
	unsigned long sp = regs->sp;
	unsigned long buf_fx = 0;
	u32 pkru;

	/* redzone */
	if (!ia32_frame)
		sp -= 128;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		/*
		 * This checks nested_altstack via sas_ss_flags(). Sensible
		 * programs use SS_AUTODISARM, which disables that check, and
		 * programs that don't use SS_AUTODISARM get compatible
		 * semantics.
		 */
		if (sas_ss_flags(sp) == 0) {
			sp = current->sas_ss_sp + current->sas_ss_size;
			entering_altstack = true;
		}
	} else if (ia32_frame &&
		   !nested_altstack &&
		   regs->ss != __USER_DS &&
		   !(ka->sa.sa_flags & SA_RESTORER) &&
		   ka->sa.sa_restorer) {
		/* This is the legacy signal stack switching. */
		sp = (unsigned long) ka->sa.sa_restorer;
		entering_altstack = true;
	}

	sp = fpu__alloc_mathframe(sp, ia32_frame, &buf_fx, &math_size);
	*fpstate = (void __user *)sp;

	sp -= frame_size;

	if (ia32_frame)
		/*
		 * Align the stack pointer according to the i386 ABI,
		 * i.e. so that on function entry ((sp + 4) & 15) == 0.
		 */
		sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
	else
		sp = round_down(sp, FRAME_ALIGNMENT) - 8;

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (unlikely((nested_altstack || entering_altstack) &&
		     !__on_sig_stack(sp))) {

		if (show_unhandled_signals && printk_ratelimit())
			pr_info("%s[%d] overflowed sigaltstack\n",
				current->comm, task_pid_nr(current));

		return (void __user *)-1L;
	}

	/* Update PKRU to enable access to the alternate signal stack. */
	pkru = sig_prepare_pkru();

	/* save i387 and extended state */
	if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx,
				      math_size, pkru)) {
		/*
		 * Restore PKRU to the original, user-defined value; disable
		 * extra pkeys enabled for the alternate signal stack, if any.
		 */
		write_pkru(pkru);
		return (void __user *)-1L;
	}

	return (void __user *)sp;
}

/*
 * There are four different struct types for signal frame: sigframe_ia32,
 * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
 * -- the largest size. It means the size for 64-bit apps is a bit more
 * than needed, but this keeps the code simple.
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct sigframe_ia32)
#else
# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct rt_sigframe)
#endif

/*
 * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
 * If a signal frame starts at an unaligned address, extra space is required.
 * This is the max alignment padding, conservatively.
 */
#define MAX_XSAVE_PADDING	63UL

/*
 * The frame data is composed of the following areas and laid out as:
 *
 * -------------------------
 * | alignment padding     |
 * -------------------------
 * | (f)xsave frame        |
 * -------------------------
 * | fsave header          |
 * -------------------------
 * | alignment padding     |
 * -------------------------
 * | siginfo + ucontext    |
 * -------------------------
 */

/* max_frame_size tells userspace the worst case signal stack size. */
static unsigned long __ro_after_init max_frame_size;
static unsigned int __ro_after_init fpu_default_state_size;

static int __init init_sigframe_size(void)
{
	fpu_default_state_size = fpu__get_fpstate_size();

	max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;

	max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING;

	/* Userspace expects an aligned size. */
	max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);

	pr_info("max sigframe size: %lu\n", max_frame_size);
	return 0;
}
early_initcall(init_sigframe_size);

unsigned long get_sigframe_size(void)
{
	return max_frame_size;
}

static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	/* Perform fixup for the pre-signal frame. */
	rseq_signal_deliver(ksig, regs);

	/* Set up the stack frame */
	if (is_ia32_frame(ksig)) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			return ia32_setup_rt_frame(ksig, regs);
		else
			return ia32_setup_frame(ksig, regs);
	} else if (is_x32_frame(ksig)) {
		return x32_setup_rt_frame(ksig, regs);
	} else {
		return x64_setup_rt_frame(ksig, regs);
	}
}

static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	bool stepping, failed;
	struct fpu *fpu = &current->thread.fpu;

	if (v8086_mode(regs))
		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) != -1) {
		/* If so, check system call restarting.. */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
	 * so that register information in the sigcontext is correct and
	 * then notify the tracer before entering the signal handler.
	 */
	stepping = test_thread_flag(TIF_SINGLESTEP);
	if (stepping)
		user_disable_single_step(current);

	failed = (setup_rt_frame(ksig, regs) < 0);
	if (!failed) {
		/*
		 * Clear the direction flag as per the ABI for function entry.
		 *
		 * Clear RF when entering the signal handler, because
		 * it might disable possible debug exception from the
		 * signal handler.
		 *
		 * Clear TF for the case when it wasn't set by debugger to
		 * avoid the recursive send_sigtrap() in SIGTRAP handler.
		 */
		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
		/*
		 * Ensure the signal handler starts with the new fpu state.
		 */
		fpu__clear_user_states(fpu);
	}
	signal_setup_done(failed, ksig, stepping);
}

static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
#ifdef CONFIG_IA32_EMULATION