// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Hui Huang <hui.huang@nokia.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct net *net,
						  struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);

	peer->last_time_heard = 0;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt = net->sctp.max_retrans_path;
	peer->pf_retrans = net->sctp.pf_retrans;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
	timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
	timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
	timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0);
	timer_setup(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, 0);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	refcount_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(struct net *net,
					  const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = kzalloc(sizeof(*transport), gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(net, transport, addr, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}

/* This transport is no longer needed.  Free it up if possible, or
 * delay until its last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	transport->dead = 1;

	/* Try to delete the heartbeat timer.  */
	if (timer_delete(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
* There is no point in not doing this now and letting * structure hang around in memory since we know * the transport is going away. */ if (timer_delete(&transport->T3_rtx_timer)) sctp_transport_put(transport); if (timer_delete(&transport->reconf_timer)) sctp_transport_put(transport); if (timer_delete(&transport->probe_timer)) sctp_transport_put(transport); /* Delete the ICMP proto unreachable timer if it's active. */ if (timer_delete(&transport->proto_unreach_timer)) sctp_transport_put(transport); sctp_transport_put(transport); } static void sctp_transport_destroy_rcu(struct rcu_head *head) { struct sctp_transport *transport; transport = container_of(head, struct sctp_transport, rcu); dst_release(transport->dst); kfree(transport); SCTP_DBG_OBJCNT_DEC(transport); } /* Destroy the transport data structure. * Assumes there are no more users of this structure. */ static void sctp_transport_destroy(struct sctp_transport *transport) { if (unlikely(refcount_read(&transport->refcnt))) { WARN(1, "Attempt to destroy undead transport %p!\n", transport); return; } sctp_packet_free(&transport->packet); if (transport->asoc) sctp_association_put(transport->asoc); call_rcu(&transport->rcu, sctp_transport_destroy_rcu); } /* Start T3_rtx timer if it is not already running and update the heartbeat * timer. This routine is called every time a DATA chunk is sent. */ void sctp_transport_reset_t3_rtx(struct sctp_transport *transport) { /* RFC 2960 6.3.2 Retransmission Timer Rules * * R1) Every time a DATA chunk is sent to any address(including a * retransmission), if the T3-rtx timer of that address is not running * start it running so that it will expire after the RTO of that * address. */ if (!timer_pending(&transport->T3_rtx_timer)) if (!mod_timer(&transport->T3_rtx_timer, jiffies + transport->rto)) sctp_transport_hold(transport); } void sctp_transport_reset_hb_timer(struct sctp_transport *transport) { unsigned long expires; /* When a data chunk is sent, reset the heartbeat interval. */ expires = jiffies + sctp_transport_timeout(transport); if (!mod_timer(&transport->hb_timer, expires + get_random_u32_below(transport->rto))) sctp_transport_hold(transport); } void sctp_transport_reset_reconf_timer(struct sctp_transport *transport) { if (!timer_pending(&transport->reconf_timer)) if (!mod_timer(&transport->reconf_timer, jiffies + transport->rto)) sctp_transport_hold(transport); } void sctp_transport_reset_probe_timer(struct sctp_transport *transport) { if (!mod_timer(&transport->probe_timer, jiffies + transport->probe_interval)) sctp_transport_hold(transport); } void sctp_transport_reset_raise_timer(struct sctp_transport *transport) { if (!mod_timer(&transport->probe_timer, jiffies + transport->probe_interval * 30)) sctp_transport_hold(transport); } /* This transport has been assigned to an association. * Initialize fields from the association or from the sock itself. * Register the reference count in the association. */ void sctp_transport_set_owner(struct sctp_transport *transport, struct sctp_association *asoc) { transport->asoc = asoc; sctp_association_hold(asoc); } /* Initialize the pmtu of a transport. 
*/ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) { /* If we don't have a fresh route, look one up */ if (!transport->dst || transport->dst->obsolete) { sctp_transport_dst_release(transport); transport->af_specific->get_dst(transport, &transport->saddr, &transport->fl, sk); } if (transport->param_flags & SPP_PMTUD_DISABLE) { struct sctp_association *asoc = transport->asoc; if (!transport->pathmtu && asoc && asoc->pathmtu) transport->pathmtu = asoc->pathmtu; if (transport->pathmtu) return; } if (transport->dst) transport->pathmtu = sctp_dst_mtu(transport->dst); else transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; sctp_transport_pl_update(transport); } void sctp_transport_pl_send(struct sctp_transport *t) { if (t->pl.probe_count < SCTP_MAX_PROBES) goto out; t->pl.probe_count = 0; if (t->pl.state == SCTP_PL_BASE) { if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */ t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); } } else if (t->pl.state == SCTP_PL_SEARCH) { if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */ t->pl.state = SCTP_PL_BASE; /* Search -> Base */ t->pl.probe_size = SCTP_BASE_PLPMTU; t->pl.probe_high = 0; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); } else { /* Normal probe failure. */ t->pl.probe_high = t->pl.probe_size; t->pl.probe_size = t->pl.pmtu; } } else if (t->pl.state == SCTP_PL_COMPLETE) { if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */ t->pl.state = SCTP_PL_BASE; /* Search Complete -> Base */ t->pl.probe_size = SCTP_BASE_PLPMTU; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); } } out: pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); t->pl.probe_count++; } bool sctp_transport_pl_recv(struct sctp_transport *t) { pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); t->pl.pmtu = t->pl.probe_size; t->pl.probe_count = 0; if (t->pl.state == SCTP_PL_BASE) { t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */ t->pl.probe_size += SCTP_PL_BIG_STEP; } else if (t->pl.state == SCTP_PL_ERROR) { t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */ t->pl.pmtu = t->pl.probe_size; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); t->pl.probe_size += SCTP_PL_BIG_STEP; } else if (t->pl.state == SCTP_PL_SEARCH) { if (!t->pl.probe_high) { if (t->pl.probe_size < SCTP_MAX_PLPMTU) { t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP, SCTP_MAX_PLPMTU); return false; } t->pl.probe_high = SCTP_MAX_PLPMTU; } t->pl.probe_size += SCTP_PL_MIN_STEP; if (t->pl.probe_size >= t->pl.probe_high) { t->pl.probe_high = 0; t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */ t->pl.probe_size = t->pl.pmtu; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); sctp_transport_reset_raise_timer(t); } } else if (t->pl.state == SCTP_PL_COMPLETE) { /* Raise probe_size again after 30 * interval in Search Complete */ t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU); } return t->pl.state == SCTP_PL_COMPLETE; } static 
bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) { pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n", __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu); if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size) return false; if (t->pl.state == SCTP_PL_BASE) { if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) { t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); return true; } } else if (t->pl.state == SCTP_PL_SEARCH) { if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { t->pl.state = SCTP_PL_BASE; /* Search -> Base */ t->pl.probe_size = SCTP_BASE_PLPMTU; t->pl.probe_count = 0; t->pl.probe_high = 0; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); return true; } else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) { t->pl.probe_size = pmtu; t->pl.probe_count = 0; } } else if (t->pl.state == SCTP_PL_COMPLETE) { if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { t->pl.state = SCTP_PL_BASE; /* Complete -> Base */ t->pl.probe_size = SCTP_BASE_PLPMTU; t->pl.probe_count = 0; t->pl.probe_high = 0; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_transport_reset_probe_timer(t); return true; } } return false; } bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) { struct sock *sk = t->asoc->base.sk; struct dst_entry *dst; bool change = true; if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n", __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); /* Use default minimum segment instead */ pmtu = SCTP_DEFAULT_MINSEGMENT; } pmtu = SCTP_TRUNC4(pmtu); if (sctp_transport_pl_enabled(t)) return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t)); dst = sctp_transport_dst_check(t); if (dst) { struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family); union sctp_addr addr; pf->af->from_sk(&addr, sk); pf->to_sk_daddr(&t->ipaddr, sk); dst->ops->update_pmtu(dst, sk, NULL, pmtu, true); pf->to_sk_daddr(&addr, sk); dst = sctp_transport_dst_check(t); } if (!dst) { t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); dst = t->dst; } if (dst) { /* Re-fetch, as under layers may have a higher minimum size */ pmtu = sctp_dst_mtu(dst); change = t->pathmtu != pmtu; } t->pathmtu = pmtu; return change; } /* Caches the dst entry and source address for a transport's destination * address. */ void sctp_transport_route(struct sctp_transport *transport, union sctp_addr *saddr, struct sctp_sock *opt) { struct sctp_association *asoc = transport->asoc; struct sctp_af *af = transport->af_specific; sctp_transport_dst_release(transport); af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt)); if (saddr) memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); else af->get_saddr(opt, transport, &transport->fl); sctp_transport_pmtu(transport, sctp_opt2sk(opt)); /* Initialize sk->sk_rcv_saddr, if the transport is the * association's active path for getsockname(). */ if (transport->dst && asoc && (!asoc->peer.primary_path || transport == asoc->peer.active_path)) opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk); } /* Hold a reference to a transport. */ int sctp_transport_hold(struct sctp_transport *transport) { return refcount_inc_not_zero(&transport->refcnt); } /* Release a reference to a transport and clean up * if there are no more references. 
*/ void sctp_transport_put(struct sctp_transport *transport) { if (refcount_dec_and_test(&transport->refcnt)) sctp_transport_destroy(transport); } /* Update transport's RTO based on the newly calculated RTT. */ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) { if (unlikely(!tp->rto_pending)) /* We should not be doing any RTO updates unless rto_pending is set. */ pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp); if (tp->rttvar || tp->srtt) { struct net *net = tp->asoc->base.net; /* 6.3.1 C3) When a new RTT measurement R' is made, set * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' */ /* Note: The above algorithm has been rewritten to * express rto_beta and rto_alpha as inverse powers * of two. * For example, assuming the default value of RTO.Alpha of * 1/8, rto_alpha would be expressed as 3. */ tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) + (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta); tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) + (rtt >> net->sctp.rto_alpha); } else { /* 6.3.1 C2) When the first RTT measurement R is made, set * SRTT <- R, RTTVAR <- R/2. */ tp->srtt = rtt; tp->rttvar = rtt >> 1; } /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY. */ if (tp->rttvar == 0) tp->rttvar = SCTP_CLOCK_GRANULARITY; /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */ tp->rto = tp->srtt + (tp->rttvar << 2); /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min * seconds then it is rounded up to RTO.Min seconds. */ if (tp->rto < tp->asoc->rto_min) tp->rto = tp->asoc->rto_min; /* 6.3.1 C7) A maximum value may be placed on RTO provided it is * at least RTO.max seconds. */ if (tp->rto > tp->asoc->rto_max) tp->rto = tp->asoc->rto_max; sctp_max_rto(tp->asoc, tp); tp->rtt = rtt; /* Reset rto_pending so that a new RTT measurement is started when a * new data chunk is sent. */ tp->rto_pending = 0; pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n", __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto); } /* This routine updates the transport's cwnd and partial_bytes_acked * parameters based on the bytes acked in the received SACK. */ void sctp_transport_raise_cwnd(struct sctp_transport *transport, __u32 sack_ctsn, __u32 bytes_acked) { struct sctp_association *asoc = transport->asoc; __u32 cwnd, ssthresh, flight_size, pba, pmtu; cwnd = transport->cwnd; flight_size = transport->flight_size; /* See if we need to exit Fast Recovery first */ if (asoc->fast_recovery && TSN_lte(asoc->fast_recovery_exit, sack_ctsn)) asoc->fast_recovery = 0; ssthresh = transport->ssthresh; pba = transport->partial_bytes_acked; pmtu = transport->asoc->pathmtu; if (cwnd <= ssthresh) { /* RFC 4960 7.2.1 * o When cwnd is less than or equal to ssthresh, an SCTP * endpoint MUST use the slow-start algorithm to increase * cwnd only if the current congestion window is being fully * utilized, an incoming SACK advances the Cumulative TSN * Ack Point, and the data sender is not in Fast Recovery. * Only when these three conditions are met can the cwnd be * increased; otherwise, the cwnd MUST not be increased. * If these conditions are met, then cwnd MUST be increased * by, at most, the lesser of 1) the total size of the * previously outstanding DATA chunk(s) acknowledged, and * 2) the destination's path MTU. 
This upper bound protects * against the ACK-Splitting attack outlined in [SAVAGE99]. */ if (asoc->fast_recovery) return; /* The appropriate cwnd increase algorithm is performed * if, and only if the congestion window is being fully * utilized. Note that RFC4960 Errata 3.22 removed the * other condition on ctsn moving. */ if (flight_size < cwnd) return; if (bytes_acked > pmtu) cwnd += pmtu; else cwnd += bytes_acked; pr_debug("%s: slow start: transport:%p, bytes_acked:%d, " "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n", __func__, transport, bytes_acked, cwnd, ssthresh, flight_size, pba); } else { /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, * upon each SACK arrival, increase partial_bytes_acked * by the total number of bytes of all new chunks * acknowledged in that SACK including chunks * acknowledged by the new Cumulative TSN Ack and by Gap * Ack Blocks. (updated by RFC4960 Errata 3.22) * * When partial_bytes_acked is greater than cwnd and * before the arrival of the SACK the sender had less * bytes of data outstanding than cwnd (i.e., before * arrival of the SACK, flightsize was less than cwnd), * reset partial_bytes_acked to cwnd. (RFC 4960 Errata * 3.26) * * When partial_bytes_acked is equal to or greater than * cwnd and before the arrival of the SACK the sender * had cwnd or more bytes of data outstanding (i.e., * before arrival of the SACK, flightsize was greater * than or equal to cwnd), partial_bytes_acked is reset * to (partial_bytes_acked - cwnd). Next, cwnd is * increased by MTU. (RFC 4960 Errata 3.12) */ pba += bytes_acked; if (pba > cwnd && flight_size < cwnd) pba = cwnd; if (pba >= cwnd && flight_size >= cwnd) { pba = pba - cwnd; cwnd += pmtu; } pr_debug("%s: congestion avoidance: transport:%p, " "bytes_acked:%d, cwnd:%d, ssthresh:%d, " "flight_size:%d, pba:%d\n", __func__, transport, bytes_acked, cwnd, ssthresh, flight_size, pba); } transport->cwnd = cwnd; transport->partial_bytes_acked = pba; } /* This routine is used to lower the transport's cwnd when congestion is * detected. */ void sctp_transport_lower_cwnd(struct sctp_transport *transport, enum sctp_lower_cwnd reason) { struct sctp_association *asoc = transport->asoc; switch (reason) { case SCTP_LOWER_CWND_T3_RTX: /* RFC 2960 Section 7.2.3, sctpimpguide * When the T3-rtx timer expires on an address, SCTP should * perform slow start by: * ssthresh = max(cwnd/2, 4*MTU) * cwnd = 1*MTU * partial_bytes_acked = 0 */ transport->ssthresh = max(transport->cwnd/2, 4*asoc->pathmtu); transport->cwnd = asoc->pathmtu; /* T3-rtx also clears fast recovery */ asoc->fast_recovery = 0; break; case SCTP_LOWER_CWND_FAST_RTX: /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the * destination address(es) to which the missing DATA chunks * were last sent, according to the formula described in * Section 7.2.3. * * RFC 2960 7.2.3, sctpimpguide Upon detection of packet * losses from SACK (see Section 7.2.4), An endpoint * should do the following: * ssthresh = max(cwnd/2, 4*MTU) * cwnd = ssthresh * partial_bytes_acked = 0 */ if (asoc->fast_recovery) return; /* Mark Fast recovery */ asoc->fast_recovery = 1; asoc->fast_recovery_exit = asoc->next_tsn - 1; transport->ssthresh = max(transport->cwnd/2, 4*asoc->pathmtu); transport->cwnd = transport->ssthresh; break; case SCTP_LOWER_CWND_ECNE: /* RFC 2481 Section 6.1.2. * If the sender receives an ECN-Echo ACK packet * then the sender knows that congestion was encountered in the * network on the path from the sender to the receiver. 
The * indication of congestion should be treated just as a * congestion loss in non-ECN Capable TCP. That is, the TCP * source halves the congestion window "cwnd" and reduces the * slow start threshold "ssthresh". * A critical condition is that TCP does not react to * congestion indications more than once every window of * data (or more loosely more than once every round-trip time). */ if (time_after(jiffies, transport->last_time_ecne_reduced + transport->rtt)) { transport->ssthresh = max(transport->cwnd/2, 4*asoc->pathmtu); transport->cwnd = transport->ssthresh; transport->last_time_ecne_reduced = jiffies; } break; case SCTP_LOWER_CWND_INACTIVE: /* RFC 2960 Section 7.2.1, sctpimpguide * When the endpoint does not transmit data on a given * transport address, the cwnd of the transport address * should be adjusted to max(cwnd/2, 4*MTU) per RTO. * NOTE: Although the draft recommends that this check needs * to be done every RTO interval, we do it every hearbeat * interval. */ transport->cwnd = max(transport->cwnd/2, 4*asoc->pathmtu); /* RFC 4960 Errata 3.27.2: also adjust sshthresh */ transport->ssthresh = transport->cwnd; break; } transport->partial_bytes_acked = 0; pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n", __func__, transport, reason, transport->cwnd, transport->ssthresh); } /* Apply Max.Burst limit to the congestion window: * sctpimpguide-05 2.14.2 * D) When the time comes for the sender to * transmit new DATA chunks, the protocol parameter Max.Burst MUST * first be applied to limit how many new DATA chunks may be sent. * The limit is applied by adjusting cwnd as follows: * if ((flightsize+ Max.Burst * MTU) < cwnd) * cwnd = flightsize + Max.Burst * MTU */ void sctp_transport_burst_limited(struct sctp_transport *t) { struct sctp_association *asoc = t->asoc; u32 old_cwnd = t->cwnd; u32 max_burst_bytes; if (t->burst_limited || asoc->max_burst == 0) return; max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); if (max_burst_bytes < old_cwnd) { t->cwnd = max_burst_bytes; t->burst_limited = old_cwnd; } } /* Restore the old cwnd congestion window, after the burst had it's * desired effect. */ void sctp_transport_burst_reset(struct sctp_transport *t) { if (t->burst_limited) { t->cwnd = t->burst_limited; t->burst_limited = 0; } } /* What is the next timeout value for this transport? */ unsigned long sctp_transport_timeout(struct sctp_transport *trans) { /* RTO + timer slack +/- 50% of RTO */ unsigned long timeout = trans->rto >> 1; if (trans->state != SCTP_UNCONFIRMED && trans->state != SCTP_PF) timeout += trans->hbinterval; return max_t(unsigned long, timeout, HZ / 5); } /* Reset transport variables to their initial values */ void sctp_transport_reset(struct sctp_transport *t) { struct sctp_association *asoc = t->asoc; /* RFC 2960 (bis), Section 5.2.4 * All the congestion control parameters (e.g., cwnd, ssthresh) * related to this peer MUST be reset to their initial values * (see Section 6.2.1) */ t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); t->burst_limited = 0; t->ssthresh = asoc->peer.i.a_rwnd; t->rto = asoc->rto_initial; sctp_max_rto(asoc, t); t->rtt = 0; t->srtt = 0; t->rttvar = 0; /* Reset these additional variables so that we have a clean slate. 
 */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (timer_delete(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
}

/* Drop dst */
void sctp_transport_dst_release(struct sctp_transport *t)
{
	dst_release(t->dst);
	t->dst = NULL;
	t->dst_pending_confirm = 0;
}

/* Schedule neighbour confirm */
void sctp_transport_dst_confirm(struct sctp_transport *t)
{
	t->dst_pending_confirm = 1;
}
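The RTO update in sctp_transport_update_rto() above folds RTO.Alpha = 1/8 and RTO.Beta = 1/4 into right shifts (the kernel's default rto_alpha of 3 and rto_beta of 2, as the in-code comment notes). The standalone sketch below mirrors that shift arithmetic in userspace so the smoothing can be traced by hand; the millisecond clamp values and the sample RTT series are illustrative assumptions, not values taken from the kernel sources.

/*
 * Illustrative, standalone sketch of the SRTT/RTTVAR/RTO update above
 * (RFC 4960 6.3.1), with RTO.Alpha and RTO.Beta expressed as right
 * shifts exactly as rto_alpha/rto_beta are (3 -> 1/8, 2 -> 1/4).
 * Not kernel code: the clamp constants and sample RTTs are made up
 * for demonstration only.
 */
#include <stdio.h>

#define RTO_ALPHA_SHIFT		3	/* RTO.Alpha = 1/8 */
#define RTO_BETA_SHIFT		2	/* RTO.Beta  = 1/4 */
#define RTO_MIN_MS		1000	/* example RTO.Min  */
#define RTO_MAX_MS		60000	/* example RTO.Max  */
#define CLOCK_GRANULARITY_MS	1	/* example granularity */

struct rto_state {
	unsigned int srtt;
	unsigned int rttvar;
	unsigned int rto;
};

static void rto_update(struct rto_state *s, unsigned int rtt)
{
	if (s->srtt == 0 && s->rttvar == 0) {
		/* C2) first measurement: SRTT <- R, RTTVAR <- R/2 */
		s->srtt = rtt;
		s->rttvar = rtt >> 1;
	} else {
		/* C3) RTTVAR <- 3/4 RTTVAR + 1/4 |SRTT - R'|,
		 *     SRTT   <- 7/8 SRTT   + 1/8 R'
		 */
		unsigned int diff = s->srtt > rtt ? s->srtt - rtt
						  : rtt - s->srtt;

		s->rttvar = s->rttvar - (s->rttvar >> RTO_BETA_SHIFT) +
			    (diff >> RTO_BETA_SHIFT);
		s->srtt = s->srtt - (s->srtt >> RTO_ALPHA_SHIFT) +
			  (rtt >> RTO_ALPHA_SHIFT);
	}

	/* G1) avoid a zero variance */
	if (s->rttvar == 0)
		s->rttvar = CLOCK_GRANULARITY_MS;

	/* RTO <- SRTT + 4 * RTTVAR, clamped to [RTO.Min, RTO.Max] */
	s->rto = s->srtt + (s->rttvar << 2);
	if (s->rto < RTO_MIN_MS)
		s->rto = RTO_MIN_MS;
	if (s->rto > RTO_MAX_MS)
		s->rto = RTO_MAX_MS;
}

int main(void)
{
	struct rto_state s = { 0, 0, 0 };
	unsigned int samples[] = { 120, 140, 100, 400, 110 };	/* ms, made up */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rto_update(&s, samples[i]);
		printf("rtt=%u srtt=%u rttvar=%u rto=%u\n",
		       samples[i], s.srtt, s.rttvar, s.rto);
	}
	return 0;
}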
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/key.h>
#include <keys/ceph-type.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/nsproxy.h>
#include <linux/fs_parser.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include "crypto.h"

/*
 * Module compatibility interface.  For now it doesn't do anything,
 * but its existence signals a certain level of functionality.
 *
 * The data buffer is used to pass information both to and from
 * libceph.  The return value indicates whether libceph determines
 * it is compatible with the caller (from another kernel module),
 * given the provided data.
 *
 * The data pointer can be null.
 */
bool libceph_compatible(void *data)
{
	return true;
}
EXPORT_SYMBOL(libceph_compatible);

static int param_get_supported_features(char *buffer,
					const struct kernel_param *kp)
{
	return sprintf(buffer, "0x%llx", CEPH_FEATURES_SUPPORTED_DEFAULT);
}
static const struct kernel_param_ops param_ops_supported_features = {
	.get = param_get_supported_features,
};
module_param_cb(supported_features, &param_ops_supported_features, NULL,
		0444);

const char *ceph_msg_type_name(int type)
{
	switch (type) {
	case CEPH_MSG_SHUTDOWN: return "shutdown";
	case CEPH_MSG_PING: return "ping";
	case CEPH_MSG_AUTH: return "auth";
	case CEPH_MSG_AUTH_REPLY: return "auth_reply";
	case CEPH_MSG_MON_MAP: return "mon_map";
	case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
	case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
	case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
	case CEPH_MSG_STATFS: return "statfs";
	case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
	case CEPH_MSG_MON_GET_VERSION: return "mon_get_version";
	case CEPH_MSG_MON_GET_VERSION_REPLY: return "mon_get_version_reply";
	case CEPH_MSG_MDS_MAP: return "mds_map";
	case CEPH_MSG_FS_MAP_USER: return "fs_map_user";
	case CEPH_MSG_CLIENT_SESSION: return "client_session";
	case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
	case CEPH_MSG_CLIENT_REQUEST: return "client_request";
	case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
	case CEPH_MSG_CLIENT_REPLY: return "client_reply";
	case CEPH_MSG_CLIENT_CAPS: return "client_caps";
	case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
	case CEPH_MSG_CLIENT_QUOTA: return "client_quota";
	case CEPH_MSG_CLIENT_SNAP: return "client_snap";
	case CEPH_MSG_CLIENT_LEASE: return "client_lease";
	case CEPH_MSG_POOLOP_REPLY: return "poolop_reply";
	case CEPH_MSG_POOLOP: return "poolop";
	case CEPH_MSG_MON_COMMAND: return "mon_command";
	case CEPH_MSG_MON_COMMAND_ACK: return "mon_command_ack";
	case CEPH_MSG_OSD_MAP: return "osd_map";
	case CEPH_MSG_OSD_OP: return "osd_op";
	case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
	case CEPH_MSG_WATCH_NOTIFY: return "watch_notify";
	case CEPH_MSG_OSD_BACKOFF: return "osd_backoff";
	default: return "unknown";
	}
}
EXPORT_SYMBOL(ceph_msg_type_name);

/*
 * Initially learn our fsid, or verify an fsid matches.
*/ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) { if (client->have_fsid) { if (ceph_fsid_compare(&client->fsid, fsid)) { pr_err("bad fsid, had %pU got %pU", &client->fsid, fsid); return -1; } } else { memcpy(&client->fsid, fsid, sizeof(*fsid)); } return 0; } EXPORT_SYMBOL(ceph_check_fsid); static int strcmp_null(const char *s1, const char *s2) { if (!s1 && !s2) return 0; if (s1 && !s2) return -1; if (!s1 && s2) return 1; return strcmp(s1, s2); } int ceph_compare_options(struct ceph_options *new_opt, struct ceph_client *client) { struct ceph_options *opt1 = new_opt; struct ceph_options *opt2 = client->options; int ofs = offsetof(struct ceph_options, mon_addr); int i; int ret; /* * Don't bother comparing options if network namespaces don't * match. */ if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net))) return -1; ret = memcmp(opt1, opt2, ofs); if (ret) return ret; ret = strcmp_null(opt1->name, opt2->name); if (ret) return ret; if (opt1->key && !opt2->key) return -1; if (!opt1->key && opt2->key) return 1; if (opt1->key && opt2->key) { if (opt1->key->type != opt2->key->type) return -1; if (opt1->key->created.tv_sec != opt2->key->created.tv_sec) return -1; if (opt1->key->created.tv_nsec != opt2->key->created.tv_nsec) return -1; if (opt1->key->len != opt2->key->len) return -1; if (opt1->key->key && !opt2->key->key) return -1; if (!opt1->key->key && opt2->key->key) return 1; if (opt1->key->key && opt2->key->key) { ret = memcmp(opt1->key->key, opt2->key->key, opt1->key->len); if (ret) return ret; } } ret = ceph_compare_crush_locs(&opt1->crush_locs, &opt2->crush_locs); if (ret) return ret; /* any matching mon ip implies a match */ for (i = 0; i < opt1->num_mon; i++) { if (ceph_monmap_contains(client->monc.monmap, &opt1->mon_addr[i])) return 0; } return -1; } EXPORT_SYMBOL(ceph_compare_options); int ceph_parse_fsid(const char *str, struct ceph_fsid *fsid) { int i = 0; char tmp[3]; int err = -EINVAL; int d; dout("%s '%s'\n", __func__, str); tmp[2] = 0; while (*str && i < 16) { if (ispunct(*str)) { str++; continue; } if (!isxdigit(str[0]) || !isxdigit(str[1])) break; tmp[0] = str[0]; tmp[1] = str[1]; if (sscanf(tmp, "%x", &d) < 1) break; fsid->fsid[i] = d & 0xff; i++; str += 2; } if (i == 16) err = 0; dout("%s ret %d got fsid %pU\n", __func__, err, fsid); return err; } EXPORT_SYMBOL(ceph_parse_fsid); /* * ceph options */ enum { Opt_osdkeepalivetimeout, Opt_mount_timeout, Opt_osd_idle_ttl, Opt_osd_request_timeout, /* int args above */ Opt_fsid, Opt_name, Opt_secret, Opt_key, Opt_ip, Opt_crush_location, Opt_read_from_replica, Opt_ms_mode, /* string args above */ Opt_share, Opt_crc, Opt_cephx_require_signatures, Opt_cephx_sign_messages, Opt_tcp_nodelay, Opt_abort_on_full, Opt_rxbounce, }; enum { Opt_read_from_replica_no, Opt_read_from_replica_balance, Opt_read_from_replica_localize, }; static const struct constant_table ceph_param_read_from_replica[] = { {"no", Opt_read_from_replica_no}, {"balance", Opt_read_from_replica_balance}, {"localize", Opt_read_from_replica_localize}, {} }; enum ceph_ms_mode { Opt_ms_mode_legacy, Opt_ms_mode_crc, Opt_ms_mode_secure, Opt_ms_mode_prefer_crc, Opt_ms_mode_prefer_secure }; static const struct constant_table ceph_param_ms_mode[] = { {"legacy", Opt_ms_mode_legacy}, {"crc", Opt_ms_mode_crc}, {"secure", Opt_ms_mode_secure}, {"prefer-crc", Opt_ms_mode_prefer_crc}, {"prefer-secure", Opt_ms_mode_prefer_secure}, {} }; static const struct fs_parameter_spec ceph_parameters[] = { fsparam_flag ("abort_on_full", Opt_abort_on_full), 
__fsparam (NULL, "cephx_require_signatures", Opt_cephx_require_signatures, fs_param_neg_with_no|fs_param_deprecated, NULL), fsparam_flag_no ("cephx_sign_messages", Opt_cephx_sign_messages), fsparam_flag_no ("crc", Opt_crc), fsparam_string ("crush_location", Opt_crush_location), fsparam_string ("fsid", Opt_fsid), fsparam_string ("ip", Opt_ip), fsparam_string ("key", Opt_key), fsparam_u32 ("mount_timeout", Opt_mount_timeout), fsparam_string ("name", Opt_name), fsparam_u32 ("osd_idle_ttl", Opt_osd_idle_ttl), fsparam_u32 ("osd_request_timeout", Opt_osd_request_timeout), fsparam_u32 ("osdkeepalive", Opt_osdkeepalivetimeout), fsparam_enum ("read_from_replica", Opt_read_from_replica, ceph_param_read_from_replica), fsparam_flag ("rxbounce", Opt_rxbounce), fsparam_enum ("ms_mode", Opt_ms_mode, ceph_param_ms_mode), fsparam_string ("secret", Opt_secret), fsparam_flag_no ("share", Opt_share), fsparam_flag_no ("tcp_nodelay", Opt_tcp_nodelay), {} }; struct ceph_options *ceph_alloc_options(void) { struct ceph_options *opt; opt = kzalloc(sizeof(*opt), GFP_KERNEL); if (!opt) return NULL; opt->crush_locs = RB_ROOT; opt->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*opt->mon_addr), GFP_KERNEL); if (!opt->mon_addr) { kfree(opt); return NULL; } opt->flags = CEPH_OPT_DEFAULT; opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT; opt->read_from_replica = CEPH_READ_FROM_REPLICA_DEFAULT; opt->con_modes[0] = CEPH_CON_MODE_UNKNOWN; opt->con_modes[1] = CEPH_CON_MODE_UNKNOWN; return opt; } EXPORT_SYMBOL(ceph_alloc_options); void ceph_destroy_options(struct ceph_options *opt) { dout("destroy_options %p\n", opt); if (!opt) return; ceph_clear_crush_locs(&opt->crush_locs); kfree(opt->name); if (opt->key) { ceph_crypto_key_destroy(opt->key); kfree(opt->key); } kfree(opt->mon_addr); kfree(opt); } EXPORT_SYMBOL(ceph_destroy_options); /* get secret from key store */ static int get_secret(struct ceph_crypto_key *dst, const char *name, struct p_log *log) { struct key *ukey; int key_err; int err = 0; struct ceph_crypto_key *ckey; ukey = request_key(&key_type_ceph, name, NULL); if (IS_ERR(ukey)) { /* request_key errors don't map nicely to mount(2) errors; don't even try, but still printk */ key_err = PTR_ERR(ukey); switch (key_err) { case -ENOKEY: error_plog(log, "Failed due to key not found: %s", name); break; case -EKEYEXPIRED: error_plog(log, "Failed due to expired key: %s", name); break; case -EKEYREVOKED: error_plog(log, "Failed due to revoked key: %s", name); break; default: error_plog(log, "Failed due to key error %d: %s", key_err, name); } err = -EPERM; goto out; } ckey = ukey->payload.data[0]; err = ceph_crypto_key_clone(dst, ckey); if (err) goto out_key; /* pass through, err is 0 */ out_key: key_put(ukey); out: return err; } int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt, struct fc_log *l, char delim) { struct p_log log = {.prefix = "libceph", .log = l}; int ret; /* ip1[:port1][<delim>ip2[:port2]...] 
*/ ret = ceph_parse_ips(buf, buf + len, opt->mon_addr, CEPH_MAX_MON, &opt->num_mon, delim); if (ret) { error_plog(&log, "Failed to parse monitor IPs: %d", ret); return ret; } return 0; } EXPORT_SYMBOL(ceph_parse_mon_ips); int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt, struct fc_log *l) { struct fs_parse_result result; int token, err; struct p_log log = {.prefix = "libceph", .log = l}; token = __fs_parse(&log, ceph_parameters, param, &result); dout("%s fs_parse '%s' token %d\n", __func__, param->key, token); if (token < 0) return token; switch (token) { case Opt_ip: err = ceph_parse_ips(param->string, param->string + param->size, &opt->my_addr, 1, NULL, ','); if (err) { error_plog(&log, "Failed to parse ip: %d", err); return err; } opt->flags |= CEPH_OPT_MYIP; break; case Opt_fsid: err = ceph_parse_fsid(param->string, &opt->fsid); if (err) { error_plog(&log, "Failed to parse fsid: %d", err); return err; } opt->flags |= CEPH_OPT_FSID; break; case Opt_name: kfree(opt->name); opt->name = param->string; param->string = NULL; break; case Opt_secret: ceph_crypto_key_destroy(opt->key); kfree(opt->key); opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); if (!opt->key) return -ENOMEM; err = ceph_crypto_key_unarmor(opt->key, param->string); if (err) { error_plog(&log, "Failed to parse secret: %d", err); return err; } break; case Opt_key: ceph_crypto_key_destroy(opt->key); kfree(opt->key); opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); if (!opt->key) return -ENOMEM; return get_secret(opt->key, param->string, &log); case Opt_crush_location: ceph_clear_crush_locs(&opt->crush_locs); err = ceph_parse_crush_location(param->string, &opt->crush_locs); if (err) { error_plog(&log, "Failed to parse CRUSH location: %d", err); return err; } break; case Opt_read_from_replica: switch (result.uint_32) { case Opt_read_from_replica_no: opt->read_from_replica = 0; break; case Opt_read_from_replica_balance: opt->read_from_replica = CEPH_OSD_FLAG_BALANCE_READS; break; case Opt_read_from_replica_localize: opt->read_from_replica = CEPH_OSD_FLAG_LOCALIZE_READS; break; default: BUG(); } break; case Opt_ms_mode: switch (result.uint_32) { case Opt_ms_mode_legacy: opt->con_modes[0] = CEPH_CON_MODE_UNKNOWN; opt->con_modes[1] = CEPH_CON_MODE_UNKNOWN; break; case Opt_ms_mode_crc: opt->con_modes[0] = CEPH_CON_MODE_CRC; opt->con_modes[1] = CEPH_CON_MODE_UNKNOWN; break; case Opt_ms_mode_secure: opt->con_modes[0] = CEPH_CON_MODE_SECURE; opt->con_modes[1] = CEPH_CON_MODE_UNKNOWN; break; case Opt_ms_mode_prefer_crc: opt->con_modes[0] = CEPH_CON_MODE_CRC; opt->con_modes[1] = CEPH_CON_MODE_SECURE; break; case Opt_ms_mode_prefer_secure: opt->con_modes[0] = CEPH_CON_MODE_SECURE; opt->con_modes[1] = CEPH_CON_MODE_CRC; break; default: BUG(); } break; case Opt_osdkeepalivetimeout: /* 0 isn't well defined right now, reject it */ if (result.uint_32 < 1 || result.uint_32 > INT_MAX / 1000) goto out_of_range; opt->osd_keepalive_timeout = msecs_to_jiffies(result.uint_32 * 1000); break; case Opt_osd_idle_ttl: /* 0 isn't well defined right now, reject it */ if (result.uint_32 < 1 || result.uint_32 > INT_MAX / 1000) goto out_of_range; opt->osd_idle_ttl = msecs_to_jiffies(result.uint_32 * 1000); break; case Opt_mount_timeout: /* 0 is "wait forever" (i.e. infinite timeout) */ if (result.uint_32 > INT_MAX / 1000) goto out_of_range; opt->mount_timeout = msecs_to_jiffies(result.uint_32 * 1000); break; case Opt_osd_request_timeout: /* 0 is "wait forever" (i.e. 
infinite timeout) */ if (result.uint_32 > INT_MAX / 1000) goto out_of_range; opt->osd_request_timeout = msecs_to_jiffies(result.uint_32 * 1000); break; case Opt_share: if (!result.negated) opt->flags &= ~CEPH_OPT_NOSHARE; else opt->flags |= CEPH_OPT_NOSHARE; break; case Opt_crc: if (!result.negated) opt->flags &= ~CEPH_OPT_NOCRC; else opt->flags |= CEPH_OPT_NOCRC; break; case Opt_cephx_require_signatures: if (!result.negated) warn_plog(&log, "Ignoring cephx_require_signatures"); else warn_plog(&log, "Ignoring nocephx_require_signatures, use nocephx_sign_messages"); break; case Opt_cephx_sign_messages: if (!result.negated) opt->flags &= ~CEPH_OPT_NOMSGSIGN; else opt->flags |= CEPH_OPT_NOMSGSIGN; break; case Opt_tcp_nodelay: if (!result.negated) opt->flags |= CEPH_OPT_TCP_NODELAY; else opt->flags &= ~CEPH_OPT_TCP_NODELAY; break; case Opt_abort_on_full: opt->flags |= CEPH_OPT_ABORT_ON_FULL; break; case Opt_rxbounce: opt->flags |= CEPH_OPT_RXBOUNCE; break; default: BUG(); } return 0; out_of_range: return inval_plog(&log, "%s out of range", param->key); } EXPORT_SYMBOL(ceph_parse_param); int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, bool show_all) { struct ceph_options *opt = client->options; size_t pos = m->count; struct rb_node *n; if (opt->name) { seq_puts(m, "name="); seq_escape(m, opt->name, ", \t\n\\"); seq_putc(m, ','); } if (opt->key) seq_puts(m, "secret=<hidden>,"); if (!RB_EMPTY_ROOT(&opt->crush_locs)) { seq_puts(m, "crush_location="); for (n = rb_first(&opt->crush_locs); ; ) { struct crush_loc_node *loc = rb_entry(n, struct crush_loc_node, cl_node); seq_printf(m, "%s:%s", loc->cl_loc.cl_type_name, loc->cl_loc.cl_name); n = rb_next(n); if (!n) break; seq_putc(m, '|'); } seq_putc(m, ','); } if (opt->read_from_replica == CEPH_OSD_FLAG_BALANCE_READS) { seq_puts(m, "read_from_replica=balance,"); } else if (opt->read_from_replica == CEPH_OSD_FLAG_LOCALIZE_READS) { seq_puts(m, "read_from_replica=localize,"); } if (opt->con_modes[0] != CEPH_CON_MODE_UNKNOWN) { if (opt->con_modes[0] == CEPH_CON_MODE_CRC && opt->con_modes[1] == CEPH_CON_MODE_UNKNOWN) { seq_puts(m, "ms_mode=crc,"); } else if (opt->con_modes[0] == CEPH_CON_MODE_SECURE && opt->con_modes[1] == CEPH_CON_MODE_UNKNOWN) { seq_puts(m, "ms_mode=secure,"); } else if (opt->con_modes[0] == CEPH_CON_MODE_CRC && opt->con_modes[1] == CEPH_CON_MODE_SECURE) { seq_puts(m, "ms_mode=prefer-crc,"); } else if (opt->con_modes[0] == CEPH_CON_MODE_SECURE && opt->con_modes[1] == CEPH_CON_MODE_CRC) { seq_puts(m, "ms_mode=prefer-secure,"); } } if (opt->flags & CEPH_OPT_FSID) seq_printf(m, "fsid=%pU,", &opt->fsid); if (opt->flags & CEPH_OPT_NOSHARE) seq_puts(m, "noshare,"); if (opt->flags & CEPH_OPT_NOCRC) seq_puts(m, "nocrc,"); if (opt->flags & CEPH_OPT_NOMSGSIGN) seq_puts(m, "nocephx_sign_messages,"); if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0) seq_puts(m, "notcp_nodelay,"); if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL)) seq_puts(m, "abort_on_full,"); if (opt->flags & CEPH_OPT_RXBOUNCE) seq_puts(m, "rxbounce,"); if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) seq_printf(m, "mount_timeout=%d,", jiffies_to_msecs(opt->mount_timeout) / 1000); if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT) seq_printf(m, "osd_idle_ttl=%d,", jiffies_to_msecs(opt->osd_idle_ttl) / 1000); if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) seq_printf(m, "osdkeepalivetimeout=%d,", jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000); if (opt->osd_request_timeout != CEPH_OSD_REQUEST_TIMEOUT_DEFAULT) seq_printf(m, 
"osd_request_timeout=%d,", jiffies_to_msecs(opt->osd_request_timeout) / 1000); /* drop redundant comma */ if (m->count != pos) m->count--; return 0; } EXPORT_SYMBOL(ceph_print_client_options); struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client) { return &client->msgr.inst.addr; } EXPORT_SYMBOL(ceph_client_addr); u64 ceph_client_gid(struct ceph_client *client) { return client->monc.auth->global_id; } EXPORT_SYMBOL(ceph_client_gid); /* * create a fresh client instance */ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private) { struct ceph_client *client; struct ceph_entity_addr *myaddr = NULL; int err; err = wait_for_random_bytes(); if (err < 0) return ERR_PTR(err); client = kzalloc(sizeof(*client), GFP_KERNEL); if (client == NULL) return ERR_PTR(-ENOMEM); client->private = private; client->options = opt; mutex_init(&client->mount_mutex); init_waitqueue_head(&client->auth_wq); client->auth_err = 0; client->extra_mon_dispatch = NULL; client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT; client->required_features = CEPH_FEATURES_REQUIRED_DEFAULT; if (!ceph_test_opt(client, NOMSGSIGN)) client->required_features |= CEPH_FEATURE_MSG_AUTH; /* msgr */ if (ceph_test_opt(client, MYIP)) myaddr = &client->options->my_addr; ceph_messenger_init(&client->msgr, myaddr); /* subsystems */ err = ceph_monc_init(&client->monc, client); if (err < 0) goto fail; err = ceph_osdc_init(&client->osdc, client); if (err < 0) goto fail_monc; return client; fail_monc: ceph_monc_stop(&client->monc); fail: ceph_messenger_fini(&client->msgr); kfree(client); return ERR_PTR(err); } EXPORT_SYMBOL(ceph_create_client); void ceph_destroy_client(struct ceph_client *client) { dout("destroy_client %p\n", client); atomic_set(&client->msgr.stopping, 1); /* unmount */ ceph_osdc_stop(&client->osdc); ceph_monc_stop(&client->monc); ceph_messenger_fini(&client->msgr); ceph_debugfs_client_cleanup(client); ceph_destroy_options(client->options); kfree(client); dout("destroy_client %p done\n", client); } EXPORT_SYMBOL(ceph_destroy_client); void ceph_reset_client_addr(struct ceph_client *client) { ceph_messenger_reset_nonce(&client->msgr); ceph_monc_reopen_session(&client->monc); ceph_osdc_reopen_osds(&client->osdc); } EXPORT_SYMBOL(ceph_reset_client_addr); /* * true if we have the mon map (and have thus joined the cluster) */ static bool have_mon_and_osd_map(struct ceph_client *client) { return client->monc.monmap && client->monc.monmap->epoch && client->osdc.osdmap && client->osdc.osdmap->epoch; } /* * mount: join the ceph cluster, and open root directory. 
 */
int __ceph_open_session(struct ceph_client *client, unsigned long started)
{
	unsigned long timeout = client->options->mount_timeout;
	long err;

	/* open session, and wait for mon and osd maps */
	err = ceph_monc_open_session(&client->monc);
	if (err < 0)
		return err;

	while (!have_mon_and_osd_map(client)) {
		if (timeout && time_after_eq(jiffies, started + timeout))
			return -ETIMEDOUT;

		/* wait */
		dout("mount waiting for mon_map\n");
		err = wait_event_interruptible_timeout(client->auth_wq,
			have_mon_and_osd_map(client) || (client->auth_err < 0),
			ceph_timeout_jiffies(timeout));
		if (err < 0)
			return err;
		if (client->auth_err < 0)
			return client->auth_err;
	}

	pr_info("client%llu fsid %pU\n", ceph_client_gid(client),
		&client->fsid);
	ceph_debugfs_client_init(client);

	return 0;
}
EXPORT_SYMBOL(__ceph_open_session);

int ceph_open_session(struct ceph_client *client)
{
	int ret;
	unsigned long started = jiffies;  /* note the start time */

	dout("open_session start\n");
	mutex_lock(&client->mount_mutex);

	ret = __ceph_open_session(client, started);

	mutex_unlock(&client->mount_mutex);
	return ret;
}
EXPORT_SYMBOL(ceph_open_session);

int ceph_wait_for_latest_osdmap(struct ceph_client *client,
				unsigned long timeout)
{
	u64 newest_epoch;
	int ret;

	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
	if (ret)
		return ret;

	if (client->osdc.osdmap->epoch >= newest_epoch)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest_epoch, timeout);
}
EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);

static int __init init_ceph_lib(void)
{
	int ret = 0;

	ceph_debugfs_init();

	ret = ceph_crypto_init();
	if (ret < 0)
		goto out_debugfs;

	ret = ceph_msgr_init();
	if (ret < 0)
		goto out_crypto;

	ret = ceph_osdc_setup();
	if (ret < 0)
		goto out_msgr;

	pr_info("loaded (mon/osd proto %d/%d)\n",
		CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL);

	return 0;

out_msgr:
	ceph_msgr_exit();
out_crypto:
	ceph_crypto_shutdown();
out_debugfs:
	ceph_debugfs_cleanup();
	return ret;
}

static void __exit exit_ceph_lib(void)
{
	dout("exit_ceph_lib\n");

	WARN_ON(!ceph_strings_empty());

	ceph_osdc_cleanup();
	ceph_msgr_exit();
	ceph_crypto_shutdown();
	ceph_debugfs_cleanup();
}

module_init(init_ceph_lib);
module_exit(exit_ceph_lib);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph core library");
MODULE_LICENSE("GPL");
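ceph_parse_fsid() above accepts the fsid either as a bare hex string or in the usual dashed UUID form, because punctuation is skipped before each pair of hex digits is converted into one of the 16 fsid bytes. Below is a small userspace sketch of that same loop, handy for checking an fsid string by hand; the example_ names and the sample UUID are local to this illustration and are not part of libceph.

/*
 * Standalone sketch of the fsid parsing loop in ceph_parse_fsid() above:
 * punctuation (e.g. the dashes of a UUID) is skipped and every remaining
 * pair of hex digits becomes one of the 16 fsid bytes.  Userspace-only
 * illustration; the struct name and return convention are local to this
 * example, not the kernel's.
 */
#include <ctype.h>
#include <stdio.h>

struct example_fsid {
	unsigned char b[16];
};

static int example_parse_fsid(const char *str, struct example_fsid *fsid)
{
	int i = 0;
	char tmp[3] = { 0, 0, 0 };
	unsigned int d;

	while (*str && i < 16) {
		if (ispunct((unsigned char)*str)) {
			str++;		/* skip '-' and friends */
			continue;
		}
		if (!isxdigit((unsigned char)str[0]) ||
		    !isxdigit((unsigned char)str[1]))
			break;
		tmp[0] = str[0];
		tmp[1] = str[1];
		if (sscanf(tmp, "%x", &d) < 1)
			break;
		fsid->b[i++] = d & 0xff;
		str += 2;
	}
	return i == 16 ? 0 : -1;	/* 0 on success, like the kernel helper */
}

int main(void)
{
	struct example_fsid fsid;
	const char *uuid = "01234567-89ab-cdef-0123-456789abcdef";	/* sample */
	int j;

	if (example_parse_fsid(uuid, &fsid)) {
		fprintf(stderr, "parse failed\n");
		return 1;
	}
	for (j = 0; j < 16; j++)
		printf("%02x", fsid.b[j]);
	printf("\n");
	return 0;
}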
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RNG: Random Number Generator algorithms under the crypto API
 *
 * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_RNG_H
#define _CRYPTO_RNG_H

#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>

struct crypto_rng;

/**
 * struct rng_alg - random number generator definition
 *
 * @generate:	The function defined by this variable obtains a
 *		random number. The random number generator transform
 *		must generate the random number out of the context
 *		provided with this call, plus any additional data
 *		if provided to the call.
 * @seed:	Seed or reseed the random number generator.  With the
 *		invocation of this function call, the random number
 *		generator shall become ready for generation.  If the
 *		random number generator requires a seed for setting
 *		up a new state, the seed must be provided by the
 *		consumer while invoking this function. The required
 *		size of the seed is defined with @seedsize .
 * @set_ent:	Set entropy that would otherwise be obtained from
 *		entropy source.  Internal use only.
 * @seedsize:	The seed size required for a random number generator
 *		initialization defined with this variable. Some
 *		random number generators do not require a seed
 *		as the seeding is implemented internally without
 *		the need of support by the consumer. In this case,
 *		the seed size is set to zero.
 * @base:	Common crypto API algorithm data structure.
 */
struct rng_alg {
	int (*generate)(struct crypto_rng *tfm,
			const u8 *src, unsigned int slen,
			u8 *dst, unsigned int dlen);
	int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
	void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
			unsigned int len);

	unsigned int seedsize;

	struct crypto_alg base;
};

struct crypto_rng {
	struct crypto_tfm base;
};

extern struct crypto_rng *crypto_default_rng;

int crypto_get_default_rng(void);
void crypto_put_default_rng(void);

/**
 * DOC: Random number generator API
 *
 * The random number generator API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
 */

/**
 * crypto_alloc_rng() -- allocate RNG handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      random number generator
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a random number generator. The returned struct
 * crypto_rng is the cipher handle that is required for any subsequent
 * API invocation for that random number generator.
 *
 * For all random number generators, this call creates a new private copy of
 * the random number generator that does not share a state with other
 * instances.
The only exception is the "krng" random number generator which * is a kernel crypto API use case for the get_random_bytes() function of the * /dev/random driver. * * Return: allocated cipher handle in case of success; IS_ERR() is true in case * of an error, PTR_ERR() returns the error code. */ struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask); static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) { return &tfm->base; } static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg) { return container_of(alg, struct rng_alg, base); } /** * crypto_rng_alg - obtain name of RNG * @tfm: cipher handle * * Return the generic name (cra_name) of the initialized random number generator * * Return: generic name string */ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) { return __crypto_rng_alg(crypto_rng_tfm(tfm)->__crt_alg); } /** * crypto_free_rng() - zeroize and free RNG handle * @tfm: cipher handle to be freed * * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_rng(struct crypto_rng *tfm) { crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); } /** * crypto_rng_generate() - get random number * @tfm: cipher handle * @src: Input buffer holding additional data, may be NULL * @slen: Length of additional data * @dst: output buffer holding the random numbers * @dlen: length of the output buffer * * This function fills the caller-allocated buffer with random * numbers using the random number generator referenced by the * cipher handle. * * Return: 0 function was successful; < 0 if an error occurred */ static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); } /** * crypto_rng_get_bytes() - get random number * @tfm: cipher handle * @rdata: output buffer holding the random numbers * @dlen: length of the output buffer * * This function fills the caller-allocated buffer with random numbers using the * random number generator referenced by the cipher handle. * * Return: 0 function was successful; < 0 if an error occurred */ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen) { return crypto_rng_generate(tfm, NULL, 0, rdata, dlen); } /** * crypto_rng_reset() - re-initialize the RNG * @tfm: cipher handle * @seed: seed input data * @slen: length of the seed input data * * The reset function completely re-initializes the random number generator * referenced by the cipher handle by clearing the current state. The new state * is initialized with the caller provided seed or automatically, depending * on the random number generator type (the ANSI X9.31 RNG requires * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding). * The seed is provided as a parameter to this function call. The provided seed * should have the length of the seed size defined for the random number * generator as defined by crypto_rng_seedsize. * * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); /** * crypto_rng_seedsize() - obtain seed size of RNG * @tfm: cipher handle * * The function returns the seed size for the random number generator * referenced by the cipher handle. This value may be zero if the random * number generator does not implement or require a reseeding. 
For example, * the SP800-90A DRBGs implement an automated reseeding after reaching a * pre-defined threshold. * * Return: seed size for the random number generator */ static inline int crypto_rng_seedsize(struct crypto_rng *tfm) { return crypto_rng_alg(tfm)->seedsize; } #endif |
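The calls documented above compose into the usual allocate/seed/generate/free sequence. A minimal sketch, assuming the "stdrng" algorithm name resolves to an available DRBG and that seeding from get_random_bytes() is acceptable for the caller (example_fill_random() is hypothetical):

#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/random.h>

static int example_fill_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	u8 seed[32];
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* seed (or reseed) before the first generate call */
	get_random_bytes(seed, sizeof(seed));
	ret = crypto_rng_reset(rng, seed, sizeof(seed));
	if (!ret)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}

For an algorithm whose crypto_rng_seedsize() is zero (for example an SP800-90A DRBG), the reset call may be made with a NULL seed and zero length.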
2087 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SMP_H #define __LINUX_SMP_H /* * Generic SMP support * Alan Cox. <alan@redhat.com> */ #include <linux/errno.h> #include <linux/types.h> #include <linux/list.h> #include <linux/cpumask.h> #include <linux/init.h> #include <linux/smp_types.h> typedef void (*smp_call_func_t)(void *info); typedef bool (*smp_cond_func_t)(int cpu, void *info); /* * structure shares (partial) layout with struct irq_work */ struct __call_single_data { struct __call_single_node node; smp_call_func_t func; void *info; }; #define CSD_INIT(_func, _info) \ (struct __call_single_data){ .func = (_func), .info = (_info), } /* Use __aligned() to avoid to use 2 cache lines for 1 csd */ typedef struct __call_single_data call_single_data_t __aligned(sizeof(struct __call_single_data)); #define INIT_CSD(_csd, _func, _info) \ do { \ *(_csd) = CSD_INIT((_func), (_info)); \ } while (0) /* * Enqueue a llist_node on the call_single_queue; be very careful, read * flush_smp_call_function_queue() in detail. */ extern void __smp_call_single_queue(int cpu, struct llist_node *node); /* total number of cpus in this system (may exceed NR_CPUS) */ extern unsigned int total_cpus; int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, int wait); void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, void *info, bool wait, const struct cpumask *mask); int smp_call_function_single_async(int cpu, call_single_data_t *csd); /* * Cpus stopping functions in panic. All have default weak definitions. * Architecture-dependent code may override them. */ void __noreturn panic_smp_self_stop(void); void __noreturn nmi_panic_self_stop(struct pt_regs *regs); void crash_smp_send_stop(void); /* * Call a function on all processors */ static inline void on_each_cpu(smp_call_func_t func, void *info, int wait) { on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask); } /** * on_each_cpu_mask(): Run a function on processors specified by * cpumask, which may include the local processor. * @mask: The set of cpus to run on (only runs on online subset). * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait (atomically) until function has completed * on other CPUs. * * If @wait is true, then returns once @func has returned. 
* * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. The * exception is that it may be used during early boot while * early_boot_irqs_disabled is set. */ static inline void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) { on_each_cpu_cond_mask(NULL, func, info, wait, mask); } /* * Call a function on each processor for which the supplied function * cond_func returns a positive value. This may include the local * processor. May be used during early boot while early_boot_irqs_disabled is * set. Use local_irq_save/restore() instead of local_irq_disable/enable(). */ static inline void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func, void *info, bool wait) { on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask); } /* * Architecture specific boot CPU setup. Defined as empty weak function in * init/main.c. Architectures can override it. */ void __init smp_prepare_boot_cpu(void); #ifdef CONFIG_SMP #include <linux/preempt.h> #include <linux/compiler.h> #include <linux/thread_info.h> #include <asm/smp.h> /* * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. * (defined in asm header): */ /* * stops all CPUs but the current one: */ extern void smp_send_stop(void); /* * sends a 'reschedule' event to another CPU: */ extern void arch_smp_send_reschedule(int cpu); /* * scheduler_ipi() is inline so can't be passed as callback reason, but the * callsite IP should be sufficient for root-causing IPIs sent from here. */ #define smp_send_reschedule(cpu) ({ \ trace_ipi_send_cpu(cpu, _RET_IP_, NULL); \ arch_smp_send_reschedule(cpu); \ }) /* * Prepare machine for booting other CPUs. */ extern void smp_prepare_cpus(unsigned int max_cpus); /* * Bring a CPU up */ extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle); /* * Final polishing of CPUs */ extern void smp_cpus_done(unsigned int max_cpus); /* * Call a function on all other processors */ void smp_call_function(smp_call_func_t func, void *info, int wait); void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait); int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait); void kick_all_cpus_sync(void); void wake_up_all_idle_cpus(void); /* * Generic and arch helpers */ void __init call_function_init(void); void generic_smp_call_function_single_interrupt(void); #define generic_smp_call_function_interrupt \ generic_smp_call_function_single_interrupt extern unsigned int setup_max_cpus; extern void __init setup_nr_cpu_ids(void); extern void __init smp_init(void); extern int __boot_cpu_id; static inline int get_boot_cpu_id(void) { return __boot_cpu_id; } #else /* !SMP */ static inline void smp_send_stop(void) { } /* * These macros fold the SMP functionality into a single CPU system */ #define raw_smp_processor_id() 0 static inline void up_smp_call_function(smp_call_func_t func, void *info) { } #define smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) static inline void smp_send_reschedule(int cpu) { } #define smp_call_function_many(mask, func, info, wait) \ (up_smp_call_function(func, info)) static inline void call_function_init(void) { } static inline int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait) { return smp_call_function_single(0, func, info, wait); } static inline void kick_all_cpus_sync(void) { } static inline void 
wake_up_all_idle_cpus(void) { } #define setup_max_cpus 0 #ifdef CONFIG_UP_LATE_INIT extern void __init up_late_init(void); static inline void smp_init(void) { up_late_init(); } #else static inline void smp_init(void) { } #endif static inline int get_boot_cpu_id(void) { return 0; } #endif /* !SMP */ /** * raw_processor_id() - get the current (unstable) CPU id * * For then you know what you are doing and need an unstable * CPU id. */ /** * smp_processor_id() - get the current (stable) CPU id * * This is the normal accessor to the CPU id and should be used * whenever possible. * * The CPU id is stable when: * * - IRQs are disabled; * - preemption is disabled; * - the task is CPU affine. * * When CONFIG_DEBUG_PREEMPT; we verify these assumption and WARN * when smp_processor_id() is used when the CPU id is not stable. */ /* * Allow the architecture to differentiate between a stable and unstable read. * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a * regular asm read for the stable. */ #ifndef __smp_processor_id #define __smp_processor_id() raw_smp_processor_id() #endif #ifdef CONFIG_DEBUG_PREEMPT extern unsigned int debug_smp_processor_id(void); # define smp_processor_id() debug_smp_processor_id() #else # define smp_processor_id() __smp_processor_id() #endif #define get_cpu() ({ preempt_disable(); __smp_processor_id(); }) #define put_cpu() preempt_enable() /* * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: */ extern void arch_disable_smp_support(void); extern void arch_thaw_secondary_cpus_begin(void); extern void arch_thaw_secondary_cpus_end(void); void smp_setup_processor_id(void); int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys); /* SMP core functions */ int smpcfd_prepare_cpu(unsigned int cpu); int smpcfd_dead_cpu(unsigned int cpu); int smpcfd_dying_cpu(unsigned int cpu); #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG bool csd_lock_is_stuck(void); #else static inline bool csd_lock_is_stuck(void) { return false; } #endif #endif /* __LINUX_SMP_H */ |
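As a usage sketch of the cross-call interface above (the example_* names are hypothetical; the callback runs in IPI context, so it must be fast, atomic and non-blocking):

#include <linux/smp.h>
#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t example_hits = ATOMIC_INIT(0);

static void example_count(void *info)
{
	atomic_inc(&example_hits);	/* runs on each targeted CPU */
}

static void example_cross_calls(void)
{
	/* run on every online CPU and wait for all of them to finish */
	on_each_cpu(example_count, NULL, 1);

	/* run on one specific CPU (here CPU 0), again waiting */
	smp_call_function_single(0, example_count, NULL, 1);

	pr_info("callback ran %d times\n", atomic_read(&example_hits));
}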
233 232 229 11 79 250 250 250 13 250 249 38 89 26 114 22 15 9 6 157 157 22 132 25 136 102 116 136 135 26 136 9 9 65 77 26 77 379 79 79 79 143 307 120 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 | // SPDX-License-Identifier: GPL-2.0 /* * Tty buffer allocation management */ #include <linux/types.h> #include <linux/errno.h> #include <linux/minmax.h> #include <linux/tty.h> #include <linux/tty_buffer.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/ratelimit.h> #include "tty.h" #define MIN_TTYB_SIZE 256 #define TTYB_ALIGN_MASK 0xff /* * Byte threshold to limit memory consumption for flip buffers. * The actual memory limit is > 2x this amount. */ #define TTYB_DEFAULT_MEM_LIMIT (640 * 1024UL) /* * We default to dicing tty buffer allocations to this many characters * in order to avoid multiple page allocations. We know the size of * tty_buffer itself but it must also be taken into account that the * buffer is 256 byte aligned. 
See tty_buffer_find for the allocation * logic this must match. */ #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~TTYB_ALIGN_MASK) /** * tty_buffer_lock_exclusive - gain exclusive access to buffer * @port: tty port owning the flip buffer * * Guarantees safe use of the &tty_ldisc_ops.receive_buf() method by excluding * the buffer work and any pending flush from using the flip buffer. Data can * continue to be added concurrently to the flip buffer from the driver side. * * See also tty_buffer_unlock_exclusive(). */ void tty_buffer_lock_exclusive(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; atomic_inc(&buf->priority); mutex_lock(&buf->lock); } EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive); /** * tty_buffer_unlock_exclusive - release exclusive access * @port: tty port owning the flip buffer * * The buffer work is restarted if there is data in the flip buffer. * * See also tty_buffer_lock_exclusive(). */ void tty_buffer_unlock_exclusive(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; bool restart = buf->head->commit != buf->head->read; atomic_dec(&buf->priority); mutex_unlock(&buf->lock); if (restart) queue_work(system_unbound_wq, &buf->work); } EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive); /** * tty_buffer_space_avail - return unused buffer space * @port: tty port owning the flip buffer * * Returns: the # of bytes which can be written by the driver without reaching * the buffer limit. * * Note: this does not guarantee that memory is available to write the returned * # of bytes (use tty_prepare_flip_string() to pre-allocate if memory * guarantee is required). */ unsigned int tty_buffer_space_avail(struct tty_port *port) { int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used); return max(space, 0); } EXPORT_SYMBOL_GPL(tty_buffer_space_avail); static void tty_buffer_reset(struct tty_buffer *p, size_t size) { p->used = 0; p->size = size; p->next = NULL; p->commit = 0; p->lookahead = 0; p->read = 0; p->flags = true; } /** * tty_buffer_free_all - free buffers used by a tty * @port: tty port to free from * * Remove all the buffers pending on a tty whether queued with data or in the * free ring. Must be called when the tty is no longer in use. */ void tty_buffer_free_all(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; struct tty_buffer *p, *next; struct llist_node *llist; unsigned int freed = 0; int still_used; while ((p = buf->head) != NULL) { buf->head = p->next; freed += p->size; if (p->size > 0) kfree(p); } llist = llist_del_all(&buf->free); llist_for_each_entry_safe(p, next, llist, free) kfree(p); tty_buffer_reset(&buf->sentinel, 0); buf->head = &buf->sentinel; buf->tail = &buf->sentinel; still_used = atomic_xchg(&buf->mem_used, 0); WARN(still_used != freed, "we still have not freed %d bytes!", still_used - freed); } /** * tty_buffer_alloc - allocate a tty buffer * @port: tty port * @size: desired size (characters) * * Allocate a new tty buffer to hold the desired number of characters. We * round our buffers off in 256 character chunks to get better allocation * behaviour. * * Returns: %NULL if out of memory or the allocation would exceed the per * device queue. 
*/ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size) { struct llist_node *free; struct tty_buffer *p; /* Round the buffer size out */ size = __ALIGN_MASK(size, TTYB_ALIGN_MASK); if (size <= MIN_TTYB_SIZE) { free = llist_del_first(&port->buf.free); if (free) { p = llist_entry(free, struct tty_buffer, free); goto found; } } /* Should possibly check if this fails for the largest buffer we * have queued and recycle that ? */ if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit) return NULL; p = kmalloc(struct_size(p, data, 2 * size), GFP_ATOMIC | __GFP_NOWARN); if (p == NULL) return NULL; found: tty_buffer_reset(p, size); atomic_add(size, &port->buf.mem_used); return p; } /** * tty_buffer_free - free a tty buffer * @port: tty port owning the buffer * @b: the buffer to free * * Free a tty buffer, or add it to the free list according to our internal * strategy. */ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b) { struct tty_bufhead *buf = &port->buf; /* Dumb strategy for now - should keep some stats */ WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0); if (b->size > MIN_TTYB_SIZE) kfree(b); else if (b->size > 0) llist_add(&b->free, &buf->free); } /** * tty_buffer_flush - flush full tty buffers * @tty: tty to flush * @ld: optional ldisc ptr (must be referenced) * * Flush all the buffers containing receive data. If @ld != %NULL, flush the * ldisc input buffer. * * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'. */ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) { struct tty_port *port = tty->port; struct tty_bufhead *buf = &port->buf; struct tty_buffer *next; atomic_inc(&buf->priority); mutex_lock(&buf->lock); /* paired w/ release in __tty_buffer_request_room; ensures there are * no pending memory accesses to the freed buffer */ while ((next = smp_load_acquire(&buf->head->next)) != NULL) { tty_buffer_free(port, buf->head); buf->head = next; } buf->head->read = buf->head->commit; buf->head->lookahead = buf->head->read; if (ld && ld->ops->flush_buffer) ld->ops->flush_buffer(tty); atomic_dec(&buf->priority); mutex_unlock(&buf->lock); } /** * __tty_buffer_request_room - grow tty buffer if needed * @port: tty port * @size: size desired * @flags: buffer has to store flags along character data * * Make at least @size bytes of linear space available for the tty buffer. * * Will change over to a new buffer if the current buffer is encoded as * %TTY_NORMAL (so has no flags buffer) and the new buffer requires a flags * buffer. * * Returns: the size we managed to find. */ static int __tty_buffer_request_room(struct tty_port *port, size_t size, bool flags) { struct tty_bufhead *buf = &port->buf; struct tty_buffer *n, *b = buf->tail; size_t left = (b->flags ? 1 : 2) * b->size - b->used; bool change = !b->flags && flags; if (!change && left >= size) return size; /* This is the slow path - looking for new buffers to use */ n = tty_buffer_alloc(port, size); if (n == NULL) return change ? 0 : left; n->flags = flags; buf->tail = n; /* * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs() * ensures they see all buffer data. */ smp_store_release(&b->commit, b->used); /* * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs() * ensures the latest commit value can be read before the head * is advanced to the next buffer. 
*/ smp_store_release(&b->next, n); return size; } int tty_buffer_request_room(struct tty_port *port, size_t size) { return __tty_buffer_request_room(port, size, true); } EXPORT_SYMBOL_GPL(tty_buffer_request_room); size_t __tty_insert_flip_string_flags(struct tty_port *port, const u8 *chars, const u8 *flags, bool mutable_flags, size_t size) { bool need_flags = mutable_flags || flags[0] != TTY_NORMAL; size_t copied = 0; do { size_t goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE); size_t space = __tty_buffer_request_room(port, goal, need_flags); struct tty_buffer *tb = port->buf.tail; if (unlikely(space == 0)) break; memcpy(char_buf_ptr(tb, tb->used), chars, space); if (mutable_flags) { memcpy(flag_buf_ptr(tb, tb->used), flags, space); flags += space; } else if (tb->flags) { memset(flag_buf_ptr(tb, tb->used), flags[0], space); } else { /* tb->flags should be available once requested */ WARN_ON_ONCE(need_flags); } tb->used += space; copied += space; chars += space; /* There is a small chance that we need to split the data over * several buffers. If this is the case we must loop. */ } while (unlikely(size > copied)); return copied; } EXPORT_SYMBOL(__tty_insert_flip_string_flags); /** * tty_prepare_flip_string - make room for characters * @port: tty port * @chars: return pointer for character write area * @size: desired size * * Prepare a block of space in the buffer for data. * * This is used for drivers that need their own block copy routines into the * buffer. There is no guarantee the buffer is a DMA target! * * Returns: the length available and buffer pointer (@chars) to the space which * is now allocated and accounted for as ready for normal characters. */ size_t tty_prepare_flip_string(struct tty_port *port, u8 **chars, size_t size) { size_t space = __tty_buffer_request_room(port, size, false); if (likely(space)) { struct tty_buffer *tb = port->buf.tail; *chars = char_buf_ptr(tb, tb->used); if (tb->flags) memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space); tb->used += space; } return space; } EXPORT_SYMBOL_GPL(tty_prepare_flip_string); /** * tty_ldisc_receive_buf - forward data to line discipline * @ld: line discipline to process input * @p: char buffer * @f: %TTY_NORMAL, %TTY_BREAK, etc. flags buffer * @count: number of bytes to process * * Callers other than flush_to_ldisc() need to exclude the kworker from * concurrent use of the line discipline, see paste_selection(). * * Returns: the number of bytes processed. */ size_t tty_ldisc_receive_buf(struct tty_ldisc *ld, const u8 *p, const u8 *f, size_t count) { if (ld->ops->receive_buf2) count = ld->ops->receive_buf2(ld->tty, p, f, count); else { count = min_t(size_t, count, ld->tty->receive_room); if (count && ld->ops->receive_buf) ld->ops->receive_buf(ld->tty, p, f, count); } return count; } EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf); static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head) { head->lookahead = max(head->lookahead, head->read); while (head) { struct tty_buffer *next; unsigned int count; /* * Paired w/ release in __tty_buffer_request_room(); * ensures commit value read is not stale if the head * is advancing to the next buffer. */ next = smp_load_acquire(&head->next); /* * Paired w/ release in __tty_buffer_request_room() or in * tty_buffer_flush(); ensures we see the committed buffer data. 
*/ count = smp_load_acquire(&head->commit) - head->lookahead; if (!count) { head = next; continue; } if (port->client_ops->lookahead_buf) { u8 *p, *f = NULL; p = char_buf_ptr(head, head->lookahead); if (head->flags) f = flag_buf_ptr(head, head->lookahead); port->client_ops->lookahead_buf(port, p, f, count); } head->lookahead += count; } } static size_t receive_buf(struct tty_port *port, struct tty_buffer *head, size_t count) { u8 *p = char_buf_ptr(head, head->read); const u8 *f = NULL; size_t n; if (head->flags) f = flag_buf_ptr(head, head->read); n = port->client_ops->receive_buf(port, p, f, count); if (n > 0) memset(p, 0, n); return n; } /** * flush_to_ldisc - flush data from buffer to ldisc * @work: tty structure passed from work queue. * * This routine is called out of the software interrupt to flush data from the * buffer chain to the line discipline. * * The receive_buf() method is single threaded for each tty instance. * * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'. */ static void flush_to_ldisc(struct work_struct *work) { struct tty_port *port = container_of(work, struct tty_port, buf.work); struct tty_bufhead *buf = &port->buf; mutex_lock(&buf->lock); while (1) { struct tty_buffer *head = buf->head; struct tty_buffer *next; size_t count, rcvd; /* Ldisc or user is trying to gain exclusive access */ if (atomic_read(&buf->priority)) break; /* paired w/ release in __tty_buffer_request_room(); * ensures commit value read is not stale if the head * is advancing to the next buffer */ next = smp_load_acquire(&head->next); /* paired w/ release in __tty_buffer_request_room() or in * tty_buffer_flush(); ensures we see the committed buffer data */ count = smp_load_acquire(&head->commit) - head->read; if (!count) { if (next == NULL) break; buf->head = next; tty_buffer_free(port, head); continue; } rcvd = receive_buf(port, head, count); head->read += rcvd; if (rcvd < count) lookahead_bufs(port, head); if (!rcvd) break; if (need_resched()) cond_resched(); } mutex_unlock(&buf->lock); } static inline void tty_flip_buffer_commit(struct tty_buffer *tail) { /* * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees * buffer data. */ smp_store_release(&tail->commit, tail->used); } /** * tty_flip_buffer_push - push terminal buffers * @port: tty port to push * * Queue a push of the terminal flip buffers to the line discipline. Can be * called from IRQ/atomic context. * * In the event of the queue being busy for flipping the work will be held off * and retried later. */ void tty_flip_buffer_push(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; tty_flip_buffer_commit(buf->tail); queue_work(system_unbound_wq, &buf->work); } EXPORT_SYMBOL(tty_flip_buffer_push); /** * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and * push * @port: tty port * @chars: characters * @size: size * * The function combines tty_insert_flip_string() and tty_flip_buffer_push() * with the exception of properly holding the @port->lock. * * To be used only internally (by pty currently). * * Returns: the number added. 
*/ int tty_insert_flip_string_and_push_buffer(struct tty_port *port, const u8 *chars, size_t size) { struct tty_bufhead *buf = &port->buf; unsigned long flags; spin_lock_irqsave(&port->lock, flags); size = tty_insert_flip_string(port, chars, size); if (size) tty_flip_buffer_commit(buf->tail); spin_unlock_irqrestore(&port->lock, flags); queue_work(system_unbound_wq, &buf->work); return size; } /** * tty_buffer_init - prepare a tty buffer structure * @port: tty port to initialise * * Set up the initial state of the buffer management for a tty device. Must be * called before the other tty buffer functions are used. */ void tty_buffer_init(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; mutex_init(&buf->lock); tty_buffer_reset(&buf->sentinel, 0); buf->head = &buf->sentinel; buf->tail = &buf->sentinel; init_llist_head(&buf->free); atomic_set(&buf->mem_used, 0); atomic_set(&buf->priority, 0); INIT_WORK(&buf->work, flush_to_ldisc); buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT; } /** * tty_buffer_set_limit - change the tty buffer memory limit * @port: tty port to change * @limit: memory limit to set * * Change the tty buffer memory limit. * * Must be called before the other tty buffer functions are used. */ int tty_buffer_set_limit(struct tty_port *port, int limit) { if (limit < MIN_TTYB_SIZE) return -EINVAL; port->buf.mem_limit = limit; return 0; } EXPORT_SYMBOL_GPL(tty_buffer_set_limit); /* slave ptys can claim nested buffer lock when handling BRK and INTR */ void tty_buffer_set_lock_subclass(struct tty_port *port) { lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE); } bool tty_buffer_restart_work(struct tty_port *port) { return queue_work(system_unbound_wq, &port->buf.work); } bool tty_buffer_cancel_work(struct tty_port *port) { return cancel_work_sync(&port->buf.work); } void tty_buffer_flush_work(struct tty_port *port) { flush_work(&port->buf.work); } |
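From a serial driver's point of view, the producer side of this machinery is the tty_flip.h helpers built on __tty_buffer_request_room() and the commit/push path above. A minimal RX-path sketch, assuming the driver already owns a tty_port and a recent kernel where tty_insert_flip_string() takes and returns size_t (the example_rx() name is hypothetical):

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/printk.h>

/* Called from the driver's interrupt handler with freshly received bytes. */
static void example_rx(struct tty_port *port, const u8 *data, size_t len)
{
	size_t copied;

	/* queue the bytes as TTY_NORMAL data in the flip buffer */
	copied = tty_insert_flip_string(port, data, len);
	if (copied != len)
		pr_warn_ratelimited("tty: dropped %zu bytes\n", len - copied);

	/* commit the tail buffer and schedule flush_to_ldisc() */
	tty_flip_buffer_push(port);
}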
24 49 296 4 122 614 29 647 296 18 79 2 499 481 478 471 143 80 7 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 | /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM block #if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_BLOCK_H #include <linux/blktrace_api.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> #include <linux/tracepoint.h> #include <uapi/linux/ioprio.h> #define RWBS_LEN 8 #define IOPRIO_CLASS_STRINGS \ { IOPRIO_CLASS_NONE, "none" }, \ { IOPRIO_CLASS_RT, "rt" }, \ { IOPRIO_CLASS_BE, "be" }, \ { IOPRIO_CLASS_IDLE, "idle" }, \ { IOPRIO_CLASS_INVALID, "invalid"} #ifdef CONFIG_BUFFER_HEAD DECLARE_EVENT_CLASS(block_buffer, TP_PROTO(struct buffer_head *bh), TP_ARGS(bh), TP_STRUCT__entry ( __field( dev_t, dev ) __field( sector_t, sector ) __field( size_t, size ) ), TP_fast_assign( __entry->dev = bh->b_bdev->bd_dev; __entry->sector = bh->b_blocknr; __entry->size = bh->b_size; ), TP_printk("%d,%d sector=%llu size=%zu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->sector, __entry->size ) ); /** * block_touch_buffer - mark a buffer accessed * @bh: buffer_head being touched * * Called from touch_buffer(). 
*/ DEFINE_EVENT(block_buffer, block_touch_buffer, TP_PROTO(struct buffer_head *bh), TP_ARGS(bh) ); /** * block_dirty_buffer - mark a buffer dirty * @bh: buffer_head being dirtied * * Called from mark_buffer_dirty(). */ DEFINE_EVENT(block_buffer, block_dirty_buffer, TP_PROTO(struct buffer_head *bh), TP_ARGS(bh) ); #endif /* CONFIG_BUFFER_HEAD */ /** * block_rq_requeue - place block IO request back on a queue * @rq: block IO operation request * * The block operation request @rq is being placed back into queue * @q. For some reason the request was not completed and needs to be * put back in the queue. */ TRACE_EVENT(block_rq_requeue, TP_PROTO(struct request *rq), TP_ARGS(rq), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->ioprio = req_get_ioprio(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __get_str(cmd), (unsigned long long)__entry->sector, __entry->nr_sector, __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT(__entry->ioprio), IOPRIO_PRIO_LEVEL(__entry->ioprio), 0) ); DECLARE_EVENT_CLASS(block_rq_completion, TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( int , error ) __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_pos(rq); __entry->nr_sector = nr_bytes >> 9; __entry->error = blk_status_to_errno(error); __entry->ioprio = req_get_ioprio(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __get_str(cmd), (unsigned long long)__entry->sector, __entry->nr_sector, __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT(__entry->ioprio), IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->error) ); /** * block_rq_complete - block IO operation completed by device driver * @rq: block operations request * @error: status code * @nr_bytes: number of completed bytes * * The block_rq_complete tracepoint event indicates that some portion * of operation request has been completed by the device driver. If * the @rq->bio is %NULL, then there is absolutely no additional work to * do for the request. If @rq->bio is non-NULL then there is * additional work required to complete the request. */ DEFINE_EVENT(block_rq_completion, block_rq_complete, TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes) ); /** * block_rq_error - block IO operation error reported by device driver * @rq: block operations request * @error: status code * @nr_bytes: number of completed bytes * * The block_rq_error tracepoint event indicates that some portion * of operation request has failed as reported by the device driver. 
*/ DEFINE_EVENT(block_rq_completion, block_rq_error, TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes) ); DECLARE_EVENT_CLASS(block_rq, TP_PROTO(struct request *rq), TP_ARGS(rq), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( unsigned int, bytes ) __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->bytes = blk_rq_bytes(rq); __entry->ioprio = req_get_ioprio(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("%d,%d %s %u (%s) %llu + %u %s,%u,%u [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __entry->bytes, __get_str(cmd), (unsigned long long)__entry->sector, __entry->nr_sector, __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT(__entry->ioprio), IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->comm) ); /** * block_rq_insert - insert block operation request into queue * @rq: block IO operation request * * Called immediately before block operation request @rq is inserted * into queue @q. The fields in the operation request @rq struct can * be examined to determine which device and sectors the pending * operation would access. */ DEFINE_EVENT(block_rq, block_rq_insert, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_rq_issue - issue pending block IO request operation to device driver * @rq: block IO operation request * * Called when block operation request @rq from queue @q is sent to a * device driver for processing. */ DEFINE_EVENT(block_rq, block_rq_issue, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_rq_merge - merge request with another one in the elevator * @rq: block IO operation request * * Called when block operation request @rq from queue @q is merged to another * request queued in the elevator. */ DEFINE_EVENT(block_rq, block_rq_merge, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_io_start - insert a request for execution * @rq: block IO operation request * * Called when block operation request @rq is queued for execution */ DEFINE_EVENT(block_rq, block_io_start, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_io_done - block IO operation request completed * @rq: block IO operation request * * Called when block operation request @rq is completed */ DEFINE_EVENT(block_rq, block_io_done, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_bio_complete - completed all work on the block operation * @q: queue holding the block operation * @bio: block operation completed * * This tracepoint indicates there is no further work to do on this * block IO operation @bio. 
*/ TRACE_EVENT(block_bio_complete, TP_PROTO(struct request_queue *q, struct bio *bio), TP_ARGS(q, bio), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned, nr_sector ) __field( int, error ) __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->error = blk_status_to_errno(bio->bi_status); blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, __entry->error) ); DECLARE_EVENT_CLASS(block_bio, TP_PROTO(struct bio *bio), TP_ARGS(bio), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); blk_fill_rwbs(__entry->rwbs, bio->bi_opf); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("%d,%d %s %llu + %u [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, __entry->comm) ); /** * block_bio_bounce - used bounce buffer when processing block operation * @bio: block operation * * A bounce buffer was used to handle the block operation @bio in @q. * This occurs when hardware limitations prevent a direct transfer of * data between the @bio data memory area and the IO device. Use of a * bounce buffer requires extra copying of data and decreases * performance. */ DEFINE_EVENT(block_bio, block_bio_bounce, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_bio_backmerge - merging block operation to the end of an existing operation * @bio: new block operation to merge * * Merging block request @bio to the end of an existing block request. */ DEFINE_EVENT(block_bio, block_bio_backmerge, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_bio_frontmerge - merging block operation to the beginning of an existing operation * @bio: new block operation to merge * * Merging block IO operation @bio to the beginning of an existing block request. */ DEFINE_EVENT(block_bio, block_bio_frontmerge, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_bio_queue - putting new block IO operation in queue * @bio: new block operation * * About to place the block IO operation @bio into queue @q. */ DEFINE_EVENT(block_bio, block_bio_queue, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_getrq - get a free request entry in queue for block IO operations * @bio: pending block IO operation (can be %NULL) * * A request struct has been allocated to handle the block IO operation @bio. */ DEFINE_EVENT(block_bio, block_getrq, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_plug - keep operations requests in request queue * @q: request queue to plug * * Plug the request queue @q. Do not allow block operation requests * to be sent to the device driver. Instead, accumulate requests in * the queue to improve throughput performance of the block device. 
*/ TRACE_EVENT(block_plug, TP_PROTO(struct request_queue *q), TP_ARGS(q), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("[%s]", __entry->comm) ); DECLARE_EVENT_CLASS(block_unplug, TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), TP_ARGS(q, depth, explicit), TP_STRUCT__entry( __field( int, nr_rq ) __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( __entry->nr_rq = depth; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) ); /** * block_unplug - release of operations requests in request queue * @q: request queue to unplug * @depth: number of requests just added to the queue * @explicit: whether this was an explicit unplug, or one from schedule() * * Unplug request queue @q because device driver is scheduled to work * on elements in the request queue. */ DEFINE_EVENT(block_unplug, block_unplug, TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), TP_ARGS(q, depth, explicit) ); /** * block_split - split a single bio struct into two bio structs * @bio: block operation being split * @new_sector: The starting sector for the new bio * * The bio request @bio needs to be split into two bio requests. The newly * created @bio request starts at @new_sector. This split may be required due to * hardware limitations such as operation crossing device boundaries in a RAID * system. */ TRACE_EVENT(block_split, TP_PROTO(struct bio *bio, unsigned int new_sector), TP_ARGS(bio, new_sector), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( sector_t, new_sector ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->new_sector = new_sector; blk_fill_rwbs(__entry->rwbs, bio->bi_opf); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("%d,%d %s %llu / %llu [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, (unsigned long long)__entry->new_sector, __entry->comm) ); /** * block_bio_remap - map request for a logical device to the raw device * @bio: revised operation * @dev: original device for the operation * @from: original sector for the operation * * An operation for a logical device has been mapped to the * raw block device. */ TRACE_EVENT(block_bio_remap, TP_PROTO(struct bio *bio, dev_t dev, sector_t from), TP_ARGS(bio, dev, from), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( dev_t, old_dev ) __field( sector_t, old_sector ) __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->old_dev = dev; __entry->old_sector = from; blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, MAJOR(__entry->old_dev), MINOR(__entry->old_dev), (unsigned long long)__entry->old_sector) ); /** * block_rq_remap - map request for a block operation request * @rq: block IO operation request * @dev: device for the operation * @from: original sector for the operation * * The block operation request @rq in @q has been remapped. 
The block * operation request @rq holds the current information and @from hold * the original sector. */ TRACE_EVENT(block_rq_remap, TP_PROTO(struct request *rq, dev_t dev, sector_t from), TP_ARGS(rq, dev, from), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( dev_t, old_dev ) __field( sector_t, old_sector ) __field( unsigned int, nr_bios ) __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( __entry->dev = disk_devt(rq->q->disk); __entry->sector = blk_rq_pos(rq); __entry->nr_sector = blk_rq_sectors(rq); __entry->old_dev = dev; __entry->old_sector = from; __entry->nr_bios = blk_rq_count_bios(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, MAJOR(__entry->old_dev), MINOR(__entry->old_dev), (unsigned long long)__entry->old_sector, __entry->nr_bios) ); #endif /* _TRACE_BLOCK_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |
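Beyond ftrace/blktrace consumption, kernel code can attach probes to these events directly. A sketch of that pattern, assuming the chosen tracepoint (block_rq_insert here) is exported for module use and that the hypothetical probe module is GPL-licensed:

#include <linux/module.h>
#include <linux/blk-mq.h>
#include <trace/events/block.h>

/* Probe signature: private data pointer, then the TP_PROTO arguments. */
static void example_probe_rq_insert(void *ignore, struct request *rq)
{
	pr_info("rq insert: %u bytes\n", blk_rq_bytes(rq));
}

static int __init example_probe_init(void)
{
	return register_trace_block_rq_insert(example_probe_rq_insert, NULL);
}

static void __exit example_probe_exit(void)
{
	unregister_trace_block_rq_insert(example_probe_rq_insert, NULL);
	tracepoint_synchronize_unregister();	/* wait for in-flight probes */
}

module_init(example_probe_init);
module_exit(example_probe_exit);
MODULE_LICENSE("GPL");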
178 9 954 2224 2229 115 115 114 114 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 | /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/backing-dev.h * * low-level device information and state which is propagated up through * to high-level code. */ #ifndef _LINUX_BACKING_DEV_H #define _LINUX_BACKING_DEV_H #include <linux/kernel.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/writeback.h> #include <linux/backing-dev-defs.h> #include <linux/slab.h> static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) { kref_get(&bdi->refcnt); return bdi; } struct backing_dev_info *bdi_get_by_id(u64 id); void bdi_put(struct backing_dev_info *bdi); __printf(2, 3) int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...); __printf(2, 0) int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args); void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner); void bdi_unregister(struct backing_dev_info *bdi); struct backing_dev_info *bdi_alloc(int node_id); void wb_start_background_writeback(struct bdi_writeback *wb); void wb_workfn(struct work_struct *work); void wb_wait_for_completion(struct wb_completion *done); extern spinlock_t bdi_lock; extern struct list_head bdi_list; extern struct workqueue_struct *bdi_wq; static inline bool wb_has_dirty_io(struct bdi_writeback *wb) { return test_bit(WB_has_dirty_io, &wb->state); } static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi) { /* * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are * any dirty wbs. See wb_update_write_bandwidth(). 
*/ return atomic_long_read(&bdi->tot_write_bandwidth); } static inline void wb_stat_mod(struct bdi_writeback *wb, enum wb_stat_item item, s64 amount) { percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH); } static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { wb_stat_mod(wb, item, 1); } static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { wb_stat_mod(wb, item, -1); } static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { return percpu_counter_read_positive(&wb->stat[item]); } static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) { return percpu_counter_sum_positive(&wb->stat[item]); } extern void wb_writeout_inc(struct bdi_writeback *wb); /* * maximal error of a stat counter. */ static inline unsigned long wb_stat_error(void) { #ifdef CONFIG_SMP return nr_cpu_ids * WB_STAT_BATCH; #else return 1; #endif } /* BDI ratio is expressed as part per 1000000 for finer granularity. */ #define BDI_RATIO_SCALE 10000 u64 bdi_get_min_bytes(struct backing_dev_info *bdi); u64 bdi_get_max_bytes(struct backing_dev_info *bdi); int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio); int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio); int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio); int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes); int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes); int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit); /* * Flags in backing_dev_info::capability * * BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages * should contribute to accounting * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold */ #define BDI_CAP_WRITEBACK (1 << 0) #define BDI_CAP_WRITEBACK_ACCT (1 << 1) #define BDI_CAP_STRICTLIMIT (1 << 2) extern struct backing_dev_info noop_backing_dev_info; int bdi_init(struct backing_dev_info *bdi); /** * writeback_in_progress - determine whether there is writeback in progress * @wb: bdi_writeback of interest * * Determine whether there is writeback waiting to be handled against a * bdi_writeback. */ static inline bool writeback_in_progress(struct bdi_writeback *wb) { return test_bit(WB_writeback_running, &wb->state); } struct backing_dev_info *inode_to_bdi(struct inode *inode); static inline bool mapping_can_writeback(struct address_space *mapping) { return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK; } #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css); struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp); void wb_memcg_offline(struct mem_cgroup *memcg); void wb_blkcg_offline(struct cgroup_subsys_state *css); /** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode * @inode: inode of interest * * Cgroup writeback requires support from the filesystem. Also, both memcg and * iocg have to be on the default hierarchy. Test whether all conditions are * met. * * Note that the test result may change dynamically on the same inode * depending on how memcg and iocg are configured. 
*/ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); return cgroup_subsys_on_dfl(memory_cgrp_subsys) && cgroup_subsys_on_dfl(io_cgrp_subsys) && (bdi->capabilities & BDI_CAP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } /** * wb_find_current - find wb for %current on a bdi * @bdi: bdi of interest * * Find the wb of @bdi which matches both the memcg and blkcg of %current. * Must be called under rcu_read_lock() which protects the returend wb. * NULL if not found. */ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { struct cgroup_subsys_state *memcg_css; struct bdi_writeback *wb; memcg_css = task_css(current, memory_cgrp_id); if (!memcg_css->parent) return &bdi->wb; wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); /* * %current's blkcg equals the effective blkcg of its memcg. No * need to use the relatively expensive cgroup_get_e_css(). */ if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id))) return wb; return NULL; } /** * wb_get_create_current - get or create wb for %current on a bdi * @bdi: bdi of interest * @gfp: allocation mask * * Equivalent to wb_get_create() on %current's memcg. This function is * called from a relatively hot path and optimizes the common cases using * wb_find_current(). */ static inline struct bdi_writeback * wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) { struct bdi_writeback *wb; rcu_read_lock(); wb = wb_find_current(bdi); if (wb && unlikely(!wb_tryget(wb))) wb = NULL; rcu_read_unlock(); if (unlikely(!wb)) { struct cgroup_subsys_state *memcg_css; memcg_css = task_get_css(current, memory_cgrp_id); wb = wb_get_create(bdi, memcg_css, gfp); css_put(memcg_css); } return wb; } /** * inode_to_wb - determine the wb of an inode * @inode: inode of interest * * Returns the wb @inode is currently associated with. The caller must be * holding either @inode->i_lock, the i_pages lock, or the * associated wb's list_lock. */ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && (inode->i_sb->s_iflags & SB_I_CGROUPWB) && (!lockdep_is_held(&inode->i_lock) && !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && !lockdep_is_held(&inode->i_wb->list_lock))); #endif return inode->i_wb; } static inline struct bdi_writeback *inode_to_wb_wbc( struct inode *inode, struct writeback_control *wbc) { /* * If wbc does not have inode attached, it means cgroup writeback was * disabled when wbc started. Just use the default wb in that case. */ return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb; } /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode * @cookie: output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't * holding inode->i_lock, the i_pages lock or wb->list_lock. This * function determines the wb associated with @inode and ensures that the * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). * * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and * can't sleep during the transaction. IRQs may or may not be disabled on * return. */ static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { rcu_read_lock(); /* * Paired with store_release in inode_switch_wbs_work_fn() and * ensures that we see the new wb if we see cleared I_WB_SWITCH. 
*/ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; if (unlikely(cookie->locked)) xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); /* * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages * lock. inode_to_wb() will bark. Deref directly. */ return inode->i_wb; } /** * unlocked_inode_to_wb_end - end inode wb access transaction * @inode: target inode * @cookie: @cookie from unlocked_inode_to_wb_begin() */ static inline void unlocked_inode_to_wb_end(struct inode *inode, struct wb_lock_cookie *cookie) { if (unlikely(cookie->locked)) xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); rcu_read_unlock(); } #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) { return false; } static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { return &bdi->wb; } static inline struct bdi_writeback * wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) { return &bdi->wb; } static inline struct bdi_writeback *inode_to_wb(struct inode *inode) { return &inode_to_bdi(inode)->wb; } static inline struct bdi_writeback *inode_to_wb_wbc( struct inode *inode, struct writeback_control *wbc) { return inode_to_wb(inode); } static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { return inode_to_wb(inode); } static inline void unlocked_inode_to_wb_end(struct inode *inode, struct wb_lock_cookie *cookie) { } static inline void wb_memcg_offline(struct mem_cgroup *memcg) { } static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) { } #endif /* CONFIG_CGROUP_WRITEBACK */ const char *bdi_dev_name(struct backing_dev_info *bdi); #endif /* _LINUX_BACKING_DEV_H */ |
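/*
 * Illustrative sketch (not part of the header above): one way a caller might
 * use the unlocked wb access transaction that unlocked_inode_to_wb_begin()
 * and unlocked_inode_to_wb_end() document. example_mark_reclaimable() is a
 * hypothetical helper; WB_RECLAIMABLE is assumed to be one of the
 * enum wb_stat_item counters declared in backing-dev-defs.h.
 */
static void example_mark_reclaimable(struct inode *inode)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;

	/* Pin the inode->i_wb association without taking i_lock. */
	wb = unlocked_inode_to_wb_begin(inode, &cookie);

	/* Must not sleep here; IRQs may be disabled via the cookie. */
	inc_wb_stat(wb, WB_RECLAIMABLE);

	unlocked_inode_to_wb_end(inode, &cookie);
}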
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar * Copyright (C) 2005-2006, Thomas Gleixner, Russell King * * This file contains the interrupt descriptor management code. Detailed * information is available in Documentation/core-api/genericirq.rst * */ #include <linux/irq.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/maple_tree.h> #include <linux/irqdomain.h> #include <linux/sysfs.h> #include <linux/string_choices.h> #include "internals.h" /* * lockdep: we want to handle all irq_desc locks as a single lock-class: */ static struct lock_class_key irq_desc_lock_class; #if defined(CONFIG_SMP) static int __init irq_affinity_setup(char *str) { alloc_bootmem_cpumask_var(&irq_default_affinity); cpulist_parse(str, irq_default_affinity); /* * Set at least the boot cpu. We don't want to end up with * bugreports caused by random commandline masks */ cpumask_set_cpu(smp_processor_id(), irq_default_affinity); return 1; } __setup("irqaffinity=", irq_affinity_setup); static void __init init_irq_default_affinity(void) { if (!cpumask_available(irq_default_affinity)) zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); if (cpumask_empty(irq_default_affinity)) cpumask_setall(irq_default_affinity); } #else static void __init init_irq_default_affinity(void) { } #endif #ifdef CONFIG_SMP static int alloc_masks(struct irq_desc *desc, int node) { if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, GFP_KERNEL, node)) return -ENOMEM; #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity, GFP_KERNEL, node)) { free_cpumask_var(desc->irq_common_data.affinity); return -ENOMEM; } #endif #ifdef CONFIG_GENERIC_PENDING_IRQ if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) { #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK free_cpumask_var(desc->irq_common_data.effective_affinity); #endif free_cpumask_var(desc->irq_common_data.affinity); return -ENOMEM; } #endif return 0; } static void desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { if (!affinity) affinity = irq_default_affinity; cpumask_copy(desc->irq_common_data.affinity, affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_clear(desc->pending_mask); #endif #ifdef CONFIG_NUMA desc->irq_common_data.node = node; #endif } static void free_masks(struct irq_desc *desc) { #ifdef CONFIG_GENERIC_PENDING_IRQ free_cpumask_var(desc->pending_mask); #endif free_cpumask_var(desc->irq_common_data.affinity); #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK free_cpumask_var(desc->irq_common_data.effective_affinity); #endif } #else static inline int alloc_masks(struct irq_desc *desc, int node) { return
0; } static inline void desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } static inline void free_masks(struct irq_desc *desc) { } #endif static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, const struct cpumask *affinity, struct module *owner) { int cpu; desc->irq_common_data.handler_data = NULL; desc->irq_common_data.msi_desc = NULL; desc->irq_data.common = &desc->irq_common_data; desc->irq_data.irq = irq; desc->irq_data.chip = &no_irq_chip; desc->irq_data.chip_data = NULL; irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); desc->handle_irq = handle_bad_irq; desc->depth = 1; desc->irq_count = 0; desc->irqs_unhandled = 0; desc->tot_count = 0; desc->name = NULL; desc->owner = owner; for_each_possible_cpu(cpu) *per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { }; desc_smp_init(desc, node, affinity); } static unsigned int nr_irqs = NR_IRQS; /** * irq_get_nr_irqs() - Number of interrupts supported by the system. */ unsigned int irq_get_nr_irqs(void) { return nr_irqs; } EXPORT_SYMBOL_GPL(irq_get_nr_irqs); /** * irq_set_nr_irqs() - Set the number of interrupts supported by the system. * @nr: New number of interrupts. * * Return: @nr. */ unsigned int irq_set_nr_irqs(unsigned int nr) { nr_irqs = nr; return nr; } EXPORT_SYMBOL_GPL(irq_set_nr_irqs); static DEFINE_MUTEX(sparse_irq_lock); static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | MT_FLAGS_USE_RCU, sparse_irq_lock); static int irq_find_free_area(unsigned int from, unsigned int cnt) { MA_STATE(mas, &sparse_irqs, 0, 0); if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt)) return -ENOSPC; return mas.index; } static unsigned int irq_find_at_or_after(unsigned int offset) { unsigned long index = offset; struct irq_desc *desc; guard(rcu)(); desc = mt_find(&sparse_irqs, &index, nr_irqs); return desc ? 
irq_desc_get_irq(desc) : nr_irqs; } static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) { MA_STATE(mas, &sparse_irqs, irq, irq); WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0); } static void delete_irq_desc(unsigned int irq) { MA_STATE(mas, &sparse_irqs, irq, irq); mas_erase(&mas); } #ifdef CONFIG_SPARSE_IRQ static const struct kobj_type irq_kobj_type; #endif static int init_desc(struct irq_desc *desc, int irq, int node, unsigned int flags, const struct cpumask *affinity, struct module *owner) { desc->kstat_irqs = alloc_percpu(struct irqstat); if (!desc->kstat_irqs) return -ENOMEM; if (alloc_masks(desc, node)) { free_percpu(desc->kstat_irqs); return -ENOMEM; } raw_spin_lock_init(&desc->lock); lockdep_set_class(&desc->lock, &irq_desc_lock_class); mutex_init(&desc->request_mutex); init_waitqueue_head(&desc->wait_for_threads); desc_set_defaults(irq, desc, node, affinity, owner); irqd_set(&desc->irq_data, flags); irq_resend_init(desc); #ifdef CONFIG_SPARSE_IRQ kobject_init(&desc->kobj, &irq_kobj_type); init_rcu_head(&desc->rcu); #endif return 0; } #ifdef CONFIG_SPARSE_IRQ static void irq_kobj_release(struct kobject *kobj); #ifdef CONFIG_SYSFS static struct kobject *irq_kobj_base; #define IRQ_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) static ssize_t per_cpu_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); ssize_t ret = 0; char *p = ""; int cpu; for_each_possible_cpu(cpu) { unsigned int c = irq_desc_kstat_cpu(desc, cpu); ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c); p = ","; } ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); return ret; } IRQ_ATTR_RO(per_cpu_count); static ssize_t chip_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); ssize_t ret = 0; raw_spin_lock_irq(&desc->lock); if (desc->irq_data.chip && desc->irq_data.chip->name) { ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->irq_data.chip->name); } raw_spin_unlock_irq(&desc->lock); return ret; } IRQ_ATTR_RO(chip_name); static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); ssize_t ret = 0; raw_spin_lock_irq(&desc->lock); if (desc->irq_data.domain) ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq); raw_spin_unlock_irq(&desc->lock); return ret; } IRQ_ATTR_RO(hwirq); static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); ssize_t ret = 0; raw_spin_lock_irq(&desc->lock); ret = sprintf(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? 
"level" : "edge"); raw_spin_unlock_irq(&desc->lock); return ret; } IRQ_ATTR_RO(type); static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); ssize_t ret = 0; raw_spin_lock_irq(&desc->lock); ret = sprintf(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); raw_spin_unlock_irq(&desc->lock); return ret; } IRQ_ATTR_RO(wakeup); static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); ssize_t ret = 0; raw_spin_lock_irq(&desc->lock); if (desc->name) ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name); raw_spin_unlock_irq(&desc->lock); return ret; } IRQ_ATTR_RO(name); static ssize_t actions_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); struct irqaction *action; ssize_t ret = 0; char *p = ""; raw_spin_lock_irq(&desc->lock); for_each_action_of_desc(desc, action) { ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s", p, action->name); p = ","; } raw_spin_unlock_irq(&desc->lock); if (ret) ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); return ret; } IRQ_ATTR_RO(actions); static struct attribute *irq_attrs[] = { &per_cpu_count_attr.attr, &chip_name_attr.attr, &hwirq_attr.attr, &type_attr.attr, &wakeup_attr.attr, &name_attr.attr, &actions_attr.attr, NULL }; ATTRIBUTE_GROUPS(irq); static const struct kobj_type irq_kobj_type = { .release = irq_kobj_release, .sysfs_ops = &kobj_sysfs_ops, .default_groups = irq_groups, }; static void irq_sysfs_add(int irq, struct irq_desc *desc) { if (irq_kobj_base) { /* * Continue even in case of failure as this is nothing * crucial and failures in the late irq_sysfs_init() * cannot be rolled back. */ if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq)) pr_warn("Failed to add kobject for irq %d\n", irq); else desc->istate |= IRQS_SYSFS; } } static void irq_sysfs_del(struct irq_desc *desc) { /* * Only invoke kobject_del() when kobject_add() was successfully * invoked for the descriptor. This covers both early boot, where * sysfs is not initialized yet, and the case of a failed * kobject_add() invocation. 
*/ if (desc->istate & IRQS_SYSFS) kobject_del(&desc->kobj); } static int __init irq_sysfs_init(void) { struct irq_desc *desc; int irq; /* Prevent concurrent irq alloc/free */ irq_lock_sparse(); irq_kobj_base = kobject_create_and_add("irq", kernel_kobj); if (!irq_kobj_base) { irq_unlock_sparse(); return -ENOMEM; } /* Add the already allocated interrupts */ for_each_irq_desc(irq, desc) irq_sysfs_add(irq, desc); irq_unlock_sparse(); return 0; } postcore_initcall(irq_sysfs_init); #else /* !CONFIG_SYSFS */ static const struct kobj_type irq_kobj_type = { .release = irq_kobj_release, }; static void irq_sysfs_add(int irq, struct irq_desc *desc) {} static void irq_sysfs_del(struct irq_desc *desc) {} #endif /* CONFIG_SYSFS */ struct irq_desc *irq_to_desc(unsigned int irq) { return mtree_load(&sparse_irqs, irq); } #ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE EXPORT_SYMBOL_GPL(irq_to_desc); #endif void irq_lock_sparse(void) { mutex_lock(&sparse_irq_lock); } void irq_unlock_sparse(void) { mutex_unlock(&sparse_irq_lock); } static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags, const struct cpumask *affinity, struct module *owner) { struct irq_desc *desc; int ret; desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node); if (!desc) return NULL; ret = init_desc(desc, irq, node, flags, affinity, owner); if (unlikely(ret)) { kfree(desc); return NULL; } return desc; } static void irq_kobj_release(struct kobject *kobj) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); free_masks(desc); free_percpu(desc->kstat_irqs); kfree(desc); } static void delayed_free_desc(struct rcu_head *rhp) { struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu); kobject_put(&desc->kobj); } static void free_desc(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); irq_remove_debugfs_entry(desc); unregister_irq_proc(irq, desc); /* * sparse_irq_lock protects also show_interrupts() and * kstat_irq_usr(). Once we deleted the descriptor from the * sparse tree we can free it. Access in proc will fail to * lookup the descriptor. * * The sysfs entry must be serialized against a concurrent * irq_sysfs_init() as well. */ irq_sysfs_del(desc); delete_irq_desc(irq); /* * We free the descriptor, masks and stat fields via RCU. That * allows demultiplex interrupts to do rcu based management of * the child interrupts. * This also allows us to use rcu in kstat_irqs_usr(). 
*/ call_rcu(&desc->rcu, delayed_free_desc); } static int alloc_descs(unsigned int start, unsigned int cnt, int node, const struct irq_affinity_desc *affinity, struct module *owner) { struct irq_desc *desc; int i; /* Validate affinity mask(s) */ if (affinity) { for (i = 0; i < cnt; i++) { if (cpumask_empty(&affinity[i].mask)) return -EINVAL; } } for (i = 0; i < cnt; i++) { const struct cpumask *mask = NULL; unsigned int flags = 0; if (affinity) { if (affinity->is_managed) { flags = IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN; } flags |= IRQD_AFFINITY_SET; mask = &affinity->mask; node = cpu_to_node(cpumask_first(mask)); affinity++; } desc = alloc_desc(start + i, node, flags, mask, owner); if (!desc) goto err; irq_insert_desc(start + i, desc); irq_sysfs_add(start + i, desc); irq_add_debugfs_entry(start + i, desc); } return start; err: for (i--; i >= 0; i--) free_desc(start + i); return -ENOMEM; } static int irq_expand_nr_irqs(unsigned int nr) { if (nr > MAX_SPARSE_IRQS) return -ENOMEM; nr_irqs = nr; return 0; } int __init early_irq_init(void) { int i, initcnt, node = first_online_node; struct irq_desc *desc; init_irq_default_affinity(); /* Let arch update nr_irqs and return the nr of preallocated irqs */ initcnt = arch_probe_nr_irqs(); printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n", NR_IRQS, nr_irqs, initcnt); if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS)) nr_irqs = MAX_SPARSE_IRQS; if (WARN_ON(initcnt > MAX_SPARSE_IRQS)) initcnt = MAX_SPARSE_IRQS; if (initcnt > nr_irqs) nr_irqs = initcnt; for (i = 0; i < initcnt; i++) { desc = alloc_desc(i, node, 0, NULL, NULL); irq_insert_desc(i, desc); } return arch_early_irq_init(); } #else /* !CONFIG_SPARSE_IRQ */ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { [0 ... NR_IRQS-1] = { .handle_irq = handle_bad_irq, .depth = 1, .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), } }; int __init early_irq_init(void) { int count, i, node = first_online_node; int ret; init_irq_default_affinity(); printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS); count = ARRAY_SIZE(irq_desc); for (i = 0; i < count; i++) { ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL); if (unlikely(ret)) goto __free_desc_res; } return arch_early_irq_init(); __free_desc_res: while (--i >= 0) { free_masks(irq_desc + i); free_percpu(irq_desc[i].kstat_irqs); } return ret; } struct irq_desc *irq_to_desc(unsigned int irq) { return (irq < NR_IRQS) ? 
irq_desc + irq : NULL; } EXPORT_SYMBOL(irq_to_desc); static void free_desc(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; raw_spin_lock_irqsave(&desc->lock, flags); desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); raw_spin_unlock_irqrestore(&desc->lock, flags); delete_irq_desc(irq); } static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, const struct irq_affinity_desc *affinity, struct module *owner) { u32 i; for (i = 0; i < cnt; i++) { struct irq_desc *desc = irq_to_desc(start + i); desc->owner = owner; irq_insert_desc(start + i, desc); } return start; } static int irq_expand_nr_irqs(unsigned int nr) { return -ENOMEM; } void irq_mark_irq(unsigned int irq) { mutex_lock(&sparse_irq_lock); irq_insert_desc(irq, irq_desc + irq); mutex_unlock(&sparse_irq_lock); } #ifdef CONFIG_GENERIC_IRQ_LEGACY void irq_init_desc(unsigned int irq) { free_desc(irq); } #endif #endif /* !CONFIG_SPARSE_IRQ */ int handle_irq_desc(struct irq_desc *desc) { struct irq_data *data; if (!desc) return -EINVAL; data = irq_desc_get_irq_data(desc); if (WARN_ON_ONCE(!in_hardirq() && irqd_is_handle_enforce_irqctx(data))) return -EPERM; generic_handle_irq_desc(desc); return 0; } /** * generic_handle_irq - Invoke the handler for a particular irq * @irq: The irq number to handle * * Returns: 0 on success, or -EINVAL if conversion has failed * * This function must be called from an IRQ context with irq regs * initialized. */ int generic_handle_irq(unsigned int irq) { return handle_irq_desc(irq_to_desc(irq)); } EXPORT_SYMBOL_GPL(generic_handle_irq); /** * generic_handle_irq_safe - Invoke the handler for a particular irq from any * context. * @irq: The irq number to handle * * Returns: 0 on success, a negative value on error. * * This function can be called from any context (IRQ or process context). It * will report an error if not invoked from IRQ context and the irq has been * marked to enforce IRQ-context only. */ int generic_handle_irq_safe(unsigned int irq) { unsigned long flags; int ret; local_irq_save(flags); ret = handle_irq_desc(irq_to_desc(irq)); local_irq_restore(flags); return ret; } EXPORT_SYMBOL_GPL(generic_handle_irq_safe); #ifdef CONFIG_IRQ_DOMAIN /** * generic_handle_domain_irq - Invoke the handler for a HW irq belonging * to a domain. * @domain: The domain where to perform the lookup * @hwirq: The HW irq number to convert to a logical one * * Returns: 0 on success, or -EINVAL if conversion has failed * * This function must be called from an IRQ context with irq regs * initialized. */ int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq) { return handle_irq_desc(irq_resolve_mapping(domain, hwirq)); } EXPORT_SYMBOL_GPL(generic_handle_domain_irq); /** * generic_handle_irq_safe - Invoke the handler for a HW irq belonging * to a domain from any context. * @domain: The domain where to perform the lookup * @hwirq: The HW irq number to convert to a logical one * * Returns: 0 on success, a negative value on error. * * This function can be called from any context (IRQ or process * context). If the interrupt is marked as 'enforce IRQ-context only' then * the function must be invoked from hard interrupt context. 
*/ int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq) { unsigned long flags; int ret; local_irq_save(flags); ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq)); local_irq_restore(flags); return ret; } EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe); /** * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging * to a domain. * @domain: The domain where to perform the lookup * @hwirq: The HW irq number to convert to a logical one * * Returns: 0 on success, or -EINVAL if conversion has failed * * This function must be called from an NMI context with irq regs * initialized. **/ int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq) { WARN_ON_ONCE(!in_nmi()); return handle_irq_desc(irq_resolve_mapping(domain, hwirq)); } #endif /* Dynamic interrupt handling */ /** * irq_free_descs - free irq descriptors * @from: Start of descriptor range * @cnt: Number of consecutive irqs to free */ void irq_free_descs(unsigned int from, unsigned int cnt) { int i; if (from >= nr_irqs || (from + cnt) > nr_irqs) return; mutex_lock(&sparse_irq_lock); for (i = 0; i < cnt; i++) free_desc(from + i); mutex_unlock(&sparse_irq_lock); } EXPORT_SYMBOL_GPL(irq_free_descs); /** * __irq_alloc_descs - allocate and initialize a range of irq descriptors * @irq: Allocate for specific irq number if irq >= 0 * @from: Start the search from this irq number * @cnt: Number of consecutive irqs to allocate. * @node: Preferred node on which the irq descriptor should be allocated * @owner: Owning module (can be NULL) * @affinity: Optional pointer to an affinity mask array of size @cnt which * hints where the irq descriptors should be allocated and which * default affinities to use * * Returns the first irq number or error code */ int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, struct module *owner, const struct irq_affinity_desc *affinity) { int start, ret; if (!cnt) return -EINVAL; if (irq >= 0) { if (from > irq) return -EINVAL; from = irq; } else { /* * For interrupts which are freely allocated the * architecture can force a lower bound to the @from * argument. x86 uses this to exclude the GSI space. */ from = arch_dynirq_lower_bound(from); } mutex_lock(&sparse_irq_lock); start = irq_find_free_area(from, cnt); ret = -EEXIST; if (irq >=0 && start != irq) goto unlock; if (start + cnt > nr_irqs) { ret = irq_expand_nr_irqs(start + cnt); if (ret) goto unlock; } ret = alloc_descs(start, cnt, node, affinity, owner); unlock: mutex_unlock(&sparse_irq_lock); return ret; } EXPORT_SYMBOL_GPL(__irq_alloc_descs); /** * irq_get_next_irq - get next allocated irq number * @offset: where to start the search * * Returns next irq number after offset or nr_irqs if none is found. 
*/ unsigned int irq_get_next_irq(unsigned int offset) { return irq_find_at_or_after(offset); } struct irq_desc * __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, unsigned int check) { struct irq_desc *desc = irq_to_desc(irq); if (desc) { if (check & _IRQ_DESC_CHECK) { if ((check & _IRQ_DESC_PERCPU) && !irq_settings_is_per_cpu_devid(desc)) return NULL; if (!(check & _IRQ_DESC_PERCPU) && irq_settings_is_per_cpu_devid(desc)) return NULL; } if (bus) chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, *flags); } return desc; } void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) __releases(&desc->lock) { raw_spin_unlock_irqrestore(&desc->lock, flags); if (bus) chip_bus_sync_unlock(desc); } int irq_set_percpu_devid_partition(unsigned int irq, const struct cpumask *affinity) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || desc->percpu_enabled) return -EINVAL; desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); if (!desc->percpu_enabled) return -ENOMEM; desc->percpu_affinity = affinity ? : cpu_possible_mask; irq_set_percpu_devid_flags(irq); return 0; } int irq_set_percpu_devid(unsigned int irq) { return irq_set_percpu_devid_partition(irq, NULL); } int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !desc->percpu_enabled) return -EINVAL; if (affinity) cpumask_copy(affinity, desc->percpu_affinity); return 0; } EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition); void kstat_incr_irq_this_cpu(unsigned int irq) { kstat_incr_irqs_this_cpu(irq_to_desc(irq)); } /** * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu * @irq: The interrupt number * @cpu: The cpu number * * Returns the sum of interrupt counts on @cpu since boot for * @irq. The caller must ensure that the interrupt is not removed * concurrently. */ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { struct irq_desc *desc = irq_to_desc(irq); return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0; } static unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask) { unsigned int sum = 0; int cpu; if (!irq_settings_is_per_cpu_devid(desc) && !irq_settings_is_per_cpu(desc) && !irq_is_nmi(desc)) return data_race(desc->tot_count); for_each_cpu(cpu, cpumask) sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu)); return sum; } static unsigned int kstat_irqs(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !desc->kstat_irqs) return 0; return kstat_irqs_desc(desc, cpu_possible_mask); } #ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT void kstat_snapshot_irqs(void) { struct irq_desc *desc; unsigned int irq; for_each_irq_desc(irq, desc) { if (!desc->kstat_irqs) continue; this_cpu_write(desc->kstat_irqs->ref, this_cpu_read(desc->kstat_irqs->cnt)); } } unsigned int kstat_get_irq_since_snapshot(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !desc->kstat_irqs) return 0; return this_cpu_read(desc->kstat_irqs->cnt) - this_cpu_read(desc->kstat_irqs->ref); } #endif /** * kstat_irqs_usr - Get the statistics for an interrupt from thread context * @irq: The interrupt number * * Returns the sum of interrupt counts on all cpus since boot for @irq. * * It uses rcu to protect the access since a concurrent removal of an * interrupt descriptor is observing an rcu grace period before * delayed_free_desc()/irq_kobj_release(). 
*/ unsigned int kstat_irqs_usr(unsigned int irq) { unsigned int sum; rcu_read_lock(); sum = kstat_irqs(irq); rcu_read_unlock(); return sum; } #ifdef CONFIG_LOCKDEP void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, struct lock_class_key *request_class) { struct irq_desc *desc = irq_to_desc(irq); if (desc) { lockdep_set_class(&desc->lock, lock_class); lockdep_set_class(&desc->request_mutex, request_class); } } EXPORT_SYMBOL_GPL(__irq_set_lockdep_class); #endif |
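/*
 * Illustrative sketch (not part of irqdesc.c): a demultiplexing handler
 * written against the generic_handle_domain_irq() contract documented
 * above; it runs in hard interrupt context and resolves each pending
 * hardware irq through the domain. struct example_mux and its status
 * register layout are hypothetical.
 */
struct example_mux {
	struct irq_domain *domain;	/* child irq domain */
	void __iomem *status;		/* one pending bit per child irq */
};

static irqreturn_t example_mux_handler(int irq, void *dev_id)
{
	struct example_mux *mux = dev_id;
	unsigned long pending = readl(mux->status);
	unsigned int hwirq;

	/* Resolve each pending HW irq to its descriptor and handle it. */
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_domain_irq(mux->domain, hwirq);

	return pending ? IRQ_HANDLED : IRQ_NONE;
}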
// SPDX-License-Identifier: GPL-2.0+ /* * Support for dynamic clock devices * * Copyright (C) 2010 OMICRON electronics GmbH */ #include <linux/device.h> #include <linux/export.h> #include <linux/file.h> #include <linux/posix-clock.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include "posix-timers.h" /* * Returns NULL if the posix_clock instance attached to 'fp' is old and stale. */ static struct posix_clock *get_posix_clock(struct file *fp) { struct posix_clock_context *pccontext = fp->private_data; struct posix_clock *clk = pccontext->clk; down_read(&clk->rwsem); if (!clk->zombie) return clk; up_read(&clk->rwsem); return NULL; } static void put_posix_clock(struct posix_clock *clk) { up_read(&clk->rwsem); } static ssize_t posix_clock_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos) { struct posix_clock_context *pccontext = fp->private_data; struct posix_clock *clk = get_posix_clock(fp); int err = -EINVAL; if (!clk) return -ENODEV; if (clk->ops.read) err = clk->ops.read(pccontext, fp->f_flags, buf, count); put_posix_clock(clk); return err; } static __poll_t posix_clock_poll(struct file *fp, poll_table *wait) { struct posix_clock_context *pccontext = fp->private_data; struct posix_clock *clk = get_posix_clock(fp); __poll_t result = 0; if (!clk) return EPOLLERR; if (clk->ops.poll) result = clk->ops.poll(pccontext, fp, wait); put_posix_clock(clk); return result; } static long posix_clock_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { struct posix_clock_context *pccontext = fp->private_data; struct posix_clock *clk = get_posix_clock(fp); int err = -ENOTTY; if (!clk) return -ENODEV; if (clk->ops.ioctl) err = clk->ops.ioctl(pccontext, cmd, arg); put_posix_clock(clk); return err; } static int posix_clock_open(struct inode *inode, struct file *fp) { int err; struct posix_clock *clk = container_of(inode->i_cdev, struct posix_clock, cdev); struct posix_clock_context *pccontext; down_read(&clk->rwsem); if (clk->zombie) { err = -ENODEV; goto out; } pccontext = kzalloc(sizeof(*pccontext), GFP_KERNEL); if (!pccontext) { err = -ENOMEM; goto out; } pccontext->clk = clk; pccontext->fp = fp; if (clk->ops.open) { err = clk->ops.open(pccontext, fp->f_mode); if (err) { kfree(pccontext); goto out; } } fp->private_data = pccontext;
get_device(clk->dev); err = 0; out: up_read(&clk->rwsem); return err; } static int posix_clock_release(struct inode *inode, struct file *fp) { struct posix_clock_context *pccontext = fp->private_data; struct posix_clock *clk; int err = 0; if (!pccontext) return -ENODEV; clk = pccontext->clk; if (clk->ops.release) err = clk->ops.release(pccontext); put_device(clk->dev); kfree(pccontext); fp->private_data = NULL; return err; } static const struct file_operations posix_clock_file_operations = { .owner = THIS_MODULE, .read = posix_clock_read, .poll = posix_clock_poll, .unlocked_ioctl = posix_clock_ioctl, .compat_ioctl = posix_clock_ioctl, .open = posix_clock_open, .release = posix_clock_release, }; int posix_clock_register(struct posix_clock *clk, struct device *dev) { int err; init_rwsem(&clk->rwsem); cdev_init(&clk->cdev, &posix_clock_file_operations); err = cdev_device_add(&clk->cdev, dev); if (err) { pr_err("%s unable to add device %d:%d\n", dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt)); return err; } clk->cdev.owner = clk->ops.owner; clk->dev = dev; return 0; } EXPORT_SYMBOL_GPL(posix_clock_register); void posix_clock_unregister(struct posix_clock *clk) { cdev_device_del(&clk->cdev, clk->dev); down_write(&clk->rwsem); clk->zombie = true; up_write(&clk->rwsem); put_device(clk->dev); } EXPORT_SYMBOL_GPL(posix_clock_unregister); struct posix_clock_desc { struct file *fp; struct posix_clock *clk; }; static int get_clock_desc(const clockid_t id, struct posix_clock_desc *cd) { struct file *fp = fget(clockid_to_fd(id)); int err = -EINVAL; if (!fp) return err; if (fp->f_op->open != posix_clock_open || !fp->private_data) goto out; cd->fp = fp; cd->clk = get_posix_clock(fp); err = cd->clk ? 0 : -ENODEV; out: if (err) fput(fp); return err; } static void put_clock_desc(struct posix_clock_desc *cd) { put_posix_clock(cd->clk); fput(cd->fp); } static int pc_clock_adjtime(clockid_t id, struct __kernel_timex *tx) { struct posix_clock_desc cd; int err; err = get_clock_desc(id, &cd); if (err) return err; if (tx->modes && (cd.fp->f_mode & FMODE_WRITE) == 0) { err = -EACCES; goto out; } if (cd.clk->ops.clock_adjtime) err = cd.clk->ops.clock_adjtime(cd.clk, tx); else err = -EOPNOTSUPP; out: put_clock_desc(&cd); return err; } static int pc_clock_gettime(clockid_t id, struct timespec64 *ts) { struct posix_clock_desc cd; int err; err = get_clock_desc(id, &cd); if (err) return err; if (cd.clk->ops.clock_gettime) err = cd.clk->ops.clock_gettime(cd.clk, ts); else err = -EOPNOTSUPP; put_clock_desc(&cd); return err; } static int pc_clock_getres(clockid_t id, struct timespec64 *ts) { struct posix_clock_desc cd; int err; err = get_clock_desc(id, &cd); if (err) return err; if (cd.clk->ops.clock_getres) err = cd.clk->ops.clock_getres(cd.clk, ts); else err = -EOPNOTSUPP; put_clock_desc(&cd); return err; } static int pc_clock_settime(clockid_t id, const struct timespec64 *ts) { struct posix_clock_desc cd; int err; if (!timespec64_valid_strict(ts)) return -EINVAL; err = get_clock_desc(id, &cd); if (err) return err; if ((cd.fp->f_mode & FMODE_WRITE) == 0) { err = -EACCES; goto out; } if (cd.clk->ops.clock_settime) err = cd.clk->ops.clock_settime(cd.clk, ts); else err = -EOPNOTSUPP; out: put_clock_desc(&cd); return err; } const struct k_clock clock_posix_dynamic = { .clock_getres = pc_clock_getres, .clock_set = pc_clock_settime, .clock_get_timespec = pc_clock_gettime, .clock_adj = pc_clock_adjtime, }; |
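/*
 * Illustrative sketch (not part of posix-clock.c): the registration side
 * that the file above serves. A driver embeds a struct posix_clock, fills
 * in posix_clock_operations and calls posix_clock_register() with the
 * device that carries the character device numbers. The example_* names
 * are hypothetical and the gettime callback just reads system time as a
 * stand-in for real hardware.
 */
static int example_clock_gettime(struct posix_clock *pc, struct timespec64 *ts)
{
	ktime_get_real_ts64(ts);	/* stand-in for a hardware read */
	return 0;
}

static const struct posix_clock_operations example_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_gettime	= example_clock_gettime,
};

static struct posix_clock example_clock;

static int example_clock_register(struct device *dev)
{
	example_clock.ops = example_clock_ops;
	/* dev->devt must already hold the chardev major:minor */
	return posix_clock_register(&example_clock, dev);
}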
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NET_UDP_TUNNEL_H #define __NET_UDP_TUNNEL_H #include <net/ip_tunnels.h> #include <net/udp.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/ipv6_stubs.h> #endif struct udp_port_cfg { u8 family; /* Used only for kernel-created sockets */ union { struct in_addr local_ip; #if IS_ENABLED(CONFIG_IPV6) struct in6_addr local_ip6; #endif }; union { struct in_addr peer_ip; #if IS_ENABLED(CONFIG_IPV6) struct in6_addr peer_ip6; #endif }; __be16 local_udp_port; __be16 peer_udp_port; int bind_ifindex; unsigned int use_udp_checksums:1, use_udp6_tx_checksums:1, use_udp6_rx_checksums:1, ipv6_v6only:1; }; int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp); #if IS_ENABLED(CONFIG_IPV6) int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp); #else static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp) { return 0; } #endif static inline int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp) { if (cfg->family == AF_INET) return udp_sock_create4(net, cfg, sockp); if (cfg->family == AF_INET6) return udp_sock_create6(net, cfg, sockp); return -EPFNOSUPPORT; } typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk, struct sk_buff *skb); typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload); typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk, struct list_head *head, struct sk_buff *skb); typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb, int nhoff); struct udp_tunnel_sock_cfg { void *sk_user_data; /* user data used by encap_rcv call back */ /* Used for setting up udp_sock fields, see udp.h for details */ __u8 encap_type; udp_tunnel_encap_rcv_t encap_rcv;
udp_tunnel_encap_err_lookup_t encap_err_lookup; udp_tunnel_encap_err_rcv_t encap_err_rcv; udp_tunnel_encap_destroy_t encap_destroy; udp_tunnel_gro_receive_t gro_receive; udp_tunnel_gro_complete_t gro_complete; }; /* Setup the given (UDP) sock to receive UDP encapsulated packets */ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, struct udp_tunnel_sock_cfg *sock_cfg); /* -- List of parsable UDP tunnel types -- * * Adding to this list will result in serious debate. The main issue is * that this list is essentially a list of workarounds for either poorly * designed tunnels, or poorly designed device offloads. * * The parsing supported via these types should really be used for Rx * traffic only as the network stack will have already inserted offsets for * the location of the headers in the skb. In addition any ports that are * pushed should be kept within the namespace without leaking to other * devices such as VFs or other ports on the same device. * * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the * need to use this for Rx checksum offload. It should not be necessary to * call this function to perform Tx offloads on outgoing traffic. */ enum udp_parsable_tunnel_type { UDP_TUNNEL_TYPE_VXLAN = BIT(0), /* RFC 7348 */ UDP_TUNNEL_TYPE_GENEVE = BIT(1), /* draft-ietf-nvo3-geneve */ UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */ }; struct udp_tunnel_info { unsigned short type; sa_family_t sa_family; __be16 port; u8 hw_priv; }; /* Notify network devices of offloadable types */ void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock, unsigned short type); void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock, unsigned short type); void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type); void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type); static inline void udp_tunnel_get_rx_info(struct net_device *dev) { ASSERT_RTNL(); if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev); } static inline void udp_tunnel_drop_rx_info(struct net_device *dev) { ASSERT_RTNL(); if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev); } /* Transmit the skb using UDP encapsulation. 
*/ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, bool xnet, bool nocheck); int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr, __u8 prio, __u8 ttl, __be32 label, __be16 src_port, __be16 dst_port, bool nocheck); void udp_tunnel_sock_release(struct socket *sock); struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb, struct net_device *dev, struct net *net, int oif, __be32 *saddr, const struct ip_tunnel_key *key, __be16 sport, __be16 dport, u8 tos, struct dst_cache *dst_cache); struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb, struct net_device *dev, struct net *net, struct socket *sock, int oif, struct in6_addr *saddr, const struct ip_tunnel_key *key, __be16 sport, __be16 dport, u8 dsfield, struct dst_cache *dst_cache); struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, const unsigned long *flags, __be64 tunnel_id, int md_size); #ifdef CONFIG_INET static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) { int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; return iptunnel_handle_offloads(skb, type); } #endif static inline void udp_tunnel_encap_enable(struct sock *sk) { if (udp_test_and_set_bit(ENCAP_ENABLED, sk)) return; #if IS_ENABLED(CONFIG_IPV6) if (READ_ONCE(sk->sk_family) == PF_INET6) ipv6_stub->udpv6_encap_enable(); #endif udp_encap_enable(); } #define UDP_TUNNEL_NIC_MAX_TABLES 4 enum udp_tunnel_nic_info_flags { /* Device callbacks may sleep */ UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0), /* Device only supports offloads when it's open, all ports * will be removed before close and re-added after open. */ UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1), /* Device supports only IPv4 tunnels */ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2), /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN. * This port must not be counted towards n_entries of any table. * Driver will not receive any callback associated with port 4789. */ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3), }; struct udp_tunnel_nic; #define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES (U16_MAX / 2) struct udp_tunnel_nic_shared { struct udp_tunnel_nic *udp_tunnel_nic_info; struct list_head devices; }; struct udp_tunnel_nic_shared_node { struct net_device *dev; struct list_head list; }; /** * struct udp_tunnel_nic_info - driver UDP tunnel offload information * @set_port: callback for adding a new port * @unset_port: callback for removing a port * @sync_table: callback for syncing the entire port table at once * @shared: reference to device global state (optional) * @flags: device flags from enum udp_tunnel_nic_info_flags * @tables: UDP port tables this device has * @tables.n_entries: number of entries in this table * @tables.tunnel_types: types of tunnels this table accepts * * Drivers are expected to provide either @set_port and @unset_port callbacks * or the @sync_table callback. Callbacks are invoked with rtnl lock held. * * Devices which (misguidedly) share the UDP tunnel port table across multiple * netdevs should allocate an instance of struct udp_tunnel_nic_shared and * point @shared at it. * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices * sharing a table. 
* * Known limitations: * - UDP tunnel port notifications are fundamentally best-effort - * it is likely the driver will both see skbs which use a UDP tunnel port, * while not being a tunneled skb, and tunnel skbs from other ports - * drivers should only use these ports for non-critical RX-side offloads, * e.g. the checksum offload; * - none of the devices care about the socket family at present, so we don't * track it. Please extend this code if you care. */ struct udp_tunnel_nic_info { /* one-by-one */ int (*set_port)(struct net_device *dev, unsigned int table, unsigned int entry, struct udp_tunnel_info *ti); int (*unset_port)(struct net_device *dev, unsigned int table, unsigned int entry, struct udp_tunnel_info *ti); /* all at once */ int (*sync_table)(struct net_device *dev, unsigned int table); struct udp_tunnel_nic_shared *shared; unsigned int flags; struct udp_tunnel_nic_table_info { unsigned int n_entries; unsigned int tunnel_types; } tables[UDP_TUNNEL_NIC_MAX_TABLES]; }; /* UDP tunnel module dependencies * * Tunnel drivers are expected to have a hard dependency on the udp_tunnel * module. NIC drivers are not, they just attach their * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come. * Loading a tunnel driver will cause the udp_tunnel module to be loaded * and only then will all the required state structures be allocated. * Since we want a weak dependency from the drivers and the core to udp_tunnel * we call things through the following stubs. */ struct udp_tunnel_nic_ops { void (*get_port)(struct net_device *dev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); void (*set_port_priv)(struct net_device *dev, unsigned int table, unsigned int idx, u8 priv); void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti); void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti); void (*reset_ntf)(struct net_device *dev); size_t (*dump_size)(struct net_device *dev, unsigned int table); int (*dump_write)(struct net_device *dev, unsigned int table, struct sk_buff *skb); }; #ifdef CONFIG_INET extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops; #else #define udp_tunnel_nic_ops ((struct udp_tunnel_nic_ops *)NULL) #endif static inline void udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti) { /* This helper is used from .sync_table, we indicate empty entries * by zero'ed @ti. Drivers which need to know the details of a port * when it gets deleted should use the .set_port / .unset_port * callbacks. * Zero out here, otherwise !CONFIG_INET causes uninitilized warnings. 
*/ memset(ti, 0, sizeof(*ti)); if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->get_port(dev, table, idx, ti); } static inline void udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table, unsigned int idx, u8 priv) { if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv); } static inline void udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) { if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->add_port(dev, ti); } static inline void udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti) { if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->del_port(dev, ti); } /** * udp_tunnel_nic_reset_ntf() - device-originating reset notification * @dev: network interface device structure * * Called by the driver to inform the core that the entire UDP tunnel port * state has been lost, usually due to device reset. Core will assume device * forgot all the ports and issue .set_port and .sync_table callbacks as * necessary. * * This function must be called with rtnl lock held, and will issue all * the callbacks before returning. */ static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev) { if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->reset_ntf(dev); } static inline size_t udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table) { if (!udp_tunnel_nic_ops) return 0; return udp_tunnel_nic_ops->dump_size(dev, table); } static inline int udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table, struct sk_buff *skb) { if (!udp_tunnel_nic_ops) return 0; return udp_tunnel_nic_ops->dump_write(dev, table, skb); } #endif |
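/*
 * Illustrative sketch (not part of udp_tunnel.h): how a tunnel driver might
 * open its kernel UDP socket and attach encapsulation callbacks with the
 * helpers declared above. example_encap_recv(), the port number and the
 * encap_type value of 1 (the convention used by existing tunnel drivers
 * such as vxlan) are assumptions, not requirements of this header.
 */
static int example_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);		/* consume; returning 0 tells UDP it was handled */
	return 0;
}

static int example_open_tunnel(struct net *net, struct socket **sockp)
{
	struct udp_port_cfg port_cfg = {
		.family		= AF_INET,
		.local_udp_port	= htons(4790),	/* hypothetical port */
	};
	struct udp_tunnel_sock_cfg tnl_cfg = {
		.encap_type	= 1,
		.encap_rcv	= example_encap_recv,
	};
	int err;

	err = udp_sock_create(net, &port_cfg, sockp);
	if (err)
		return err;

	setup_udp_tunnel_sock(net, *sockp, &tnl_cfg);
	return 0;
}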
257 3 247 8 256 255 256 255 100 88 26 5 5 5 5 98 2 35 63 21 28 58 86 98 33 64 1 98 299 35 143 36 223 81 80 83 6 22 23 60 80 3 25 14 3 58 1 10 1 56 41 25 2 14 52 53 21 14 9 62 4 12 16 4 32 16 4 23 26 26 9 15 1 1 25 9 12 6 13 9 5 5 5 94 93 93 4 45 1 44 16 28 13 12 32 61 83 21 62 60 62 4 11 62 61 62 15 22 18 2 3 1 1 34 81 23 8 41 2 8 7 6 67 22 2 71 82 6 18 62 61 62 62 61 6 11 45 3 17 62 62 62 62 62 2 62 16 1 13 13 61 13 13 9 62 7 6 70 69 68 40 33 2 1 3 28 28 69 28 2 27 69 2 166 7 159 166 104 104 207 208 2 101 106 150 13 9 129 80 97 61 62 14 95 1 79 14 209 1 79 129 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 
774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/bpf_sk_storage.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct inet_diag_handler __rcu **inet_diag_table;

struct inet_diag_entry {
	const __be32 *saddr;
	const __be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
	u32 ifindex;
	u32 mark;
#ifdef CONFIG_SOCK_CGROUP_DATA
	u64 cgroup_id;
#endif
};

static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	const struct inet_diag_handler *handler;

	if (proto < 0 || proto >= IPPROTO_MAX)
		return NULL;

	if (!READ_ONCE(inet_diag_table[proto]))
		sock_load_diag_module(AF_INET, proto);

	rcu_read_lock();
	handler = rcu_dereference(inet_diag_table[proto]);
	if (handler && !try_module_get(handler->owner))
		handler = NULL;
	rcu_read_unlock();

	return handler;
}

static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
	module_put(handler->owner);
}

void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
	r->idiag_family = sk->sk_family;

	r->id.idiag_sport = htons(sk->sk_num);
	r->id.idiag_dport = sk->sk_dport;
	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
	} else
#endif
	{
	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = sk->sk_rcv_saddr;
	r->id.idiag_dst[0] = sk->sk_daddr;
	}
}
EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill);

static size_t inet_sk_attr_size(struct sock *sk,
				const struct inet_diag_req_v2 *req,
				bool net_admin)
{
	const struct inet_diag_handler *handler;
	size_t aux = 0;

	rcu_read_lock();
	handler = rcu_dereference(inet_diag_table[req->sdiag_protocol]);
	DEBUG_NET_WARN_ON_ONCE(!handler);
	if (handler && handler->idiag_get_aux_size)
		aux = handler->idiag_get_aux_size(sk, net_admin);
	rcu_read_unlock();

	return	  nla_total_size(sizeof(struct tcp_info))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ inet_diag_msg_attrs_size()
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
		+ nla_total_size(TCP_CA_NAME_MAX)
		+ nla_total_size(sizeof(struct tcpvegas_info))
		+ aux
		+ 64;
}

int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
			     struct inet_diag_msg *r, int ext,
			     struct user_namespace *user_ns,
			     bool net_admin)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_sockopt inet_sockopt;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN,
sk->sk_shutdown)) goto errout; /* IPv6 dual-stack sockets use inet->tos for IPv4 connections, * hence this needs to be included regardless of socket family. */ if (ext & (1 << (INET_DIAG_TOS - 1))) if (nla_put_u8(skb, INET_DIAG_TOS, READ_ONCE(inet->tos)) < 0) goto errout; #if IS_ENABLED(CONFIG_IPV6) if (r->idiag_family == AF_INET6) { if (ext & (1 << (INET_DIAG_TCLASS - 1))) if (nla_put_u8(skb, INET_DIAG_TCLASS, inet6_sk(sk)->tclass) < 0) goto errout; if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk))) goto errout; } #endif if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, READ_ONCE(sk->sk_mark))) goto errout; if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || ext & (1 << (INET_DIAG_TCLASS - 1))) { u32 classid = 0; #ifdef CONFIG_SOCK_CGROUP_DATA classid = sock_cgroup_classid(&sk->sk_cgrp_data); #endif /* Fallback to socket priority if class id isn't set. * Classful qdiscs use it as direct reference to class. * For cgroup2 classid is always zero. */ if (!classid) classid = READ_ONCE(sk->sk_priority); if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) goto errout; } #ifdef CONFIG_SOCK_CGROUP_DATA if (nla_put_u64_64bit(skb, INET_DIAG_CGROUP_ID, cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)), INET_DIAG_PAD)) goto errout; #endif r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); r->idiag_inode = sock_i_ino(sk); memset(&inet_sockopt, 0, sizeof(inet_sockopt)); inet_sockopt.recverr = inet_test_bit(RECVERR, sk); inet_sockopt.is_icsk = inet_test_bit(IS_ICSK, sk); inet_sockopt.freebind = inet_test_bit(FREEBIND, sk); inet_sockopt.hdrincl = inet_test_bit(HDRINCL, sk); inet_sockopt.mc_loop = inet_test_bit(MC_LOOP, sk); inet_sockopt.transparent = inet_test_bit(TRANSPARENT, sk); inet_sockopt.mc_all = inet_test_bit(MC_ALL, sk); inet_sockopt.nodefrag = inet_test_bit(NODEFRAG, sk); inet_sockopt.bind_address_no_port = inet_test_bit(BIND_ADDRESS_NO_PORT, sk); inet_sockopt.recverr_rfc4884 = inet_test_bit(RECVERR_RFC4884, sk); inet_sockopt.defer_connect = inet_test_bit(DEFER_CONNECT, sk); if (nla_put(skb, INET_DIAG_SOCKOPT, sizeof(inet_sockopt), &inet_sockopt)) goto errout; return 0; errout: return 1; } EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill); static int inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen, struct nlattr **req_nlas) { struct nlattr *nla; int remaining; nlmsg_for_each_attr(nla, nlh, hdrlen, remaining) { int type = nla_type(nla); if (type == INET_DIAG_REQ_PROTOCOL && nla_len(nla) != sizeof(u32)) return -EINVAL; if (type < __INET_DIAG_REQ_MAX) req_nlas[type] = nla; } return 0; } static int inet_diag_get_protocol(const struct inet_diag_req_v2 *req, const struct inet_diag_dump_data *data) { if (data->req_nlas[INET_DIAG_REQ_PROTOCOL]) return nla_get_u32(data->req_nlas[INET_DIAG_REQ_PROTOCOL]); return req->sdiag_protocol; } #define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *req, u16 nlmsg_flags, bool net_admin) { const struct tcp_congestion_ops *ca_ops; const struct inet_diag_handler *handler; struct inet_diag_dump_data *cb_data; int ext = req->idiag_ext; struct inet_diag_msg *r; struct nlmsghdr *nlh; struct nlattr *attr; void *info = NULL; u8 icsk_pending; int protocol; cb_data = cb->data; protocol = inet_diag_get_protocol(req, cb_data); /* inet_diag_lock_handler() made sure inet_diag_table[] is stable. 
*/ handler = rcu_dereference_protected(inet_diag_table[protocol], 1); DEBUG_NET_WARN_ON_ONCE(!handler); if (!handler) return -ENXIO; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE; r = nlmsg_data(nlh); BUG_ON(!sk_fullsock(sk)); inet_diag_msg_common_fill(r, sk); r->idiag_state = sk->sk_state; r->idiag_timer = 0; r->idiag_retrans = 0; r->idiag_expires = 0; if (inet_diag_msg_attrs_fill(sk, skb, r, ext, sk_user_ns(NETLINK_CB(cb->skb).sk), net_admin)) goto errout; if (ext & (1 << (INET_DIAG_MEMINFO - 1))) { struct inet_diag_meminfo minfo = { .idiag_rmem = sk_rmem_alloc_get(sk), .idiag_wmem = READ_ONCE(sk->sk_wmem_queued), .idiag_fmem = READ_ONCE(sk->sk_forward_alloc), .idiag_tmem = sk_wmem_alloc_get(sk), }; if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0) goto errout; } if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO)) goto errout; /* * RAW sockets might have user-defined protocols assigned, * so report the one supplied on socket creation. */ if (sk->sk_type == SOCK_RAW) { if (nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol)) goto errout; } if (!icsk) { handler->idiag_get_info(sk, r, NULL); goto out; } icsk_pending = smp_load_acquire(&icsk->icsk_pending); if (icsk_pending == ICSK_TIME_RETRANS || icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk_pending == ICSK_TIME_LOSS_PROBE) { r->idiag_timer = 1; r->idiag_retrans = icsk->icsk_retransmits; r->idiag_expires = jiffies_delta_to_msecs(icsk_timeout(icsk) - jiffies); } else if (icsk_pending == ICSK_TIME_PROBE0) { r->idiag_timer = 4; r->idiag_retrans = icsk->icsk_probes_out; r->idiag_expires = jiffies_delta_to_msecs(icsk_timeout(icsk) - jiffies); } else if (timer_pending(&sk->sk_timer)) { r->idiag_timer = 2; r->idiag_retrans = icsk->icsk_probes_out; r->idiag_expires = jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies); } if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) { attr = nla_reserve_64bit(skb, INET_DIAG_INFO, handler->idiag_info_size, INET_DIAG_PAD); if (!attr) goto errout; info = nla_data(attr); } if (ext & (1 << (INET_DIAG_CONG - 1))) { int err = 0; rcu_read_lock(); ca_ops = READ_ONCE(icsk->icsk_ca_ops); if (ca_ops) err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name); rcu_read_unlock(); if (err < 0) goto errout; } handler->idiag_get_info(sk, r, info); if (ext & (1 << (INET_DIAG_INFO - 1)) && handler->idiag_get_aux) if (handler->idiag_get_aux(sk, net_admin, skb) < 0) goto errout; if (sk->sk_state < TCP_TIME_WAIT) { union tcp_cc_info info; size_t sz = 0; int attr; rcu_read_lock(); ca_ops = READ_ONCE(icsk->icsk_ca_ops); if (ca_ops && ca_ops->get_info) sz = ca_ops->get_info(sk, ext, &attr, &info); rcu_read_unlock(); if (sz && nla_put(skb, attr, sz, &info) < 0) goto errout; } /* Keep it at the end for potential retry with a larger skb, * or else do best-effort fitting, which is only done for the * first_nlmsg. 
*/ if (cb_data->bpf_stg_diag) { bool first_nlmsg = ((unsigned char *)nlh == skb->data); unsigned int prev_min_dump_alloc; unsigned int total_nla_size = 0; unsigned int msg_len; int err; msg_len = skb_tail_pointer(skb) - (unsigned char *)nlh; err = bpf_sk_storage_diag_put(cb_data->bpf_stg_diag, sk, skb, INET_DIAG_SK_BPF_STORAGES, &total_nla_size); if (!err) goto out; total_nla_size += msg_len; prev_min_dump_alloc = cb->min_dump_alloc; if (total_nla_size > prev_min_dump_alloc) cb->min_dump_alloc = min_t(u32, total_nla_size, MAX_DUMP_ALLOC_SIZE); if (!first_nlmsg) goto errout; if (cb->min_dump_alloc > prev_min_dump_alloc) /* Retry with pskb_expand_head() with * __GFP_DIRECT_RECLAIM */ goto errout; WARN_ON_ONCE(total_nla_size <= prev_min_dump_alloc); /* Send what we have for this sk * and move on to the next sk in the following * dump() */ } out: nlmsg_end(skb, nlh); return 0; errout: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } EXPORT_SYMBOL_GPL(inet_sk_diag_fill); static int inet_twsk_diag_fill(struct sock *sk, struct sk_buff *skb, struct netlink_callback *cb, u16 nlmsg_flags, bool net_admin) { struct inet_timewait_sock *tw = inet_twsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE; r = nlmsg_data(nlh); BUG_ON(tw->tw_state != TCP_TIME_WAIT); inet_diag_msg_common_fill(r, sk); r->idiag_retrans = 0; r->idiag_state = READ_ONCE(tw->tw_substate); r->idiag_timer = 3; tmo = tw->tw_timer.expires - jiffies; r->idiag_expires = jiffies_delta_to_msecs(tmo); r->idiag_rqueue = 0; r->idiag_wqueue = 0; r->idiag_uid = 0; r->idiag_inode = 0; if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, tw->tw_mark)) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } nlmsg_end(skb, nlh); return 0; } static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, struct netlink_callback *cb, u16 nlmsg_flags, bool net_admin) { struct request_sock *reqsk = inet_reqsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE; r = nlmsg_data(nlh); inet_diag_msg_common_fill(r, sk); r->idiag_state = TCP_SYN_RECV; r->idiag_timer = 1; r->idiag_retrans = reqsk->num_retrans; BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) != offsetof(struct sock, sk_cookie)); tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies; r->idiag_expires = jiffies_delta_to_msecs(tmo); r->idiag_rqueue = 0; r->idiag_wqueue = 0; r->idiag_uid = 0; r->idiag_inode = 0; if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, inet_rsk(reqsk)->ir_mark)) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } nlmsg_end(skb, nlh); return 0; } static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r, u16 nlmsg_flags, bool net_admin) { if (sk->sk_state == TCP_TIME_WAIT) return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags, net_admin); if (sk->sk_state == TCP_NEW_SYN_RECV) return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin); return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags, net_admin); } struct sock *inet_diag_find_one_icsk(struct net *net, struct inet_hashinfo *hashinfo, const struct inet_diag_req_v2 *req) { struct sock *sk; rcu_read_lock(); if (req->sdiag_family == AF_INET) sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0], req->id.idiag_dport, req->id.idiag_src[0], 
req->id.idiag_sport, req->id.idiag_if); #if IS_ENABLED(CONFIG_IPV6) else if (req->sdiag_family == AF_INET6) { if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) && ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src)) sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[3], req->id.idiag_dport, req->id.idiag_src[3], req->id.idiag_sport, req->id.idiag_if); else sk = inet6_lookup(net, hashinfo, NULL, 0, (struct in6_addr *)req->id.idiag_dst, req->id.idiag_dport, (struct in6_addr *)req->id.idiag_src, req->id.idiag_sport, req->id.idiag_if); } #endif else { rcu_read_unlock(); return ERR_PTR(-EINVAL); } rcu_read_unlock(); if (!sk) return ERR_PTR(-ENOENT); if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) { sock_gen_put(sk); return ERR_PTR(-ENOENT); } return sk; } EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { struct sk_buff *in_skb = cb->skb; bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN); struct net *net = sock_net(in_skb->sk); struct sk_buff *rep; struct sock *sk; int err; sk = inet_diag_find_one_icsk(net, hashinfo, req); if (IS_ERR(sk)) return PTR_ERR(sk); rep = nlmsg_new(inet_sk_attr_size(sk, req, net_admin), GFP_KERNEL); if (!rep) { err = -ENOMEM; goto out; } err = sk_diag_fill(sk, rep, cb, req, 0, net_admin); if (err < 0) { WARN_ON(err == -EMSGSIZE); nlmsg_free(rep); goto out; } err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); out: if (sk) sock_gen_put(sk); return err; } EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk); static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb, const struct nlmsghdr *nlh, int hdrlen, const struct inet_diag_req_v2 *req) { const struct inet_diag_handler *handler; struct inet_diag_dump_data dump_data; int err, protocol; memset(&dump_data, 0, sizeof(dump_data)); err = inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas); if (err) return err; protocol = inet_diag_get_protocol(req, &dump_data); handler = inet_diag_lock_handler(protocol); if (!handler) return -ENOENT; if (cmd == SOCK_DIAG_BY_FAMILY) { struct netlink_callback cb = { .nlh = nlh, .skb = in_skb, .data = &dump_data, }; err = handler->dump_one(&cb, req); } else if (cmd == SOCK_DESTROY && handler->destroy) { err = handler->destroy(in_skb, req); } else { err = -EOPNOTSUPP; } inet_diag_unlock_handler(handler); return err; } static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits) { int words = bits >> 5; bits &= 0x1f; if (words) { if (memcmp(a1, a2, words << 2)) return 0; } if (bits) { __be32 w1, w2; __be32 mask; w1 = a1[words]; w2 = a2[words]; mask = htonl((0xffffffff) << (32 - bits)); if ((w1 ^ w2) & mask) return 0; } return 1; } static int inet_diag_bc_run(const struct nlattr *_bc, const struct inet_diag_entry *entry) { const void *bc = nla_data(_bc); int len = nla_len(_bc); while (len > 0) { int yes = 1; const struct inet_diag_bc_op *op = bc; switch (op->code) { case INET_DIAG_BC_NOP: break; case INET_DIAG_BC_JMP: yes = 0; break; case INET_DIAG_BC_S_EQ: yes = entry->sport == op[1].no; break; case INET_DIAG_BC_S_GE: yes = entry->sport >= op[1].no; break; case INET_DIAG_BC_S_LE: yes = entry->sport <= op[1].no; break; case INET_DIAG_BC_D_EQ: yes = entry->dport == op[1].no; break; case INET_DIAG_BC_D_GE: yes = entry->dport >= op[1].no; break; case INET_DIAG_BC_D_LE: yes = entry->dport <= op[1].no; break; case INET_DIAG_BC_AUTO: yes = !(entry->userlocks & SOCK_BINDPORT_LOCK); break; case INET_DIAG_BC_S_COND: case 
INET_DIAG_BC_D_COND: { const struct inet_diag_hostcond *cond; const __be32 *addr; cond = (const struct inet_diag_hostcond *)(op + 1); if (cond->port != -1 && cond->port != (op->code == INET_DIAG_BC_S_COND ? entry->sport : entry->dport)) { yes = 0; break; } if (op->code == INET_DIAG_BC_S_COND) addr = entry->saddr; else addr = entry->daddr; if (cond->family != AF_UNSPEC && cond->family != entry->family) { if (entry->family == AF_INET6 && cond->family == AF_INET) { if (addr[0] == 0 && addr[1] == 0 && addr[2] == htonl(0xffff) && bitstring_match(addr + 3, cond->addr, cond->prefix_len)) break; } yes = 0; break; } if (cond->prefix_len == 0) break; if (bitstring_match(addr, cond->addr, cond->prefix_len)) break; yes = 0; break; } case INET_DIAG_BC_DEV_COND: { u32 ifindex; ifindex = *((const u32 *)(op + 1)); if (ifindex != entry->ifindex) yes = 0; break; } case INET_DIAG_BC_MARK_COND: { struct inet_diag_markcond *cond; cond = (struct inet_diag_markcond *)(op + 1); if ((entry->mark & cond->mask) != cond->mark) yes = 0; break; } #ifdef CONFIG_SOCK_CGROUP_DATA case INET_DIAG_BC_CGROUP_COND: { u64 cgroup_id; cgroup_id = get_unaligned((const u64 *)(op + 1)); if (cgroup_id != entry->cgroup_id) yes = 0; break; } #endif } if (yes) { len -= op->yes; bc += op->yes; } else { len -= op->no; bc += op->no; } } return len == 0; } /* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV) */ static void entry_fill_addrs(struct inet_diag_entry *entry, const struct sock *sk) { #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) { entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32; entry->daddr = sk->sk_v6_daddr.s6_addr32; } else #endif { entry->saddr = &sk->sk_rcv_saddr; entry->daddr = &sk->sk_daddr; } } int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct inet_diag_entry entry; if (!bc) return 1; entry.family = sk->sk_family; entry_fill_addrs(&entry, sk); entry.sport = inet->inet_num; entry.dport = ntohs(inet->inet_dport); entry.ifindex = sk->sk_bound_dev_if; entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0; if (sk_fullsock(sk)) entry.mark = READ_ONCE(sk->sk_mark); else if (sk->sk_state == TCP_NEW_SYN_RECV) entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark; else if (sk->sk_state == TCP_TIME_WAIT) entry.mark = inet_twsk(sk)->tw_mark; else entry.mark = 0; #ifdef CONFIG_SOCK_CGROUP_DATA entry.cgroup_id = sk_fullsock(sk) ? cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)) : 0; #endif return inet_diag_bc_run(bc, &entry); } EXPORT_SYMBOL_GPL(inet_diag_bc_sk); static int valid_cc(const void *bc, int len, int cc) { while (len >= 0) { const struct inet_diag_bc_op *op = bc; if (cc > len) return 0; if (cc == len) return 1; if (op->yes < 4 || op->yes & 3) return 0; len -= op->yes; bc += op->yes; } return 0; } /* data is u32 ifindex */ static bool valid_devcond(const struct inet_diag_bc_op *op, int len, int *min_len) { /* Check ifindex space. */ *min_len += sizeof(u32); if (len < *min_len) return false; return true; } /* Validate an inet_diag_hostcond. */ static bool valid_hostcond(const struct inet_diag_bc_op *op, int len, int *min_len) { struct inet_diag_hostcond *cond; int addr_len; /* Check hostcond space. */ *min_len += sizeof(struct inet_diag_hostcond); if (len < *min_len) return false; cond = (struct inet_diag_hostcond *)(op + 1); /* Check address family and address length. 
*/ switch (cond->family) { case AF_UNSPEC: addr_len = 0; break; case AF_INET: addr_len = sizeof(struct in_addr); break; case AF_INET6: addr_len = sizeof(struct in6_addr); break; default: return false; } *min_len += addr_len; if (len < *min_len) return false; /* Check prefix length (in bits) vs address length (in bytes). */ if (cond->prefix_len > 8 * addr_len) return false; return true; } /* Validate a port comparison operator. */ static bool valid_port_comparison(const struct inet_diag_bc_op *op, int len, int *min_len) { /* Port comparisons put the port in a follow-on inet_diag_bc_op. */ *min_len += sizeof(struct inet_diag_bc_op); if (len < *min_len) return false; return true; } static bool valid_markcond(const struct inet_diag_bc_op *op, int len, int *min_len) { *min_len += sizeof(struct inet_diag_markcond); return len >= *min_len; } #ifdef CONFIG_SOCK_CGROUP_DATA static bool valid_cgroupcond(const struct inet_diag_bc_op *op, int len, int *min_len) { *min_len += sizeof(u64); return len >= *min_len; } #endif static int inet_diag_bc_audit(const struct nlattr *attr, const struct sk_buff *skb) { bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN); const void *bytecode, *bc; int bytecode_len, len; if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op)) return -EINVAL; bytecode = bc = nla_data(attr); len = bytecode_len = nla_len(attr); while (len > 0) { int min_len = sizeof(struct inet_diag_bc_op); const struct inet_diag_bc_op *op = bc; switch (op->code) { case INET_DIAG_BC_S_COND: case INET_DIAG_BC_D_COND: if (!valid_hostcond(bc, len, &min_len)) return -EINVAL; break; case INET_DIAG_BC_DEV_COND: if (!valid_devcond(bc, len, &min_len)) return -EINVAL; break; case INET_DIAG_BC_S_EQ: case INET_DIAG_BC_S_GE: case INET_DIAG_BC_S_LE: case INET_DIAG_BC_D_EQ: case INET_DIAG_BC_D_GE: case INET_DIAG_BC_D_LE: if (!valid_port_comparison(bc, len, &min_len)) return -EINVAL; break; case INET_DIAG_BC_MARK_COND: if (!net_admin) return -EPERM; if (!valid_markcond(bc, len, &min_len)) return -EINVAL; break; #ifdef CONFIG_SOCK_CGROUP_DATA case INET_DIAG_BC_CGROUP_COND: if (!valid_cgroupcond(bc, len, &min_len)) return -EINVAL; break; #endif case INET_DIAG_BC_AUTO: case INET_DIAG_BC_JMP: case INET_DIAG_BC_NOP: break; default: return -EINVAL; } if (op->code != INET_DIAG_BC_NOP) { if (op->no < min_len || op->no > len + 4 || op->no & 3) return -EINVAL; if (op->no < len && !valid_cc(bytecode, bytecode_len, len - op->no)) return -EINVAL; } if (op->yes < min_len || op->yes > len + 4 || op->yes & 3) return -EINVAL; bc += op->yes; len -= op->yes; } return len == 0 ? 
0 : -EINVAL; } static void twsk_build_assert(void) { BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) != offsetof(struct sock, sk_family)); BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) != offsetof(struct inet_sock, inet_num)); BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) != offsetof(struct inet_sock, inet_dport)); BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) != offsetof(struct inet_sock, inet_rcv_saddr)); BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) != offsetof(struct inet_sock, inet_daddr)); #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) != offsetof(struct sock, sk_v6_rcv_saddr)); BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) != offsetof(struct sock, sk_v6_daddr)); #endif } void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); struct inet_diag_dump_data *cb_data = cb->data; struct net *net = sock_net(skb->sk); u32 idiag_states = r->idiag_states; int i, num, s_i, s_num; struct nlattr *bc; struct sock *sk; bc = cb_data->inet_diag_nla_bc; if (idiag_states & TCPF_SYN_RECV) idiag_states |= TCPF_NEW_SYN_RECV; s_i = cb->args[1]; s_num = num = cb->args[2]; if (cb->args[0] == 0) { if (!(idiag_states & TCPF_LISTEN) || r->id.idiag_dport) goto skip_listen_ht; for (i = s_i; i <= hashinfo->lhash2_mask; i++) { struct inet_listen_hashbucket *ilb; struct hlist_nulls_node *node; num = 0; ilb = &hashinfo->lhash2[i]; if (hlist_nulls_empty(&ilb->nulls_head)) { s_num = 0; continue; } spin_lock(&ilb->lock); sk_nulls_for_each(sk, node, &ilb->nulls_head) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net)) continue; if (num < s_num) { num++; continue; } if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family) goto next_listen; if (r->id.idiag_sport != inet->inet_sport && r->id.idiag_sport) goto next_listen; if (!inet_diag_bc_sk(bc, sk)) goto next_listen; if (inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, NLM_F_MULTI, net_admin) < 0) { spin_unlock(&ilb->lock); goto done; } next_listen: ++num; } spin_unlock(&ilb->lock); s_num = 0; } skip_listen_ht: cb->args[0] = 1; s_i = num = s_num = 0; } /* Process a maximum of SKARR_SZ sockets at a time when walking hash buckets * with bh disabled. */ #define SKARR_SZ 16 /* Dump bound but inactive (not listening, connecting, etc.) 
sockets */ if (cb->args[0] == 1) { if (!(idiag_states & TCPF_BOUND_INACTIVE)) goto skip_bind_ht; for (i = s_i; i < hashinfo->bhash_size; i++) { struct inet_bind_hashbucket *ibb; struct inet_bind2_bucket *tb2; struct sock *sk_arr[SKARR_SZ]; int num_arr[SKARR_SZ]; int idx, accum, res; resume_bind_walk: num = 0; accum = 0; ibb = &hashinfo->bhash2[i]; if (hlist_empty(&ibb->chain)) { s_num = 0; continue; } spin_lock_bh(&ibb->lock); inet_bind_bucket_for_each(tb2, &ibb->chain) { if (!net_eq(ib2_net(tb2), net)) continue; sk_for_each_bound(sk, &tb2->owners) { struct inet_sock *inet = inet_sk(sk); if (num < s_num) goto next_bind; if (sk->sk_state != TCP_CLOSE || !inet->inet_num) goto next_bind; if (r->sdiag_family != AF_UNSPEC && r->sdiag_family != sk->sk_family) goto next_bind; if (!inet_diag_bc_sk(bc, sk)) goto next_bind; sock_hold(sk); num_arr[accum] = num; sk_arr[accum] = sk; if (++accum == SKARR_SZ) goto pause_bind_walk; next_bind: num++; } } pause_bind_walk: spin_unlock_bh(&ibb->lock); res = 0; for (idx = 0; idx < accum; idx++) { if (res >= 0) { res = inet_sk_diag_fill(sk_arr[idx], NULL, skb, cb, r, NLM_F_MULTI, net_admin); if (res < 0) num = num_arr[idx]; } sock_put(sk_arr[idx]); } if (res < 0) goto done; cond_resched(); if (accum == SKARR_SZ) { s_num = num + 1; goto resume_bind_walk; } s_num = 0; } skip_bind_ht: cb->args[0] = 2; s_i = num = s_num = 0; } if (!(idiag_states & ~TCPF_LISTEN)) goto out; for (i = s_i; i <= hashinfo->ehash_mask; i++) { struct inet_ehash_bucket *head = &hashinfo->ehash[i]; spinlock_t *lock = inet_ehash_lockp(hashinfo, i); struct hlist_nulls_node *node; struct sock *sk_arr[SKARR_SZ]; int num_arr[SKARR_SZ]; int idx, accum, res; if (hlist_nulls_empty(&head->chain)) continue; if (i > s_i) s_num = 0; next_chunk: num = 0; accum = 0; spin_lock_bh(lock); sk_nulls_for_each(sk, node, &head->chain) { int state; if (!net_eq(sock_net(sk), net)) continue; if (num < s_num) goto next_normal; state = (sk->sk_state == TCP_TIME_WAIT) ? 
READ_ONCE(inet_twsk(sk)->tw_substate) : sk->sk_state; if (!(idiag_states & (1 << state))) goto next_normal; if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family) goto next_normal; if (r->id.idiag_sport != htons(sk->sk_num) && r->id.idiag_sport) goto next_normal; if (r->id.idiag_dport != sk->sk_dport && r->id.idiag_dport) goto next_normal; twsk_build_assert(); if (!inet_diag_bc_sk(bc, sk)) goto next_normal; if (!refcount_inc_not_zero(&sk->sk_refcnt)) goto next_normal; num_arr[accum] = num; sk_arr[accum] = sk; if (++accum == SKARR_SZ) break; next_normal: ++num; } spin_unlock_bh(lock); res = 0; for (idx = 0; idx < accum; idx++) { if (res >= 0) { res = sk_diag_fill(sk_arr[idx], skb, cb, r, NLM_F_MULTI, net_admin); if (res < 0) num = num_arr[idx]; } sock_gen_put(sk_arr[idx]); } if (res < 0) break; cond_resched(); if (accum == SKARR_SZ) { s_num = num + 1; goto next_chunk; } } done: cb->args[1] = i; cb->args[2] = num; out: ; } EXPORT_SYMBOL_GPL(inet_diag_dump_icsk); static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r) { struct inet_diag_dump_data *cb_data = cb->data; const struct inet_diag_handler *handler; u32 prev_min_dump_alloc; int protocol, err = 0; protocol = inet_diag_get_protocol(r, cb_data); again: prev_min_dump_alloc = cb->min_dump_alloc; handler = inet_diag_lock_handler(protocol); if (handler) { handler->dump(skb, cb, r); inet_diag_unlock_handler(handler); } else { err = -ENOENT; } /* The skb is not large enough to fit one sk info and * inet_sk_diag_fill() has requested for a larger skb. */ if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) { err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL); if (!err) goto again; } return err ? : skb->len; } static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) { return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh)); } static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen) { const struct nlmsghdr *nlh = cb->nlh; struct inet_diag_dump_data *cb_data; struct sk_buff *skb = cb->skb; struct nlattr *nla; int err; cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL); if (!cb_data) return -ENOMEM; err = inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas); if (err) { kfree(cb_data); return err; } nla = cb_data->inet_diag_nla_bc; if (nla) { err = inet_diag_bc_audit(nla, skb); if (err) { kfree(cb_data); return err; } } nla = cb_data->inet_diag_nla_bpf_stgs; if (nla) { struct bpf_sk_storage_diag *bpf_stg_diag; bpf_stg_diag = bpf_sk_storage_diag_alloc(nla); if (IS_ERR(bpf_stg_diag)) { kfree(cb_data); return PTR_ERR(bpf_stg_diag); } cb_data->bpf_stg_diag = bpf_stg_diag; } cb->data = cb_data; return 0; } static int inet_diag_dump_start(struct netlink_callback *cb) { return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2)); } static int inet_diag_dump_start_compat(struct netlink_callback *cb) { return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req)); } static int inet_diag_dump_done(struct netlink_callback *cb) { struct inet_diag_dump_data *cb_data = cb->data; bpf_sk_storage_diag_free(cb_data->bpf_stg_diag); kfree(cb->data); return 0; } static int inet_diag_type2proto(int type) { switch (type) { case TCPDIAG_GETSOCK: return IPPROTO_TCP; case DCCPDIAG_GETSOCK: return IPPROTO_DCCP; default: return 0; } } static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb) { struct inet_diag_req *rc = nlmsg_data(cb->nlh); struct inet_diag_req_v2 req; req.sdiag_family = AF_UNSPEC; /* compatibility */ 
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type); req.idiag_ext = rc->idiag_ext; req.pad = 0; req.idiag_states = rc->idiag_states; req.id = rc->id; return __inet_diag_dump(skb, cb, &req); } static int inet_diag_get_exact_compat(struct sk_buff *in_skb, const struct nlmsghdr *nlh) { struct inet_diag_req *rc = nlmsg_data(nlh); struct inet_diag_req_v2 req; req.sdiag_family = rc->idiag_family; req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type); req.idiag_ext = rc->idiag_ext; req.pad = 0; req.idiag_states = rc->idiag_states; req.id = rc->id; return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, sizeof(struct inet_diag_req), &req); } static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh) { int hdrlen = sizeof(struct inet_diag_req); struct net *net = sock_net(skb->sk); if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX || nlmsg_len(nlh) < hdrlen) return -EINVAL; if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .start = inet_diag_dump_start_compat, .done = inet_diag_dump_done, .dump = inet_diag_dump_compat, }; return netlink_dump_start(net->diag_nlsk, skb, nlh, &c); } return inet_diag_get_exact_compat(skb, nlh); } static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h) { int hdrlen = sizeof(struct inet_diag_req_v2); struct net *net = sock_net(skb->sk); if (nlmsg_len(h) < hdrlen) return -EINVAL; if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY && h->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .start = inet_diag_dump_start, .done = inet_diag_dump_done, .dump = inet_diag_dump, }; return netlink_dump_start(net->diag_nlsk, skb, h, &c); } return inet_diag_cmd_exact(h->nlmsg_type, skb, h, hdrlen, nlmsg_data(h)); } static int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk) { const struct inet_diag_handler *handler; struct nlmsghdr *nlh; struct nlattr *attr; struct inet_diag_msg *r; void *info = NULL; int err = 0; nlh = nlmsg_put(skb, 0, 0, SOCK_DIAG_BY_FAMILY, sizeof(*r), 0); if (!nlh) return -ENOMEM; r = nlmsg_data(nlh); memset(r, 0, sizeof(*r)); inet_diag_msg_common_fill(r, sk); if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_STREAM) r->id.idiag_sport = inet_sk(sk)->inet_sport; r->idiag_state = sk->sk_state; if ((err = nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))) { nlmsg_cancel(skb, nlh); return err; } handler = inet_diag_lock_handler(sk->sk_protocol); if (!handler) { nlmsg_cancel(skb, nlh); return -ENOENT; } attr = handler->idiag_info_size ? nla_reserve_64bit(skb, INET_DIAG_INFO, handler->idiag_info_size, INET_DIAG_PAD) : NULL; if (attr) info = nla_data(attr); handler->idiag_get_info(sk, r, info); inet_diag_unlock_handler(handler); nlmsg_end(skb, nlh); return 0; } static const struct sock_diag_handler inet_diag_handler = { .owner = THIS_MODULE, .family = AF_INET, .dump = inet_diag_handler_cmd, .get_info = inet_diag_handler_get_info, .destroy = inet_diag_handler_cmd, }; static const struct sock_diag_handler inet6_diag_handler = { .owner = THIS_MODULE, .family = AF_INET6, .dump = inet_diag_handler_cmd, .get_info = inet_diag_handler_get_info, .destroy = inet_diag_handler_cmd, }; int inet_diag_register(const struct inet_diag_handler *h) { const __u16 type = h->idiag_type; if (type >= IPPROTO_MAX) return -EINVAL; return !cmpxchg((const struct inet_diag_handler **)&inet_diag_table[type], NULL, h) ? 
			0 : -EEXIST;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	xchg((const struct inet_diag_handler **)&inet_diag_table[type],
	     NULL);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static const struct sock_diag_inet_compat inet_diag_compat = {
	.owner	= THIS_MODULE,
	.fn	= inet_diag_rcv_msg_compat,
};

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(&inet_diag_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(&inet_diag_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("INET/INET6: socket monitoring via SOCK_DIAG");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);
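Everything above is the kernel-side implementation of the NETLINK_SOCK_DIAG interface. For orientation, the following is a minimal userspace sketch (not part of the kernel sources; the buffer size and error handling are simplified, and names such as "query" are made up) that sends a SOCK_DIAG_BY_FAMILY dump request carrying a struct inet_diag_req_v2 and prints the struct inet_diag_msg replies, roughly what ss(8) does:

/* inet_diag dump sketch: list established IPv4 TCP sockets. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/tcp.h>		/* TCP_ESTABLISHED */
#include <linux/netlink.h>		/* NETLINK_SOCK_DIAG, nlmsghdr */
#include <linux/sock_diag.h>		/* SOCK_DIAG_BY_FAMILY */
#include <linux/inet_diag.h>		/* inet_diag_req_v2, inet_diag_msg */

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} query = {
		.nlh = {
			.nlmsg_len = sizeof(query),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_INET,
			.sdiag_protocol = IPPROTO_TCP,
			/* idiag_states is a bitmask of TCP states to report. */
			.idiag_states = 1u << TCP_ESTABLISHED,
		},
	};
	char buf[32768];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_SOCK_DIAG);
	if (fd < 0 || send(fd, &query, sizeof(query), 0) < 0) {
		perror("sock_diag");
		return 1;
	}

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (len <= 0)
			break;
		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			struct inet_diag_msg *r;
			char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];

			if (nlh->nlmsg_type == NLMSG_DONE ||
			    nlh->nlmsg_type == NLMSG_ERROR)
				goto out;
			r = NLMSG_DATA(nlh);
			inet_ntop(AF_INET, r->id.idiag_src, src, sizeof(src));
			inet_ntop(AF_INET, r->id.idiag_dst, dst, sizeof(dst));
			printf("%s:%u -> %s:%u uid %u inode %u\n",
			       src, ntohs(r->id.idiag_sport),
			       dst, ntohs(r->id.idiag_dport),
			       r->idiag_uid, r->idiag_inode);
		}
	}
out:
	close(fd);
	return 0;
}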
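The INET_DIAG_REQ_BYTECODE filter that inet_diag_bc_audit() validates and inet_diag_bc_run() executes is easiest to see with a concrete program. Here is a hedged sketch (the struct and function names are illustrative) of a dump request carrying a single "destination port == 443" condition: jump offsets are in bytes, a "yes" offset that lands exactly past the end of the program accepts the socket, a "no" offset of len + 4 rejects it, and the port value of a port comparison lives in the "no" field of the follow-on op, exactly as op[1].no is read above.

/* Sketch: a dump request with a "dport == 443" bytecode filter attached. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

struct diag_filter_req {
	struct nlmsghdr nlh;
	struct inet_diag_req_v2 req;
	struct nlattr bc_attr;			/* INET_DIAG_REQ_BYTECODE */
	struct inet_diag_bc_op bc[2];
};

static void build_filter_req(struct diag_filter_req *m)
{
	memset(m, 0, sizeof(*m));
	m->nlh.nlmsg_len = sizeof(*m);
	m->nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	m->nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

	m->req.sdiag_family = AF_INET;
	m->req.sdiag_protocol = IPPROTO_TCP;
	m->req.idiag_states = ~0u;	/* all states; the filter does the work */

	m->bc_attr.nla_type = INET_DIAG_REQ_BYTECODE;
	m->bc_attr.nla_len = NLA_HDRLEN + sizeof(m->bc);

	/* op[0]: compare dport; on match jump 8 bytes (exactly past the
	 * 8-byte program = accept), on mismatch jump 12 bytes (len + 4 =
	 * reject).
	 */
	m->bc[0].code = INET_DIAG_BC_D_EQ;
	m->bc[0].yes = 8;
	m->bc[0].no = 12;
	/* op[1] only carries the port to compare against, in its 'no' field. */
	m->bc[1].no = 443;
}

The message is sent over a NETLINK_SOCK_DIAG socket with a plain send() and the replies are parsed the same way as an unfiltered dump. inet_diag_bc_audit() enforces exactly the invariants used here: both jump offsets must be multiples of four, at least the size of the condition itself, and no larger than the remaining length plus four.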
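The next file, net/shaper/shaper.c, implements the net-shaper netlink API; every operation it parses is ultimately handed to per-driver callbacks reached through netdev_ops->net_shaper_ops. As rough orientation only, here is a minimal sketch of the driver side for the three callbacks this code exercises (capabilities, set and delete); the authoritative prototypes live in <net/net_shaper.h>, the "foo" identifiers are made up, and real drivers usually support more than queue-scope max-rate shaping:

/* Driver-side sketch; "foo" names are illustrative only. */
#include <linux/bits.h>
#include <linux/netdevice.h>
#include <net/net_shaper.h>

static void foo_shaper_capabilities(struct net_shaper_binding *binding,
				    enum net_shaper_scope scope,
				    unsigned long *flags)
{
	/* Advertise only max-rate shaping, in bits per second, on queues. */
	if (scope == NET_SHAPER_SCOPE_QUEUE)
		*flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
			 BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
}

static int foo_shaper_set(struct net_shaper_binding *binding,
			  const struct net_shaper *shaper,
			  struct netlink_ext_ack *extack)
{
	/* Program the HW rate limiter for queue shaper->handle.id using
	 * shaper->bw_max here; returning an error makes the core roll back
	 * the tentative insert (see net_shaper_rollback() below).
	 */
	netdev_dbg(binding->netdev, "queue %u: bw_max %llu\n",
		   shaper->handle.id,
		   (unsigned long long)shaper->bw_max);
	return 0;
}

static int foo_shaper_delete(struct net_shaper_binding *binding,
			     const struct net_shaper_handle *handle,
			     struct netlink_ext_ack *extack)
{
	/* Tear down the HW rate limiter attached to handle->id. */
	return 0;
}

static const struct net_shaper_ops foo_shaper_ops = {
	.capabilities	= foo_shaper_capabilities,
	.set		= foo_shaper_set,
	.delete		= foo_shaper_delete,
};

/* Hooked up from the driver's struct net_device_ops:
 *	.net_shaper_ops = &foo_shaper_ops,
 */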
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <net/net_shaper.h>

#include "shaper_nl_gen.h"

#include "../core/dev.h"

#define NET_SHAPER_SCOPE_SHIFT	26
#define NET_SHAPER_ID_MASK	GENMASK(NET_SHAPER_SCOPE_SHIFT - 1, 0)
#define NET_SHAPER_SCOPE_MASK	GENMASK(31, NET_SHAPER_SCOPE_SHIFT)

#define NET_SHAPER_ID_UNSPEC NET_SHAPER_ID_MASK

struct net_shaper_hierarchy {
	struct xarray shapers;
};

struct net_shaper_nl_ctx {
	struct net_shaper_binding binding;
	netdevice_tracker dev_tracker;
	unsigned long start_index;
};

static struct net_shaper_binding *net_shaper_binding_from_ctx(void *ctx)
{
	return &((struct net_shaper_nl_ctx *)ctx)->binding;
}

static void net_shaper_lock(struct net_shaper_binding *binding)
{
switch (binding->type) { case NET_SHAPER_BINDING_TYPE_NETDEV: netdev_lock(binding->netdev); break; } } static void net_shaper_unlock(struct net_shaper_binding *binding) { switch (binding->type) { case NET_SHAPER_BINDING_TYPE_NETDEV: netdev_unlock(binding->netdev); break; } } static struct net_shaper_hierarchy * net_shaper_hierarchy(struct net_shaper_binding *binding) { /* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */ if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV) return READ_ONCE(binding->netdev->net_shaper_hierarchy); /* No other type supported yet. */ return NULL; } static const struct net_shaper_ops * net_shaper_ops(struct net_shaper_binding *binding) { if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV) return binding->netdev->netdev_ops->net_shaper_ops; /* No other type supported yet. */ return NULL; } /* Count the number of [multi] attributes of the given type. */ static int net_shaper_list_len(struct genl_info *info, int type) { struct nlattr *attr; int rem, cnt = 0; nla_for_each_attr_type(attr, type, genlmsg_data(info->genlhdr), genlmsg_len(info->genlhdr), rem) cnt++; return cnt; } static int net_shaper_handle_size(void) { return nla_total_size(nla_total_size(sizeof(u32)) + nla_total_size(sizeof(u32))); } static int net_shaper_fill_binding(struct sk_buff *msg, const struct net_shaper_binding *binding, u32 type) { /* Should never happen, as currently only NETDEV is supported. */ if (WARN_ON_ONCE(binding->type != NET_SHAPER_BINDING_TYPE_NETDEV)) return -EINVAL; if (nla_put_u32(msg, type, binding->netdev->ifindex)) return -EMSGSIZE; return 0; } static int net_shaper_fill_handle(struct sk_buff *msg, const struct net_shaper_handle *handle, u32 type) { struct nlattr *handle_attr; if (handle->scope == NET_SHAPER_SCOPE_UNSPEC) return 0; handle_attr = nla_nest_start(msg, type); if (!handle_attr) return -EMSGSIZE; if (nla_put_u32(msg, NET_SHAPER_A_HANDLE_SCOPE, handle->scope) || (handle->scope >= NET_SHAPER_SCOPE_QUEUE && nla_put_u32(msg, NET_SHAPER_A_HANDLE_ID, handle->id))) goto handle_nest_cancel; nla_nest_end(msg, handle_attr); return 0; handle_nest_cancel: nla_nest_cancel(msg, handle_attr); return -EMSGSIZE; } static int net_shaper_fill_one(struct sk_buff *msg, const struct net_shaper_binding *binding, const struct net_shaper *shaper, const struct genl_info *info) { void *hdr; hdr = genlmsg_iput(msg, info); if (!hdr) return -EMSGSIZE; if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) || net_shaper_fill_handle(msg, &shaper->parent, NET_SHAPER_A_PARENT) || net_shaper_fill_handle(msg, &shaper->handle, NET_SHAPER_A_HANDLE) || ((shaper->bw_min || shaper->bw_max || shaper->burst) && nla_put_u32(msg, NET_SHAPER_A_METRIC, shaper->metric)) || (shaper->bw_min && nla_put_uint(msg, NET_SHAPER_A_BW_MIN, shaper->bw_min)) || (shaper->bw_max && nla_put_uint(msg, NET_SHAPER_A_BW_MAX, shaper->bw_max)) || (shaper->burst && nla_put_uint(msg, NET_SHAPER_A_BURST, shaper->burst)) || (shaper->priority && nla_put_u32(msg, NET_SHAPER_A_PRIORITY, shaper->priority)) || (shaper->weight && nla_put_u32(msg, NET_SHAPER_A_WEIGHT, shaper->weight))) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } /* Initialize the context fetching the relevant device and * acquiring a reference to it. 
*/ static int net_shaper_ctx_setup(const struct genl_info *info, int type, struct net_shaper_nl_ctx *ctx) { struct net *ns = genl_info_net(info); struct net_device *dev; int ifindex; if (GENL_REQ_ATTR_CHECK(info, type)) return -EINVAL; ifindex = nla_get_u32(info->attrs[type]); dev = netdev_get_by_index(ns, ifindex, &ctx->dev_tracker, GFP_KERNEL); if (!dev) { NL_SET_BAD_ATTR(info->extack, info->attrs[type]); return -ENOENT; } if (!dev->netdev_ops->net_shaper_ops) { NL_SET_BAD_ATTR(info->extack, info->attrs[type]); netdev_put(dev, &ctx->dev_tracker); return -EOPNOTSUPP; } ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV; ctx->binding.netdev = dev; return 0; } static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx) { if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV) netdev_put(ctx->binding.netdev, &ctx->dev_tracker); } static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle) { return FIELD_PREP(NET_SHAPER_SCOPE_MASK, handle->scope) | FIELD_PREP(NET_SHAPER_ID_MASK, handle->id); } static void net_shaper_index_to_handle(u32 index, struct net_shaper_handle *handle) { handle->scope = FIELD_GET(NET_SHAPER_SCOPE_MASK, index); handle->id = FIELD_GET(NET_SHAPER_ID_MASK, index); } static void net_shaper_default_parent(const struct net_shaper_handle *handle, struct net_shaper_handle *parent) { switch (handle->scope) { case NET_SHAPER_SCOPE_UNSPEC: case NET_SHAPER_SCOPE_NETDEV: case __NET_SHAPER_SCOPE_MAX: parent->scope = NET_SHAPER_SCOPE_UNSPEC; break; case NET_SHAPER_SCOPE_QUEUE: case NET_SHAPER_SCOPE_NODE: parent->scope = NET_SHAPER_SCOPE_NETDEV; break; } parent->id = 0; } /* * MARK_0 is already in use due to XA_FLAGS_ALLOC, can't reuse such flag as * it's cleared by xa_store(). */ #define NET_SHAPER_NOT_VALID XA_MARK_1 static struct net_shaper * net_shaper_lookup(struct net_shaper_binding *binding, const struct net_shaper_handle *handle) { struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); u32 index = net_shaper_handle_to_index(handle); if (!hierarchy || xa_get_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID)) return NULL; return xa_load(&hierarchy->shapers, index); } /* Allocate on demand the per device shaper's hierarchy container. * Called under the net shaper lock */ static struct net_shaper_hierarchy * net_shaper_hierarchy_setup(struct net_shaper_binding *binding) { struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); if (hierarchy) return hierarchy; hierarchy = kmalloc(sizeof(*hierarchy), GFP_KERNEL); if (!hierarchy) return NULL; /* The flag is required for ID allocation */ xa_init_flags(&hierarchy->shapers, XA_FLAGS_ALLOC); switch (binding->type) { case NET_SHAPER_BINDING_TYPE_NETDEV: /* Pairs with READ_ONCE in net_shaper_hierarchy. */ WRITE_ONCE(binding->netdev->net_shaper_hierarchy, hierarchy); break; } return hierarchy; } /* Prepare the hierarchy container to actually insert the given shaper, doing * in advance the needed allocations. */ static int net_shaper_pre_insert(struct net_shaper_binding *binding, struct net_shaper_handle *handle, struct netlink_ext_ack *extack) { struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); struct net_shaper *prev, *cur; bool id_allocated = false; int ret, index; if (!hierarchy) return -ENOMEM; index = net_shaper_handle_to_index(handle); cur = xa_load(&hierarchy->shapers, index); if (cur) return 0; /* Allocated a new id, if needed. 
*/ if (handle->scope == NET_SHAPER_SCOPE_NODE && handle->id == NET_SHAPER_ID_UNSPEC) { u32 min, max; handle->id = NET_SHAPER_ID_MASK - 1; max = net_shaper_handle_to_index(handle); handle->id = 0; min = net_shaper_handle_to_index(handle); ret = xa_alloc(&hierarchy->shapers, &index, NULL, XA_LIMIT(min, max), GFP_KERNEL); if (ret < 0) { NL_SET_ERR_MSG(extack, "Can't allocate new id for NODE shaper"); return ret; } net_shaper_index_to_handle(index, handle); id_allocated = true; } cur = kzalloc(sizeof(*cur), GFP_KERNEL); if (!cur) { ret = -ENOMEM; goto free_id; } /* Mark 'tentative' shaper inside the hierarchy container. * xa_set_mark is a no-op if the previous store fails. */ xa_lock(&hierarchy->shapers); prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL); __xa_set_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID); xa_unlock(&hierarchy->shapers); if (xa_err(prev)) { NL_SET_ERR_MSG(extack, "Can't insert shaper into device store"); kfree_rcu(cur, rcu); ret = xa_err(prev); goto free_id; } return 0; free_id: if (id_allocated) xa_erase(&hierarchy->shapers, index); return ret; } /* Commit the tentative insert with the actual values. * Must be called only after a successful net_shaper_pre_insert(). */ static void net_shaper_commit(struct net_shaper_binding *binding, int nr_shapers, const struct net_shaper *shapers) { struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); struct net_shaper *cur; int index; int i; xa_lock(&hierarchy->shapers); for (i = 0; i < nr_shapers; ++i) { index = net_shaper_handle_to_index(&shapers[i].handle); cur = xa_load(&hierarchy->shapers, index); if (WARN_ON_ONCE(!cur)) continue; /* Successful update: drop the tentative mark * and update the hierarchy container. */ __xa_clear_mark(&hierarchy->shapers, index, NET_SHAPER_NOT_VALID); *cur = shapers[i]; } xa_unlock(&hierarchy->shapers); } /* Rollback all the tentative inserts from the hierarchy. */ static void net_shaper_rollback(struct net_shaper_binding *binding) { struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); struct net_shaper *cur; unsigned long index; if (!hierarchy) return; xa_lock(&hierarchy->shapers); xa_for_each_marked(&hierarchy->shapers, index, cur, NET_SHAPER_NOT_VALID) { __xa_erase(&hierarchy->shapers, index); kfree(cur); } xa_unlock(&hierarchy->shapers); } static int net_shaper_parse_handle(const struct nlattr *attr, const struct genl_info *info, struct net_shaper_handle *handle) { struct nlattr *tb[NET_SHAPER_A_HANDLE_MAX + 1]; struct nlattr *id_attr; u32 id = 0; int ret; ret = nla_parse_nested(tb, NET_SHAPER_A_HANDLE_MAX, attr, net_shaper_handle_nl_policy, info->extack); if (ret < 0) return ret; if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NET_SHAPER_A_HANDLE_SCOPE)) return -EINVAL; handle->scope = nla_get_u32(tb[NET_SHAPER_A_HANDLE_SCOPE]); /* The default id for NODE scope shapers is an invalid one * to help the 'group' operation discriminate between new * NODE shaper creation (ID_UNSPEC) and reuse of existing * shaper (any other value). 
*/ id_attr = tb[NET_SHAPER_A_HANDLE_ID]; if (id_attr) id = nla_get_u32(id_attr); else if (handle->scope == NET_SHAPER_SCOPE_NODE) id = NET_SHAPER_ID_UNSPEC; handle->id = id; return 0; } static int net_shaper_validate_caps(struct net_shaper_binding *binding, struct nlattr **tb, const struct genl_info *info, struct net_shaper *shaper) { const struct net_shaper_ops *ops = net_shaper_ops(binding); struct nlattr *bad = NULL; unsigned long caps = 0; ops->capabilities(binding, shaper->handle.scope, &caps); if (tb[NET_SHAPER_A_PRIORITY] && !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_PRIORITY))) bad = tb[NET_SHAPER_A_PRIORITY]; if (tb[NET_SHAPER_A_WEIGHT] && !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_WEIGHT))) bad = tb[NET_SHAPER_A_WEIGHT]; if (tb[NET_SHAPER_A_BW_MIN] && !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN))) bad = tb[NET_SHAPER_A_BW_MIN]; if (tb[NET_SHAPER_A_BW_MAX] && !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX))) bad = tb[NET_SHAPER_A_BW_MAX]; if (tb[NET_SHAPER_A_BURST] && !(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_BURST))) bad = tb[NET_SHAPER_A_BURST]; if (!caps) bad = tb[NET_SHAPER_A_HANDLE]; if (bad) { NL_SET_BAD_ATTR(info->extack, bad); return -EOPNOTSUPP; } if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE && binding->type == NET_SHAPER_BINDING_TYPE_NETDEV && shaper->handle.id >= binding->netdev->real_num_tx_queues) { NL_SET_ERR_MSG_FMT(info->extack, "Not existing queue id %d max %d", shaper->handle.id, binding->netdev->real_num_tx_queues); return -ENOENT; } /* The metric is really used only if there is *any* rate-related * setting, either in current attributes set or in pre-existing * values. */ if (shaper->burst || shaper->bw_min || shaper->bw_max) { u32 metric_cap = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS + shaper->metric; /* The metric test can fail even when the user did not * specify the METRIC attribute. Pointing to rate related * attribute will be confusing, as the attribute itself * could be indeed supported, with a different metric. * Be more specific. */ if (!(caps & BIT(metric_cap))) { NL_SET_ERR_MSG_FMT(info->extack, "Bad metric %d", shaper->metric); return -EOPNOTSUPP; } } return 0; } static int net_shaper_parse_info(struct net_shaper_binding *binding, struct nlattr **tb, const struct genl_info *info, struct net_shaper *shaper, bool *exists) { struct net_shaper *old; int ret; /* The shaper handle is the only mandatory attribute. */ if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, NET_SHAPER_A_HANDLE)) return -EINVAL; ret = net_shaper_parse_handle(tb[NET_SHAPER_A_HANDLE], info, &shaper->handle); if (ret) return ret; if (shaper->handle.scope == NET_SHAPER_SCOPE_UNSPEC) { NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]); return -EINVAL; } /* Fetch existing hierarchy, if any, so that user provide info will * incrementally update the existing shaper configuration. 
*/ old = net_shaper_lookup(binding, &shaper->handle); if (old) *shaper = *old; *exists = !!old; if (tb[NET_SHAPER_A_METRIC]) shaper->metric = nla_get_u32(tb[NET_SHAPER_A_METRIC]); if (tb[NET_SHAPER_A_BW_MIN]) shaper->bw_min = nla_get_uint(tb[NET_SHAPER_A_BW_MIN]); if (tb[NET_SHAPER_A_BW_MAX]) shaper->bw_max = nla_get_uint(tb[NET_SHAPER_A_BW_MAX]); if (tb[NET_SHAPER_A_BURST]) shaper->burst = nla_get_uint(tb[NET_SHAPER_A_BURST]); if (tb[NET_SHAPER_A_PRIORITY]) shaper->priority = nla_get_u32(tb[NET_SHAPER_A_PRIORITY]); if (tb[NET_SHAPER_A_WEIGHT]) shaper->weight = nla_get_u32(tb[NET_SHAPER_A_WEIGHT]); ret = net_shaper_validate_caps(binding, tb, info, shaper); if (ret < 0) return ret; return 0; } static int net_shaper_validate_nesting(struct net_shaper_binding *binding, const struct net_shaper *shaper, struct netlink_ext_ack *extack) { const struct net_shaper_ops *ops = net_shaper_ops(binding); unsigned long caps = 0; ops->capabilities(binding, shaper->handle.scope, &caps); if (!(caps & BIT(NET_SHAPER_A_CAPS_SUPPORT_NESTING))) { NL_SET_ERR_MSG_FMT(extack, "Nesting not supported for scope %d", shaper->handle.scope); return -EOPNOTSUPP; } return 0; } /* Fetch the existing leaf and update it with the user-provided * attributes. */ static int net_shaper_parse_leaf(struct net_shaper_binding *binding, const struct nlattr *attr, const struct genl_info *info, const struct net_shaper *node, struct net_shaper *shaper) { struct nlattr *tb[NET_SHAPER_A_WEIGHT + 1]; bool exists; int ret; ret = nla_parse_nested(tb, NET_SHAPER_A_WEIGHT, attr, net_shaper_leaf_info_nl_policy, info->extack); if (ret < 0) return ret; ret = net_shaper_parse_info(binding, tb, info, shaper, &exists); if (ret < 0) return ret; if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) { NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]); return -EINVAL; } if (node->handle.scope == NET_SHAPER_SCOPE_NODE) { ret = net_shaper_validate_nesting(binding, shaper, info->extack); if (ret < 0) return ret; } if (!exists) net_shaper_default_parent(&shaper->handle, &shaper->parent); return 0; } /* Alike net_parse_shaper_info(), but additionally allow the user specifying * the shaper's parent handle. 
*/ static int net_shaper_parse_node(struct net_shaper_binding *binding, struct nlattr **tb, const struct genl_info *info, struct net_shaper *shaper) { bool exists; int ret; ret = net_shaper_parse_info(binding, tb, info, shaper, &exists); if (ret) return ret; if (shaper->handle.scope != NET_SHAPER_SCOPE_NODE && shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) { NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_HANDLE]); return -EINVAL; } if (tb[NET_SHAPER_A_PARENT]) { ret = net_shaper_parse_handle(tb[NET_SHAPER_A_PARENT], info, &shaper->parent); if (ret) return ret; if (shaper->parent.scope != NET_SHAPER_SCOPE_NODE && shaper->parent.scope != NET_SHAPER_SCOPE_NETDEV) { NL_SET_BAD_ATTR(info->extack, tb[NET_SHAPER_A_PARENT]); return -EINVAL; } } return 0; } static int net_shaper_generic_pre(struct genl_info *info, int type) { struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx; BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx)); return net_shaper_ctx_setup(info, type, ctx); } int net_shaper_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { return net_shaper_generic_pre(info, NET_SHAPER_A_IFINDEX); } static void net_shaper_generic_post(struct genl_info *info) { net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)info->ctx); } void net_shaper_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { net_shaper_generic_post(info); } int net_shaper_nl_pre_dumpit(struct netlink_callback *cb) { struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx; const struct genl_info *info = genl_info_dump(cb); return net_shaper_ctx_setup(info, NET_SHAPER_A_IFINDEX, ctx); } int net_shaper_nl_post_dumpit(struct netlink_callback *cb) { net_shaper_ctx_cleanup((struct net_shaper_nl_ctx *)cb->ctx); return 0; } int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { return net_shaper_generic_pre(info, NET_SHAPER_A_CAPS_IFINDEX); } void net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { net_shaper_generic_post(info); } int net_shaper_nl_cap_pre_dumpit(struct netlink_callback *cb) { struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx; return net_shaper_ctx_setup(genl_info_dump(cb), NET_SHAPER_A_CAPS_IFINDEX, ctx); } int net_shaper_nl_cap_post_dumpit(struct netlink_callback *cb) { struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx; net_shaper_ctx_cleanup(ctx); return 0; } int net_shaper_nl_get_doit(struct sk_buff *skb, struct genl_info *info) { struct net_shaper_binding *binding; struct net_shaper_handle handle; struct net_shaper *shaper; struct sk_buff *msg; int ret; if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE)) return -EINVAL; binding = net_shaper_binding_from_ctx(info->ctx); ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info, &handle); if (ret < 0) return ret; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; rcu_read_lock(); shaper = net_shaper_lookup(binding, &handle); if (!shaper) { NL_SET_BAD_ATTR(info->extack, info->attrs[NET_SHAPER_A_HANDLE]); rcu_read_unlock(); ret = -ENOENT; goto free_msg; } ret = net_shaper_fill_one(msg, binding, shaper, info); rcu_read_unlock(); if (ret) goto free_msg; ret = genlmsg_reply(msg, info); if (ret) goto free_msg; return 0; free_msg: nlmsg_free(msg); return ret; } int net_shaper_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct net_shaper_nl_ctx *ctx = (struct 
net_shaper_nl_ctx *)cb->ctx; const struct genl_info *info = genl_info_dump(cb); struct net_shaper_hierarchy *hierarchy; struct net_shaper_binding *binding; struct net_shaper *shaper; int ret = 0; /* Don't error out dumps performed before any set operation. */ binding = net_shaper_binding_from_ctx(ctx); hierarchy = net_shaper_hierarchy(binding); if (!hierarchy) return 0; rcu_read_lock(); for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index, U32_MAX, XA_PRESENT)); ctx->start_index++) { ret = net_shaper_fill_one(skb, binding, shaper, info); if (ret) break; } rcu_read_unlock(); return ret; } int net_shaper_nl_set_doit(struct sk_buff *skb, struct genl_info *info) { struct net_shaper_hierarchy *hierarchy; struct net_shaper_binding *binding; const struct net_shaper_ops *ops; struct net_shaper_handle handle; struct net_shaper shaper = {}; bool exists; int ret; binding = net_shaper_binding_from_ctx(info->ctx); net_shaper_lock(binding); ret = net_shaper_parse_info(binding, info->attrs, info, &shaper, &exists); if (ret) goto unlock; if (!exists) net_shaper_default_parent(&shaper.handle, &shaper.parent); hierarchy = net_shaper_hierarchy_setup(binding); if (!hierarchy) { ret = -ENOMEM; goto unlock; } /* The 'set' operation can't create node-scope shapers. */ handle = shaper.handle; if (handle.scope == NET_SHAPER_SCOPE_NODE && !net_shaper_lookup(binding, &handle)) { ret = -ENOENT; goto unlock; } ret = net_shaper_pre_insert(binding, &handle, info->extack); if (ret) goto unlock; ops = net_shaper_ops(binding); ret = ops->set(binding, &shaper, info->extack); if (ret) { net_shaper_rollback(binding); goto unlock; } net_shaper_commit(binding, 1, &shaper); unlock: net_shaper_unlock(binding); return ret; } static int __net_shaper_delete(struct net_shaper_binding *binding, struct net_shaper *shaper, struct netlink_ext_ack *extack) { struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); struct net_shaper_handle parent_handle, handle = shaper->handle; const struct net_shaper_ops *ops = net_shaper_ops(binding); int ret; again: parent_handle = shaper->parent; ret = ops->delete(binding, &handle, extack); if (ret < 0) return ret; xa_erase(&hierarchy->shapers, net_shaper_handle_to_index(&handle)); kfree_rcu(shaper, rcu); /* Eventually delete the parent, if it is left over with no leaves. */ if (parent_handle.scope == NET_SHAPER_SCOPE_NODE) { shaper = net_shaper_lookup(binding, &parent_handle); if (shaper && !--shaper->leaves) { handle = parent_handle; goto again; } } return 0; } static int net_shaper_handle_cmp(const struct net_shaper_handle *a, const struct net_shaper_handle *b) { /* Must avoid holes in struct net_shaper_handle. 
*/ BUILD_BUG_ON(sizeof(*a) != 8); return memcmp(a, b, sizeof(*a)); } static int net_shaper_parent_from_leaves(int leaves_count, const struct net_shaper *leaves, struct net_shaper *node, struct netlink_ext_ack *extack) { struct net_shaper_handle parent = leaves[0].parent; int i; for (i = 1; i < leaves_count; ++i) { if (net_shaper_handle_cmp(&leaves[i].parent, &parent)) { NL_SET_ERR_MSG_FMT(extack, "All the leaves shapers must have the same old parent"); return -EINVAL; } } node->parent = parent; return 0; } static int __net_shaper_group(struct net_shaper_binding *binding, bool update_node, int leaves_count, struct net_shaper *leaves, struct net_shaper *node, struct netlink_ext_ack *extack) { const struct net_shaper_ops *ops = net_shaper_ops(binding); struct net_shaper_handle leaf_handle; struct net_shaper *parent = NULL; bool new_node = false; int i, ret; if (node->handle.scope == NET_SHAPER_SCOPE_NODE) { new_node = node->handle.id == NET_SHAPER_ID_UNSPEC; if (!new_node && !net_shaper_lookup(binding, &node->handle)) { /* The related attribute is not available when * reaching here from the delete() op. */ NL_SET_ERR_MSG_FMT(extack, "Node shaper %d:%d does not exists", node->handle.scope, node->handle.id); return -ENOENT; } /* When unspecified, the node parent scope is inherited from * the leaves. */ if (node->parent.scope == NET_SHAPER_SCOPE_UNSPEC) { ret = net_shaper_parent_from_leaves(leaves_count, leaves, node, extack); if (ret) return ret; } } else { net_shaper_default_parent(&node->handle, &node->parent); } if (node->parent.scope == NET_SHAPER_SCOPE_NODE) { parent = net_shaper_lookup(binding, &node->parent); if (!parent) { NL_SET_ERR_MSG_FMT(extack, "Node parent shaper %d:%d does not exists", node->parent.scope, node->parent.id); return -ENOENT; } ret = net_shaper_validate_nesting(binding, node, extack); if (ret < 0) return ret; } if (update_node) { /* For newly created node scope shaper, the following will * update the handle, due to id allocation. */ ret = net_shaper_pre_insert(binding, &node->handle, extack); if (ret) return ret; } for (i = 0; i < leaves_count; ++i) { leaf_handle = leaves[i].handle; ret = net_shaper_pre_insert(binding, &leaf_handle, extack); if (ret) goto rollback; if (!net_shaper_handle_cmp(&leaves[i].parent, &node->handle)) continue; /* The leaves shapers will be nested to the node, update the * linking accordingly. */ leaves[i].parent = node->handle; node->leaves++; } ret = ops->group(binding, leaves_count, leaves, node, extack); if (ret < 0) goto rollback; /* The node's parent gains a new leaf only when the node itself * is created by this group operation */ if (new_node && parent) parent->leaves++; if (update_node) net_shaper_commit(binding, 1, node); net_shaper_commit(binding, leaves_count, leaves); return 0; rollback: net_shaper_rollback(binding); return ret; } static int net_shaper_pre_del_node(struct net_shaper_binding *binding, const struct net_shaper *shaper, struct netlink_ext_ack *extack) { struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); struct net_shaper *cur, *leaves, node = {}; int ret, leaves_count = 0; unsigned long index; bool update_node; if (!shaper->leaves) return 0; /* Fetch the new node information. */ node.handle = shaper->parent; cur = net_shaper_lookup(binding, &node.handle); if (cur) { node = *cur; } else { /* A scope NODE shaper can be nested only to the NETDEV scope * shaper without creating the latter, this check may fail only * if the data is in inconsistent status. 
*/ if (WARN_ON_ONCE(node.handle.scope != NET_SHAPER_SCOPE_NETDEV)) return -EINVAL; } leaves = kcalloc(shaper->leaves, sizeof(struct net_shaper), GFP_KERNEL); if (!leaves) return -ENOMEM; /* Build the leaves arrays. */ xa_for_each(&hierarchy->shapers, index, cur) { if (net_shaper_handle_cmp(&cur->parent, &shaper->handle)) continue; if (WARN_ON_ONCE(leaves_count == shaper->leaves)) { ret = -EINVAL; goto free; } leaves[leaves_count++] = *cur; } /* When re-linking to the netdev shaper, avoid the eventual, implicit, * creation of the new node, would be surprising since the user is * doing a delete operation. */ update_node = node.handle.scope != NET_SHAPER_SCOPE_NETDEV; ret = __net_shaper_group(binding, update_node, leaves_count, leaves, &node, extack); free: kfree(leaves); return ret; } int net_shaper_nl_delete_doit(struct sk_buff *skb, struct genl_info *info) { struct net_shaper_hierarchy *hierarchy; struct net_shaper_binding *binding; struct net_shaper_handle handle; struct net_shaper *shaper; int ret; if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_HANDLE)) return -EINVAL; binding = net_shaper_binding_from_ctx(info->ctx); net_shaper_lock(binding); ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info, &handle); if (ret) goto unlock; hierarchy = net_shaper_hierarchy(binding); if (!hierarchy) { ret = -ENOENT; goto unlock; } shaper = net_shaper_lookup(binding, &handle); if (!shaper) { ret = -ENOENT; goto unlock; } if (handle.scope == NET_SHAPER_SCOPE_NODE) { ret = net_shaper_pre_del_node(binding, shaper, info->extack); if (ret) goto unlock; } ret = __net_shaper_delete(binding, shaper, info->extack); unlock: net_shaper_unlock(binding); return ret; } static int net_shaper_group_send_reply(struct net_shaper_binding *binding, const struct net_shaper_handle *handle, struct genl_info *info, struct sk_buff *msg) { void *hdr; hdr = genlmsg_iput(msg, info); if (!hdr) goto free_msg; if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_IFINDEX) || net_shaper_fill_handle(msg, handle, NET_SHAPER_A_HANDLE)) goto free_msg; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); free_msg: /* Should never happen as msg is pre-allocated with enough space. */ WARN_ONCE(true, "calculated message payload length (%d)", net_shaper_handle_size()); nlmsg_free(msg); return -EMSGSIZE; } int net_shaper_nl_group_doit(struct sk_buff *skb, struct genl_info *info) { struct net_shaper **old_nodes, *leaves, node = {}; struct net_shaper_hierarchy *hierarchy; struct net_shaper_binding *binding; int i, ret, rem, leaves_count; int old_nodes_count = 0; struct sk_buff *msg; struct nlattr *attr; if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_LEAVES)) return -EINVAL; binding = net_shaper_binding_from_ctx(info->ctx); /* The group operation is optional. 
*/ if (!net_shaper_ops(binding)->group) return -EOPNOTSUPP; net_shaper_lock(binding); leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES); if (!leaves_count) { NL_SET_BAD_ATTR(info->extack, info->attrs[NET_SHAPER_A_LEAVES]); ret = -EINVAL; goto unlock; } leaves = kcalloc(leaves_count, sizeof(struct net_shaper) + sizeof(struct net_shaper *), GFP_KERNEL); if (!leaves) { ret = -ENOMEM; goto unlock; } old_nodes = (void *)&leaves[leaves_count]; ret = net_shaper_parse_node(binding, info->attrs, info, &node); if (ret) goto free_leaves; i = 0; nla_for_each_attr_type(attr, NET_SHAPER_A_LEAVES, genlmsg_data(info->genlhdr), genlmsg_len(info->genlhdr), rem) { if (WARN_ON_ONCE(i >= leaves_count)) goto free_leaves; ret = net_shaper_parse_leaf(binding, attr, info, &node, &leaves[i]); if (ret) goto free_leaves; i++; } /* Prepare the msg reply in advance, to avoid device operation * rollback on allocation failure. */ msg = genlmsg_new(net_shaper_handle_size(), GFP_KERNEL); if (!msg) goto free_leaves; hierarchy = net_shaper_hierarchy_setup(binding); if (!hierarchy) { ret = -ENOMEM; goto free_msg; } /* Record the node shapers that this group() operation can make * childless for later cleanup. */ for (i = 0; i < leaves_count; i++) { if (leaves[i].parent.scope == NET_SHAPER_SCOPE_NODE && net_shaper_handle_cmp(&leaves[i].parent, &node.handle)) { struct net_shaper *tmp; tmp = net_shaper_lookup(binding, &leaves[i].parent); if (!tmp) continue; old_nodes[old_nodes_count++] = tmp; } } ret = __net_shaper_group(binding, true, leaves_count, leaves, &node, info->extack); if (ret) goto free_msg; /* Check if we need to delete any node left alone by the new leaves * linkage. */ for (i = 0; i < old_nodes_count; ++i) { struct net_shaper *tmp = old_nodes[i]; if (--tmp->leaves > 0) continue; /* Errors here are not fatal: the grouping operation is * completed, and user-space can still explicitly clean-up * left-over nodes. 
*/ __net_shaper_delete(binding, tmp, info->extack); } ret = net_shaper_group_send_reply(binding, &node.handle, info, msg); if (ret) GENL_SET_ERR_MSG_FMT(info, "Can't send reply"); free_leaves: kfree(leaves); unlock: net_shaper_unlock(binding); return ret; free_msg: kfree_skb(msg); goto free_leaves; } static int net_shaper_cap_fill_one(struct sk_buff *msg, struct net_shaper_binding *binding, enum net_shaper_scope scope, unsigned long flags, const struct genl_info *info) { unsigned long cur; void *hdr; hdr = genlmsg_iput(msg, info); if (!hdr) return -EMSGSIZE; if (net_shaper_fill_binding(msg, binding, NET_SHAPER_A_CAPS_IFINDEX) || nla_put_u32(msg, NET_SHAPER_A_CAPS_SCOPE, scope)) goto nla_put_failure; for (cur = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS; cur <= NET_SHAPER_A_CAPS_MAX; ++cur) { if (flags & BIT(cur) && nla_put_flag(msg, cur)) goto nla_put_failure; } genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } int net_shaper_nl_cap_get_doit(struct sk_buff *skb, struct genl_info *info) { struct net_shaper_binding *binding; const struct net_shaper_ops *ops; enum net_shaper_scope scope; unsigned long flags = 0; struct sk_buff *msg; int ret; if (GENL_REQ_ATTR_CHECK(info, NET_SHAPER_A_CAPS_SCOPE)) return -EINVAL; binding = net_shaper_binding_from_ctx(info->ctx); scope = nla_get_u32(info->attrs[NET_SHAPER_A_CAPS_SCOPE]); ops = net_shaper_ops(binding); ops->capabilities(binding, scope, &flags); if (!flags) return -EOPNOTSUPP; msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; ret = net_shaper_cap_fill_one(msg, binding, scope, flags, info); if (ret) goto free_msg; ret = genlmsg_reply(msg, info); if (ret) goto free_msg; return 0; free_msg: nlmsg_free(msg); return ret; } int net_shaper_nl_cap_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { const struct genl_info *info = genl_info_dump(cb); struct net_shaper_binding *binding; const struct net_shaper_ops *ops; enum net_shaper_scope scope; int ret; binding = net_shaper_binding_from_ctx(cb->ctx); ops = net_shaper_ops(binding); for (scope = 0; scope <= NET_SHAPER_SCOPE_MAX; ++scope) { unsigned long flags = 0; ops->capabilities(binding, scope, &flags); if (!flags) continue; ret = net_shaper_cap_fill_one(skb, binding, scope, flags, info); if (ret) return ret; } return 0; } static void net_shaper_flush(struct net_shaper_binding *binding) { struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); struct net_shaper *cur; unsigned long index; if (!hierarchy) return; net_shaper_lock(binding); xa_lock(&hierarchy->shapers); xa_for_each(&hierarchy->shapers, index, cur) { __xa_erase(&hierarchy->shapers, index); kfree(cur); } xa_unlock(&hierarchy->shapers); net_shaper_unlock(binding); kfree(hierarchy); } void net_shaper_flush_netdev(struct net_device *dev) { struct net_shaper_binding binding = { .type = NET_SHAPER_BINDING_TYPE_NETDEV, .netdev = dev, }; net_shaper_flush(&binding); } void net_shaper_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) { struct net_shaper_hierarchy *hierarchy; struct net_shaper_binding binding; int i; binding.type = NET_SHAPER_BINDING_TYPE_NETDEV; binding.netdev = dev; hierarchy = net_shaper_hierarchy(&binding); if (!hierarchy) return; /* Only drivers implementing shapers support ensure * the lock is acquired in advance. */ netdev_assert_locked(dev); /* Take action only when decreasing the tx queue number. 
*/ for (i = txq; i < dev->real_num_tx_queues; ++i) { struct net_shaper_handle handle, parent_handle; struct net_shaper *shaper; u32 index; handle.scope = NET_SHAPER_SCOPE_QUEUE; handle.id = i; shaper = net_shaper_lookup(&binding, &handle); if (!shaper) continue; /* Don't touch the H/W for the queue shaper, the drivers already * deleted the queue and related resources. */ parent_handle = shaper->parent; index = net_shaper_handle_to_index(&handle); xa_erase(&hierarchy->shapers, index); kfree_rcu(shaper, rcu); /* The recursion on parent does the full job. */ if (parent_handle.scope != NET_SHAPER_SCOPE_NODE) continue; shaper = net_shaper_lookup(&binding, &parent_handle); if (shaper && !--shaper->leaves) __net_shaper_delete(&binding, shaper, NULL); } } static int __init shaper_init(void) { return genl_register_family(&net_shaper_nl_family); } subsys_initcall(shaper_init); |
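/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The netlink handlers above ultimately dispatch into a driver-provided
 * struct net_shaper_ops (see the ops->capabilities(), ops->set(),
 * ops->delete() and ops->group() call sites). The minimal driver-side
 * implementation below assumes the callback signatures implied by those
 * call sites; everything prefixed "foo_", including foo_hw_set_queue_rate(),
 * is hypothetical, and how the ops structure is advertised to the stack is
 * not shown here. Leaving .group unset makes the group operation fail with
 * -EOPNOTSUPP, as checked in net_shaper_nl_group_doit().
 */
static int foo_hw_set_queue_rate(struct net_device *dev, u32 queue,
				 u64 bps_max, u64 burst)
{
	/* Hypothetical hardware hook; a real driver would program its
	 * scheduler registers here. */
	return 0;
}

static void foo_shaper_capabilities(struct net_shaper_binding *binding,
				    enum net_shaper_scope scope,
				    unsigned long *flags)
{
	/* Advertise byte-rate limiting on queue-scope shapers only. */
	if (scope == NET_SHAPER_SCOPE_QUEUE)
		*flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS) |
			 BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
			 BIT(NET_SHAPER_A_CAPS_SUPPORT_BURST);
}

static int foo_shaper_set(struct net_shaper_binding *binding,
			  const struct net_shaper *shaper,
			  struct netlink_ext_ack *extack)
{
	/* For QUEUE scope, handle.id is the tx queue index already
	 * validated by net_shaper_validate_caps() against
	 * real_num_tx_queues. */
	return foo_hw_set_queue_rate(binding->netdev, shaper->handle.id,
				     shaper->bw_max, shaper->burst);
}

static int foo_shaper_delete(struct net_shaper_binding *binding,
			     const struct net_shaper_handle *handle,
			     struct netlink_ext_ack *extack)
{
	/* Restore the unshaped default for the queue. */
	return foo_hw_set_queue_rate(binding->netdev, handle->id, 0, 0);
}

static const struct net_shaper_ops foo_shaper_ops = {
	.capabilities	= foo_shaper_capabilities,
	.set		= foo_shaper_set,
	.delete		= foo_shaper_delete,
};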
// SPDX-License-Identifier: GPL-2.0-only /* * IPv6 library code, needed by static components when full IPv6 support is * not configured or static. */ #include <linux/export.h> #include <net/ipv6.h> #include <net/ipv6_stubs.h> #include <net/addrconf.h> #include <net/ip.h> /* if the ipv6 module registers, this function is used by xfrm to force all * sockets to relookup their nodes - this is fairly expensive, be * careful */ void (*__fib6_flush_trees)(struct net *); EXPORT_SYMBOL(__fib6_flush_trees); #define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) static inline unsigned int ipv6_addr_scope2type(unsigned int scope) { switch (scope) { case IPV6_ADDR_SCOPE_NODELOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) | IPV6_ADDR_LOOPBACK); case IPV6_ADDR_SCOPE_LINKLOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL) | IPV6_ADDR_LINKLOCAL); case IPV6_ADDR_SCOPE_SITELOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL) | IPV6_ADDR_SITELOCAL); } return IPV6_ADDR_SCOPE_TYPE(scope); } int __ipv6_addr_type(const struct in6_addr *addr) { __be32 st; st = addr->s6_addr32[0]; /* Consider all addresses with the first three bits different from 000 and 111 as unicasts.
*/ if ((st & htonl(0xE0000000)) != htonl(0x00000000) && (st & htonl(0xE0000000)) != htonl(0xE0000000)) return (IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) { /* multicast */ /* addr-select 3.1 */ return (IPV6_ADDR_MULTICAST | ipv6_addr_scope2type(IPV6_ADDR_MC_SCOPE(addr))); } if ((st & htonl(0xFFC00000)) == htonl(0xFE800000)) return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.1 */ if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000)) return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL)); /* addr-select 3.1 */ if ((st & htonl(0xFE000000)) == htonl(0xFC000000)) return (IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* RFC 4193 */ if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) { if (addr->s6_addr32[2] == 0) { if (addr->s6_addr32[3] == 0) return IPV6_ADDR_ANY; if (addr->s6_addr32[3] == htonl(0x00000001)) return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.4 */ return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */ } if (addr->s6_addr32[2] == htonl(0x0000ffff)) return (IPV6_ADDR_MAPPED | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */ } return (IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */ } EXPORT_SYMBOL(__ipv6_addr_type); static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); static BLOCKING_NOTIFIER_HEAD(inet6addr_validator_chain); int register_inet6addr_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&inet6addr_chain, nb); } EXPORT_SYMBOL(register_inet6addr_notifier); int unregister_inet6addr_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&inet6addr_chain, nb); } EXPORT_SYMBOL(unregister_inet6addr_notifier); int inet6addr_notifier_call_chain(unsigned long val, void *v) { return atomic_notifier_call_chain(&inet6addr_chain, val, v); } EXPORT_SYMBOL(inet6addr_notifier_call_chain); int register_inet6addr_validator_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&inet6addr_validator_chain, nb); } EXPORT_SYMBOL(register_inet6addr_validator_notifier); int unregister_inet6addr_validator_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&inet6addr_validator_chain, nb); } EXPORT_SYMBOL(unregister_inet6addr_validator_notifier); int inet6addr_validator_notifier_call_chain(unsigned long val, void *v) { return blocking_notifier_call_chain(&inet6addr_validator_chain, val, v); } EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain); static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst) { return ERR_PTR(-EAFNOSUPPORT); } static int eafnosupport_ipv6_route_input(struct sk_buff *skb) { return -EAFNOSUPPORT; } static struct fib6_table *eafnosupport_fib6_get_table(struct net *net, u32 id) { return NULL; } static int eafnosupport_fib6_table_lookup(struct net *net, struct fib6_table *table, int oif, struct flowi6 *fl6, struct fib6_result *res, int flags) { return -EAFNOSUPPORT; } static int eafnosupport_fib6_lookup(struct net *net, int oif, struct flowi6 *fl6, struct fib6_result *res, int flags) { return -EAFNOSUPPORT; } static void eafnosupport_fib6_select_path(const struct net *net, struct fib6_result *res, struct 
flowi6 *fl6, int oif, bool have_oif_match, const struct sk_buff *skb, int strict) { } static u32 eafnosupport_ip6_mtu_from_fib6(const struct fib6_result *res, const struct in6_addr *daddr, const struct in6_addr *saddr) { return 0; } static int eafnosupport_fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel"); return -EAFNOSUPPORT; } static int eafnosupport_ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify) { return -EAFNOSUPPORT; } static int eafnosupport_ipv6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)) { kfree_skb(skb); return -EAFNOSUPPORT; } static struct net_device *eafnosupport_ipv6_dev_find(struct net *net, const struct in6_addr *addr, struct net_device *dev) { return ERR_PTR(-EAFNOSUPPORT); } const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) { .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, .ipv6_route_input = eafnosupport_ipv6_route_input, .fib6_get_table = eafnosupport_fib6_get_table, .fib6_table_lookup = eafnosupport_fib6_table_lookup, .fib6_lookup = eafnosupport_fib6_lookup, .fib6_select_path = eafnosupport_fib6_select_path, .ip6_mtu_from_fib6 = eafnosupport_ip6_mtu_from_fib6, .fib6_nh_init = eafnosupport_fib6_nh_init, .ip6_del_rt = eafnosupport_ip6_del_rt, .ipv6_fragment = eafnosupport_ipv6_fragment, .ipv6_dev_find = eafnosupport_ipv6_dev_find, }; EXPORT_SYMBOL_GPL(ipv6_stub); /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ const struct in6_addr in6addr_loopback __aligned(BITS_PER_LONG/8) = IN6ADDR_LOOPBACK_INIT; EXPORT_SYMBOL(in6addr_loopback); const struct in6_addr in6addr_any __aligned(BITS_PER_LONG/8) = IN6ADDR_ANY_INIT; EXPORT_SYMBOL(in6addr_any); const struct in6_addr in6addr_linklocal_allnodes __aligned(BITS_PER_LONG/8) = IN6ADDR_LINKLOCAL_ALLNODES_INIT; EXPORT_SYMBOL(in6addr_linklocal_allnodes); const struct in6_addr in6addr_linklocal_allrouters __aligned(BITS_PER_LONG/8) = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_linklocal_allrouters); const struct in6_addr in6addr_interfacelocal_allnodes __aligned(BITS_PER_LONG/8) = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT; EXPORT_SYMBOL(in6addr_interfacelocal_allnodes); const struct in6_addr in6addr_interfacelocal_allrouters __aligned(BITS_PER_LONG/8) = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_interfacelocal_allrouters); const struct in6_addr in6addr_sitelocal_allrouters __aligned(BITS_PER_LONG/8) = IN6ADDR_SITELOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_sitelocal_allrouters); static void snmp6_free_dev(struct inet6_dev *idev) { kfree(idev->stats.icmpv6msgdev); kfree(idev->stats.icmpv6dev); free_percpu(idev->stats.ipv6); } static void in6_dev_finish_destroy_rcu(struct rcu_head *head) { struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu); snmp6_free_dev(idev); kfree(idev); } /* Nobody refers to this device, we may destroy it. */ void in6_dev_finish_destroy(struct inet6_dev *idev) { struct net_device *dev = idev->dev; WARN_ON(!list_empty(&idev->addr_list)); WARN_ON(rcu_access_pointer(idev->mc_list)); WARN_ON(timer_pending(&idev->rs_timer)); #ifdef NET_REFCNT_DEBUG pr_debug("%s: %s\n", __func__, dev ? 
dev->name : "NIL"); #endif netdev_put(dev, &idev->dev_tracker); if (!idev->dead) { pr_warn("Freeing alive inet6 device %p\n", idev); return; } call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu); } EXPORT_SYMBOL(in6_dev_finish_destroy);
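/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * It only shows how a __ipv6_addr_type() result decomposes: the low bits
 * carry the IPV6_ADDR_* classification flags, while IPV6_ADDR_SCOPE_TYPE()
 * above places the address scope at bit 16 and up, so shifting the value
 * right by 16 recovers the scope. The helper name is hypothetical.
 */
static inline bool example_addr_is_global_unicast(const struct in6_addr *addr)
{
	int type = __ipv6_addr_type(addr);

	/* e.g. 2001:db8::1 -> unicast flag, global scope;
	 * fe80::1 -> unicast + link-local flags, link-local scope. */
	return (type & IPV6_ADDR_UNICAST) &&
	       ((unsigned int)type >> 16) == IPV6_ADDR_SCOPE_GLOBAL;
}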
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Internal procfs definitions * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/proc_fs.h> #include <linux/proc_ns.h> #include <linux/refcount.h> #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/binfmts.h> #include <linux/sched/coredump.h> #include <linux/sched/task.h> #include <linux/mm.h> struct ctl_table_header; struct mempolicy; /* * This is not completely implemented yet. The idea is to * create an in-memory tree (like the actual /proc filesystem * tree) of these proc_dir_entries, so that we can dynamically * add new files to /proc. * * parent/subdir are used for the directory structure (every /proc file has a * parent, but "subdir" is empty for all non-directory entries). * subdir_node is used to build the rb tree "subdir" of the parent. */ struct proc_dir_entry { /* * number of callers into module in progress; * negative -> it's going away RSN */ atomic_t in_use; refcount_t refcnt; struct list_head pde_openers; /* who did ->open, but not ->release */ /* protects ->pde_openers and all struct pde_opener instances */ spinlock_t pde_unload_lock; struct completion *pde_unload_completion; const struct inode_operations *proc_iops; union { const struct proc_ops *proc_ops; const struct file_operations *proc_dir_ops; }; const struct dentry_operations *proc_dops; union { const struct seq_operations *seq_ops; int (*single_show)(struct seq_file *, void *); }; proc_write_t write; void *data; unsigned int state_size; unsigned int low_ino; nlink_t nlink; kuid_t uid; kgid_t gid; loff_t size; struct proc_dir_entry *parent; struct rb_root subdir; struct rb_node subdir_node; char *name; umode_t mode; u8 flags; u8 namelen; char inline_name[]; } __randomize_layout; #define SIZEOF_PDE ( \ sizeof(struct proc_dir_entry) < 128 ? 128 : \ sizeof(struct proc_dir_entry) < 192 ?
192 : \ sizeof(struct proc_dir_entry) < 256 ? 256 : \ sizeof(struct proc_dir_entry) < 512 ? 512 : \ 0) #define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE - sizeof(struct proc_dir_entry)) static inline bool pde_is_permanent(const struct proc_dir_entry *pde) { return pde->flags & PROC_ENTRY_PERMANENT; } static inline void pde_make_permanent(struct proc_dir_entry *pde) { pde->flags |= PROC_ENTRY_PERMANENT; } static inline bool pde_has_proc_read_iter(const struct proc_dir_entry *pde) { return pde->flags & PROC_ENTRY_proc_read_iter; } static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde) { #ifdef CONFIG_COMPAT return pde->flags & PROC_ENTRY_proc_compat_ioctl; #else return false; #endif } extern struct kmem_cache *proc_dir_entry_cache; void pde_free(struct proc_dir_entry *pde); union proc_op { int (*proc_get_link)(struct dentry *, struct path *); int (*proc_show)(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task); int lsmid; }; struct proc_inode { struct pid *pid; unsigned int fd; union proc_op op; struct proc_dir_entry *pde; struct ctl_table_header *sysctl; const struct ctl_table *sysctl_entry; struct hlist_node sibling_inodes; const struct proc_ns_operations *ns_ops; struct inode vfs_inode; } __randomize_layout; /* * General functions */ static inline struct proc_inode *PROC_I(const struct inode *inode) { return container_of(inode, struct proc_inode, vfs_inode); } static inline struct proc_dir_entry *PDE(const struct inode *inode) { return PROC_I(inode)->pde; } static inline struct pid *proc_pid(const struct inode *inode) { return PROC_I(inode)->pid; } static inline struct task_struct *get_proc_task(const struct inode *inode) { return get_pid_task(proc_pid(inode), PIDTYPE_PID); } void task_dump_owner(struct task_struct *task, umode_t mode, kuid_t *ruid, kgid_t *rgid); unsigned name_to_int(const struct qstr *qstr); /* * Offset of the first process in the /proc root directory.. */ #define FIRST_PROCESS_ENTRY 256 /* Worst case buffer size needed for holding an integer. */ #define PROC_NUMBUF 13 #ifdef CONFIG_PAGE_MAPCOUNT /** * folio_precise_page_mapcount() - Number of mappings of this folio page. * @folio: The folio. * @page: The page. * * The number of present user page table entries that reference this page * as tracked via the RMAP: either referenced directly (PTE) or as part of * a larger area that covers this page (e.g., PMD). * * Use this function only for the calculation of existing statistics * (USS, PSS, mapcount_max) and for debugging purposes (/proc/kpagecount). * * Do not add new users. * * Returns: The number of mappings of this folio page. 0 for * folios that are not mapped to user space or are not tracked via the RMAP * (e.g., shared zeropage). */ static inline int folio_precise_page_mapcount(struct folio *folio, struct page *page) { int mapcount = atomic_read(&page->_mapcount) + 1; if (page_mapcount_is_type(mapcount)) mapcount = 0; if (folio_test_large(folio)) mapcount += folio_entire_mapcount(folio); return mapcount; } #else /* !CONFIG_PAGE_MAPCOUNT */ static inline int folio_precise_page_mapcount(struct folio *folio, struct page *page) { BUILD_BUG(); } #endif /* CONFIG_PAGE_MAPCOUNT */ /** * folio_average_page_mapcount() - Average number of mappings per page in this * folio * @folio: The folio. * * The average number of user page table entries that reference each page in * this folio as tracked via the RMAP: either referenced directly (PTE) or * as part of a larger area that covers this page (e.g., PMD). 
* * The average is calculated by rounding to the nearest integer; however, * to avoid duplicated code in current callers, the average is at least * 1 if any page of the folio is mapped. * * Returns: The average number of mappings per page in this folio. */ static inline int folio_average_page_mapcount(struct folio *folio) { int mapcount, entire_mapcount, avg; if (!folio_test_large(folio)) return atomic_read(&folio->_mapcount) + 1; mapcount = folio_large_mapcount(folio); if (unlikely(mapcount <= 0)) return 0; entire_mapcount = folio_entire_mapcount(folio); if (mapcount <= entire_mapcount) return entire_mapcount; mapcount -= entire_mapcount; /* Round to closest integer ... */ avg = ((unsigned int)mapcount + folio_large_nr_pages(folio) / 2) >> folio_large_order(folio); /* ... but return at least 1. */ return max_t(int, avg + entire_mapcount, 1); } /* * array.c */ extern const struct file_operations proc_tid_children_operations; extern void proc_task_name(struct seq_file *m, struct task_struct *p, bool escape); extern int proc_tid_stat(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); extern int proc_tgid_stat(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); extern int proc_pid_status(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); extern int proc_pid_statm(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); /* * base.c */ extern const struct dentry_operations pid_dentry_operations; extern int pid_getattr(struct mnt_idmap *, const struct path *, struct kstat *, u32, unsigned int); extern int proc_setattr(struct mnt_idmap *, struct dentry *, struct iattr *); extern void proc_pid_evict_inode(struct proc_inode *); extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t); extern void pid_update_inode(struct task_struct *, struct inode *); extern int pid_delete_dentry(const struct dentry *); extern int proc_pid_readdir(struct file *, struct dir_context *); struct dentry *proc_pid_lookup(struct dentry *, unsigned int); extern loff_t mem_lseek(struct file *, loff_t, int); /* Lookups */ typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); bool proc_fill_cache(struct file *, struct dir_context *, const char *, unsigned int, instantiate_t, struct task_struct *, const void *); /* * generic.c */ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode, struct proc_dir_entry **parent, void *data); struct proc_dir_entry *proc_register(struct proc_dir_entry *dir, struct proc_dir_entry *dp); extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int); struct dentry *proc_lookup_de(struct inode *, struct dentry *, struct proc_dir_entry *); extern int proc_readdir(struct file *, struct dir_context *); int proc_readdir_de(struct file *, struct dir_context *, struct proc_dir_entry *); static inline void pde_get(struct proc_dir_entry *pde) { refcount_inc(&pde->refcnt); } extern void pde_put(struct proc_dir_entry *); static inline bool is_empty_pde(const struct proc_dir_entry *pde) { return S_ISDIR(pde->mode) && !pde->proc_iops; } extern ssize_t proc_simple_write(struct file *, const char __user *, size_t, loff_t *); /* * inode.c */ struct pde_opener { struct list_head lh; struct file *file; bool closing; struct completion *c; } __randomize_layout; extern const struct inode_operations proc_link_inode_operations; extern const struct inode_operations proc_pid_link_inode_operations; extern 
const struct super_operations proc_sops; void proc_init_kmemcache(void); void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock); void set_proc_pid_nlink(void); extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *); extern void proc_entry_rundown(struct proc_dir_entry *); /* * proc_namespaces.c */ extern const struct inode_operations proc_ns_dir_inode_operations; extern const struct file_operations proc_ns_dir_operations; /* * proc_net.c */ extern const struct file_operations proc_net_operations; extern const struct inode_operations proc_net_inode_operations; #ifdef CONFIG_NET extern int proc_net_init(void); #else static inline int proc_net_init(void) { return 0; } #endif /* * proc_self.c */ extern int proc_setup_self(struct super_block *); /* * proc_thread_self.c */ extern int proc_setup_thread_self(struct super_block *); extern void proc_thread_self_init(void); /* * proc_sysctl.c */ #ifdef CONFIG_PROC_SYSCTL extern int proc_sys_init(void); extern void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head); #else static inline void proc_sys_init(void) { } static inline void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head) { } #endif /* * proc_tty.c */ #ifdef CONFIG_TTY extern void proc_tty_init(void); #else static inline void proc_tty_init(void) {} #endif /* * root.c */ extern struct proc_dir_entry proc_root; extern void proc_self_init(void); /* * task_[no]mmu.c */ struct mem_size_stats; struct proc_maps_private { struct inode *inode; struct task_struct *task; struct mm_struct *mm; struct vma_iterator iter; #ifdef CONFIG_NUMA struct mempolicy *task_mempolicy; #endif } __randomize_layout; struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode); extern const struct file_operations proc_pid_maps_operations; extern const struct file_operations proc_pid_numa_maps_operations; extern const struct file_operations proc_pid_smaps_operations; extern const struct file_operations proc_pid_smaps_rollup_operations; extern const struct file_operations proc_clear_refs_operations; extern const struct file_operations proc_pagemap_operations; extern unsigned long task_vsize(struct mm_struct *); extern unsigned long task_statm(struct mm_struct *, unsigned long *, unsigned long *, unsigned long *, unsigned long *); extern void task_mem(struct seq_file *, struct mm_struct *); extern const struct dentry_operations proc_net_dentry_ops; static inline void pde_force_lookup(struct proc_dir_entry *pde) { /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */ pde->proc_dops = &proc_net_dentry_ops; } /* * Add a new procfs dentry that can't serve as a mountpoint. That should * encompass anything that is ephemeral and can just disappear while the * process is still around. */ static inline struct dentry *proc_splice_unmountable(struct inode *inode, struct dentry *dentry, const struct dentry_operations *d_ops) { d_set_d_op(dentry, d_ops); dont_mount(dentry); return d_splice_alias(inode, dentry); } |
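/*
 * Editor's note -- illustrative sketch, not part of the original header.
 * It shows how the container_of()-based accessors above (PROC_I(), PDE())
 * are typically combined inside a procfs show callback, matching the
 * single_show member type of struct proc_dir_entry, to get back at the
 * entry and the private data registered with it. The function name is
 * hypothetical, and <linux/seq_file.h> plus <linux/fs.h> are assumed to be
 * pulled in by the .c file using it.
 */
static inline int example_proc_show(struct seq_file *m, void *v)
{
	struct inode *inode = file_inode(m->file);
	struct proc_dir_entry *pde = PDE(inode);

	/* ->name and ->data are the fields set up when the entry was created. */
	seq_printf(m, "%s: data=%p\n", pde->name, pde->data);
	return 0;
}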
1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 | // SPDX-License-Identifier: ISC /* * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "coredump.h" #include <linux/devcoredump.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/utsname.h> #include "debug.h" #include "hw.h" static const struct ath10k_mem_section qca6174_hw21_register_sections[] = { {0x800, 0x810}, {0x820, 0x82C}, {0x830, 0x8F4}, {0x90C, 0x91C}, {0xA14, 0xA18}, {0xA84, 0xA94}, {0xAA8, 0xAD4}, {0xADC, 0xB40}, {0x1000, 0x10A4}, {0x10BC, 0x111C}, {0x1134, 0x1138}, {0x1144, 0x114C}, {0x1150, 0x115C}, {0x1160, 0x1178}, {0x1240, 0x1260}, {0x2000, 0x207C}, {0x3000, 0x3014}, {0x4000, 0x4014}, {0x5000, 0x5124}, {0x6000, 0x6040}, {0x6080, 0x60CC}, {0x6100, 0x611C}, {0x6140, 0x61D8}, {0x6200, 0x6238}, {0x6240, 0x628C}, {0x62C0, 0x62EC}, {0x6380, 0x63E8}, {0x6400, 0x6440}, {0x6480, 0x64CC}, {0x6500, 0x651C}, {0x6540, 0x6580}, {0x6600, 0x6638}, {0x6640, 0x668C}, {0x66C0, 0x66EC}, {0x6780, 0x67E8}, {0x7080, 0x708C}, {0x70C0, 0x70C8}, {0x7400, 0x741C}, {0x7440, 0x7454}, {0x7800, 0x7818}, {0x8000, 0x8004}, {0x8010, 0x8064}, {0x8080, 0x8084}, {0x80A0, 0x80A4}, {0x80C0, 0x80C4}, {0x80E0, 0x80F4}, {0x8100, 0x8104}, {0x8110, 0x812C}, {0x9000, 0x9004}, {0x9800, 0x982C}, {0x9830, 0x9838}, {0x9840, 0x986C}, {0x9870, 0x9898}, {0x9A00, 0x9C00}, {0xD580, 0xD59C}, {0xF000, 0xF0E0}, {0xF140, 0xF190}, {0xF250, 0xF25C}, {0xF260, 0xF268}, {0xF26C, 0xF2A8}, {0x10008, 0x1000C}, {0x10014, 0x10018}, {0x1001C, 0x10020}, {0x10024, 0x10028}, {0x10030, 0x10034}, {0x10040, 0x10054}, {0x10058, 0x1007C}, {0x10080, 0x100C4}, {0x100C8, 0x10114}, {0x1012C, 0x10130}, {0x10138, 0x10144}, {0x10200, 0x10220}, {0x10230, 0x10250}, {0x10260, 0x10280}, {0x10290, 0x102B0}, {0x102C0, 0x102DC}, {0x102E0, 0x102F4}, {0x102FC, 0x1037C}, {0x10380, 0x10390}, {0x10800, 0x10828}, {0x10840, 0x10844}, {0x10880, 0x10884}, {0x108C0, 0x108E8}, {0x10900, 0x10928}, {0x10940, 0x10944}, {0x10980, 0x10984}, {0x109C0, 0x109E8}, {0x10A00, 0x10A28}, {0x10A40, 0x10A50}, {0x11000, 0x11028}, {0x11030, 0x11034}, {0x11038, 0x11068}, {0x11070, 0x11074}, {0x11078, 0x110A8}, {0x110B0, 0x110B4}, {0x110B8, 0x110E8}, {0x110F0, 0x110F4}, {0x110F8, 0x11128}, {0x11138, 0x11144}, {0x11178, 0x11180}, {0x111B8, 0x111C0}, {0x111F8, 0x11200}, {0x11238, 0x1123C}, {0x11270, 0x11274}, {0x11278, 0x1127C}, {0x112B0, 0x112B4}, {0x112B8, 0x112BC}, {0x112F0, 0x112F4}, {0x112F8, 0x112FC}, {0x11338, 0x1133C}, {0x11378, 0x1137C}, {0x113B8, 0x113BC}, {0x113F8, 0x113FC}, {0x11438, 0x11440}, {0x11478, 0x11480}, {0x114B8, 0x114BC}, {0x114F8, 0x114FC}, {0x11538, 0x1153C}, {0x11578, 0x1157C}, {0x115B8, 0x115BC}, {0x115F8, 0x115FC}, {0x11638, 0x1163C}, {0x11678, 0x1167C}, {0x116B8, 0x116BC}, {0x116F8, 0x116FC}, {0x11738, 0x1173C}, {0x11778, 0x1177C}, {0x117B8, 0x117BC}, {0x117F8, 0x117FC}, {0x17000, 0x1701C}, {0x17020, 0x170AC}, {0x18000, 0x18050}, {0x18054, 0x18074}, {0x18080, 0x180D4}, {0x180DC, 0x18104}, {0x18108, 0x1813C}, {0x18144, 0x18148}, {0x18168, 0x18174}, {0x18178, 0x18180}, {0x181C8, 0x181E0}, {0x181E4, 0x181E8}, {0x181EC, 0x1820C}, {0x1825C, 0x18280}, {0x18284, 0x18290}, {0x18294, 0x182A0}, {0x18300, 0x18304}, {0x18314, 0x18320}, {0x18328, 0x18350}, {0x1835C, 0x1836C}, {0x18370, 0x18390}, {0x18398, 0x183AC}, {0x183BC, 0x183D8}, {0x183DC, 0x183F4}, {0x18400, 0x186F4}, {0x186F8, 0x1871C}, 
{0x18720, 0x18790}, {0x19800, 0x19830}, {0x19834, 0x19840}, {0x19880, 0x1989C}, {0x198A4, 0x198B0}, {0x198BC, 0x19900}, {0x19C00, 0x19C88}, {0x19D00, 0x19D20}, {0x19E00, 0x19E7C}, {0x19E80, 0x19E94}, {0x19E98, 0x19EAC}, {0x19EB0, 0x19EBC}, {0x19F70, 0x19F74}, {0x19F80, 0x19F8C}, {0x19FA0, 0x19FB4}, {0x19FC0, 0x19FD8}, {0x1A000, 0x1A200}, {0x1A204, 0x1A210}, {0x1A228, 0x1A22C}, {0x1A230, 0x1A248}, {0x1A250, 0x1A270}, {0x1A280, 0x1A290}, {0x1A2A0, 0x1A2A4}, {0x1A2C0, 0x1A2EC}, {0x1A300, 0x1A3BC}, {0x1A3F0, 0x1A3F4}, {0x1A3F8, 0x1A434}, {0x1A438, 0x1A444}, {0x1A448, 0x1A468}, {0x1A580, 0x1A58C}, {0x1A644, 0x1A654}, {0x1A670, 0x1A698}, {0x1A6AC, 0x1A6B0}, {0x1A6D0, 0x1A6D4}, {0x1A6EC, 0x1A70C}, {0x1A710, 0x1A738}, {0x1A7C0, 0x1A7D0}, {0x1A7D4, 0x1A7D8}, {0x1A7DC, 0x1A7E4}, {0x1A7F0, 0x1A7F8}, {0x1A888, 0x1A89C}, {0x1A8A8, 0x1A8AC}, {0x1A8C0, 0x1A8DC}, {0x1A8F0, 0x1A8FC}, {0x1AE04, 0x1AE08}, {0x1AE18, 0x1AE24}, {0x1AF80, 0x1AF8C}, {0x1AFA0, 0x1AFB4}, {0x1B000, 0x1B200}, {0x1B284, 0x1B288}, {0x1B2D0, 0x1B2D8}, {0x1B2DC, 0x1B2EC}, {0x1B300, 0x1B340}, {0x1B374, 0x1B378}, {0x1B380, 0x1B384}, {0x1B388, 0x1B38C}, {0x1B404, 0x1B408}, {0x1B420, 0x1B428}, {0x1B440, 0x1B444}, {0x1B448, 0x1B44C}, {0x1B450, 0x1B458}, {0x1B45C, 0x1B468}, {0x1B584, 0x1B58C}, {0x1B68C, 0x1B690}, {0x1B6AC, 0x1B6B0}, {0x1B7F0, 0x1B7F8}, {0x1C800, 0x1CC00}, {0x1CE00, 0x1CE04}, {0x1CF80, 0x1CF84}, {0x1D200, 0x1D800}, {0x1E000, 0x20014}, {0x20100, 0x20124}, {0x21400, 0x217A8}, {0x21800, 0x21BA8}, {0x21C00, 0x21FA8}, {0x22000, 0x223A8}, {0x22400, 0x227A8}, {0x22800, 0x22BA8}, {0x22C00, 0x22FA8}, {0x23000, 0x233A8}, {0x24000, 0x24034}, {0x26000, 0x26064}, {0x27000, 0x27024}, {0x34000, 0x3400C}, {0x34400, 0x3445C}, {0x34800, 0x3485C}, {0x34C00, 0x34C5C}, {0x35000, 0x3505C}, {0x35400, 0x3545C}, {0x35800, 0x3585C}, {0x35C00, 0x35C5C}, {0x36000, 0x3605C}, {0x38000, 0x38064}, {0x38070, 0x380E0}, {0x3A000, 0x3A064}, {0x40000, 0x400A4}, {0x80000, 0x8000C}, {0x80010, 0x80020}, }; static const struct ath10k_mem_section qca6174_hw30_sdio_register_sections[] = { {0x800, 0x810}, {0x820, 0x82C}, {0x830, 0x8F4}, {0x90C, 0x91C}, {0xA14, 0xA18}, {0xA84, 0xA94}, {0xAA8, 0xAD4}, {0xADC, 0xB40}, {0x1000, 0x10A4}, {0x10BC, 0x111C}, {0x1134, 0x1138}, {0x1144, 0x114C}, {0x1150, 0x115C}, {0x1160, 0x1178}, {0x1240, 0x1260}, {0x2000, 0x207C}, {0x3000, 0x3014}, {0x4000, 0x4014}, {0x5000, 0x5124}, {0x6000, 0x6040}, {0x6080, 0x60CC}, {0x6100, 0x611C}, {0x6140, 0x61D8}, {0x6200, 0x6238}, {0x6240, 0x628C}, {0x62C0, 0x62EC}, {0x6380, 0x63E8}, {0x6400, 0x6440}, {0x6480, 0x64CC}, {0x6500, 0x651C}, {0x6540, 0x6580}, {0x6600, 0x6638}, {0x6640, 0x668C}, {0x66C0, 0x66EC}, {0x6780, 0x67E8}, {0x7080, 0x708C}, {0x70C0, 0x70C8}, {0x7400, 0x741C}, {0x7440, 0x7454}, {0x7800, 0x7818}, {0x8010, 0x8060}, {0x8080, 0x8084}, {0x80A0, 0x80A4}, {0x80C0, 0x80C4}, {0x80E0, 0x80ec}, {0x8110, 0x8128}, {0x9000, 0x9004}, {0xF000, 0xF0E0}, {0xF140, 0xF190}, {0xF250, 0xF25C}, {0xF260, 0xF268}, {0xF26C, 0xF2A8}, {0x10008, 0x1000C}, {0x10014, 0x10018}, {0x1001C, 0x10020}, {0x10024, 0x10028}, {0x10030, 0x10034}, {0x10040, 0x10054}, {0x10058, 0x1007C}, {0x10080, 0x100C4}, {0x100C8, 0x10114}, {0x1012C, 0x10130}, {0x10138, 0x10144}, {0x10200, 0x10220}, {0x10230, 0x10250}, {0x10260, 0x10280}, {0x10290, 0x102B0}, {0x102C0, 0x102DC}, {0x102E0, 0x102F4}, {0x102FC, 0x1037C}, {0x10380, 0x10390}, {0x10800, 0x10828}, {0x10840, 0x10844}, {0x10880, 0x10884}, {0x108C0, 0x108E8}, {0x10900, 0x10928}, {0x10940, 0x10944}, {0x10980, 0x10984}, {0x109C0, 0x109E8}, {0x10A00, 0x10A28}, {0x10A40, 0x10A50}, {0x11000, 
0x11028}, {0x11030, 0x11034}, {0x11038, 0x11068}, {0x11070, 0x11074}, {0x11078, 0x110A8}, {0x110B0, 0x110B4}, {0x110B8, 0x110E8}, {0x110F0, 0x110F4}, {0x110F8, 0x11128}, {0x11138, 0x11144}, {0x11178, 0x11180}, {0x111B8, 0x111C0}, {0x111F8, 0x11200}, {0x11238, 0x1123C}, {0x11270, 0x11274}, {0x11278, 0x1127C}, {0x112B0, 0x112B4}, {0x112B8, 0x112BC}, {0x112F0, 0x112F4}, {0x112F8, 0x112FC}, {0x11338, 0x1133C}, {0x11378, 0x1137C}, {0x113B8, 0x113BC}, {0x113F8, 0x113FC}, {0x11438, 0x11440}, {0x11478, 0x11480}, {0x114B8, 0x114BC}, {0x114F8, 0x114FC}, {0x11538, 0x1153C}, {0x11578, 0x1157C}, {0x115B8, 0x115BC}, {0x115F8, 0x115FC}, {0x11638, 0x1163C}, {0x11678, 0x1167C}, {0x116B8, 0x116BC}, {0x116F8, 0x116FC}, {0x11738, 0x1173C}, {0x11778, 0x1177C}, {0x117B8, 0x117BC}, {0x117F8, 0x117FC}, {0x17000, 0x1701C}, {0x17020, 0x170AC}, {0x18000, 0x18050}, {0x18054, 0x18074}, {0x18080, 0x180D4}, {0x180DC, 0x18104}, {0x18108, 0x1813C}, {0x18144, 0x18148}, {0x18168, 0x18174}, {0x18178, 0x18180}, {0x181C8, 0x181E0}, {0x181E4, 0x181E8}, {0x181EC, 0x1820C}, {0x1825C, 0x18280}, {0x18284, 0x18290}, {0x18294, 0x182A0}, {0x18300, 0x18304}, {0x18314, 0x18320}, {0x18328, 0x18350}, {0x1835C, 0x1836C}, {0x18370, 0x18390}, {0x18398, 0x183AC}, {0x183BC, 0x183D8}, {0x183DC, 0x183F4}, {0x18400, 0x186F4}, {0x186F8, 0x1871C}, {0x18720, 0x18790}, {0x19800, 0x19830}, {0x19834, 0x19840}, {0x19880, 0x1989C}, {0x198A4, 0x198B0}, {0x198BC, 0x19900}, {0x19C00, 0x19C88}, {0x19D00, 0x19D20}, {0x19E00, 0x19E7C}, {0x19E80, 0x19E94}, {0x19E98, 0x19EAC}, {0x19EB0, 0x19EBC}, {0x19F70, 0x19F74}, {0x19F80, 0x19F8C}, {0x19FA0, 0x19FB4}, {0x19FC0, 0x19FD8}, {0x1A000, 0x1A200}, {0x1A204, 0x1A210}, {0x1A228, 0x1A22C}, {0x1A230, 0x1A248}, {0x1A250, 0x1A270}, {0x1A280, 0x1A290}, {0x1A2A0, 0x1A2A4}, {0x1A2C0, 0x1A2EC}, {0x1A300, 0x1A3BC}, {0x1A3F0, 0x1A3F4}, {0x1A3F8, 0x1A434}, {0x1A438, 0x1A444}, {0x1A448, 0x1A468}, {0x1A580, 0x1A58C}, {0x1A644, 0x1A654}, {0x1A670, 0x1A698}, {0x1A6AC, 0x1A6B0}, {0x1A6D0, 0x1A6D4}, {0x1A6EC, 0x1A70C}, {0x1A710, 0x1A738}, {0x1A7C0, 0x1A7D0}, {0x1A7D4, 0x1A7D8}, {0x1A7DC, 0x1A7E4}, {0x1A7F0, 0x1A7F8}, {0x1A888, 0x1A89C}, {0x1A8A8, 0x1A8AC}, {0x1A8C0, 0x1A8DC}, {0x1A8F0, 0x1A8FC}, {0x1AE04, 0x1AE08}, {0x1AE18, 0x1AE24}, {0x1AF80, 0x1AF8C}, {0x1AFA0, 0x1AFB4}, {0x1B000, 0x1B200}, {0x1B284, 0x1B288}, {0x1B2D0, 0x1B2D8}, {0x1B2DC, 0x1B2EC}, {0x1B300, 0x1B340}, {0x1B374, 0x1B378}, {0x1B380, 0x1B384}, {0x1B388, 0x1B38C}, {0x1B404, 0x1B408}, {0x1B420, 0x1B428}, {0x1B440, 0x1B444}, {0x1B448, 0x1B44C}, {0x1B450, 0x1B458}, {0x1B45C, 0x1B468}, {0x1B584, 0x1B58C}, {0x1B68C, 0x1B690}, {0x1B6AC, 0x1B6B0}, {0x1B7F0, 0x1B7F8}, {0x1C800, 0x1CC00}, {0x1CE00, 0x1CE04}, {0x1CF80, 0x1CF84}, {0x1D200, 0x1D800}, {0x1E000, 0x20014}, {0x20100, 0x20124}, {0x21400, 0x217A8}, {0x21800, 0x21BA8}, {0x21C00, 0x21FA8}, {0x22000, 0x223A8}, {0x22400, 0x227A8}, {0x22800, 0x22BA8}, {0x22C00, 0x22FA8}, {0x23000, 0x233A8}, {0x24000, 0x24034}, /* EFUSE0,1,2 is disabled here * because its state may be reset * * {0x24800, 0x24804}, * {0x25000, 0x25004}, * {0x25800, 0x25804}, */ {0x26000, 0x26064}, {0x27000, 0x27024}, {0x34000, 0x3400C}, {0x34400, 0x3445C}, {0x34800, 0x3485C}, {0x34C00, 0x34C5C}, {0x35000, 0x3505C}, {0x35400, 0x3545C}, {0x35800, 0x3585C}, {0x35C00, 0x35C5C}, {0x36000, 0x3605C}, {0x38000, 0x38064}, {0x38070, 0x380E0}, {0x3A000, 0x3A074}, /* DBI windows is skipped here, it can be only accessed when pcie * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN = 0 && * PCIE_CTRL_APP_LTSSM_ENALBE=0. 
* {0x3C000 , 0x3C004}, */ {0x40000, 0x400A4}, /* SI register is skipped here. * Because it will cause bus hang * * {0x50000, 0x50018}, */ {0x80000, 0x8000C}, {0x80010, 0x80020}, }; static const struct ath10k_mem_section qca6174_hw30_register_sections[] = { {0x800, 0x810}, {0x820, 0x82C}, {0x830, 0x8F4}, {0x90C, 0x91C}, {0xA14, 0xA18}, {0xA84, 0xA94}, {0xAA8, 0xAD4}, {0xADC, 0xB40}, {0x1000, 0x10A4}, {0x10BC, 0x111C}, {0x1134, 0x1138}, {0x1144, 0x114C}, {0x1150, 0x115C}, {0x1160, 0x1178}, {0x1240, 0x1260}, {0x2000, 0x207C}, {0x3000, 0x3014}, {0x4000, 0x4014}, {0x5000, 0x5124}, {0x6000, 0x6040}, {0x6080, 0x60CC}, {0x6100, 0x611C}, {0x6140, 0x61D8}, {0x6200, 0x6238}, {0x6240, 0x628C}, {0x62C0, 0x62EC}, {0x6380, 0x63E8}, {0x6400, 0x6440}, {0x6480, 0x64CC}, {0x6500, 0x651C}, {0x6540, 0x6580}, {0x6600, 0x6638}, {0x6640, 0x668C}, {0x66C0, 0x66EC}, {0x6780, 0x67E8}, {0x7080, 0x708C}, {0x70C0, 0x70C8}, {0x7400, 0x741C}, {0x7440, 0x7454}, {0x7800, 0x7818}, {0x8000, 0x8004}, {0x8010, 0x8064}, {0x8080, 0x8084}, {0x80A0, 0x80A4}, {0x80C0, 0x80C4}, {0x80E0, 0x80F4}, {0x8100, 0x8104}, {0x8110, 0x812C}, {0x9000, 0x9004}, {0x9800, 0x982C}, {0x9830, 0x9838}, {0x9840, 0x986C}, {0x9870, 0x9898}, {0x9A00, 0x9C00}, {0xD580, 0xD59C}, {0xF000, 0xF0E0}, {0xF140, 0xF190}, {0xF250, 0xF25C}, {0xF260, 0xF268}, {0xF26C, 0xF2A8}, {0x10008, 0x1000C}, {0x10014, 0x10018}, {0x1001C, 0x10020}, {0x10024, 0x10028}, {0x10030, 0x10034}, {0x10040, 0x10054}, {0x10058, 0x1007C}, {0x10080, 0x100C4}, {0x100C8, 0x10114}, {0x1012C, 0x10130}, {0x10138, 0x10144}, {0x10200, 0x10220}, {0x10230, 0x10250}, {0x10260, 0x10280}, {0x10290, 0x102B0}, {0x102C0, 0x102DC}, {0x102E0, 0x102F4}, {0x102FC, 0x1037C}, {0x10380, 0x10390}, {0x10800, 0x10828}, {0x10840, 0x10844}, {0x10880, 0x10884}, {0x108C0, 0x108E8}, {0x10900, 0x10928}, {0x10940, 0x10944}, {0x10980, 0x10984}, {0x109C0, 0x109E8}, {0x10A00, 0x10A28}, {0x10A40, 0x10A50}, {0x11000, 0x11028}, {0x11030, 0x11034}, {0x11038, 0x11068}, {0x11070, 0x11074}, {0x11078, 0x110A8}, {0x110B0, 0x110B4}, {0x110B8, 0x110E8}, {0x110F0, 0x110F4}, {0x110F8, 0x11128}, {0x11138, 0x11144}, {0x11178, 0x11180}, {0x111B8, 0x111C0}, {0x111F8, 0x11200}, {0x11238, 0x1123C}, {0x11270, 0x11274}, {0x11278, 0x1127C}, {0x112B0, 0x112B4}, {0x112B8, 0x112BC}, {0x112F0, 0x112F4}, {0x112F8, 0x112FC}, {0x11338, 0x1133C}, {0x11378, 0x1137C}, {0x113B8, 0x113BC}, {0x113F8, 0x113FC}, {0x11438, 0x11440}, {0x11478, 0x11480}, {0x114B8, 0x114BC}, {0x114F8, 0x114FC}, {0x11538, 0x1153C}, {0x11578, 0x1157C}, {0x115B8, 0x115BC}, {0x115F8, 0x115FC}, {0x11638, 0x1163C}, {0x11678, 0x1167C}, {0x116B8, 0x116BC}, {0x116F8, 0x116FC}, {0x11738, 0x1173C}, {0x11778, 0x1177C}, {0x117B8, 0x117BC}, {0x117F8, 0x117FC}, {0x17000, 0x1701C}, {0x17020, 0x170AC}, {0x18000, 0x18050}, {0x18054, 0x18074}, {0x18080, 0x180D4}, {0x180DC, 0x18104}, {0x18108, 0x1813C}, {0x18144, 0x18148}, {0x18168, 0x18174}, {0x18178, 0x18180}, {0x181C8, 0x181E0}, {0x181E4, 0x181E8}, {0x181EC, 0x1820C}, {0x1825C, 0x18280}, {0x18284, 0x18290}, {0x18294, 0x182A0}, {0x18300, 0x18304}, {0x18314, 0x18320}, {0x18328, 0x18350}, {0x1835C, 0x1836C}, {0x18370, 0x18390}, {0x18398, 0x183AC}, {0x183BC, 0x183D8}, {0x183DC, 0x183F4}, {0x18400, 0x186F4}, {0x186F8, 0x1871C}, {0x18720, 0x18790}, {0x19800, 0x19830}, {0x19834, 0x19840}, {0x19880, 0x1989C}, {0x198A4, 0x198B0}, {0x198BC, 0x19900}, {0x19C00, 0x19C88}, {0x19D00, 0x19D20}, {0x19E00, 0x19E7C}, {0x19E80, 0x19E94}, {0x19E98, 0x19EAC}, {0x19EB0, 0x19EBC}, {0x19F70, 0x19F74}, {0x19F80, 0x19F8C}, {0x19FA0, 0x19FB4}, {0x19FC0, 0x19FD8}, {0x1A000, 
0x1A200}, {0x1A204, 0x1A210}, {0x1A228, 0x1A22C}, {0x1A230, 0x1A248}, {0x1A250, 0x1A270}, {0x1A280, 0x1A290}, {0x1A2A0, 0x1A2A4}, {0x1A2C0, 0x1A2EC}, {0x1A300, 0x1A3BC}, {0x1A3F0, 0x1A3F4}, {0x1A3F8, 0x1A434}, {0x1A438, 0x1A444}, {0x1A448, 0x1A468}, {0x1A580, 0x1A58C}, {0x1A644, 0x1A654}, {0x1A670, 0x1A698}, {0x1A6AC, 0x1A6B0}, {0x1A6D0, 0x1A6D4}, {0x1A6EC, 0x1A70C}, {0x1A710, 0x1A738}, {0x1A7C0, 0x1A7D0}, {0x1A7D4, 0x1A7D8}, {0x1A7DC, 0x1A7E4}, {0x1A7F0, 0x1A7F8}, {0x1A888, 0x1A89C}, {0x1A8A8, 0x1A8AC}, {0x1A8C0, 0x1A8DC}, {0x1A8F0, 0x1A8FC}, {0x1AE04, 0x1AE08}, {0x1AE18, 0x1AE24}, {0x1AF80, 0x1AF8C}, {0x1AFA0, 0x1AFB4}, {0x1B000, 0x1B200}, {0x1B284, 0x1B288}, {0x1B2D0, 0x1B2D8}, {0x1B2DC, 0x1B2EC}, {0x1B300, 0x1B340}, {0x1B374, 0x1B378}, {0x1B380, 0x1B384}, {0x1B388, 0x1B38C}, {0x1B404, 0x1B408}, {0x1B420, 0x1B428}, {0x1B440, 0x1B444}, {0x1B448, 0x1B44C}, {0x1B450, 0x1B458}, {0x1B45C, 0x1B468}, {0x1B584, 0x1B58C}, {0x1B68C, 0x1B690}, {0x1B6AC, 0x1B6B0}, {0x1B7F0, 0x1B7F8}, {0x1C800, 0x1CC00}, {0x1CE00, 0x1CE04}, {0x1CF80, 0x1CF84}, {0x1D200, 0x1D800}, {0x1E000, 0x20014}, {0x20100, 0x20124}, {0x21400, 0x217A8}, {0x21800, 0x21BA8}, {0x21C00, 0x21FA8}, {0x22000, 0x223A8}, {0x22400, 0x227A8}, {0x22800, 0x22BA8}, {0x22C00, 0x22FA8}, {0x23000, 0x233A8}, {0x24000, 0x24034}, {0x26000, 0x26064}, {0x27000, 0x27024}, {0x34000, 0x3400C}, {0x34400, 0x3445C}, {0x34800, 0x3485C}, {0x34C00, 0x34C5C}, {0x35000, 0x3505C}, {0x35400, 0x3545C}, {0x35800, 0x3585C}, {0x35C00, 0x35C5C}, {0x36000, 0x3605C}, {0x38000, 0x38064}, {0x38070, 0x380E0}, {0x3A000, 0x3A074}, {0x40000, 0x400A4}, {0x80000, 0x8000C}, {0x80010, 0x80020}, }; static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x70000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, /* RTC_SOC_BASE_ADDRESS */ .start = 0x0, /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */ .len = 0x800 - 0x0, .name = "REG_PART1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, /* STEREO_BASE_ADDRESS */ .start = 0x27000, /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */ .len = 0x60000 - 0x27000, .name = "REG_PART2", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x70000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_AXI, .start = 0xa0000, .len = 0x18000, .name = "AXI", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x800, .len = 0x80020 - 0x800, .name = "REG_TOTAL", .section_table = { .sections = qca6174_hw21_register_sections, .size = ARRAY_SIZE(qca6174_hw21_register_sections), }, }, }; static const struct ath10k_mem_region qca6174_hw30_sdio_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0xa8000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_AXI, .start = 0xa0000, .len = 0x18000, .name = "AXI", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IRAM1, .start = 0x00980000, .len = 0x00080000, .name = "IRAM1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IRAM2, .start = 0x00a00000, .len = 0x00040000, .name = "IRAM2", .section_table = { .sections = NULL, .size = 0, }, }, { .type = 
ATH10K_MEM_REGION_TYPE_REG, .start = 0x800, .len = 0x80020 - 0x800, .name = "REG_TOTAL", .section_table = { .sections = qca6174_hw30_sdio_register_sections, .size = ARRAY_SIZE(qca6174_hw30_sdio_register_sections), }, }, }; static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0xa8000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_AXI, .start = 0xa0000, .len = 0x18000, .name = "AXI", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x800, .len = 0x80020 - 0x800, .name = "REG_TOTAL", .section_table = { .sections = qca6174_hw30_register_sections, .size = ARRAY_SIZE(qca6174_hw30_register_sections), }, }, /* IRAM dump must be put last */ { .type = ATH10K_MEM_REGION_TYPE_IRAM1, .start = 0x00980000, .len = 0x00080000, .name = "IRAM1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IRAM2, .start = 0x00a00000, .len = 0x00040000, .name = "IRAM2", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x50000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x4000, .len = 0x2000, .name = "REG_PART1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x8000, .len = 0x58000, .name = "REG_PART2", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x60000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x980000, .len = 0x50000, .name = "IRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOSRAM, .start = 0xC0000, .len = 0x40000, .name = "SRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x30000, .len = 0x7000, .name = "APB REG 1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x3f000, .len = 0x3000, .name = "APB REG 2", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x43000, .len = 0x3000, .name = "WIFI REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x4A000, .len = 0x5000, .name = "CE REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x80000, .len = 0x6000, .name = "SOC REG", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x80000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x980000, .len = 0x50000, .name = "IRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOSRAM, .start = 0xC0000, .len = 0x40000, .name = "SRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x30000, .len = 0x7000, .name = "APB REG 1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = 
ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x3f000, .len = 0x3000, .name = "APB REG 2", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x43000, .len = 0x3000, .name = "WIFI REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x4A000, .len = 0x5000, .name = "CE REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x80000, .len = 0x6000, .name = "SOC REG", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_section ipq4019_soc_reg_range[] = { {0x080000, 0x080004}, {0x080020, 0x080024}, {0x080028, 0x080050}, {0x0800d4, 0x0800ec}, {0x08010c, 0x080118}, {0x080284, 0x080290}, {0x0802a8, 0x0802b8}, {0x0802dc, 0x08030c}, {0x082000, 0x083fff} }; static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x68000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0xC0000, .len = 0x40000, .name = "SRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x980000, .len = 0x50000, .name = "IRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x30000, .len = 0x7000, .name = "APB REG 1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x3f000, .len = 0x3000, .name = "APB REG 2", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x43000, .len = 0x3000, .name = "WIFI REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x4A000, .len = 0x5000, .name = "CE REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x080000, .len = 0x083fff - 0x080000, .name = "REG_TOTAL", .section_table = { .sections = ipq4019_soc_reg_range, .size = ARRAY_SIZE(ipq4019_soc_reg_range), }, }, }; static const struct ath10k_mem_region wcn399x_hw10_mem_regions[] = { { /* MSA region start is not fixed, hence it is assigned at runtime */ .type = ATH10K_MEM_REGION_TYPE_MSA, .len = 0x100000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { { .hw_id = QCA6174_HW_1_0_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw10_mem_regions, .size = ARRAY_SIZE(qca6174_hw10_mem_regions), }, }, { .hw_id = QCA6174_HW_1_1_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw10_mem_regions, .size = ARRAY_SIZE(qca6174_hw10_mem_regions), }, }, { .hw_id = QCA6174_HW_1_3_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw10_mem_regions, .size = ARRAY_SIZE(qca6174_hw10_mem_regions), }, }, { .hw_id = QCA6174_HW_2_1_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw21_mem_regions, .size = ARRAY_SIZE(qca6174_hw21_mem_regions), }, }, { .hw_id = QCA6174_HW_3_0_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw30_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_mem_regions), }, }, { .hw_id = QCA6174_HW_3_2_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { 
.regions = qca6174_hw30_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_mem_regions), }, }, { .hw_id = QCA6174_HW_3_2_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_SDIO, .region_table = { .regions = qca6174_hw30_sdio_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_sdio_mem_regions), }, }, { .hw_id = QCA9377_HW_1_1_DEV_VERSION, .hw_rev = ATH10K_HW_QCA9377, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw30_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_mem_regions), }, }, { .hw_id = QCA988X_HW_2_0_VERSION, .hw_rev = ATH10K_HW_QCA988X, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca988x_hw20_mem_regions, .size = ARRAY_SIZE(qca988x_hw20_mem_regions), }, }, { .hw_id = QCA9984_HW_1_0_DEV_VERSION, .hw_rev = ATH10K_HW_QCA9984, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca9984_hw10_mem_regions, .size = ARRAY_SIZE(qca9984_hw10_mem_regions), }, }, { .hw_id = QCA9888_HW_2_0_DEV_VERSION, .hw_rev = ATH10K_HW_QCA9888, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca9984_hw10_mem_regions, .size = ARRAY_SIZE(qca9984_hw10_mem_regions), }, }, { .hw_id = QCA99X0_HW_2_0_DEV_VERSION, .hw_rev = ATH10K_HW_QCA99X0, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca99x0_hw20_mem_regions, .size = ARRAY_SIZE(qca99x0_hw20_mem_regions), }, }, { .hw_id = QCA4019_HW_1_0_DEV_VERSION, .hw_rev = ATH10K_HW_QCA4019, .bus = ATH10K_BUS_AHB, .region_table = { .regions = qca4019_hw10_mem_regions, .size = ARRAY_SIZE(qca4019_hw10_mem_regions), }, }, { .hw_id = WCN3990_HW_1_0_DEV_VERSION, .hw_rev = ATH10K_HW_WCN3990, .bus = ATH10K_BUS_SNOC, .region_table = { .regions = wcn399x_hw10_mem_regions, .size = ARRAY_SIZE(wcn399x_hw10_mem_regions), }, }, }; static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar) { const struct ath10k_hw_mem_layout *hw; const struct ath10k_mem_region *mem_region; size_t size = 0; int i; hw = ath10k_coredump_get_mem_layout(ar); if (!hw) return 0; mem_region = &hw->region_table.regions[0]; for (i = 0; i < hw->region_table.size; i++) { size += mem_region->len; mem_region++; } /* reserve space for the headers */ size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr); /* make sure it is aligned 16 bytes for debug message print out */ size = ALIGN(size, 16); return size; } const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar) { if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) return NULL; return _ath10k_coredump_get_mem_layout(ar); } EXPORT_SYMBOL(ath10k_coredump_get_mem_layout); const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar) { int i; if (WARN_ON(ar->target_version == 0)) return NULL; for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) { if (ar->target_version == hw_mem_layouts[i].hw_id && ar->hw_rev == hw_mem_layouts[i].hw_rev && hw_mem_layouts[i].bus == ar->hif.bus) return &hw_mem_layouts[i]; } return NULL; } struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; lockdep_assert_held(&ar->dump_mutex); if (ath10k_coredump_mask == 0) /* coredump disabled */ return NULL; guid_gen(&crash_data->guid); ktime_get_real_ts64(&crash_data->timestamp); return crash_data; } EXPORT_SYMBOL(ath10k_coredump_new); static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; struct ath10k_ce_crash_hdr *ce_hdr; struct ath10k_dump_file_data *dump_data; struct ath10k_tlv_dump_data *dump_tlv; size_t hdr_len = 
sizeof(*dump_data); size_t len, sofar = 0; unsigned char *buf; len = hdr_len; if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) len += sizeof(*dump_tlv) + sizeof(crash_data->registers); if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) len += sizeof(*dump_tlv) + sizeof(*ce_hdr) + CE_COUNT * sizeof(ce_hdr->entries[0]); if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; sofar += hdr_len; /* This is going to get big when we start dumping FW RAM and such, * so go ahead and use vmalloc. */ buf = vzalloc(len); if (!buf) return NULL; mutex_lock(&ar->dump_mutex); dump_data = (struct ath10k_dump_file_data *)(buf); strscpy(dump_data->df_magic, "ATH10K-FW-DUMP", sizeof(dump_data->df_magic)); dump_data->len = cpu_to_le32(len); dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); guid_copy(&dump_data->guid, &crash_data->guid); dump_data->chip_id = cpu_to_le32(ar->bus_param.chip_id); dump_data->bus_type = cpu_to_le32(0); dump_data->target_version = cpu_to_le32(ar->target_version); dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major); dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor); dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release); dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build); dump_data->phy_capability = cpu_to_le32(ar->phy_capability); dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power); dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power); dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info); dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info); dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains); strscpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, sizeof(dump_data->fw_ver)); dump_data->kernel_ver_code = 0; strscpy(dump_data->kernel_ver, init_utsname()->release, sizeof(dump_data->kernel_ver)); dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec); dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec); if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) { dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS); dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers)); memcpy(dump_tlv->tlv_data, &crash_data->registers, sizeof(crash_data->registers)); sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers); } if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) { dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA); dump_tlv->tlv_len = cpu_to_le32(struct_size(ce_hdr, entries, CE_COUNT)); ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data); ce_hdr->ce_count = cpu_to_le32(CE_COUNT); memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved)); memcpy(ce_hdr->entries, crash_data->ce_crash_data, CE_COUNT * sizeof(ce_hdr->entries[0])); sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) + CE_COUNT * sizeof(ce_hdr->entries[0]); } /* Gather ram dump */ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA); dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len); if (crash_data->ramdump_buf_len) { memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf, crash_data->ramdump_buf_len); sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; } } mutex_unlock(&ar->dump_mutex); return dump_data; } int 
ath10k_coredump_submit(struct ath10k *ar) { struct ath10k_dump_file_data *dump; if (ath10k_coredump_mask == 0) /* coredump disabled */ return 0; dump = ath10k_coredump_build(ar); if (!dump) { ath10k_warn(ar, "no crash dump data found for devcoredump"); return -ENODATA; } dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL); return 0; } int ath10k_coredump_create(struct ath10k *ar) { if (ath10k_coredump_mask == 0) /* coredump disabled */ return 0; ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data)); if (!ar->coredump.fw_crash_data) return -ENOMEM; return 0; } int ath10k_coredump_register(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar); if (!crash_data->ramdump_buf_len) return 0; crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len); if (!crash_data->ramdump_buf) return -ENOMEM; } return 0; } void ath10k_coredump_unregister(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; vfree(crash_data->ramdump_buf); } void ath10k_coredump_destroy(struct ath10k *ar) { if (ar->coredump.fw_crash_data->ramdump_buf) { vfree(ar->coredump.fw_crash_data->ramdump_buf); ar->coredump.fw_crash_data->ramdump_buf = NULL; ar->coredump.fw_crash_data->ramdump_buf_len = 0; } vfree(ar->coredump.fw_crash_data); ar->coredump.fw_crash_data = NULL; } |
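/*
 * Editor's illustrative sketch (not part of the driver): how a userspace
 * post-processing tool might walk the TLV area that ath10k_coredump_build()
 * lays out above. The header layout below is a simplified stand-in for the
 * real struct ath10k_tlv_dump_data definition in coredump.h (field order,
 * packing and the size of the fixed file header are assumptions), so treat
 * this as a sketch of the framing rather than a drop-in parser.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dump_tlv {                /* assumed mirror of ath10k_tlv_dump_data */
	uint32_t type;           /* ATH10K_FW_CRASH_DUMP_* (little endian) */
	uint32_t tlv_len;        /* payload length in bytes */
	uint8_t  tlv_data[];     /* payload follows immediately */
};

/*
 * buf/total_len: the devcoredump blob; hdr_len: size of the fixed file
 * header that precedes the TLV area (sizeof(*dump_data) in the code above).
 * Each iteration advances by sizeof(*tlv) + tlv_len, the same stride that
 * 'sofar' uses while the dump is being built.
 */
static void walk_dump_tlvs(const uint8_t *buf, size_t total_len, size_t hdr_len)
{
	size_t off = hdr_len;

	while (off + sizeof(struct dump_tlv) <= total_len) {
		const struct dump_tlv *tlv = (const void *)(buf + off);

		if (off + sizeof(*tlv) + tlv->tlv_len > total_len)
			break;   /* truncated dump, stop parsing */
		printf("TLV type %u, %u bytes\n",
		       (unsigned)tlv->type, (unsigned)tlv->tlv_len);
		off += sizeof(*tlv) + tlv->tlv_len;
	}
}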
/* * net/tipc/ib_media.c: Infiniband bearer support for TIPC * * Copyright (c) 2013 Patrick McHardy <kaber@trash.net> * * Based on eth_media.c, which carries the following copyright notice: * * Copyright (c) 2001-2007, Ericsson AB * Copyright (c) 2005-2008, 2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.
*/ #include <linux/if_infiniband.h> #include "core.h" #include "bearer.h" #define TIPC_MAX_IB_LINK_WIN 500 /* convert InfiniBand address (media address format) media address to string */ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) { if (str_size < 60) /* 60 = 19 * strlen("xx:") + strlen("xx\0") */ return 1; sprintf(str_buf, "%20phC", a->value); return 0; } /* Convert from media address format to discovery message addr format */ static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr) { memset(msg, 0, TIPC_MEDIA_INFO_SIZE); memcpy(msg, addr->value, INFINIBAND_ALEN); return 0; } /* Convert raw InfiniBand address format to media addr format */ static int tipc_ib_raw2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, const char *msg) { memset(addr, 0, sizeof(*addr)); memcpy(addr->value, msg, INFINIBAND_ALEN); addr->media_id = TIPC_MEDIA_TYPE_IB; addr->broadcast = !memcmp(msg, b->bcast_addr.value, INFINIBAND_ALEN); return 0; } /* Convert discovery msg addr format to InfiniBand media addr format */ static int tipc_ib_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, char *msg) { return tipc_ib_raw2addr(b, addr, msg); } /* InfiniBand media registration info */ struct tipc_media ib_media_info = { .send_msg = tipc_l2_send_msg, .enable_media = tipc_enable_l2_media, .disable_media = tipc_disable_l2_media, .addr2str = tipc_ib_addr2str, .addr2msg = tipc_ib_addr2msg, .msg2addr = tipc_ib_msg2addr, .raw2addr = tipc_ib_raw2addr, .priority = TIPC_DEF_LINK_PRI, .tolerance = TIPC_DEF_LINK_TOL, .min_win = TIPC_DEF_LINK_WIN, .max_win = TIPC_MAX_IB_LINK_WIN, .type_id = TIPC_MEDIA_TYPE_IB, .hwaddr_len = INFINIBAND_ALEN, .name = "ib" }; |
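/*
 * Editor's illustrative sketch (not part of the bearer): what the 60-byte
 * check in tipc_ib_addr2str() above accounts for. INFINIBAND_ALEN is 20 and
 * the kernel's "%20phC" specifier prints the address as colon-separated hex,
 * so the worst case is 19 * strlen("xx:") + strlen("xx") + 1 = 60 bytes.
 * The plain-C helper below reproduces that formatting for demonstration only.
 */
#include <stdio.h>

#define IB_ALEN 20	/* same value as INFINIBAND_ALEN */

static int ib_addr_to_str(const unsigned char addr[IB_ALEN], char *buf, int size)
{
	int i, n = 0;

	if (size < 3 * IB_ALEN)		/* 19 * "xx:" + "xx" + '\0' = 60 */
		return 1;		/* same failure code as the driver */
	for (i = 0; i < IB_ALEN; i++)
		n += sprintf(buf + n, i ? ":%02x" : "%02x", addr[i]);
	return 0;
}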
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * Copyright 2001-2006 Ian Kent <raven@themaw.net> */ #include <linux/sched/signal.h> #include "autofs_i.h" /* We make this a static variable rather than a part of the superblock; it * is better if we don't reassign numbers easily even across filesystems */ static autofs_wqt_t autofs_next_wait_queue = 1; void autofs_catatonic_mode(struct autofs_sb_info *sbi) { struct autofs_wait_queue *wq, *nwq; mutex_lock(&sbi->wq_mutex); if (sbi->flags & AUTOFS_SBI_CATATONIC) { mutex_unlock(&sbi->wq_mutex); return; } pr_debug("entering catatonic mode\n"); sbi->flags |= AUTOFS_SBI_CATATONIC; wq = sbi->queues; sbi->queues = NULL; /* Erase all wait queues */ while (wq) { nwq = wq->next; wq->status = -ENOENT; /* Magic is gone - report failure */ kfree(wq->name.name - wq->offset); wq->name.name = NULL; wake_up(&wq->queue); if (!--wq->wait_ctr) kfree(wq); wq = nwq; } fput(sbi->pipe); /* Close the pipe */ sbi->pipe = NULL; sbi->pipefd = -1; mutex_unlock(&sbi->wq_mutex); } static int autofs_write(struct autofs_sb_info *sbi, struct file *file, const void *addr, int bytes) { unsigned long sigpipe, flags; const char *data = (const char *)addr; ssize_t wr = 0; sigpipe = sigismember(&current->pending.signal, SIGPIPE); mutex_lock(&sbi->pipe_mutex); while (bytes) { wr = __kernel_write(file, data, bytes, NULL); if (wr <= 0) break; data += wr; bytes -= wr; } mutex_unlock(&sbi->pipe_mutex); /* Keep the
currently executing process from receiving a * SIGPIPE unless it was already supposed to get one */ if (wr == -EPIPE && !sigpipe) { spin_lock_irqsave(&current->sighand->siglock, flags); sigdelset(&current->pending.signal, SIGPIPE); recalc_sigpending(); spin_unlock_irqrestore(&current->sighand->siglock, flags); } /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */ return bytes == 0 ? 0 : wr < 0 ? wr : -EIO; } static void autofs_notify_daemon(struct autofs_sb_info *sbi, struct autofs_wait_queue *wq, int type) { union { struct autofs_packet_hdr hdr; union autofs_packet_union v4_pkt; union autofs_v5_packet_union v5_pkt; } pkt; struct file *pipe = NULL; size_t pktsz; int ret; pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type); memset(&pkt, 0, sizeof(pkt)); /* For security reasons */ pkt.hdr.proto_version = sbi->version; pkt.hdr.type = type; switch (type) { /* Kernel protocol v4 missing and expire packets */ case autofs_ptype_missing: { struct autofs_packet_missing *mp = &pkt.v4_pkt.missing; pktsz = sizeof(*mp); mp->wait_queue_token = wq->wait_queue_token; mp->len = wq->name.len; memcpy(mp->name, wq->name.name, wq->name.len); mp->name[wq->name.len] = '\0'; break; } case autofs_ptype_expire_multi: { struct autofs_packet_expire_multi *ep = &pkt.v4_pkt.expire_multi; pktsz = sizeof(*ep); ep->wait_queue_token = wq->wait_queue_token; ep->len = wq->name.len; memcpy(ep->name, wq->name.name, wq->name.len); ep->name[wq->name.len] = '\0'; break; } /* * Kernel protocol v5 packet for handling indirect and direct * mount missing and expire requests */ case autofs_ptype_missing_indirect: case autofs_ptype_expire_indirect: case autofs_ptype_missing_direct: case autofs_ptype_expire_direct: { struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet; struct user_namespace *user_ns = sbi->pipe->f_cred->user_ns; pktsz = sizeof(*packet); packet->wait_queue_token = wq->wait_queue_token; packet->len = wq->name.len; memcpy(packet->name, wq->name.name, wq->name.len); packet->name[wq->name.len] = '\0'; packet->dev = wq->dev; packet->ino = wq->ino; packet->uid = from_kuid_munged(user_ns, wq->uid); packet->gid = from_kgid_munged(user_ns, wq->gid); packet->pid = wq->pid; packet->tgid = wq->tgid; break; } default: pr_warn("bad type %d!\n", type); mutex_unlock(&sbi->wq_mutex); return; } pipe = get_file(sbi->pipe); mutex_unlock(&sbi->wq_mutex); switch (ret = autofs_write(sbi, pipe, &pkt, pktsz)) { case 0: break; case -ENOMEM: case -ERESTARTSYS: /* Just fail this one */ autofs_wait_release(sbi, wq->wait_queue_token, ret); break; default: autofs_catatonic_mode(sbi); break; } fput(pipe); } static struct autofs_wait_queue * autofs_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr) { struct autofs_wait_queue *wq; for (wq = sbi->queues; wq; wq = wq->next) { if (wq->name.hash == qstr->hash && wq->name.len == qstr->len && wq->name.name && !memcmp(wq->name.name, qstr->name, qstr->len)) break; } return wq; } /* * Check if we have a valid request. * Returns * 1 if the request should continue. * In this case we can return an autofs_wait_queue entry if one is * found or NULL to indicate a new wait needs to be created. * 0 or a negative errno if the request shouldn't continue.
*/ static int validate_request(struct autofs_wait_queue **wait, struct autofs_sb_info *sbi, const struct qstr *qstr, const struct path *path, enum autofs_notify notify) { struct dentry *dentry = path->dentry; struct autofs_wait_queue *wq; struct autofs_info *ino; if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; /* Wait in progress, continue; */ wq = autofs_find_wait(sbi, qstr); if (wq) { *wait = wq; return 1; } *wait = NULL; /* If we don't yet have any info this is a new request */ ino = autofs_dentry_ino(dentry); if (!ino) return 1; /* * If we've been asked to wait on an existing expire (NFY_NONE) * but there is no wait in the queue ... */ if (notify == NFY_NONE) { /* * Either we've betean the pending expire to post it's * wait or it finished while we waited on the mutex. * So we need to wait till either, the wait appears * or the expire finishes. */ while (ino->flags & AUTOFS_INF_EXPIRING) { mutex_unlock(&sbi->wq_mutex); schedule_timeout_interruptible(HZ/10); if (mutex_lock_interruptible(&sbi->wq_mutex)) return -EINTR; if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; wq = autofs_find_wait(sbi, qstr); if (wq) { *wait = wq; return 1; } } /* * Not ideal but the status has already gone. Of the two * cases where we wait on NFY_NONE neither depend on the * return status of the wait. */ return 0; } /* * If we've been asked to trigger a mount and the request * completed while we waited on the mutex ... */ if (notify == NFY_MOUNT) { struct dentry *new = NULL; struct path this; int valid = 1; /* * If the dentry was successfully mounted while we slept * on the wait queue mutex we can return success. If it * isn't mounted (doesn't have submounts for the case of * a multi-mount with no mount at it's base) we can * continue on and create a new request. */ if (!IS_ROOT(dentry)) { if (d_unhashed(dentry) && d_really_is_positive(dentry)) { struct dentry *parent = dentry->d_parent; new = d_lookup(parent, &dentry->d_name); if (new) dentry = new; } } this.mnt = path->mnt; this.dentry = dentry; if (path_has_submounts(&this)) valid = 0; if (new) dput(new); return valid; } return 1; } int autofs_wait(struct autofs_sb_info *sbi, const struct path *path, enum autofs_notify notify) { struct dentry *dentry = path->dentry; struct autofs_wait_queue *wq; struct qstr qstr; char *name; int status, ret, type; unsigned int offset = 0; pid_t pid; pid_t tgid; /* In catatonic mode, we don't wait for nobody */ if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; /* * Try translating pids to the namespace of the daemon. * * Zero means failure: we are in an unrelated pid namespace. */ pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); if (pid == 0 || tgid == 0) return -ENOENT; if (d_really_is_negative(dentry)) { /* * A wait for a negative dentry is invalid for certain * cases. A direct or offset mount "always" has its mount * point directory created and so the request dentry must * be positive or the map key doesn't exist. The situation * is very similar for indirect mounts except only dentrys * in the root of the autofs file system may be negative. 
*/ if (autofs_type_trigger(sbi->type)) return -ENOENT; else if (!IS_ROOT(dentry->d_parent)) return -ENOENT; } name = kmalloc(NAME_MAX + 1, GFP_KERNEL); if (!name) return -ENOMEM; /* If this is a direct mount request create a dummy name */ if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type)) { qstr.name = name; qstr.len = sprintf(name, "%p", dentry); } else { char *p = dentry_path_raw(dentry, name, NAME_MAX); if (IS_ERR(p)) { kfree(name); return -ENOENT; } qstr.name = ++p; // skip the leading slash qstr.len = strlen(p); offset = p - name; } qstr.hash = full_name_hash(dentry, qstr.name, qstr.len); if (mutex_lock_interruptible(&sbi->wq_mutex)) { kfree(name); return -EINTR; } ret = validate_request(&wq, sbi, &qstr, path, notify); if (ret <= 0) { if (ret != -EINTR) mutex_unlock(&sbi->wq_mutex); kfree(name); return ret; } if (!wq) { /* Create a new wait queue */ wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL); if (!wq) { kfree(name); mutex_unlock(&sbi->wq_mutex); return -ENOMEM; } wq->wait_queue_token = autofs_next_wait_queue; if (++autofs_next_wait_queue == 0) autofs_next_wait_queue = 1; wq->next = sbi->queues; sbi->queues = wq; init_waitqueue_head(&wq->queue); memcpy(&wq->name, &qstr, sizeof(struct qstr)); wq->offset = offset; wq->dev = autofs_get_dev(sbi); wq->ino = autofs_get_ino(sbi); wq->uid = current_uid(); wq->gid = current_gid(); wq->pid = pid; wq->tgid = tgid; wq->status = -EINTR; /* Status return if interrupted */ wq->wait_ctr = 2; if (sbi->version < 5) { if (notify == NFY_MOUNT) type = autofs_ptype_missing; else type = autofs_ptype_expire_multi; } else { if (notify == NFY_MOUNT) type = autofs_type_trigger(sbi->type) ? autofs_ptype_missing_direct : autofs_ptype_missing_indirect; else type = autofs_type_trigger(sbi->type) ? autofs_ptype_expire_direct : autofs_ptype_expire_indirect; } pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, notify); /* * autofs_notify_daemon() may block; it will unlock ->wq_mutex */ autofs_notify_daemon(sbi, wq, type); } else { wq->wait_ctr++; pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, notify); mutex_unlock(&sbi->wq_mutex); kfree(name); } /* * wq->name.name is NULL iff the lock is already released * or the mount has been made catatonic. */ wait_event_killable(wq->queue, wq->name.name == NULL); status = wq->status; /* * For direct and offset mounts we need to track the requester's * uid and gid in the dentry info struct. This is so it can be * supplied, on request, by the misc device ioctl interface. * This is needed during daemon resatart when reconnecting * to existing, active, autofs mounts. The uid and gid (and * related string values) may be used for macro substitution * in autofs mount maps. */ if (!status) { struct autofs_info *ino; struct dentry *de = NULL; /* direct mount or browsable map */ ino = autofs_dentry_ino(dentry); if (!ino) { /* If not lookup actual dentry used */ de = d_lookup(dentry->d_parent, &dentry->d_name); if (de) ino = autofs_dentry_ino(de); } /* Set mount requester */ if (ino) { spin_lock(&sbi->fs_lock); ino->uid = wq->uid; ino->gid = wq->gid; spin_unlock(&sbi->fs_lock); } if (de) dput(de); } /* Are we the last process to need status? 
*/ mutex_lock(&sbi->wq_mutex); if (!--wq->wait_ctr) kfree(wq); mutex_unlock(&sbi->wq_mutex); return status; } int autofs_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status) { struct autofs_wait_queue *wq, **wql; mutex_lock(&sbi->wq_mutex); for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { if (wq->wait_queue_token == wait_queue_token) break; } if (!wq) { mutex_unlock(&sbi->wq_mutex); return -EINVAL; } *wql = wq->next; /* Unlink from chain */ kfree(wq->name.name - wq->offset); wq->name.name = NULL; /* Do not wait on this queue */ wq->status = status; wake_up(&wq->queue); if (!--wq->wait_ctr) kfree(wq); mutex_unlock(&sbi->wq_mutex); return 0; }
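/*
 * Editor's illustrative sketch (not from the kernel tree): the daemon side
 * of the handshake driven by autofs_wait()/autofs_notify_daemon() above. A
 * simplified automount-like loop reads one v5 packet from the kernel pipe
 * and acknowledges it with AUTOFS_IOC_READY or AUTOFS_IOC_FAIL on the
 * mount's ioctl fd, which lands in autofs_wait_release() with the same
 * token. Packet-size negotiation, v4 packets and the real mount work are
 * omitted; the header name assumes a recent uapi <linux/auto_fs.h>.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/auto_fs.h>	/* struct autofs_v5_packet, AUTOFS_IOC_READY */

static void handle_one_request(int pipe_fd, int ioctl_fd)
{
	struct autofs_v5_packet pkt;

	if (read(pipe_fd, &pkt, sizeof(pkt)) != sizeof(pkt))
		return;				/* short read: skip for brevity */

	/* pkt.name/pkt.len carry the map key; attempt the mount here ... */
	int mounted = 0;			/* pretend the mount failed */

	/*
	 * The token round-trips untouched; autofs_wait_release() matches it
	 * against the queue and wakes the task sleeping in autofs_wait().
	 */
	ioctl(ioctl_fd, mounted ? AUTOFS_IOC_READY : AUTOFS_IOC_FAIL,
	      pkt.wait_queue_token);
}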
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux I2C core * * Copyright (C) 1995-99 Simon G.
Vogl * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> * Mux support by Rodolfo Giometti <giometti@enneenne.com> and * Michael Lawnick <michael.lawnick.ext@nsn.com> * * Copyright (C) 2013-2017 Wolfram Sang <wsa@kernel.org> */ #define pr_fmt(fmt) "i2c-core: " fmt #include <dt-bindings/i2c/i2c.h> #include <linux/acpi.h> #include <linux/clk/clk-conf.h> #include <linux/completion.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/i2c-smbus.h> #include <linux/idr.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irqflags.h> #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/pinctrl/consumer.h> #include <linux/pinctrl/devinfo.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> #include <linux/property.h> #include <linux/rwsem.h> #include <linux/slab.h> #include "i2c-core.h" #define CREATE_TRACE_POINTS #include <trace/events/i2c.h> #define I2C_ADDR_OFFSET_TEN_BIT 0xa000 #define I2C_ADDR_OFFSET_SLAVE 0x1000 #define I2C_ADDR_7BITS_MAX 0x77 #define I2C_ADDR_7BITS_COUNT (I2C_ADDR_7BITS_MAX + 1) #define I2C_ADDR_DEVICE_ID 0x7c /* * core_lock protects i2c_adapter_idr, and guarantees that device detection, * deletion of detected devices are serialized */ static DEFINE_MUTEX(core_lock); static DEFINE_IDR(i2c_adapter_idr); static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver); static DEFINE_STATIC_KEY_FALSE(i2c_trace_msg_key); static bool is_registered; static struct dentry *i2c_debugfs_root; int i2c_transfer_trace_reg(void) { static_branch_inc(&i2c_trace_msg_key); return 0; } void i2c_transfer_trace_unreg(void) { static_branch_dec(&i2c_trace_msg_key); } const char *i2c_freq_mode_string(u32 bus_freq_hz) { switch (bus_freq_hz) { case I2C_MAX_STANDARD_MODE_FREQ: return "Standard Mode (100 kHz)"; case I2C_MAX_FAST_MODE_FREQ: return "Fast Mode (400 kHz)"; case I2C_MAX_FAST_MODE_PLUS_FREQ: return "Fast Mode Plus (1.0 MHz)"; case I2C_MAX_TURBO_MODE_FREQ: return "Turbo Mode (1.4 MHz)"; case I2C_MAX_HIGH_SPEED_MODE_FREQ: return "High Speed Mode (3.4 MHz)"; case I2C_MAX_ULTRA_FAST_MODE_FREQ: return "Ultra Fast Mode (5.0 MHz)"; default: return "Unknown Mode"; } } EXPORT_SYMBOL_GPL(i2c_freq_mode_string); const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, const struct i2c_client *client) { if (!(id && client)) return NULL; while (id->name[0]) { if (strcmp(client->name, id->name) == 0) return id; id++; } return NULL; } EXPORT_SYMBOL_GPL(i2c_match_id); const void *i2c_get_match_data(const struct i2c_client *client) { struct i2c_driver *driver = to_i2c_driver(client->dev.driver); const struct i2c_device_id *match; const void *data; data = device_get_match_data(&client->dev); if (!data) { match = i2c_match_id(driver->id_table, client); if (!match) return NULL; data = (const void *)match->driver_data; } return data; } EXPORT_SYMBOL(i2c_get_match_data); static int i2c_device_match(struct device *dev, const struct device_driver *drv) { struct i2c_client *client = i2c_verify_client(dev); const struct i2c_driver *driver; /* Attempt an OF style match */ if (i2c_of_match_device(drv->of_match_table, client)) return 1; /* Then ACPI style match */ if (acpi_driver_match_device(dev, drv)) return 1; driver = to_i2c_driver(drv); /* Finally an I2C match 
*/ if (i2c_match_id(driver->id_table, client)) return 1; return 0; } static int i2c_device_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct i2c_client *client = to_i2c_client(dev); int rc; rc = of_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; rc = acpi_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name); } /* i2c bus recovery routines */ static int get_scl_gpio_value(struct i2c_adapter *adap) { return gpiod_get_value_cansleep(adap->bus_recovery_info->scl_gpiod); } static void set_scl_gpio_value(struct i2c_adapter *adap, int val) { gpiod_set_value_cansleep(adap->bus_recovery_info->scl_gpiod, val); } static int get_sda_gpio_value(struct i2c_adapter *adap) { return gpiod_get_value_cansleep(adap->bus_recovery_info->sda_gpiod); } static void set_sda_gpio_value(struct i2c_adapter *adap, int val) { gpiod_set_value_cansleep(adap->bus_recovery_info->sda_gpiod, val); } static int i2c_generic_bus_free(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; int ret = -EOPNOTSUPP; if (bri->get_bus_free) ret = bri->get_bus_free(adap); else if (bri->get_sda) ret = bri->get_sda(adap); if (ret < 0) return ret; return ret ? 0 : -EBUSY; } /* * We are generating clock pulses. ndelay() determines the duration of the clock pulses. * We will generate a clock with a rate of 100 kHz, so the duration of each clock level * is: delay in ns = (10^9 / 100000) / 2 = 5000 */ #define RECOVERY_NDELAY 5000 #define RECOVERY_CLK_CNT 9 int i2c_generic_scl_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; int i = 0, scl = 1, ret = 0; if (bri->prepare_recovery) bri->prepare_recovery(adap); if (bri->pinctrl) pinctrl_select_state(bri->pinctrl, bri->pins_gpio); /* * If we can set SDA, we will always create a STOP to ensure additional * pulses will do no harm. This is achieved by letting SDA follow SCL * half a cycle later. Check the 'incomplete_write_byte' fault injector * for details. Note that we must honour tsu:sto, 4us, but let's use 5us * here for simplicity.
*/ bri->set_scl(adap, scl); ndelay(RECOVERY_NDELAY); if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); /* * By this time SCL is high, as we need to give 9 falling-rising edges */ while (i++ < RECOVERY_CLK_CNT * 2) { if (scl) { /* SCL shouldn't be low here */ if (!bri->get_scl(adap)) { dev_err(&adap->dev, "SCL is stuck low, exit recovery\n"); ret = -EBUSY; break; } } scl = !scl; bri->set_scl(adap, scl); /* Creating STOP again, see above */ if (scl) { /* Honour minimum tsu:sto */ ndelay(RECOVERY_NDELAY); } else { /* Honour minimum tf and thd:dat */ ndelay(RECOVERY_NDELAY / 2); } if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); if (scl) { ret = i2c_generic_bus_free(adap); if (ret == 0) break; } } /* If we can't check bus status, assume recovery worked */ if (ret == -EOPNOTSUPP) ret = 0; if (bri->unprepare_recovery) bri->unprepare_recovery(adap); if (bri->pinctrl) pinctrl_select_state(bri->pinctrl, bri->pins_default); return ret; } EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); int i2c_recover_bus(struct i2c_adapter *adap) { if (!adap->bus_recovery_info) return -EBUSY; dev_dbg(&adap->dev, "Trying i2c bus recovery\n"); return adap->bus_recovery_info->recover_bus(adap); } EXPORT_SYMBOL_GPL(i2c_recover_bus); static void i2c_gpio_init_pinctrl_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; struct device *dev = &adap->dev; struct pinctrl *p = bri->pinctrl ?: dev_pinctrl(dev->parent); bri->pinctrl = p; /* * we can't change states without pinctrl, so remove the states if * populated */ if (!p) { bri->pins_default = NULL; bri->pins_gpio = NULL; return; } if (!bri->pins_default) { bri->pins_default = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT); if (IS_ERR(bri->pins_default)) { dev_dbg(dev, PINCTRL_STATE_DEFAULT " state not found for GPIO recovery\n"); bri->pins_default = NULL; } } if (!bri->pins_gpio) { bri->pins_gpio = pinctrl_lookup_state(p, "gpio"); if (IS_ERR(bri->pins_gpio)) bri->pins_gpio = pinctrl_lookup_state(p, "recovery"); if (IS_ERR(bri->pins_gpio)) { dev_dbg(dev, "no gpio or recovery state found for GPIO recovery\n"); bri->pins_gpio = NULL; } } /* for pinctrl state changes, we need all the information */ if (bri->pins_default && bri->pins_gpio) { dev_info(dev, "using pinctrl states for GPIO recovery"); } else { bri->pinctrl = NULL; bri->pins_default = NULL; bri->pins_gpio = NULL; } } static int i2c_gpio_init_generic_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; struct device *dev = &adap->dev; struct gpio_desc *gpiod; int ret = 0; /* * don't touch the recovery information if the driver is not using * generic SCL recovery */ if (bri->recover_bus && bri->recover_bus != i2c_generic_scl_recovery) return 0; /* * pins might be taken as GPIO, so we should inform pinctrl about * this and move the state to GPIO */ if (bri->pinctrl) pinctrl_select_state(bri->pinctrl, bri->pins_gpio); /* * if there is incomplete or no recovery information, see if generic * GPIO recovery is available */ if (!bri->scl_gpiod) { gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); if (PTR_ERR(gpiod) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto cleanup_pinctrl_state; } if (!IS_ERR(gpiod)) { bri->scl_gpiod = gpiod; bri->recover_bus = i2c_generic_scl_recovery; dev_info(dev, "using generic GPIOs for recovery\n"); } } /* SDA GPIOD line is optional, so we care about DEFER only */ if (!bri->sda_gpiod) { /* * We have SCL. 
Pull SCL low and wait a bit so that SDA glitches * have no effect. */ gpiod_direction_output(bri->scl_gpiod, 0); udelay(10); gpiod = devm_gpiod_get(dev, "sda", GPIOD_IN); /* Wait a bit in case of a SDA glitch, and then release SCL. */ udelay(10); gpiod_direction_output(bri->scl_gpiod, 1); if (PTR_ERR(gpiod) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto cleanup_pinctrl_state; } if (!IS_ERR(gpiod)) bri->sda_gpiod = gpiod; } cleanup_pinctrl_state: /* change the state of the pins back to their default state */ if (bri->pinctrl) pinctrl_select_state(bri->pinctrl, bri->pins_default); return ret; } static int i2c_gpio_init_recovery(struct i2c_adapter *adap) { i2c_gpio_init_pinctrl_recovery(adap); return i2c_gpio_init_generic_recovery(adap); } static int i2c_init_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; bool is_error_level = true; char *err_str; if (!bri) return 0; if (i2c_gpio_init_recovery(adap) == -EPROBE_DEFER) return -EPROBE_DEFER; if (!bri->recover_bus) { err_str = "no suitable method provided"; is_error_level = false; goto err; } if (bri->scl_gpiod && bri->recover_bus == i2c_generic_scl_recovery) { bri->get_scl = get_scl_gpio_value; bri->set_scl = set_scl_gpio_value; if (bri->sda_gpiod) { bri->get_sda = get_sda_gpio_value; /* FIXME: add proper flag instead of '0' once available */ if (gpiod_get_direction(bri->sda_gpiod) == 0) bri->set_sda = set_sda_gpio_value; } } else if (bri->recover_bus == i2c_generic_scl_recovery) { /* Generic SCL recovery */ if (!bri->set_scl || !bri->get_scl) { err_str = "no {get|set}_scl() found"; goto err; } if (!bri->set_sda && !bri->get_sda) { err_str = "either get_sda() or set_sda() needed"; goto err; } } return 0; err: if (is_error_level) dev_err(&adap->dev, "Not using recovery: %s\n", err_str); else dev_dbg(&adap->dev, "Not using recovery: %s\n", err_str); adap->bus_recovery_info = NULL; return -EINVAL; } static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) { struct i2c_adapter *adap = client->adapter; unsigned int irq; if (!adap->host_notify_domain) return -ENXIO; if (client->flags & I2C_CLIENT_TEN) return -EINVAL; irq = irq_create_mapping(adap->host_notify_domain, client->addr); return irq > 0 ? irq : -ENXIO; } static int i2c_device_probe(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; bool do_power_on; int status; if (!client) return 0; client->irq = client->init_irq; if (!client->irq) { int irq = -ENOENT; if (client->flags & I2C_CLIENT_HOST_NOTIFY) { dev_dbg(dev, "Using Host Notify IRQ\n"); /* Keep adapter active when Host Notify is required */ pm_runtime_get_sync(&client->adapter->dev); irq = i2c_smbus_host_notify_to_irq(client); } else if (dev->of_node) { irq = of_irq_get_byname(dev->of_node, "irq"); if (irq == -EINVAL || irq == -ENODATA) irq = of_irq_get(dev->of_node, 0); } else if (ACPI_COMPANION(dev)) { bool wake_capable; irq = i2c_acpi_get_irq(client, &wake_capable); if (irq > 0 && wake_capable) client->flags |= I2C_CLIENT_WAKE; } if (irq == -EPROBE_DEFER) { status = irq; goto put_sync_adapter; } if (irq < 0) irq = 0; client->irq = irq; } driver = to_i2c_driver(dev->driver); /* * An I2C ID table is not mandatory, if and only if, a suitable OF * or ACPI ID table is supplied for the probing device. 
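 * For illustration only (a minimal sketch with a hypothetical "foo" driver,
 * not something defined in this file): a driver that supplies just an OF
 * match table and no id_table still passes this check:
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *
 *	static struct i2c_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.probe = foo_probe,
 *	};
 *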
*/ if (!driver->id_table && !acpi_driver_match_device(dev, dev->driver) && !i2c_of_match_device(dev->driver->of_match_table, client)) { status = -ENODEV; goto put_sync_adapter; } if (client->flags & I2C_CLIENT_WAKE) { int wakeirq; wakeirq = of_irq_get_byname(dev->of_node, "wakeup"); if (wakeirq == -EPROBE_DEFER) { status = wakeirq; goto put_sync_adapter; } device_init_wakeup(&client->dev, true); if (wakeirq > 0 && wakeirq != client->irq) status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); else if (client->irq > 0) status = dev_pm_set_wake_irq(dev, client->irq); else status = 0; if (status) dev_warn(&client->dev, "failed to set up wakeup irq\n"); } dev_dbg(dev, "probe\n"); status = of_clk_set_defaults(dev->of_node, false); if (status < 0) goto err_clear_wakeup_irq; do_power_on = !i2c_acpi_waive_d0_probe(dev); status = dev_pm_domain_attach(&client->dev, do_power_on); if (status) goto err_clear_wakeup_irq; client->devres_group_id = devres_open_group(&client->dev, NULL, GFP_KERNEL); if (!client->devres_group_id) { status = -ENOMEM; goto err_detach_pm_domain; } client->debugfs = debugfs_create_dir(dev_name(&client->dev), client->adapter->debugfs); if (driver->probe) status = driver->probe(client); else status = -EINVAL; /* * Note that we are not closing the devres group opened above so * even resources that were attached to the device after probe is * run are released when i2c_device_remove() is executed. This is * needed as some drivers would allocate additional resources, * for example when updating firmware. */ if (status) goto err_release_driver_resources; return 0; err_release_driver_resources: debugfs_remove_recursive(client->debugfs); devres_release_group(&client->dev, client->devres_group_id); err_detach_pm_domain: dev_pm_domain_detach(&client->dev, do_power_on); err_clear_wakeup_irq: dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); put_sync_adapter: if (client->flags & I2C_CLIENT_HOST_NOTIFY) pm_runtime_put_sync(&client->adapter->dev); return status; } static void i2c_device_remove(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct i2c_driver *driver; driver = to_i2c_driver(dev->driver); if (driver->remove) { dev_dbg(dev, "remove\n"); driver->remove(client); } debugfs_remove_recursive(client->debugfs); devres_release_group(&client->dev, client->devres_group_id); dev_pm_domain_detach(&client->dev, true); dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); client->irq = 0; if (client->flags & I2C_CLIENT_HOST_NOTIFY) pm_runtime_put(&client->adapter->dev); } static void i2c_device_shutdown(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; if (!client || !dev->driver) return; driver = to_i2c_driver(dev->driver); if (driver->shutdown) driver->shutdown(client); else if (client->irq > 0) disable_irq(client->irq); } static void i2c_client_dev_release(struct device *dev) { kfree(to_i2c_client(dev)); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", dev->type == &i2c_client_type ? 
to_i2c_client(dev)->name : to_i2c_adapter(dev)->name); } static DEVICE_ATTR_RO(name); static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); int len; len = of_device_modalias(dev, buf, PAGE_SIZE); if (len != -ENODEV) return len; len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); if (len != -ENODEV) return len; return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name); } static DEVICE_ATTR_RO(modalias); static struct attribute *i2c_dev_attrs[] = { &dev_attr_name.attr, /* modalias helps coldplug: modprobe $(cat .../modalias) */ &dev_attr_modalias.attr, NULL }; ATTRIBUTE_GROUPS(i2c_dev); const struct bus_type i2c_bus_type = { .name = "i2c", .match = i2c_device_match, .probe = i2c_device_probe, .remove = i2c_device_remove, .shutdown = i2c_device_shutdown, }; EXPORT_SYMBOL_GPL(i2c_bus_type); const struct device_type i2c_client_type = { .groups = i2c_dev_groups, .uevent = i2c_device_uevent, .release = i2c_client_dev_release, }; EXPORT_SYMBOL_GPL(i2c_client_type); /** * i2c_verify_client - return parameter as i2c_client, or NULL * @dev: device, probably from some driver model iterator * * When traversing the driver model tree, perhaps using driver model * iterators like @device_for_each_child(), you can't assume very much * about the nodes you find. Use this function to avoid oopses caused * by wrongly treating some non-I2C device as an i2c_client. */ struct i2c_client *i2c_verify_client(struct device *dev) { return (dev->type == &i2c_client_type) ? to_i2c_client(dev) : NULL; } EXPORT_SYMBOL(i2c_verify_client); /* Return a unique address which takes the flags of the client into account */ static unsigned short i2c_encode_flags_to_addr(struct i2c_client *client) { unsigned short addr = client->addr; /* For some client flags, add an arbitrary offset to avoid collisions */ if (client->flags & I2C_CLIENT_TEN) addr |= I2C_ADDR_OFFSET_TEN_BIT; if (client->flags & I2C_CLIENT_SLAVE) addr |= I2C_ADDR_OFFSET_SLAVE; return addr; } /* This is a permissive address validity check, I2C address map constraints * are purposely not enforced, except for the general call address. */ static int i2c_check_addr_validity(unsigned int addr, unsigned short flags) { if (flags & I2C_CLIENT_TEN) { /* 10-bit address, all values are valid */ if (addr > 0x3ff) return -EINVAL; } else { /* 7-bit address, reject the general call address */ if (addr == 0x00 || addr > 0x7f) return -EINVAL; } return 0; } /* And this is a strict address validity check, used when probing. If a * device uses a reserved address, then it shouldn't be probed. 7-bit * addressing is assumed, 10-bit address devices are rare and should be * explicitly enumerated. 
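 * For example, a typical EEPROM address such as 0x50 passes this strict
 * check, while 0x04 (an Hs-mode master code) or 0x78 (the start of the
 * 10-bit addressing range) is rejected and will never be auto-probed.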
*/ int i2c_check_7bit_addr_validity_strict(unsigned short addr) { /* * Reserved addresses per I2C specification: * 0x00 General call address / START byte * 0x01 CBUS address * 0x02 Reserved for different bus format * 0x03 Reserved for future purposes * 0x04-0x07 Hs-mode master code * 0x78-0x7b 10-bit slave addressing * 0x7c-0x7f Reserved for future purposes */ if (addr < 0x08 || addr > 0x77) return -EINVAL; return 0; } static int __i2c_check_addr_busy(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); int addr = *(int *)addrp; if (client && i2c_encode_flags_to_addr(client) == addr) return -EBUSY; return 0; } /* walk up mux tree */ static int i2c_check_mux_parents(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result; result = device_for_each_child(&adapter->dev, &addr, __i2c_check_addr_busy); if (!result && parent) result = i2c_check_mux_parents(parent, addr); return result; } /* recurse down mux tree */ static int i2c_check_mux_children(struct device *dev, void *addrp) { int result; if (dev->type == &i2c_adapter_type) result = device_for_each_child(dev, addrp, i2c_check_mux_children); else result = __i2c_check_addr_busy(dev, addrp); return result; } static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result = 0; if (parent) result = i2c_check_mux_parents(parent, addr); if (!result) result = device_for_each_child(&adapter->dev, &addr, i2c_check_mux_children); return result; } /** * i2c_adapter_lock_bus - Get exclusive access to an I2C bus segment * @adapter: Target I2C bus segment * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT * locks only this branch in the adapter tree */ static void i2c_adapter_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); } /** * i2c_adapter_trylock_bus - Try to get exclusive access to an I2C bus segment * @adapter: Target I2C bus segment * @flags: I2C_LOCK_ROOT_ADAPTER trylocks the root i2c adapter, I2C_LOCK_SEGMENT * trylocks only this branch in the adapter tree */ static int i2c_adapter_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { return rt_mutex_trylock(&adapter->bus_lock); } /** * i2c_adapter_unlock_bus - Release exclusive access to an I2C bus segment * @adapter: Target I2C bus segment * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT * unlocks only this branch in the adapter tree */ static void i2c_adapter_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { rt_mutex_unlock(&adapter->bus_lock); } static void i2c_dev_set_name(struct i2c_adapter *adap, struct i2c_client *client, struct i2c_board_info const *info) { struct acpi_device *adev = ACPI_COMPANION(&client->dev); if (info && info->dev_name) { dev_set_name(&client->dev, "i2c-%s", info->dev_name); return; } if (adev) { dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev)); return; } dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), i2c_encode_flags_to_addr(client)); } int i2c_dev_irq_from_resources(const struct resource *resources, unsigned int num_resources) { struct irq_data *irqd; int i; for (i = 0; i < num_resources; i++) { const struct resource *r = &resources[i]; if (resource_type(r) != IORESOURCE_IRQ) continue; if (r->flags & IORESOURCE_BITS) { irqd = irq_get_irq_data(r->start); if (!irqd) break; irqd_set_trigger_type(irqd, r->flags & 
IORESOURCE_BITS); } return r->start; } return 0; } /* * Serialize device instantiation in case it can be instantiated explicitly * and by auto-detection */ static int i2c_lock_addr(struct i2c_adapter *adap, unsigned short addr, unsigned short flags) { if (!(flags & I2C_CLIENT_TEN) && test_and_set_bit(addr, adap->addrs_in_instantiation)) return -EBUSY; return 0; } static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr, unsigned short flags) { if (!(flags & I2C_CLIENT_TEN)) clear_bit(addr, adap->addrs_in_instantiation); } /** * i2c_new_client_device - instantiate an i2c device * @adap: the adapter managing the device * @info: describes one I2C device; bus_num is ignored * Context: can sleep * * Create an i2c device. Binding is handled through driver model * probe()/remove() methods. A driver may be bound to this device when we * return from this function, or any later moment (e.g. maybe hotplugging will * load the driver module). This call is not appropriate for use by mainboard * initialization logic, which usually runs during an arch_initcall() long * before any i2c_adapter could exist. * * This returns the new i2c client, which may be saved for later use with * i2c_unregister_device(); or an ERR_PTR to describe the error. */ struct i2c_client * i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info) { struct i2c_client *client; bool need_put = false; int status; client = kzalloc(sizeof *client, GFP_KERNEL); if (!client) return ERR_PTR(-ENOMEM); client->adapter = adap; client->dev.platform_data = info->platform_data; client->flags = info->flags; client->addr = info->addr; client->init_irq = info->irq; if (!client->init_irq) client->init_irq = i2c_dev_irq_from_resources(info->resources, info->num_resources); strscpy(client->name, info->type, sizeof(client->name)); status = i2c_check_addr_validity(client->addr, client->flags); if (status) { dev_err(&adap->dev, "Invalid %d-bit I2C address 0x%02hx\n", client->flags & I2C_CLIENT_TEN ? 
10 : 7, client->addr); goto out_err_silent; } status = i2c_lock_addr(adap, client->addr, client->flags); if (status) goto out_err_silent; /* Check for address business */ status = i2c_check_addr_busy(adap, i2c_encode_flags_to_addr(client)); if (status) goto out_err; client->dev.parent = &client->adapter->dev; client->dev.bus = &i2c_bus_type; client->dev.type = &i2c_client_type; client->dev.of_node = of_node_get(info->of_node); client->dev.fwnode = info->fwnode; device_enable_async_suspend(&client->dev); if (info->swnode) { status = device_add_software_node(&client->dev, info->swnode); if (status) { dev_err(&adap->dev, "Failed to add software node to client %s: %d\n", client->name, status); goto out_err_put_of_node; } } i2c_dev_set_name(adap, client, info); status = device_register(&client->dev); if (status) goto out_remove_swnode; dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n", client->name, dev_name(&client->dev)); i2c_unlock_addr(adap, client->addr, client->flags); return client; out_remove_swnode: device_remove_software_node(&client->dev); need_put = true; out_err_put_of_node: of_node_put(info->of_node); out_err: dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x (%d)\n", client->name, client->addr, status); i2c_unlock_addr(adap, client->addr, client->flags); out_err_silent: if (need_put) put_device(&client->dev); else kfree(client); return ERR_PTR(status); } EXPORT_SYMBOL_GPL(i2c_new_client_device); /** * i2c_unregister_device - reverse effect of i2c_new_*_device() * @client: value returned from i2c_new_*_device() * Context: can sleep */ void i2c_unregister_device(struct i2c_client *client) { if (IS_ERR_OR_NULL(client)) return; if (client->dev.of_node) { of_node_clear_flag(client->dev.of_node, OF_POPULATED); of_node_put(client->dev.of_node); } if (ACPI_COMPANION(&client->dev)) acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); device_remove_software_node(&client->dev); device_unregister(&client->dev); } EXPORT_SYMBOL_GPL(i2c_unregister_device); /** * i2c_find_device_by_fwnode() - find an i2c_client for the fwnode * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_client * * Look up and return the &struct i2c_client corresponding to the @fwnode. * If no client can be found, or @fwnode is NULL, this returns NULL. * * The user must call put_device(&client->dev) once done with the i2c client. */ struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode) { struct i2c_client *client; struct device *dev; if (!fwnode) return NULL; dev = bus_find_device_by_fwnode(&i2c_bus_type, fwnode); if (!dev) return NULL; client = i2c_verify_client(dev); if (!client) put_device(dev); return client; } EXPORT_SYMBOL(i2c_find_device_by_fwnode); static const struct i2c_device_id dummy_id[] = { { "dummy", }, { "smbus_host_notify", }, { } }; static int dummy_probe(struct i2c_client *client) { return 0; } static struct i2c_driver dummy_driver = { .driver.name = "dummy", .probe = dummy_probe, .id_table = dummy_id, }; /** * i2c_new_dummy_device - return a new i2c device bound to a dummy driver * @adapter: the adapter managing the device * @address: seven bit address to be used * Context: can sleep * * This returns an I2C client bound to the "dummy" driver, intended for use * with devices that consume multiple addresses. Examples of such chips * include various EEPROMS (like 24c04 and 24c08 models). * * These dummy devices have two main uses. First, most I2C and SMBus calls * except i2c_transfer() need a client handle; the dummy will be that handle. 
* And second, this prevents the specified address from being bound to a * different driver. * * This returns the new i2c client, which should be saved for later use with * i2c_unregister_device(); or an ERR_PTR to describe the error. */ struct i2c_client *i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address) { struct i2c_board_info info = { I2C_BOARD_INFO("dummy", address), }; return i2c_new_client_device(adapter, &info); } EXPORT_SYMBOL_GPL(i2c_new_dummy_device); static void devm_i2c_release_dummy(void *client) { i2c_unregister_device(client); } /** * devm_i2c_new_dummy_device - return a new i2c device bound to a dummy driver * @dev: device the managed resource is bound to * @adapter: the adapter managing the device * @address: seven bit address to be used * Context: can sleep * * This is the device-managed version of @i2c_new_dummy_device. It returns the * new i2c client or an ERR_PTR in case of an error. */ struct i2c_client *devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adapter, u16 address) { struct i2c_client *client; int ret; client = i2c_new_dummy_device(adapter, address); if (IS_ERR(client)) return client; ret = devm_add_action_or_reset(dev, devm_i2c_release_dummy, client); if (ret) return ERR_PTR(ret); return client; } EXPORT_SYMBOL_GPL(devm_i2c_new_dummy_device); /** * i2c_new_ancillary_device - Helper to get the instantiated secondary address * and create the associated device * @client: Handle to the primary client * @name: Handle to specify which secondary address to get * @default_addr: Used as a fallback if no secondary address was specified * Context: can sleep * * I2C clients can be composed of multiple I2C slaves bound together in a single * component. The I2C client driver then binds to the master I2C slave and needs * to create I2C dummy clients to communicate with all the other slaves. * * This function creates and returns an I2C dummy client whose I2C address is * retrieved from the platform firmware based on the given slave name. If no * address is specified by the firmware default_addr is used. * * On DT-based platforms the address is retrieved from the "reg" property entry * cell whose "reg-names" value matches the slave name. * * This returns the new i2c client, which should be saved for later use with * i2c_unregister_device(); or an ERR_PTR to describe the error. 
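 * A minimal sketch (hypothetical node contents and variable name, not taken
 * from this file): if the client's DT node has reg = <0x10>, <0x18> and
 * reg-names = "main", "aux", the driver bound to the client at 0x10 can
 * create the secondary client with
 *
 *	aux_client = i2c_new_ancillary_device(client, "aux", 0x18);
 *	if (IS_ERR(aux_client))
 *		return PTR_ERR(aux_client);
 *
 * Here 0x18 would only be used as the fallback if the firmware provided no
 * matching "aux" entry.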
*/ struct i2c_client *i2c_new_ancillary_device(struct i2c_client *client, const char *name, u16 default_addr) { struct device_node *np = client->dev.of_node; u32 addr = default_addr; int i; if (np) { i = of_property_match_string(np, "reg-names", name); if (i >= 0) of_property_read_u32_index(np, "reg", i, &addr); } dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr); return i2c_new_dummy_device(client->adapter, addr); } EXPORT_SYMBOL_GPL(i2c_new_ancillary_device); /* ------------------------------------------------------------------------- */ /* I2C bus adapters -- one roots each I2C or SMBUS segment */ static void i2c_adapter_dev_release(struct device *dev) { struct i2c_adapter *adap = to_i2c_adapter(dev); complete(&adap->dev_released); } unsigned int i2c_adapter_depth(struct i2c_adapter *adapter) { unsigned int depth = 0; struct device *parent; for (parent = adapter->dev.parent; parent; parent = parent->parent) if (parent->type == &i2c_adapter_type) depth++; WARN_ONCE(depth >= MAX_LOCKDEP_SUBCLASSES, "adapter depth exceeds lockdep subclass limit\n"); return depth; } EXPORT_SYMBOL_GPL(i2c_adapter_depth); /* * Let users instantiate I2C devices through sysfs. This can be used when * platform initialization code doesn't contain the proper data for * whatever reason. Also useful for drivers that do device detection and * detection fails, either because the device uses an unexpected address, * or this is a compatible device with different ID register values. * * Parameter checking may look overzealous, but we really don't want * the user to provide incorrect parameters. */ static ssize_t new_device_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_adapter *adap = to_i2c_adapter(dev); struct i2c_board_info info; struct i2c_client *client; char *blank, end; int res; memset(&info, 0, sizeof(struct i2c_board_info)); blank = strchr(buf, ' '); if (!blank) { dev_err(dev, "%s: Missing parameters\n", "new_device"); return -EINVAL; } if (blank - buf > I2C_NAME_SIZE - 1) { dev_err(dev, "%s: Invalid device name\n", "new_device"); return -EINVAL; } memcpy(info.type, buf, blank - buf); /* Parse remaining parameters, reject extra parameters */ res = sscanf(++blank, "%hi%c", &info.addr, &end); if (res < 1) { dev_err(dev, "%s: Can't parse I2C address\n", "new_device"); return -EINVAL; } if (res > 1 && end != '\n') { dev_err(dev, "%s: Extra parameters\n", "new_device"); return -EINVAL; } if ((info.addr & I2C_ADDR_OFFSET_TEN_BIT) == I2C_ADDR_OFFSET_TEN_BIT) { info.addr &= ~I2C_ADDR_OFFSET_TEN_BIT; info.flags |= I2C_CLIENT_TEN; } if (info.addr & I2C_ADDR_OFFSET_SLAVE) { info.addr &= ~I2C_ADDR_OFFSET_SLAVE; info.flags |= I2C_CLIENT_SLAVE; } client = i2c_new_client_device(adap, &info); if (IS_ERR(client)) return PTR_ERR(client); /* Keep track of the added device */ mutex_lock(&adap->userspace_clients_lock); list_add_tail(&client->detected, &adap->userspace_clients); mutex_unlock(&adap->userspace_clients_lock); dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device", info.type, info.addr); return count; } static DEVICE_ATTR_WO(new_device); /* * And of course let the users delete the devices they instantiated, if * they got it wrong. This interface can only be used to delete devices * instantiated by i2c_sysfs_new_device above. This guarantees that we * don't delete devices to which some kernel code still has references. * * Parameter checking may look overzealous, but we really don't want * the user to delete the wrong device. 
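 * For example (bus number, device name and address are illustrative only),
 * a device created from userspace with
 *
 *	echo eeprom 0x50 > /sys/bus/i2c/devices/i2c-3/new_device
 *
 * can be removed again with
 *
 *	echo 0x50 > /sys/bus/i2c/devices/i2c-3/delete_device
 *
 * whereas devices instantiated by kernel code cannot be deleted this way.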
*/ static ssize_t delete_device_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_adapter *adap = to_i2c_adapter(dev); struct i2c_client *client, *next; unsigned short addr; char end; int res; /* Parse parameters, reject extra parameters */ res = sscanf(buf, "%hi%c", &addr, &end); if (res < 1) { dev_err(dev, "%s: Can't parse I2C address\n", "delete_device"); return -EINVAL; } if (res > 1 && end != '\n') { dev_err(dev, "%s: Extra parameters\n", "delete_device"); return -EINVAL; } /* Make sure the device was added through sysfs */ res = -ENOENT; mutex_lock_nested(&adap->userspace_clients_lock, i2c_adapter_depth(adap)); list_for_each_entry_safe(client, next, &adap->userspace_clients, detected) { if (i2c_encode_flags_to_addr(client) == addr) { dev_info(dev, "%s: Deleting device %s at 0x%02hx\n", "delete_device", client->name, client->addr); list_del(&client->detected); i2c_unregister_device(client); res = count; break; } } mutex_unlock(&adap->userspace_clients_lock); if (res < 0) dev_err(dev, "%s: Can't find device in list\n", "delete_device"); return res; } static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL, delete_device_store); static struct attribute *i2c_adapter_attrs[] = { &dev_attr_name.attr, &dev_attr_new_device.attr, &dev_attr_delete_device.attr, NULL }; ATTRIBUTE_GROUPS(i2c_adapter); const struct device_type i2c_adapter_type = { .groups = i2c_adapter_groups, .release = i2c_adapter_dev_release, }; EXPORT_SYMBOL_GPL(i2c_adapter_type); /** * i2c_verify_adapter - return parameter as i2c_adapter or NULL * @dev: device, probably from some driver model iterator * * When traversing the driver model tree, perhaps using driver model * iterators like @device_for_each_child(), you can't assume very much * about the nodes you find. Use this function to avoid oopses caused * by wrongly treating some non-I2C device as an i2c_adapter. */ struct i2c_adapter *i2c_verify_adapter(struct device *dev) { return (dev->type == &i2c_adapter_type) ? 
to_i2c_adapter(dev) : NULL; } EXPORT_SYMBOL(i2c_verify_adapter); static void i2c_scan_static_board_info(struct i2c_adapter *adapter) { struct i2c_devinfo *devinfo; down_read(&__i2c_board_lock); list_for_each_entry(devinfo, &__i2c_board_list, list) { if (devinfo->busnum == adapter->nr && IS_ERR(i2c_new_client_device(adapter, &devinfo->board_info))) dev_err(&adapter->dev, "Can't create device at 0x%02x\n", devinfo->board_info.addr); } up_read(&__i2c_board_lock); } static int i2c_do_add_adapter(struct i2c_driver *driver, struct i2c_adapter *adap) { /* Detect supported devices on that bus, and instantiate them */ i2c_detect(adap, driver); return 0; } static int __process_new_adapter(struct device_driver *d, void *data) { return i2c_do_add_adapter(to_i2c_driver(d), data); } static const struct i2c_lock_operations i2c_adapter_lock_ops = { .lock_bus = i2c_adapter_lock_bus, .trylock_bus = i2c_adapter_trylock_bus, .unlock_bus = i2c_adapter_unlock_bus, }; static void i2c_host_notify_irq_teardown(struct i2c_adapter *adap) { struct irq_domain *domain = adap->host_notify_domain; irq_hw_number_t hwirq; if (!domain) return; for (hwirq = 0 ; hwirq < I2C_ADDR_7BITS_COUNT ; hwirq++) irq_dispose_mapping(irq_find_mapping(domain, hwirq)); irq_domain_remove(domain); adap->host_notify_domain = NULL; } static int i2c_host_notify_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw_irq_num) { irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq); return 0; } static const struct irq_domain_ops i2c_host_notify_irq_ops = { .map = i2c_host_notify_irq_map, }; static int i2c_setup_host_notify_irq_domain(struct i2c_adapter *adap) { struct irq_domain *domain; if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_HOST_NOTIFY)) return 0; domain = irq_domain_create_linear(adap->dev.parent->fwnode, I2C_ADDR_7BITS_COUNT, &i2c_host_notify_irq_ops, adap); if (!domain) return -ENOMEM; adap->host_notify_domain = domain; return 0; } /** * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct * I2C client. * @adap: the adapter * @addr: the I2C address of the notifying device * Context: can't sleep * * Helper function to be called from an I2C bus driver's interrupt * handler. It will schedule the Host Notify IRQ. 
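 * A minimal sketch (hypothetical "foo" adapter driver; the register bit and
 * helper are made up): a controller that latches the notifying address in
 * hardware would typically do something like
 *
 *	if (status & FOO_IRQ_HOST_NOTIFY)
 *		i2c_handle_smbus_host_notify(&foo->adap,
 *					     foo_read_notify_addr(foo));
 *
 * from its interrupt handler.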
*/ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr) { int irq; if (!adap) return -EINVAL; dev_dbg(&adap->dev, "Detected HostNotify from address 0x%02x", addr); irq = irq_find_mapping(adap->host_notify_domain, addr); if (irq <= 0) return -ENXIO; generic_handle_irq_safe(irq); return 0; } EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify); static int i2c_register_adapter(struct i2c_adapter *adap) { int res = -EINVAL; /* Can't register until after driver model init */ if (WARN_ON(!is_registered)) { res = -EAGAIN; goto out_list; } /* Sanity checks */ if (WARN(!adap->name[0], "i2c adapter has no name")) goto out_list; if (!adap->algo) { pr_err("adapter '%s': no algo supplied!\n", adap->name); goto out_list; } if (!adap->lock_ops) adap->lock_ops = &i2c_adapter_lock_ops; adap->locked_flags = 0; rt_mutex_init(&adap->bus_lock); rt_mutex_init(&adap->mux_lock); mutex_init(&adap->userspace_clients_lock); INIT_LIST_HEAD(&adap->userspace_clients); /* Set default timeout to 1 second if not already set */ if (adap->timeout == 0) adap->timeout = HZ; /* register soft irqs for Host Notify */ res = i2c_setup_host_notify_irq_domain(adap); if (res) { pr_err("adapter '%s': can't create Host Notify IRQs (%d)\n", adap->name, res); goto out_list; } dev_set_name(&adap->dev, "i2c-%d", adap->nr); adap->dev.bus = &i2c_bus_type; adap->dev.type = &i2c_adapter_type; device_initialize(&adap->dev); /* * This adapter can be used as a parent immediately after device_add(), * setup runtime-pm (especially ignore-children) before hand. */ device_enable_async_suspend(&adap->dev); pm_runtime_no_callbacks(&adap->dev); pm_suspend_ignore_children(&adap->dev, true); pm_runtime_enable(&adap->dev); res = device_add(&adap->dev); if (res) { pr_err("adapter '%s': can't register device (%d)\n", adap->name, res); put_device(&adap->dev); goto out_list; } adap->debugfs = debugfs_create_dir(dev_name(&adap->dev), i2c_debugfs_root); res = i2c_setup_smbus_alert(adap); if (res) goto out_reg; res = i2c_init_recovery(adap); if (res == -EPROBE_DEFER) goto out_reg; dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); /* create pre-declared device nodes */ of_i2c_register_devices(adap); i2c_acpi_install_space_handler(adap); i2c_acpi_register_devices(adap); if (adap->nr < __i2c_first_dynamic_bus_num) i2c_scan_static_board_info(adap); /* Notify drivers */ mutex_lock(&core_lock); bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter); mutex_unlock(&core_lock); return 0; out_reg: debugfs_remove_recursive(adap->debugfs); init_completion(&adap->dev_released); device_unregister(&adap->dev); wait_for_completion(&adap->dev_released); out_list: mutex_lock(&core_lock); idr_remove(&i2c_adapter_idr, adap->nr); mutex_unlock(&core_lock); return res; } /** * __i2c_add_numbered_adapter - i2c_add_numbered_adapter where nr is never -1 * @adap: the adapter to register (with adap->nr initialized) * Context: can sleep * * See i2c_add_numbered_adapter() for details. */ static int __i2c_add_numbered_adapter(struct i2c_adapter *adap) { int id; mutex_lock(&core_lock); id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1, GFP_KERNEL); mutex_unlock(&core_lock); if (WARN(id < 0, "couldn't get idr")) return id == -ENOSPC ? -EBUSY : id; return i2c_register_adapter(adap); } /** * i2c_add_adapter - declare i2c adapter, use dynamic bus number * @adapter: the adapter to add * Context: can sleep * * This routine is used to declare an I2C adapter when its bus number * doesn't matter or when its bus number is specified by an dt alias. 
* Examples of bases when the bus number doesn't matter: I2C adapters * dynamically added by USB links or PCI plugin cards. * * When this returns zero, a new bus number was allocated and stored * in adap->nr, and the specified adapter became available for clients. * Otherwise, a negative errno value is returned. */ int i2c_add_adapter(struct i2c_adapter *adapter) { struct device *dev = &adapter->dev; int id; if (dev->of_node) { id = of_alias_get_id(dev->of_node, "i2c"); if (id >= 0) { adapter->nr = id; return __i2c_add_numbered_adapter(adapter); } } mutex_lock(&core_lock); id = idr_alloc(&i2c_adapter_idr, adapter, __i2c_first_dynamic_bus_num, 0, GFP_KERNEL); mutex_unlock(&core_lock); if (WARN(id < 0, "couldn't get idr")) return id; adapter->nr = id; return i2c_register_adapter(adapter); } EXPORT_SYMBOL(i2c_add_adapter); /** * i2c_add_numbered_adapter - declare i2c adapter, use static bus number * @adap: the adapter to register (with adap->nr initialized) * Context: can sleep * * This routine is used to declare an I2C adapter when its bus number * matters. For example, use it for I2C adapters from system-on-chip CPUs, * or otherwise built in to the system's mainboard, and where i2c_board_info * is used to properly configure I2C devices. * * If the requested bus number is set to -1, then this function will behave * identically to i2c_add_adapter, and will dynamically assign a bus number. * * If no devices have pre-been declared for this bus, then be sure to * register the adapter before any dynamically allocated ones. Otherwise * the required bus ID may not be available. * * When this returns zero, the specified adapter became available for * clients using the bus number provided in adap->nr. Also, the table * of I2C devices pre-declared using i2c_register_board_info() is scanned, * and the appropriate driver model device nodes are created. Otherwise, a * negative errno value is returned. */ int i2c_add_numbered_adapter(struct i2c_adapter *adap) { if (adap->nr == -1) /* -1 means dynamically assign bus id */ return i2c_add_adapter(adap); return __i2c_add_numbered_adapter(adap); } EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter); static void i2c_do_del_adapter(struct i2c_driver *driver, struct i2c_adapter *adapter) { struct i2c_client *client, *_n; /* Remove the devices we created ourselves as the result of hardware * probing (using a driver's detect method) */ list_for_each_entry_safe(client, _n, &driver->clients, detected) { if (client->adapter == adapter) { dev_dbg(&adapter->dev, "Removing %s at 0x%x\n", client->name, client->addr); list_del(&client->detected); i2c_unregister_device(client); } } } static int __unregister_client(struct device *dev, void *dummy) { struct i2c_client *client = i2c_verify_client(dev); if (client && strcmp(client->name, "dummy")) i2c_unregister_device(client); return 0; } static int __unregister_dummy(struct device *dev, void *dummy) { struct i2c_client *client = i2c_verify_client(dev); i2c_unregister_device(client); return 0; } static int __process_removed_adapter(struct device_driver *d, void *data) { i2c_do_del_adapter(to_i2c_driver(d), data); return 0; } /** * i2c_del_adapter - unregister I2C adapter * @adap: the adapter being unregistered * Context: can sleep * * This unregisters an I2C adapter which was previously registered * by @i2c_add_adapter or @i2c_add_numbered_adapter. 
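 * A minimal sketch of the usual pairing in an adapter driver (hypothetical
 * "foo" driver, not from this file): the probe path does
 *
 *	ret = i2c_add_adapter(&foo->adap);
 *	if (ret)
 *		return ret;
 *
 * and the remove path calls
 *
 *	i2c_del_adapter(&foo->adap);
 *
 * Drivers that prefer automatic cleanup can use devm_i2c_add_adapter()
 * below instead and drop the explicit deletion.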
*/ void i2c_del_adapter(struct i2c_adapter *adap) { struct i2c_adapter *found; struct i2c_client *client, *next; /* First make sure that this adapter was ever added */ mutex_lock(&core_lock); found = idr_find(&i2c_adapter_idr, adap->nr); mutex_unlock(&core_lock); if (found != adap) { pr_debug("attempting to delete unregistered adapter [%s]\n", adap->name); return; } i2c_acpi_remove_space_handler(adap); /* Tell drivers about this removal */ mutex_lock(&core_lock); bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_removed_adapter); mutex_unlock(&core_lock); /* Remove devices instantiated from sysfs */ mutex_lock_nested(&adap->userspace_clients_lock, i2c_adapter_depth(adap)); list_for_each_entry_safe(client, next, &adap->userspace_clients, detected) { dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name, client->addr); list_del(&client->detected); i2c_unregister_device(client); } mutex_unlock(&adap->userspace_clients_lock); /* Detach any active clients. This can't fail, thus we do not * check the returned value. This is a two-pass process, because * we can't remove the dummy devices during the first pass: they * could have been instantiated by real devices wishing to clean * them up properly, so we give them a chance to do that first. */ device_for_each_child(&adap->dev, NULL, __unregister_client); device_for_each_child(&adap->dev, NULL, __unregister_dummy); /* device name is gone after device_unregister */ dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); pm_runtime_disable(&adap->dev); i2c_host_notify_irq_teardown(adap); debugfs_remove_recursive(adap->debugfs); /* wait until all references to the device are gone * * FIXME: This is old code and should ideally be replaced by an * alternative which results in decoupling the lifetime of the struct * device from the i2c_adapter, like spi or netdev do. Any solution * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled! */ init_completion(&adap->dev_released); device_unregister(&adap->dev); wait_for_completion(&adap->dev_released); /* free bus id */ mutex_lock(&core_lock); idr_remove(&i2c_adapter_idr, adap->nr); mutex_unlock(&core_lock); /* Clear the device structure in case this adapter is ever going to be added again */ memset(&adap->dev, 0, sizeof(adap->dev)); } EXPORT_SYMBOL(i2c_del_adapter); static void devm_i2c_del_adapter(void *adapter) { i2c_del_adapter(adapter); } /** * devm_i2c_add_adapter - device-managed variant of i2c_add_adapter() * @dev: managing device for adding this I2C adapter * @adapter: the adapter to add * Context: can sleep * * Add adapter with dynamic bus number, same with i2c_add_adapter() * but the adapter will be auto deleted on driver detach. */ int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter) { int ret; ret = i2c_add_adapter(adapter); if (ret) return ret; return devm_add_action_or_reset(dev, devm_i2c_del_adapter, adapter); } EXPORT_SYMBOL_GPL(devm_i2c_add_adapter); static int i2c_dev_or_parent_fwnode_match(struct device *dev, const void *data) { if (dev_fwnode(dev) == data) return 1; if (dev->parent && dev_fwnode(dev->parent) == data) return 1; return 0; } /** * i2c_find_adapter_by_fwnode() - find an i2c_adapter for the fwnode * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter * * Look up and return the &struct i2c_adapter corresponding to the @fwnode. * If no adapter can be found, or @fwnode is NULL, this returns NULL. * * The user must call put_device(&adapter->dev) once done with the i2c adapter. 
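 * A minimal usage sketch (how the caller obtained @fwnode is out of scope
 * here):
 *
 *	adapter = i2c_find_adapter_by_fwnode(fwnode);
 *	if (!adapter)
 *		return -ENODEV;
 *	// ... use the adapter ...
 *	put_device(&adapter->dev);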
*/ struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode) { struct i2c_adapter *adapter; struct device *dev; if (!fwnode) return NULL; dev = bus_find_device(&i2c_bus_type, NULL, fwnode, i2c_dev_or_parent_fwnode_match); if (!dev) return NULL; adapter = i2c_verify_adapter(dev); if (!adapter) put_device(dev); return adapter; } EXPORT_SYMBOL(i2c_find_adapter_by_fwnode); /** * i2c_get_adapter_by_fwnode() - find an i2c_adapter for the fwnode * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter * * Look up and return the &struct i2c_adapter corresponding to the @fwnode, * and increment the adapter module's use count. If no adapter can be found, * or @fwnode is NULL, this returns NULL. * * The user must call i2c_put_adapter(adapter) once done with the i2c adapter. * Note that this is different from i2c_find_adapter_by_node(). */ struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode) { struct i2c_adapter *adapter; adapter = i2c_find_adapter_by_fwnode(fwnode); if (!adapter) return NULL; if (!try_module_get(adapter->owner)) { put_device(&adapter->dev); adapter = NULL; } return adapter; } EXPORT_SYMBOL(i2c_get_adapter_by_fwnode); static void i2c_parse_timing(struct device *dev, char *prop_name, u32 *cur_val_p, u32 def_val, bool use_def) { int ret; ret = device_property_read_u32(dev, prop_name, cur_val_p); if (ret && use_def) *cur_val_p = def_val; dev_dbg(dev, "%s: %u\n", prop_name, *cur_val_p); } /** * i2c_parse_fw_timings - get I2C related timing parameters from firmware * @dev: The device to scan for I2C timing properties * @t: the i2c_timings struct to be filled with values * @use_defaults: bool to use sane defaults derived from the I2C specification * when properties are not found, otherwise don't update * * Scan the device for the generic I2C properties describing timing parameters * for the signal and fill the given struct with the results. If a property was * not found and use_defaults was true, then maximum timings are assumed which * are derived from the I2C specification. If use_defaults is not used, the * results will be as before, so drivers can apply their own defaults before * calling this helper. The latter is mainly intended for avoiding regressions * of existing drivers which want to switch to this function. New drivers * almost always should use the defaults. */ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults) { bool u = use_defaults; u32 d; i2c_parse_timing(dev, "clock-frequency", &t->bus_freq_hz, I2C_MAX_STANDARD_MODE_FREQ, u); d = t->bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ ? 1000 : t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ ? 300 : 120; i2c_parse_timing(dev, "i2c-scl-rising-time-ns", &t->scl_rise_ns, d, u); d = t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ ? 
300 : 120; i2c_parse_timing(dev, "i2c-scl-falling-time-ns", &t->scl_fall_ns, d, u); i2c_parse_timing(dev, "i2c-scl-internal-delay-ns", &t->scl_int_delay_ns, 0, u); i2c_parse_timing(dev, "i2c-sda-falling-time-ns", &t->sda_fall_ns, t->scl_fall_ns, u); i2c_parse_timing(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns, 0, u); i2c_parse_timing(dev, "i2c-digital-filter-width-ns", &t->digital_filter_width_ns, 0, u); i2c_parse_timing(dev, "i2c-analog-filter-cutoff-frequency", &t->analog_filter_cutoff_freq_hz, 0, u); } EXPORT_SYMBOL_GPL(i2c_parse_fw_timings); /* ------------------------------------------------------------------------- */ int i2c_for_each_dev(void *data, int (*fn)(struct device *dev, void *data)) { int res; mutex_lock(&core_lock); res = bus_for_each_dev(&i2c_bus_type, NULL, data, fn); mutex_unlock(&core_lock); return res; } EXPORT_SYMBOL_GPL(i2c_for_each_dev); static int __process_new_driver(struct device *dev, void *data) { if (dev->type != &i2c_adapter_type) return 0; return i2c_do_add_adapter(data, to_i2c_adapter(dev)); } /* * An i2c_driver is used with one or more i2c_client (device) nodes to access * i2c slave chips, on a bus instance associated with some i2c_adapter. */ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) { int res; /* Can't register until after driver model init */ if (WARN_ON(!is_registered)) return -EAGAIN; /* add the driver to the list of i2c drivers in the driver core */ driver->driver.owner = owner; driver->driver.bus = &i2c_bus_type; INIT_LIST_HEAD(&driver->clients); /* When registration returns, the driver core * will have called probe() for all matching-but-unbound devices. */ res = driver_register(&driver->driver); if (res) return res; pr_debug("driver [%s] registered\n", driver->driver.name); /* Walk the adapters that are already present */ i2c_for_each_dev(driver, __process_new_driver); return 0; } EXPORT_SYMBOL(i2c_register_driver); static int __process_removed_driver(struct device *dev, void *data) { if (dev->type == &i2c_adapter_type) i2c_do_del_adapter(data, to_i2c_adapter(dev)); return 0; } /** * i2c_del_driver - unregister I2C driver * @driver: the driver being unregistered * Context: can sleep */ void i2c_del_driver(struct i2c_driver *driver) { i2c_for_each_dev(driver, __process_removed_driver); driver_unregister(&driver->driver); pr_debug("driver [%s] unregistered\n", driver->driver.name); } EXPORT_SYMBOL(i2c_del_driver); /* ------------------------------------------------------------------------- */ struct i2c_cmd_arg { unsigned cmd; void *arg; }; static int i2c_cmd(struct device *dev, void *_arg) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_cmd_arg *arg = _arg; struct i2c_driver *driver; if (!client || !client->dev.driver) return 0; driver = to_i2c_driver(client->dev.driver); if (driver->command) driver->command(client, arg->cmd, arg->arg); return 0; } void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg) { struct i2c_cmd_arg cmd_arg; cmd_arg.cmd = cmd; cmd_arg.arg = arg; device_for_each_child(&adap->dev, &cmd_arg, i2c_cmd); } EXPORT_SYMBOL(i2c_clients_command); static int __init i2c_init(void) { int retval; retval = of_alias_get_highest_id("i2c"); down_write(&__i2c_board_lock); if (retval >= __i2c_first_dynamic_bus_num) __i2c_first_dynamic_bus_num = retval + 1; up_write(&__i2c_board_lock); retval = bus_register(&i2c_bus_type); if (retval) return retval; is_registered = true; i2c_debugfs_root = debugfs_create_dir("i2c", NULL); retval = i2c_add_driver(&dummy_driver); if (retval) 
goto class_err; if (IS_ENABLED(CONFIG_OF_DYNAMIC)) WARN_ON(of_reconfig_notifier_register(&i2c_of_notifier)); if (IS_ENABLED(CONFIG_ACPI)) WARN_ON(acpi_reconfig_notifier_register(&i2c_acpi_notifier)); return 0; class_err: is_registered = false; bus_unregister(&i2c_bus_type); return retval; } static void __exit i2c_exit(void) { if (IS_ENABLED(CONFIG_ACPI)) WARN_ON(acpi_reconfig_notifier_unregister(&i2c_acpi_notifier)); if (IS_ENABLED(CONFIG_OF_DYNAMIC)) WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier)); i2c_del_driver(&dummy_driver); debugfs_remove_recursive(i2c_debugfs_root); bus_unregister(&i2c_bus_type); tracepoint_synchronize_unregister(); } /* We must initialize early, because some subsystems register i2c drivers * in subsys_initcall() code, but are linked (and initialized) before i2c. */ postcore_initcall(i2c_init); module_exit(i2c_exit); /* ---------------------------------------------------- * the functional interface to the i2c busses. * ---------------------------------------------------- */ /* Check if val is exceeding the quirk IFF quirk is non 0 */ #define i2c_quirk_exceeded(val, quirk) ((quirk) && ((val) > (quirk))) static int i2c_quirk_error(struct i2c_adapter *adap, struct i2c_msg *msg, char *err_msg) { dev_err_ratelimited(&adap->dev, "adapter quirk: %s (addr 0x%04x, size %u, %s)\n", err_msg, msg->addr, msg->len, msg->flags & I2C_M_RD ? "read" : "write"); return -EOPNOTSUPP; } static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { const struct i2c_adapter_quirks *q = adap->quirks; int max_num = q->max_num_msgs, i; bool do_len_check = true; if (q->flags & I2C_AQ_COMB) { max_num = 2; /* special checks for combined messages */ if (num == 2) { if (q->flags & I2C_AQ_COMB_WRITE_FIRST && msgs[0].flags & I2C_M_RD) return i2c_quirk_error(adap, &msgs[0], "1st comb msg must be write"); if (q->flags & I2C_AQ_COMB_READ_SECOND && !(msgs[1].flags & I2C_M_RD)) return i2c_quirk_error(adap, &msgs[1], "2nd comb msg must be read"); if (q->flags & I2C_AQ_COMB_SAME_ADDR && msgs[0].addr != msgs[1].addr) return i2c_quirk_error(adap, &msgs[0], "comb msg only to same addr"); if (i2c_quirk_exceeded(msgs[0].len, q->max_comb_1st_msg_len)) return i2c_quirk_error(adap, &msgs[0], "msg too long"); if (i2c_quirk_exceeded(msgs[1].len, q->max_comb_2nd_msg_len)) return i2c_quirk_error(adap, &msgs[1], "msg too long"); do_len_check = false; } } if (i2c_quirk_exceeded(num, max_num)) return i2c_quirk_error(adap, &msgs[0], "too many messages"); for (i = 0; i < num; i++) { u16 len = msgs[i].len; if (msgs[i].flags & I2C_M_RD) { if (do_len_check && i2c_quirk_exceeded(len, q->max_read_len)) return i2c_quirk_error(adap, &msgs[i], "msg too long"); if (q->flags & I2C_AQ_NO_ZERO_LEN_READ && len == 0) return i2c_quirk_error(adap, &msgs[i], "no zero length"); } else { if (do_len_check && i2c_quirk_exceeded(len, q->max_write_len)) return i2c_quirk_error(adap, &msgs[i], "msg too long"); if (q->flags & I2C_AQ_NO_ZERO_LEN_WRITE && len == 0) return i2c_quirk_error(adap, &msgs[i], "no zero length"); } } return 0; } /** * __i2c_transfer - unlocked flavor of i2c_transfer * @adap: Handle to I2C bus * @msgs: One or more messages to execute before STOP is issued to * terminate the operation; each message begins with a START. * @num: Number of messages to be executed. * * Returns negative errno, else the number of messages executed. * * Adapter lock must be held when calling this function. No debug logging * takes place. 
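 * A minimal sketch of the expected calling pattern (msgs and num prepared by
 * the caller):
 *
 *	i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
 *	ret = __i2c_transfer(adap, msgs, num);
 *	i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
 *
 * i2c_transfer() below wraps essentially this pattern and should be used
 * unless the caller needs to hold the lock across several transfers.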
*/ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { unsigned long orig_jiffies; int ret, try; if (!adap->algo->master_xfer) { dev_dbg(&adap->dev, "I2C level transfers not supported\n"); return -EOPNOTSUPP; } if (WARN_ON(!msgs || num < 1)) return -EINVAL; ret = __i2c_check_suspended(adap); if (ret) return ret; if (adap->quirks && i2c_check_for_quirks(adap, msgs, num)) return -EOPNOTSUPP; /* * i2c_trace_msg_key gets enabled when tracepoint i2c_transfer gets * enabled. This is an efficient way of keeping the for-loop from * being executed when not needed. */ if (static_branch_unlikely(&i2c_trace_msg_key)) { int i; for (i = 0; i < num; i++) if (msgs[i].flags & I2C_M_RD) trace_i2c_read(adap, &msgs[i], i); else trace_i2c_write(adap, &msgs[i], i); } /* Retry automatically on arbitration loss */ orig_jiffies = jiffies; for (ret = 0, try = 0; try <= adap->retries; try++) { if (i2c_in_atomic_xfer_mode() && adap->algo->master_xfer_atomic) ret = adap->algo->master_xfer_atomic(adap, msgs, num); else ret = adap->algo->master_xfer(adap, msgs, num); if (ret != -EAGAIN) break; if (time_after(jiffies, orig_jiffies + adap->timeout)) break; } if (static_branch_unlikely(&i2c_trace_msg_key)) { int i; for (i = 0; i < ret; i++) if (msgs[i].flags & I2C_M_RD) trace_i2c_reply(adap, &msgs[i], i); trace_i2c_result(adap, num, ret); } return ret; } EXPORT_SYMBOL(__i2c_transfer); /** * i2c_transfer - execute a single or combined I2C message * @adap: Handle to I2C bus * @msgs: One or more messages to execute before STOP is issued to * terminate the operation; each message begins with a START. * @num: Number of messages to be executed. * * Returns negative errno, else the number of messages executed. * * Note that there is no requirement that each message be sent to * the same slave address, although that is the most common model. */ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { int ret; /* REVISIT the fault reporting model here is weak: * * - When we get an error after receiving N bytes from a slave, * there is no way to report "N". * * - When we get a NAK after transmitting N bytes to a slave, * there is no way to report "N" ... or to let the master * continue executing the rest of this combined message, if * that's the appropriate response. * * - When for example "num" is two and we successfully complete * the first message but get an error part way through the * second, it's unclear whether that should be reported as * one (discarding status on the second message) or errno * (discarding status on the first one). */ ret = __i2c_lock_bus_helper(adap); if (ret) return ret; ret = __i2c_transfer(adap, msgs, num); i2c_unlock_bus(adap, I2C_LOCK_SEGMENT); return ret; } EXPORT_SYMBOL(i2c_transfer); /** * i2c_transfer_buffer_flags - issue a single I2C message transferring data * to/from a buffer * @client: Handle to slave device * @buf: Where the data is stored * @count: How many bytes to transfer, must be less than 64k since msg.len is u16 * @flags: The flags to be used for the message, e.g. I2C_M_RD for reads * * Returns negative errno, or else the number of bytes transferred. */ int i2c_transfer_buffer_flags(const struct i2c_client *client, char *buf, int count, u16 flags) { int ret; struct i2c_msg msg = { .addr = client->addr, .flags = flags | (client->flags & I2C_M_TEN), .len = count, .buf = buf, }; ret = i2c_transfer(client->adapter, &msg, 1); /* * If everything went ok (i.e. 1 msg transferred), return #bytes * transferred, else error code. */ return (ret == 1) ? 
count : ret; } EXPORT_SYMBOL(i2c_transfer_buffer_flags); /** * i2c_get_device_id - get manufacturer, part id and die revision of a device * @client: The device to query * @id: The queried information * * Returns negative errno on error, zero on success. */ int i2c_get_device_id(const struct i2c_client *client, struct i2c_device_identity *id) { struct i2c_adapter *adap = client->adapter; union i2c_smbus_data raw_id; int ret; if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) return -EOPNOTSUPP; raw_id.block[0] = 3; ret = i2c_smbus_xfer(adap, I2C_ADDR_DEVICE_ID, 0, I2C_SMBUS_READ, client->addr << 1, I2C_SMBUS_I2C_BLOCK_DATA, &raw_id); if (ret) return ret; id->manufacturer_id = (raw_id.block[1] << 4) | (raw_id.block[2] >> 4); id->part_id = ((raw_id.block[2] & 0xf) << 5) | (raw_id.block[3] >> 3); id->die_revision = raw_id.block[3] & 0x7; return 0; } EXPORT_SYMBOL_GPL(i2c_get_device_id); /** * i2c_client_get_device_id - get the driver match table entry of a device * @client: the device to query. The device must be bound to a driver * * Returns a pointer to the matching entry if found, NULL otherwise. */ const struct i2c_device_id *i2c_client_get_device_id(const struct i2c_client *client) { const struct i2c_driver *drv = to_i2c_driver(client->dev.driver); return i2c_match_id(drv->id_table, client); } EXPORT_SYMBOL_GPL(i2c_client_get_device_id); /* ---------------------------------------------------- * the i2c address scanning function * Will not work for 10-bit addresses! * ---------------------------------------------------- */ /* * Legacy default probe function, mostly relevant for SMBus. The default * probe method is a quick write, but it is known to corrupt the 24RF08 * EEPROMs due to a state machine bug, and could also irreversibly * write-protect some EEPROMs, so for address ranges 0x30-0x37 and 0x50-0x5f, * we use a short byte read instead. Also, some bus drivers don't implement * quick write, so we fallback to a byte read in that case too. * On x86, there is another special case for FSC hardware monitoring chips, * which want regular byte reads (address 0x73.) Fortunately, these are the * only known chips using this I2C address on PC hardware. * Returns 1 if probe succeeded, 0 if not. 
*/ static int i2c_default_probe(struct i2c_adapter *adap, unsigned short addr) { int err; union i2c_smbus_data dummy; #ifdef CONFIG_X86 if (addr == 0x73 && (adap->class & I2C_CLASS_HWMON) && i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE_DATA)) err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE_DATA, &dummy); else #endif if (!((addr & ~0x07) == 0x30 || (addr & ~0x0f) == 0x50) && i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_QUICK, NULL); else if (i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE)) err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy); else { dev_warn(&adap->dev, "No suitable probing method supported for address 0x%02X\n", addr); err = -EOPNOTSUPP; } return err >= 0; } static int i2c_detect_address(struct i2c_client *temp_client, struct i2c_driver *driver) { struct i2c_board_info info; struct i2c_adapter *adapter = temp_client->adapter; int addr = temp_client->addr; int err; /* Make sure the address is valid */ err = i2c_check_7bit_addr_validity_strict(addr); if (err) { dev_warn(&adapter->dev, "Invalid probe address 0x%02x\n", addr); return err; } /* Skip if already in use (7 bit, no need to encode flags) */ if (i2c_check_addr_busy(adapter, addr)) return 0; /* Make sure there is something at this address */ if (!i2c_default_probe(adapter, addr)) return 0; /* Finally call the custom detection function */ memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = addr; err = driver->detect(temp_client, &info); if (err) { /* -ENODEV is returned if the detection fails. We catch it here as this isn't an error. */ return err == -ENODEV ? 0 : err; } /* Consistency check */ if (info.type[0] == '\0') { dev_err(&adapter->dev, "%s detection function provided no name for 0x%x\n", driver->driver.name, addr); } else { struct i2c_client *client; /* Detection succeeded, instantiate the device */ if (adapter->class & I2C_CLASS_DEPRECATED) dev_warn(&adapter->dev, "This adapter will soon drop class based instantiation of devices. " "Please make sure client 0x%02x gets instantiated by other means. " "Check 'Documentation/i2c/instantiating-devices.rst' for details.\n", info.addr); dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n", info.type, info.addr); client = i2c_new_client_device(adapter, &info); if (!IS_ERR(client)) list_add_tail(&client->detected, &driver->clients); else dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n", info.type, info.addr); } return 0; } static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) { const unsigned short *address_list; struct i2c_client *temp_client; int i, err = 0; address_list = driver->address_list; if (!driver->detect || !address_list) return 0; /* Warn that the adapter lost class based instantiation */ if (adapter->class == I2C_CLASS_DEPRECATED) { dev_dbg(&adapter->dev, "This adapter dropped support for I2C classes and won't auto-detect %s devices anymore. 
" "If you need it, check 'Documentation/i2c/instantiating-devices.rst' for alternatives.\n", driver->driver.name); return 0; } /* Stop here if the classes do not match */ if (!(adapter->class & driver->class)) return 0; /* Set up a temporary client to help detect callback */ temp_client = kzalloc(sizeof(*temp_client), GFP_KERNEL); if (!temp_client) return -ENOMEM; temp_client->adapter = adapter; for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) { dev_dbg(&adapter->dev, "found normal entry for adapter %d, addr 0x%02x\n", i2c_adapter_id(adapter), address_list[i]); temp_client->addr = address_list[i]; err = i2c_detect_address(temp_client, driver); if (unlikely(err)) break; } kfree(temp_client); return err; } int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr) { return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_QUICK, NULL) >= 0; } EXPORT_SYMBOL_GPL(i2c_probe_func_quick_read); struct i2c_client * i2c_new_scanned_device(struct i2c_adapter *adap, struct i2c_board_info *info, unsigned short const *addr_list, int (*probe)(struct i2c_adapter *adap, unsigned short addr)) { int i; if (!probe) probe = i2c_default_probe; for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) { /* Check address validity */ if (i2c_check_7bit_addr_validity_strict(addr_list[i]) < 0) { dev_warn(&adap->dev, "Invalid 7-bit address 0x%02x\n", addr_list[i]); continue; } /* Check address availability (7 bit, no need to encode flags) */ if (i2c_check_addr_busy(adap, addr_list[i])) { dev_dbg(&adap->dev, "Address 0x%02x already in use, not probing\n", addr_list[i]); continue; } /* Test address responsiveness */ if (probe(adap, addr_list[i])) break; } if (addr_list[i] == I2C_CLIENT_END) { dev_dbg(&adap->dev, "Probing failed, no device found\n"); return ERR_PTR(-ENODEV); } info->addr = addr_list[i]; return i2c_new_client_device(adap, info); } EXPORT_SYMBOL_GPL(i2c_new_scanned_device); struct i2c_adapter *i2c_get_adapter(int nr) { struct i2c_adapter *adapter; mutex_lock(&core_lock); adapter = idr_find(&i2c_adapter_idr, nr); if (!adapter) goto exit; if (try_module_get(adapter->owner)) get_device(&adapter->dev); else adapter = NULL; exit: mutex_unlock(&core_lock); return adapter; } EXPORT_SYMBOL(i2c_get_adapter); void i2c_put_adapter(struct i2c_adapter *adap) { if (!adap) return; module_put(adap->owner); /* Should be last, otherwise we risk use-after-free with 'adap' */ put_device(&adap->dev); } EXPORT_SYMBOL(i2c_put_adapter); /** * i2c_get_dma_safe_msg_buf() - get a DMA safe buffer for the given i2c_msg * @msg: the message to be checked * @threshold: the minimum number of bytes for which using DMA makes sense. * Should at least be 1. * * Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO. * Or a valid pointer to be used with DMA. After use, release it by * calling i2c_put_dma_safe_msg_buf(). * * This function must only be called from process context! 
*/ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) { /* also skip 0-length msgs for bogus thresholds of 0 */ if (!threshold) pr_debug("DMA buffer for addr=0x%02x with length 0 is bogus\n", msg->addr); if (msg->len < threshold || msg->len == 0) return NULL; if (msg->flags & I2C_M_DMA_SAFE) return msg->buf; pr_debug("using bounce buffer for addr=0x%02x, len=%d\n", msg->addr, msg->len); if (msg->flags & I2C_M_RD) return kzalloc(msg->len, GFP_KERNEL); else return kmemdup(msg->buf, msg->len, GFP_KERNEL); } EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); /** * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. * @msg: the message which the buffer corresponds to * @xferred: bool saying if the message was transferred */ void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred) { if (!buf || buf == msg->buf) return; if (xferred && msg->flags & I2C_M_RD) memcpy(msg->buf, buf, msg->len); kfree(buf); } EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf); MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); MODULE_DESCRIPTION("I2C-Bus main module"); MODULE_LICENSE("GPL"); |
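A couple of hedged usage sketches may help tie the exported helpers above together. First, the combined write-then-read transaction described in the i2c_transfer() kernel-doc; example_read_regs() and its error policy are illustrative assumptions, not part of the core.

#include <linux/i2c.h>

/*
 * Illustrative only: read @len bytes starting at register @reg of @client
 * using one combined write+read transaction, as described in the
 * i2c_transfer() kernel-doc above. example_read_regs() is hypothetical.
 */
static int example_read_regs(struct i2c_client *client, u8 reg, u8 *buf, u16 len)
{
        struct i2c_msg msgs[2] = {
                {
                        .addr  = client->addr,
                        .flags = 0,             /* first message: write the register address */
                        .len   = 1,
                        .buf   = &reg,
                },
                {
                        .addr  = client->addr,
                        .flags = I2C_M_RD,      /* second message: read the register contents */
                        .len   = len,
                        .buf   = buf,
                },
        };
        int ret;

        ret = i2c_transfer(client->adapter, msgs, 2);
        if (ret < 0)
                return ret;

        /* i2c_transfer() returns the number of messages executed on success. */
        return ret == 2 ? 0 : -EIO;
}

Second, the DMA-safe buffer pair at the end of the file, roughly as a bus driver might use it; the 8-byte threshold and the omitted hardware programming are placeholders, not a recommendation.

/*
 * Illustrative only: obtain a DMA-safe bounce buffer for one message,
 * hand it to (hypothetical) hardware, then release it again.
 */
static int example_xfer_msg_dma(struct i2c_msg *msg)
{
        u8 *bounce;
        int ret = 0;

        /* Below 8 bytes (arbitrary threshold) the caller would fall back to PIO on msg->buf. */
        bounce = i2c_get_dma_safe_msg_buf(msg, 8);
        if (!bounce)
                return -EINVAL;

        /* ... program the (hypothetical) DMA engine with 'bounce' and set 'ret' from it ... */

        /* For reads, copies data back into msg->buf only if the transfer happened. */
        i2c_put_dma_safe_msg_buf(bounce, msg, ret == 0);

        return ret;
}

Both sketches rely only on symbols exported by this file and declared in <linux/i2c.h>.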
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/atomic.h>
#include <linux/bug.h>

/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
        /*
         * Assuming the lock is uncontended, this never enters
         * the body of the outer loop. If it is contended, then
         * within the inner loop a non-atomic test is used to
         * busywait with less bus contention for a good time to
         * attempt to acquire the lock bit.
         */
        preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
                preempt_enable();
                do {
                        cpu_relax();
                } while (test_bit(bitnum, addr));
                preempt_disable();
        }
#endif
        __acquire(bitlock);
}

/*
 * Return true if it was acquired
 */
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
        preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
                preempt_enable();
                return 0;
        }
#endif
        __acquire(bitlock);
        return 1;
}

/*
 * bit-based spin_unlock()
 */
static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        clear_bit_unlock(bitnum, addr);
#endif
        preempt_enable();
        __release(bitlock);
}

/*
 * bit-based spin_unlock()
 * non-atomic version, which can be used eg. if the bit lock itself is
 * protecting the rest of the flags in the word.
 */
static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        __clear_bit_unlock(bitnum, addr);
#endif
        preempt_enable();
        __release(bitlock);
}

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
        return preempt_count();
#else
        return 1;
#endif
}

#endif /* __LINUX_BIT_SPINLOCK_H */
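The header above is the entire API, so a short usage sketch may be useful: dedicate one bit of an unsigned long that already lives in the protected object and treat it as the lock. The example_obj structure and the example_* functions below are made up for illustration only.

#include <linux/bit_spinlock.h>

/* Hypothetical object that spends bit 0 of 'state' on a bit spinlock. */
struct example_obj {
        unsigned long state;    /* bit 0: lock, remaining bits: flags */
        int value;
};

#define EXAMPLE_LOCK_BIT        0

static void example_update(struct example_obj *obj, int v)
{
        /* Spins (with preemption disabled) until bit 0 of obj->state is owned. */
        bit_spin_lock(EXAMPLE_LOCK_BIT, &obj->state);
        obj->value = v;
        bit_spin_unlock(EXAMPLE_LOCK_BIT, &obj->state);
}

static bool example_try_update(struct example_obj *obj, int v)
{
        /* Non-blocking variant: bails out if the lock bit is already set. */
        if (!bit_spin_trylock(EXAMPLE_LOCK_BIT, &obj->state))
                return false;
        obj->value = v;
        bit_spin_unlock(EXAMPLE_LOCK_BIT, &obj->state);
        return true;
}

Per the comment on __bit_spin_unlock() above, that non-atomic variant is only appropriate when every writer of the word holds the lock bit, so the clear itself does not need to be atomic.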
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * NOHZ implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
        return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * The time when the last jiffy update happened. Write access must hold
 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
 * consistent view of jiffies and last_jiffies_update.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
        unsigned long ticks = 1;
        ktime_t delta, nextp;

        /*
         * 64-bit can do a quick check without holding the jiffies lock and
         * without looking at the sequence count. The smp_load_acquire()
         * pairs with the update done later in this function.
         *
         * 32-bit cannot do that because the store of 'tick_next_period'
         * consists of two 32-bit stores, and the first store could be
         * moved by the CPU to a random point in the future.
         */
        if (IS_ENABLED(CONFIG_64BIT)) {
                if (ktime_before(now, smp_load_acquire(&tick_next_period)))
                        return;
        } else {
                unsigned int seq;

                /*
                 * Avoid contention on 'jiffies_lock' and protect the quick
                 * check with the sequence count.
                 */
                do {
                        seq = read_seqcount_begin(&jiffies_seq);
                        nextp = tick_next_period;
                } while (read_seqcount_retry(&jiffies_seq, seq));

                if (ktime_before(now, nextp))
                        return;
        }

        /* Quick check failed, i.e. update is required. */
        raw_spin_lock(&jiffies_lock);
        /*
         * Re-evaluate with the lock held. Another CPU might have done the
         * update already.
         */
        if (ktime_before(now, tick_next_period)) {
                raw_spin_unlock(&jiffies_lock);
                return;
        }

        write_seqcount_begin(&jiffies_seq);

        delta = ktime_sub(now, tick_next_period);
        if (unlikely(delta >= TICK_NSEC)) {
                /* Slow path for long idle sleep times */
                s64 incr = TICK_NSEC;

                ticks += ktime_divns(delta, incr);

                last_jiffies_update = ktime_add_ns(last_jiffies_update,
                                                   incr * ticks);
        } else {
                last_jiffies_update = ktime_add_ns(last_jiffies_update,
                                                   TICK_NSEC);
        }

        /* Advance jiffies to complete the 'jiffies_seq' protected job */
        jiffies_64 += ticks;

        /* Keep the tick_next_period variable up to date */
        nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

        if (IS_ENABLED(CONFIG_64BIT)) {
                /*
                 * Pairs with smp_load_acquire() in the lockless quick
                 * check above, and ensures that the update to 'jiffies_64' is
                 * not reordered vs. the store to 'tick_next_period', neither
                 * by the compiler nor by the CPU.
*/ smp_store_release(&tick_next_period, nextp); } else { /* * A plain store is good enough on 32-bit, as the quick check * above is protected by the sequence count. */ tick_next_period = nextp; } /* * Release the sequence count. calc_global_load() below is not * protected by it, but 'jiffies_lock' needs to be held to prevent * concurrent invocations. */ write_seqcount_end(&jiffies_seq); calc_global_load(); raw_spin_unlock(&jiffies_lock); update_wall_time(); } /* * Initialize and return retrieve the jiffies update. */ static ktime_t tick_init_jiffy_update(void) { ktime_t period; raw_spin_lock(&jiffies_lock); write_seqcount_begin(&jiffies_seq); /* Have we started the jiffies update yet ? */ if (last_jiffies_update == 0) { u32 rem; /* * Ensure that the tick is aligned to a multiple of * TICK_NSEC. */ div_u64_rem(tick_next_period, TICK_NSEC, &rem); if (rem) tick_next_period += TICK_NSEC - rem; last_jiffies_update = tick_next_period; } period = last_jiffies_update; write_seqcount_end(&jiffies_seq); raw_spin_unlock(&jiffies_lock); return period; } static inline int tick_sched_flag_test(struct tick_sched *ts, unsigned long flag) { return !!(ts->flags & flag); } static inline void tick_sched_flag_set(struct tick_sched *ts, unsigned long flag) { lockdep_assert_irqs_disabled(); ts->flags |= flag; } static inline void tick_sched_flag_clear(struct tick_sched *ts, unsigned long flag) { lockdep_assert_irqs_disabled(); ts->flags &= ~flag; } #define MAX_STALLED_JIFFIES 5 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) { int tick_cpu, cpu = smp_processor_id(); /* * Check if the do_timer duty was dropped. We don't care about * concurrency: This happens only when the CPU in charge went * into a long sleep. If two CPUs happen to assign themselves to * this duty, then the jiffies update is still serialized by * 'jiffies_lock'. * * If nohz_full is enabled, this should not happen because the * 'tick_do_timer_cpu' CPU never relinquishes. */ tick_cpu = READ_ONCE(tick_do_timer_cpu); if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && unlikely(tick_cpu == TICK_DO_TIMER_NONE)) { #ifdef CONFIG_NO_HZ_FULL WARN_ON_ONCE(tick_nohz_full_running); #endif WRITE_ONCE(tick_do_timer_cpu, cpu); tick_cpu = cpu; } /* Check if jiffies need an update */ if (tick_cpu == cpu) tick_do_update_jiffies64(now); /* * If the jiffies update stalled for too long (timekeeper in stop_machine() * or VMEXIT'ed for several msecs), force an update. */ if (ts->last_tick_jiffies != jiffies) { ts->stalled_jiffies = 0; ts->last_tick_jiffies = READ_ONCE(jiffies); } else { if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) { tick_do_update_jiffies64(now); ts->stalled_jiffies = 0; ts->last_tick_jiffies = READ_ONCE(jiffies); } } if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) ts->got_idle_tick = 1; } static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) { /* * When we are idle and the tick is stopped, we have to touch * the watchdog as we might not schedule for a really long * time. This happens on completely idle SMP systems while * waiting on the login prompt. We also increment the "start of * idle" jiffy stamp so the idle accounting adjustment we do * when we go busy again does not account too many ticks. */ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { touch_softlockup_watchdog_sched(); if (is_idle_task(current)) ts->idle_jiffies++; /* * In case the current tick fired too early past its expected * expiration, make sure we don't bypass the next clock reprogramming * to the same deadline. 
                 */
                ts->next_tick = 0;
        }

        update_process_times(user_mode(regs));
        profile_tick(CPU_PROFILING);
}

/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer)
{
        struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer);
        struct pt_regs *regs = get_irq_regs();
        ktime_t now = ktime_get();

        tick_sched_do_timer(ts, now);

        /*
         * Do not call when we are not in IRQ context and have
         * no valid 'regs' pointer
         */
        if (regs)
                tick_sched_handle(ts, regs);
        else
                ts->next_tick = 0;

        /*
         * In dynticks mode, tick reprogram is deferred:
         * - to the idle task if in dynticks-idle
         * - to IRQ exit if in full-dynticks.
         */
        if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED)))
                return HRTIMER_NORESTART;

        hrtimer_forward(timer, now, TICK_NSEC);

        return HRTIMER_RESTART;
}

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
        int val = atomic_read(dep);

        if (val & TICK_DEP_MASK_POSIX_TIMER) {
                trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
                return true;
        }

        if (val & TICK_DEP_MASK_PERF_EVENTS) {
                trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
                return true;
        }

        if (val & TICK_DEP_MASK_SCHED) {
                trace_tick_stop(0, TICK_DEP_MASK_SCHED);
                return true;
        }

        if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
                trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
                return true;
        }

        if (val & TICK_DEP_MASK_RCU) {
                trace_tick_stop(0, TICK_DEP_MASK_RCU);
                return true;
        }

        if (val & TICK_DEP_MASK_RCU_EXP) {
                trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
                return true;
        }

        return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
        lockdep_assert_irqs_disabled();

        if (unlikely(!cpu_online(cpu)))
                return false;

        if (check_tick_dependency(&tick_dep_mask))
                return false;

        if (check_tick_dependency(&ts->tick_dep_mask))
                return false;

        if (check_tick_dependency(&current->tick_dep_mask))
                return false;

        if (check_tick_dependency(&current->signal->tick_dep_mask))
                return false;

        return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
        /* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
        IRQ_WORK_INIT_HARD(nohz_full_kick_func);

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
        if (!tick_nohz_full_cpu(smp_processor_id()))
                return;

        irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
        if (!tick_nohz_full_cpu(cpu))
                return;

        irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

static void tick_nohz_kick_task(struct task_struct *tsk)
{
        int cpu;

        /*
         * If the task is not running, run_posix_cpu_timers()
         * has nothing to elapse, and an IPI can then be optimized out.
* * activate_task() STORE p->tick_dep_mask * STORE p->on_rq * __schedule() (switch to task 'p') smp_mb() (atomic_fetch_or()) * LOCK rq->lock LOAD p->on_rq * smp_mb__after_spin_lock() * tick_nohz_task_switch() * LOAD p->tick_dep_mask * * XXX given a task picks up the dependency on schedule(), should we * only care about tasks that are currently on the CPU instead of all * that are on the runqueue? * * That is, does this want to be: task_on_cpu() / task_curr()? */ if (!sched_task_on_rq(tsk)) return; /* * If the task concurrently migrates to another CPU, * we guarantee it sees the new tick dependency upon * schedule. * * set_task_cpu(p, cpu); * STORE p->cpu = @cpu * __schedule() (switch to task 'p') * LOCK rq->lock * smp_mb__after_spin_lock() STORE p->tick_dep_mask * tick_nohz_task_switch() smp_mb() (atomic_fetch_or()) * LOAD p->tick_dep_mask LOAD p->cpu */ cpu = task_cpu(tsk); preempt_disable(); if (cpu_online(cpu)) tick_nohz_full_kick_cpu(cpu); preempt_enable(); } /* * Kick all full dynticks CPUs in order to force these to re-evaluate * their dependency on the tick and restart it if necessary. */ static void tick_nohz_full_kick_all(void) { int cpu; if (!tick_nohz_full_running) return; preempt_disable(); for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask) tick_nohz_full_kick_cpu(cpu); preempt_enable(); } static void tick_nohz_dep_set_all(atomic_t *dep, enum tick_dep_bits bit) { int prev; prev = atomic_fetch_or(BIT(bit), dep); if (!prev) tick_nohz_full_kick_all(); } /* * Set a global tick dependency. Used by perf events that rely on freq and * unstable clocks. */ void tick_nohz_dep_set(enum tick_dep_bits bit) { tick_nohz_dep_set_all(&tick_dep_mask, bit); } void tick_nohz_dep_clear(enum tick_dep_bits bit) { atomic_andnot(BIT(bit), &tick_dep_mask); } /* * Set per-CPU tick dependency. Used by scheduler and perf events in order to * manage event-throttling. */ void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { int prev; struct tick_sched *ts; ts = per_cpu_ptr(&tick_cpu_sched, cpu); prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); if (!prev) { preempt_disable(); /* Perf needs local kick that is NMI safe */ if (cpu == smp_processor_id()) { tick_nohz_full_kick(); } else { /* Remote IRQ work not NMI-safe */ if (!WARN_ON_ONCE(in_nmi())) tick_nohz_full_kick_cpu(cpu); } preempt_enable(); } } EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu); void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); atomic_andnot(BIT(bit), &ts->tick_dep_mask); } EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu); /* * Set a per-task tick dependency. RCU needs this. Also posix CPU timers * in order to elapse per task timers. */ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) { if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) tick_nohz_kick_task(tsk); } EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task); void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) { atomic_andnot(BIT(bit), &tsk->tick_dep_mask); } EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task); /* * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse * per process timers. 
 */
void tick_nohz_dep_set_signal(struct task_struct *tsk, enum tick_dep_bits bit)
{
        int prev;
        struct signal_struct *sig = tsk->signal;

        prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
        if (!prev) {
                struct task_struct *t;

                lockdep_assert_held(&tsk->sighand->siglock);
                __for_each_thread(sig, t)
                        tick_nohz_kick_task(t);
        }
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
        atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
        struct tick_sched *ts;

        if (!tick_nohz_full_cpu(smp_processor_id()))
                return;

        ts = this_cpu_ptr(&tick_cpu_sched);

        if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
                if (atomic_read(&current->tick_dep_mask) ||
                    atomic_read(&current->signal->tick_dep_mask))
                        tick_nohz_full_kick();
        }
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
        alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
        cpumask_copy(tick_nohz_full_mask, cpumask);
        tick_nohz_full_running = true;
}

bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
        /*
         * The 'tick_do_timer_cpu' CPU handles housekeeping duty (unbound
         * timers, workqueues, timekeeping, ...) on behalf of full dynticks
         * CPUs. It must remain online when nohz full is enabled.
         */
        if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu)
                return false;
        return true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
        return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}

void __init tick_nohz_init(void)
{
        int cpu, ret;

        if (!tick_nohz_full_running)
                return;

        /*
         * Full dynticks uses IRQ work to drive the tick rescheduling on safe
         * locking contexts. But then we need IRQ work to raise its own
         * interrupts to avoid circular dependency on the tick.
         */
        if (!arch_irq_work_has_interrupt()) {
                pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n");
                cpumask_clear(tick_nohz_full_mask);
                tick_nohz_full_running = false;
                return;
        }

        if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
            !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
                cpu = smp_processor_id();

                if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
                        pr_warn("NO_HZ: Clearing %d from nohz_full range "
                                "for timekeeping\n", cpu);
                        cpumask_clear_cpu(cpu, tick_nohz_full_mask);
                }
        }

        for_each_cpu(cpu, tick_nohz_full_mask)
                ct_cpu_track_user(cpu);

        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                        "kernel/nohz:predown", NULL,
                                        tick_nohz_cpu_down);
        WARN_ON(ret < 0);
        pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
                cpumask_pr_args(tick_nohz_full_mask));
}
#endif /* #ifdef CONFIG_NO_HZ_FULL */

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled ?
*/ bool tick_nohz_enabled __read_mostly = true; unsigned long tick_nohz_active __read_mostly; /* * Enable / Disable tickless mode */ static int __init setup_tick_nohz(char *str) { return (kstrtobool(str, &tick_nohz_enabled) == 0); } __setup("nohz=", setup_tick_nohz); bool tick_nohz_tick_stopped(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); return tick_sched_flag_test(ts, TS_FLAG_STOPPED); } bool tick_nohz_tick_stopped_cpu(int cpu) { struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); return tick_sched_flag_test(ts, TS_FLAG_STOPPED); } /** * tick_nohz_update_jiffies - update jiffies when idle was interrupted * @now: current ktime_t * * Called from interrupt entry when the CPU was idle * * In case the sched_tick was stopped on this CPU, we have to check if jiffies * must be updated. Otherwise an interrupt handler could use a stale jiffy * value. We do this unconditionally on any CPU, as we don't know whether the * CPU, which has the update task assigned, is in a long sleep. */ static void tick_nohz_update_jiffies(ktime_t now) { unsigned long flags; __this_cpu_write(tick_cpu_sched.idle_waketime, now); local_irq_save(flags); tick_do_update_jiffies64(now); local_irq_restore(flags); touch_softlockup_watchdog_sched(); } static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now) { ktime_t delta; if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))) return; delta = ktime_sub(now, ts->idle_entrytime); write_seqcount_begin(&ts->idle_sleeptime_seq); if (nr_iowait_cpu(smp_processor_id()) > 0) ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); else ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); ts->idle_entrytime = now; tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE); write_seqcount_end(&ts->idle_sleeptime_seq); sched_clock_idle_wakeup_event(); } static void tick_nohz_start_idle(struct tick_sched *ts) { write_seqcount_begin(&ts->idle_sleeptime_seq); ts->idle_entrytime = ktime_get(); tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE); write_seqcount_end(&ts->idle_sleeptime_seq); sched_clock_idle_sleep_event(); } static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime, bool compute_delta, u64 *last_update_time) { ktime_t now, idle; unsigned int seq; if (!tick_nohz_active) return -1; now = ktime_get(); if (last_update_time) *last_update_time = ktime_to_us(now); do { seq = read_seqcount_begin(&ts->idle_sleeptime_seq); if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) { ktime_t delta = ktime_sub(now, ts->idle_entrytime); idle = ktime_add(*sleeptime, delta); } else { idle = *sleeptime; } } while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq)); return ktime_to_us(idle); } /** * get_cpu_idle_time_us - get the total idle time of a CPU * @cpu: CPU number to query * @last_update_time: variable to store update time in. Do not update * counters if NULL. * * Return the cumulative idle time (since boot) for a given * CPU, in microseconds. Note that this is partially broken due to * the counter of iowait tasks that can be remotely updated without * any synchronization. Therefore it is possible to observe backward * values within two consecutive reads. * * This time is measured via accounting rather than sampling, * and is as accurate as ktime_get() is. 
* * Return: -1 if NOHZ is not enabled, else total idle time of the @cpu */ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime, !nr_iowait_cpu(cpu), last_update_time); } EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); /** * get_cpu_iowait_time_us - get the total iowait time of a CPU * @cpu: CPU number to query * @last_update_time: variable to store update time in. Do not update * counters if NULL. * * Return the cumulative iowait time (since boot) for a given * CPU, in microseconds. Note this is partially broken due to * the counter of iowait tasks that can be remotely updated without * any synchronization. Therefore it is possible to observe backward * values within two consecutive reads. * * This time is measured via accounting rather than sampling, * and is as accurate as ktime_get() is. * * Return: -1 if NOHZ is not enabled, else total iowait time of @cpu */ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime, nr_iowait_cpu(cpu), last_update_time); } EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) { hrtimer_cancel(&ts->sched_timer); hrtimer_set_expires(&ts->sched_timer, ts->last_tick); /* Forward the time to expire in the future */ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); } else { tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); } /* * Reset to make sure the next tick stop doesn't get fooled by past * cached clock deadline. */ ts->next_tick = 0; } static inline bool local_timer_softirq_pending(void) { return local_timers_pending() & BIT(TIMER_SOFTIRQ); } /* * Read jiffies and the time when jiffies were updated last */ u64 get_jiffies_update(unsigned long *basej) { unsigned long basejiff; unsigned int seq; u64 basemono; do { seq = read_seqcount_begin(&jiffies_seq); basemono = last_jiffies_update; basejiff = jiffies; } while (read_seqcount_retry(&jiffies_seq, seq)); *basej = basejiff; return basemono; } /** * tick_nohz_next_event() - return the clock monotonic based next event * @ts: pointer to tick_sched struct * @cpu: CPU number * * Return: * *%0 - When the next event is a maximum of TICK_NSEC in the future * and the tick is not stopped yet * *%next_event - Next event based on clock monotonic */ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) { u64 basemono, next_tick, delta, expires; unsigned long basejiff; int tick_cpu; basemono = get_jiffies_update(&basejiff); ts->last_jiffies = basejiff; ts->timer_expires_base = basemono; /* * Keep the periodic tick, when RCU, architecture or irq_work * requests it. * Aside of that, check whether the local timer softirq is * pending. If so, its a bad idea to call get_next_timer_interrupt(), * because there is an already expired timer, so it will request * immediate expiry, which rearms the hardware timer with a * minimal delta, which brings us back to this place * immediately. Lather, rinse and repeat... */ if (rcu_needs_cpu() || arch_needs_cpu() || irq_work_needs_cpu() || local_timer_softirq_pending()) { next_tick = basemono + TICK_NSEC; } else { /* * Get the next pending timer. If high resolution * timers are enabled this only takes the timer wheel * timers into account. 
If high resolution timers are * disabled this also looks at the next expiring * hrtimer. */ next_tick = get_next_timer_interrupt(basejiff, basemono); ts->next_timer = next_tick; } /* Make sure next_tick is never before basemono! */ if (WARN_ON_ONCE(basemono > next_tick)) next_tick = basemono; /* * If the tick is due in the next period, keep it ticking or * force prod the timer. */ delta = next_tick - basemono; if (delta <= (u64)TICK_NSEC) { /* * We've not stopped the tick yet, and there's a timer in the * next period, so no point in stopping it either, bail. */ if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ts->timer_expires = 0; goto out; } } /* * If this CPU is the one which had the do_timer() duty last, we limit * the sleep time to the timekeeping 'max_deferment' value. * Otherwise we can sleep as long as we want. */ delta = timekeeping_max_deferment(); tick_cpu = READ_ONCE(tick_do_timer_cpu); if (tick_cpu != cpu && (tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST))) delta = KTIME_MAX; /* Calculate the next expiry time */ if (delta < (KTIME_MAX - basemono)) expires = basemono + delta; else expires = KTIME_MAX; ts->timer_expires = min_t(u64, expires, next_tick); out: return ts->timer_expires; } static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); unsigned long basejiff = ts->last_jiffies; u64 basemono = ts->timer_expires_base; bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED); int tick_cpu; u64 expires; /* Make sure we won't be trying to stop it twice in a row. */ ts->timer_expires_base = 0; /* * Now the tick should be stopped definitely - so the timer base needs * to be marked idle as well to not miss a newly queued timer. */ expires = timer_base_try_to_set_idle(basejiff, basemono, &timer_idle); if (expires > ts->timer_expires) { /* * This path could only happen when the first timer was removed * between calculating the possible sleep length and now (when * high resolution mode is not active, timer could also be a * hrtimer). * * We have to stick to the original calculated expiry value to * not stop the tick for too long with a shallow C-state (which * was programmed by cpuidle because of an early next expiration * value). */ expires = ts->timer_expires; } /* If the timer base is not idle, retain the not yet stopped tick. */ if (!timer_idle) return; /* * If this CPU is the one which updates jiffies, then give up * the assignment and let it be taken by the CPU which runs * the tick timer next, which might be this CPU as well. If we * don't drop this here, the jiffies might be stale and * do_timer() never gets invoked. Keep track of the fact that it * was the one which had the do_timer() duty last. 
*/ tick_cpu = READ_ONCE(tick_do_timer_cpu); if (tick_cpu == cpu) { WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE); tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST); } else if (tick_cpu != TICK_DO_TIMER_NONE) { tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST); } /* Skip reprogram of event if it's not changed */ if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) { /* Sanity check: make sure clockevent is actually programmed */ if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) return; WARN_ONCE(1, "basemono: %llu ts->next_tick: %llu dev->next_event: %llu " "timer->active: %d timer->expires: %llu\n", basemono, ts->next_tick, dev->next_event, hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer)); } /* * tick_nohz_stop_tick() can be called several times before * tick_nohz_restart_sched_tick() is called. This happens when * interrupts arrive which do not cause a reschedule. In the first * call we save the current tick time, so we can restart the * scheduler tick in tick_nohz_restart_sched_tick(). */ if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { calc_load_nohz_start(); quiet_vmstat(); ts->last_tick = hrtimer_get_expires(&ts->sched_timer); tick_sched_flag_set(ts, TS_FLAG_STOPPED); trace_tick_stop(1, TICK_DEP_MASK_NONE); } ts->next_tick = expires; /* * If the expiration time == KTIME_MAX, then we simply stop * the tick timer. */ if (unlikely(expires == KTIME_MAX)) { if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) hrtimer_cancel(&ts->sched_timer); else tick_program_event(KTIME_MAX, 1); return; } if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start(&ts->sched_timer, expires, HRTIMER_MODE_ABS_PINNED_HARD); } else { hrtimer_set_expires(&ts->sched_timer, expires); tick_program_event(expires, 1); } } static void tick_nohz_retain_tick(struct tick_sched *ts) { ts->timer_expires_base = 0; } #ifdef CONFIG_NO_HZ_FULL static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu) { if (tick_nohz_next_event(ts, cpu)) tick_nohz_stop_tick(ts, cpu); else tick_nohz_retain_tick(ts); } #endif /* CONFIG_NO_HZ_FULL */ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) { /* Update jiffies first */ tick_do_update_jiffies64(now); /* * Clear the timer idle flag, so we avoid IPIs on remote queueing and * the clock forward checks in the enqueue path: */ timer_clear_idle(); calc_load_nohz_stop(); touch_softlockup_watchdog_sched(); /* Cancel the scheduled timer and restore the tick: */ tick_sched_flag_clear(ts, TS_FLAG_STOPPED); tick_nohz_restart(ts, now); } static void __tick_nohz_full_update_tick(struct tick_sched *ts, ktime_t now) { #ifdef CONFIG_NO_HZ_FULL int cpu = smp_processor_id(); if (can_stop_full_tick(cpu, ts)) tick_nohz_full_stop_tick(ts, cpu); else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) tick_nohz_restart_sched_tick(ts, now); #endif } static void tick_nohz_full_update_tick(struct tick_sched *ts) { if (!tick_nohz_full_cpu(smp_processor_id())) return; if (!tick_sched_flag_test(ts, TS_FLAG_NOHZ)) return; __tick_nohz_full_update_tick(ts, ktime_get()); } /* * A pending softirq outside an IRQ (or softirq disabled section) context * should be waiting for ksoftirqd to handle it. Therefore we shouldn't * reach this code due to the need_resched() early check in can_stop_idle_tick(). 
* * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPU_TEARDOWN_CPU on the * cpu_down() process, softirqs can still be raised while ksoftirqd is parked, * triggering the code below, since wakep_softirqd() is ignored. * */ static bool report_idle_softirq(void) { static int ratelimit; unsigned int pending = local_softirq_pending(); if (likely(!pending)) return false; /* Some softirqs claim to be safe against hotplug and ksoftirqd parking */ if (!cpu_active(smp_processor_id())) { pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK; if (!pending) return false; } if (ratelimit >= 10) return false; /* On RT, softirq handling may be waiting on some lock */ if (local_bh_blocked()) return false; pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n", pending); ratelimit++; return true; } static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) { WARN_ON_ONCE(cpu_is_offline(cpu)); if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ))) return false; if (need_resched()) return false; if (unlikely(report_idle_softirq())) return false; if (tick_nohz_full_enabled()) { int tick_cpu = READ_ONCE(tick_do_timer_cpu); /* * Keep the tick alive to guarantee timekeeping progression * if there are full dynticks CPUs around */ if (tick_cpu == cpu) return false; /* Should not happen for nohz-full */ if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE)) return false; } return true; } /** * tick_nohz_idle_stop_tick - stop the idle tick from the idle task * * When the next event is more than a tick into the future, stop the idle tick */ void tick_nohz_idle_stop_tick(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); int cpu = smp_processor_id(); ktime_t expires; /* * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the * tick timer expiration time is known already. */ if (ts->timer_expires_base) expires = ts->timer_expires; else if (can_stop_idle_tick(cpu, ts)) expires = tick_nohz_next_event(ts, cpu); else return; ts->idle_calls++; if (expires > 0LL) { int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); tick_nohz_stop_tick(ts, cpu); ts->idle_sleeps++; ts->idle_expires = expires; if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ts->idle_jiffies = ts->last_jiffies; nohz_balance_enter_idle(cpu); } } else { tick_nohz_retain_tick(ts); } } void tick_nohz_idle_retain_tick(void) { tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched)); } /** * tick_nohz_idle_enter - prepare for entering idle on the current CPU * * Called when we start the idle loop. */ void tick_nohz_idle_enter(void) { struct tick_sched *ts; lockdep_assert_irqs_enabled(); local_irq_disable(); ts = this_cpu_ptr(&tick_cpu_sched); WARN_ON_ONCE(ts->timer_expires_base); tick_sched_flag_set(ts, TS_FLAG_INIDLE); tick_nohz_start_idle(ts); local_irq_enable(); } /** * tick_nohz_irq_exit - Notify the tick about IRQ exit * * A timer may have been added/modified/deleted either by the current IRQ, * or by another place using this IRQ as a notification. This IRQ may have * also updated the RCU callback list. These events may require a * re-evaluation of the next tick. Depending on the context: * * 1) If the CPU is idle and no resched is pending, just proceed with idle * time accounting. The next tick will be re-evaluated on the next idle * loop iteration. * * 2) If the CPU is nohz_full: * * 2.1) If there is any tick dependency, restart the tick if stopped. * * 2.2) If there is no tick dependency, (re-)evaluate the next tick and * stop/update it accordingly. 
*/ void tick_nohz_irq_exit(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) tick_nohz_start_idle(ts); else tick_nohz_full_update_tick(ts); } /** * tick_nohz_idle_got_tick - Check whether or not the tick handler has run * * Return: %true if the tick handler has run, otherwise %false */ bool tick_nohz_idle_got_tick(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (ts->got_idle_tick) { ts->got_idle_tick = 0; return true; } return false; } /** * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer * or the tick, whichever expires first. Note that, if the tick has been * stopped, it returns the next hrtimer. * * Called from power state control code with interrupts disabled * * Return: the next expiration time */ ktime_t tick_nohz_get_next_hrtimer(void) { return __this_cpu_read(tick_cpu_device.evtdev)->next_event; } /** * tick_nohz_get_sleep_length - return the expected length of the current sleep * @delta_next: duration until the next event if the tick cannot be stopped * * Called from power state control code with interrupts disabled. * * The return value of this function and/or the value returned by it through the * @delta_next pointer can be negative which must be taken into account by its * callers. * * Return: the expected length of the current sleep */ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); int cpu = smp_processor_id(); /* * The idle entry time is expected to be a sufficient approximation of * the current time at this point. */ ktime_t now = ts->idle_entrytime; ktime_t next_event; WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); *delta_next = ktime_sub(dev->next_event, now); if (!can_stop_idle_tick(cpu, ts)) return *delta_next; next_event = tick_nohz_next_event(ts, cpu); if (!next_event) return *delta_next; /* * If the next highres timer to expire is earlier than 'next_event', the * idle governor needs to know that. */ next_event = min_t(u64, next_event, hrtimer_next_event_without(&ts->sched_timer)); return ktime_sub(next_event, now); } /** * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value * for a particular CPU. * @cpu: target CPU number * * Called from the schedutil frequency scaling governor in scheduler context. * * Return: the current idle calls counter value for @cpu */ unsigned long tick_nohz_get_idle_calls_cpu(int cpu) { struct tick_sched *ts = tick_get_tick_sched(cpu); return ts->idle_calls; } static void tick_nohz_account_idle_time(struct tick_sched *ts, ktime_t now) { unsigned long ticks; ts->idle_exittime = now; if (vtime_accounting_enabled_this_cpu()) return; /* * We stopped the tick in idle. update_process_times() would miss the * time we slept, as it does only a 1 tick accounting. * Enforce that this is accounted to idle ! */ ticks = jiffies - ts->idle_jiffies; /* * We might be one off. Do not randomly account a huge number of ticks! 
*/ if (ticks && ticks < LONG_MAX) account_idle_ticks(ticks); } void tick_nohz_idle_restart_tick(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ktime_t now = ktime_get(); tick_nohz_restart_sched_tick(ts, now); tick_nohz_account_idle_time(ts, now); } } static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now) { if (tick_nohz_full_cpu(smp_processor_id())) __tick_nohz_full_update_tick(ts, now); else tick_nohz_restart_sched_tick(ts, now); tick_nohz_account_idle_time(ts, now); } /** * tick_nohz_idle_exit - Update the tick upon idle task exit * * When the idle task exits, update the tick depending on the * following situations: * * 1) If the CPU is not in nohz_full mode (most cases), then * restart the tick. * * 2) If the CPU is in nohz_full mode (corner case): * 2.1) If the tick can be kept stopped (no tick dependencies) * then re-evaluate the next tick and try to keep it stopped * as long as possible. * 2.2) If the tick has dependencies, restart the tick. * */ void tick_nohz_idle_exit(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); bool idle_active, tick_stopped; ktime_t now; local_irq_disable(); WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); WARN_ON_ONCE(ts->timer_expires_base); tick_sched_flag_clear(ts, TS_FLAG_INIDLE); idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE); tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); if (idle_active || tick_stopped) now = ktime_get(); if (idle_active) tick_nohz_stop_idle(ts, now); if (tick_stopped) tick_nohz_idle_update_tick(ts, now); local_irq_enable(); } /* * In low-resolution mode, the tick handler must be implemented directly * at the clockevent level. hrtimer can't be used instead, because its * infrastructure actually relies on the tick itself as a backend in * low-resolution mode (see hrtimer_run_queues()). */ static void tick_nohz_lowres_handler(struct clock_event_device *dev) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); dev->next_event = KTIME_MAX; if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART)) tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); } static inline void tick_nohz_activate(struct tick_sched *ts) { if (!tick_nohz_enabled) return; tick_sched_flag_set(ts, TS_FLAG_NOHZ); /* One update is enough */ if (!test_and_set_bit(0, &tick_nohz_active)) timers_update_nohz(); } /** * tick_nohz_switch_to_nohz - switch to NOHZ mode */ static void tick_nohz_switch_to_nohz(void) { if (!tick_nohz_enabled) return; if (tick_switch_to_oneshot(tick_nohz_lowres_handler)) return; /* * Recycle the hrtimer in 'ts', so we can share the * highres code. */ tick_setup_sched_timer(false); } static inline void tick_nohz_irq_enter(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); ktime_t now; if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE)) return; now = ktime_get(); if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)) tick_nohz_stop_idle(ts, now); /* * If all CPUs are idle we may need to update a stale jiffies value. * Note nohz_full is a special case: a timekeeper is guaranteed to stay * alive but it might be busy looping with interrupts disabled in some * rare case (typically stop machine). So we must make sure we have a * last resort. 
*/ if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) tick_nohz_update_jiffies(now); } #else static inline void tick_nohz_switch_to_nohz(void) { } static inline void tick_nohz_irq_enter(void) { } static inline void tick_nohz_activate(struct tick_sched *ts) { } #endif /* CONFIG_NO_HZ_COMMON */ /* * Called from irq_enter() to notify about the possible interruption of idle() */ void tick_irq_enter(void) { tick_check_oneshot_broadcast_this_cpu(); tick_nohz_irq_enter(); } static int sched_skew_tick; static int __init skew_tick(char *str) { get_option(&str, &sched_skew_tick); return 0; } early_param("skew_tick", skew_tick); /** * tick_setup_sched_timer - setup the tick emulation timer * @hrtimer: whether to use the hrtimer or not */ void tick_setup_sched_timer(bool hrtimer) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); /* Emulate tick processing via per-CPU hrtimers: */ hrtimer_setup(&ts->sched_timer, tick_nohz_handler, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer) tick_sched_flag_set(ts, TS_FLAG_HIGHRES); /* Get the next period (per-CPU) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); /* Offset the tick to avert 'jiffies_lock' contention. */ if (sched_skew_tick) { u64 offset = TICK_NSEC >> 1; do_div(offset, num_possible_cpus()); offset *= smp_processor_id(); hrtimer_add_expires_ns(&ts->sched_timer, offset); } hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer) hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); else tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); tick_nohz_activate(ts); } /* * Shut down the tick and make sure the CPU won't try to retake the timekeeping * duty before disabling IRQs in idle for the last time. */ void tick_sched_timer_dying(int cpu) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); ktime_t idle_sleeptime, iowait_sleeptime; unsigned long idle_calls, idle_sleeps; /* This must happen before hrtimers are migrated! */ if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) hrtimer_cancel(&ts->sched_timer); idle_sleeptime = ts->idle_sleeptime; iowait_sleeptime = ts->iowait_sleeptime; idle_calls = ts->idle_calls; idle_sleeps = ts->idle_sleeps; memset(ts, 0, sizeof(*ts)); ts->idle_sleeptime = idle_sleeptime; ts->iowait_sleeptime = iowait_sleeptime; ts->idle_calls = idle_calls; ts->idle_sleeps = idle_sleeps; } /* * Async notification about clocksource changes */ void tick_clock_notify(void) { int cpu; for_each_possible_cpu(cpu) set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); } /* * Async notification about clock event changes */ void tick_oneshot_notify(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); set_bit(0, &ts->check_clocks); } /* * Check if a change happened, which makes oneshot possible. * * Called cyclically from the hrtimer softirq (driven by the timer * softirq). 'allow_nohz' signals that we can switch into low-res NOHZ * mode, because high resolution timers are disabled (either compile * or runtime). Called with interrupts disabled. */ int tick_check_oneshot_change(int allow_nohz) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (!test_and_clear_bit(0, &ts->check_clocks)) return 0; if (tick_sched_flag_test(ts, TS_FLAG_NOHZ)) return 0; if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available()) return 0; if (!allow_nohz) return 1; tick_nohz_switch_to_nohz(); return 0; } |
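/*
 * Illustrative sketch, not part of the original file: the kerneldoc for
 * tick_nohz_get_sleep_length() above notes that both its return value and
 * the value written through @delta_next can be negative, and that it must
 * be called from power state control code with interrupts disabled. A
 * hypothetical caller (name and unit conversion chosen for illustration
 * only) would therefore clamp the value before using it as a prediction:
 */
static __maybe_unused u64 example_predicted_sleep_us(void)
{
	ktime_t delta_next, duration;

	/* Must run in the idle path with interrupts disabled. */
	duration = tick_nohz_get_sleep_length(&delta_next);

	/* Both values may be negative, e.g. when the next event is already due. */
	if (duration < 0)
		duration = 0;

	return ktime_to_us(duration);
}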
// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2007,2008 * Author(s): Steve French (sfrench@us.ibm.com) * * Contains the routines for mapping CIFS/NTFS ACLs * */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/keyctl.h> #include <linux/key-type.h> #include <uapi/linux/posix_acl.h> #include <linux/posix_acl.h> #include <linux/posix_acl_xattr.h> #include <keys/user-type.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsacl.h" #include "cifsproto.h" #include "cifs_debug.h" #include "fs_context.h" #include "cifs_fs_sb.h" #include "cifs_unicode.h" /* security id for everyone/world system group */ static const struct smb_sid sid_everyone = { 1, 1, {0, 0, 0, 0, 0, 1}, {0} }; /* security id for Authenticated Users system group */ static const struct smb_sid sid_authusers = { 1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} }; /* S-1-22-1 Unmapped Unix users */ static const struct smb_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22}, {cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* S-1-22-2 Unmapped Unix groups */ static const struct smb_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22}, {cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* * See https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ /* S-1-5-88 MS NFS and Apple style UID/GID/mode */ /* S-1-5-88-1 Unix uid */ static const struct smb_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(88), cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* S-1-5-88-2 Unix gid */ static const struct smb_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(88), cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* S-1-5-88-3 Unix mode */ static const struct smb_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(88), cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; static const struct cred *root_cred; static int cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { char *payload; /* * If the payload is less than or equal to the size of a pointer, then * an allocation here is wasteful. Just copy the data directly to the * payload.value union member instead. * * With this however, you must check the datalen before trying to * dereference payload.data!
*/ if (prep->datalen <= sizeof(key->payload)) { key->payload.data[0] = NULL; memcpy(&key->payload, prep->data, prep->datalen); } else { payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL); if (!payload) return -ENOMEM; key->payload.data[0] = payload; } key->datalen = prep->datalen; return 0; } static inline void cifs_idmap_key_destroy(struct key *key) { if (key->datalen > sizeof(key->payload)) kfree(key->payload.data[0]); } static struct key_type cifs_idmap_key_type = { .name = "cifs.idmap", .instantiate = cifs_idmap_key_instantiate, .destroy = cifs_idmap_key_destroy, .describe = user_describe, }; static char * sid_to_key_str(struct smb_sid *sidptr, unsigned int type) { int i, len; unsigned int saval; char *sidstr, *strptr; unsigned long long id_auth_val; /* 3 bytes for prefix */ sidstr = kmalloc(3 + SID_STRING_BASE_SIZE + (SID_STRING_SUBAUTH_SIZE * sidptr->num_subauth), GFP_KERNEL); if (!sidstr) return sidstr; strptr = sidstr; len = sprintf(strptr, "%cs:S-%hhu", type == SIDOWNER ? 'o' : 'g', sidptr->revision); strptr += len; /* The authority field is a single 48-bit number */ id_auth_val = (unsigned long long)sidptr->authority[5]; id_auth_val |= (unsigned long long)sidptr->authority[4] << 8; id_auth_val |= (unsigned long long)sidptr->authority[3] << 16; id_auth_val |= (unsigned long long)sidptr->authority[2] << 24; id_auth_val |= (unsigned long long)sidptr->authority[1] << 32; id_auth_val |= (unsigned long long)sidptr->authority[0] << 48; /* * MS-DTYP states that if the authority is >= 2^32, then it should be * expressed as a hex value. */ if (id_auth_val <= UINT_MAX) len = sprintf(strptr, "-%llu", id_auth_val); else len = sprintf(strptr, "-0x%llx", id_auth_val); strptr += len; for (i = 0; i < sidptr->num_subauth; ++i) { saval = le32_to_cpu(sidptr->sub_auth[i]); len = sprintf(strptr, "-%u", saval); strptr += len; } return sidstr; } /* * if the two SIDs (roughly equivalent to a UUID for a user or group) are * the same returns zero, if they do not match returns non-zero. */ static int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid) { int i; int num_subauth, num_sat, num_saw; if ((!ctsid) || (!cwsid)) return 1; /* compare the revision */ if (ctsid->revision != cwsid->revision) { if (ctsid->revision > cwsid->revision) return 1; else return -1; } /* compare all of the six auth values */ for (i = 0; i < NUM_AUTHS; ++i) { if (ctsid->authority[i] != cwsid->authority[i]) { if (ctsid->authority[i] > cwsid->authority[i]) return 1; else return -1; } } /* compare all of the subauth values if any */ num_sat = ctsid->num_subauth; num_saw = cwsid->num_subauth; num_subauth = min(num_sat, num_saw); if (num_subauth) { for (i = 0; i < num_subauth; ++i) { if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) { if (le32_to_cpu(ctsid->sub_auth[i]) > le32_to_cpu(cwsid->sub_auth[i])) return 1; else return -1; } } } return 0; /* sids compare/match */ } static bool is_well_known_sid(const struct smb_sid *psid, uint32_t *puid, bool is_group) { int i; int num_subauth; const struct smb_sid *pwell_known_sid; if (!psid || (puid == NULL)) return false; num_subauth = psid->num_subauth; /* check if Mac (or Windows NFS) vs. 
Samba format for Unix owner SID */ if (num_subauth == 2) { if (is_group) pwell_known_sid = &sid_unix_groups; else pwell_known_sid = &sid_unix_users; } else if (num_subauth == 3) { if (is_group) pwell_known_sid = &sid_unix_NFS_groups; else pwell_known_sid = &sid_unix_NFS_users; } else return false; /* compare the revision */ if (psid->revision != pwell_known_sid->revision) return false; /* compare all of the six auth values */ for (i = 0; i < NUM_AUTHS; ++i) { if (psid->authority[i] != pwell_known_sid->authority[i]) { cifs_dbg(FYI, "auth %d did not match\n", i); return false; } } if (num_subauth == 2) { if (psid->sub_auth[0] != pwell_known_sid->sub_auth[0]) return false; *puid = le32_to_cpu(psid->sub_auth[1]); } else /* 3 subauths, ie Windows/Mac style */ { *puid = le32_to_cpu(psid->sub_auth[0]); if ((psid->sub_auth[0] != pwell_known_sid->sub_auth[0]) || (psid->sub_auth[1] != pwell_known_sid->sub_auth[1])) return false; *puid = le32_to_cpu(psid->sub_auth[2]); } cifs_dbg(FYI, "Unix UID %d returned from SID\n", *puid); return true; /* well known sid found, uid returned */ } static __u16 cifs_copy_sid(struct smb_sid *dst, const struct smb_sid *src) { int i; __u16 size = 1 + 1 + 6; dst->revision = src->revision; dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES); for (i = 0; i < NUM_AUTHS; ++i) dst->authority[i] = src->authority[i]; for (i = 0; i < dst->num_subauth; ++i) dst->sub_auth[i] = src->sub_auth[i]; size += (dst->num_subauth * 4); return size; } static int id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid) { int rc; struct key *sidkey; struct smb_sid *ksid; unsigned int ksid_size; char desc[3 + 10 + 1]; /* 3 byte prefix + 10 bytes for value + NULL */ const struct cred *saved_cred; rc = snprintf(desc, sizeof(desc), "%ci:%u", sidtype == SIDOWNER ? 'o' : 'g', cid); if (rc >= sizeof(desc)) return -EINVAL; rc = 0; saved_cred = override_creds(root_cred); sidkey = request_key(&cifs_idmap_key_type, desc, ""); if (IS_ERR(sidkey)) { rc = -EINVAL; cifs_dbg(FYI, "%s: Can't map %cid %u to a SID\n", __func__, sidtype == SIDOWNER ? 'u' : 'g', cid); goto out_revert_creds; } else if (sidkey->datalen < CIFS_SID_BASE_SIZE) { rc = -EIO; cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n", __func__, sidkey->datalen); goto invalidate_key; } /* * A sid is usually too large to be embedded in payload.value, but if * there are no subauthorities and the host has 8-byte pointers, then * it could be. */ ksid = sidkey->datalen <= sizeof(sidkey->payload) ? (struct smb_sid *)&sidkey->payload : (struct smb_sid *)sidkey->payload.data[0]; ksid_size = CIFS_SID_BASE_SIZE + (ksid->num_subauth * sizeof(__le32)); if (ksid_size > sidkey->datalen) { rc = -EIO; cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu, ksid_size=%u)\n", __func__, sidkey->datalen, ksid_size); goto invalidate_key; } cifs_copy_sid(ssid, ksid); out_key_put: key_put(sidkey); out_revert_creds: revert_creds(saved_cred); return rc; invalidate_key: key_invalidate(sidkey); goto out_key_put; } int sid_to_id(struct cifs_sb_info *cifs_sb, struct smb_sid *psid, struct cifs_fattr *fattr, uint sidtype) { int rc = 0; struct key *sidkey; char *sidstr; const struct cred *saved_cred; kuid_t fuid = cifs_sb->ctx->linux_uid; kgid_t fgid = cifs_sb->ctx->linux_gid; /* * If we have too many subauthorities, then something is really wrong. * Just return an error. 
*/ if (unlikely(psid->num_subauth > SID_MAX_SUB_AUTHORITIES)) { cifs_dbg(FYI, "%s: %u subauthorities is too many!\n", __func__, psid->num_subauth); return -EIO; } if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) || (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) { uint32_t unix_id; bool is_group; if (sidtype != SIDOWNER) is_group = true; else is_group = false; if (is_well_known_sid(psid, &unix_id, is_group) == false) goto try_upcall_to_get_id; if (is_group) { kgid_t gid; gid_t id; id = (gid_t)unix_id; gid = make_kgid(&init_user_ns, id); if (gid_valid(gid)) { fgid = gid; goto got_valid_id; } } else { kuid_t uid; uid_t id; id = (uid_t)unix_id; uid = make_kuid(&init_user_ns, id); if (uid_valid(uid)) { fuid = uid; goto got_valid_id; } } /* If unable to find uid/gid easily from SID try via upcall */ } try_upcall_to_get_id: sidstr = sid_to_key_str(psid, sidtype); if (!sidstr) return -ENOMEM; saved_cred = override_creds(root_cred); sidkey = request_key(&cifs_idmap_key_type, sidstr, ""); if (IS_ERR(sidkey)) { cifs_dbg(FYI, "%s: Can't map SID %s to a %cid\n", __func__, sidstr, sidtype == SIDOWNER ? 'u' : 'g'); goto out_revert_creds; } /* * FIXME: Here we assume that uid_t and gid_t are same size. It's * probably a safe assumption but might be better to check based on * sidtype. */ BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t)); if (sidkey->datalen != sizeof(uid_t)) { cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n", __func__, sidkey->datalen); key_invalidate(sidkey); goto out_key_put; } if (sidtype == SIDOWNER) { kuid_t uid; uid_t id; memcpy(&id, &sidkey->payload.data[0], sizeof(uid_t)); uid = make_kuid(&init_user_ns, id); if (uid_valid(uid)) fuid = uid; } else { kgid_t gid; gid_t id; memcpy(&id, &sidkey->payload.data[0], sizeof(gid_t)); gid = make_kgid(&init_user_ns, id); if (gid_valid(gid)) fgid = gid; } out_key_put: key_put(sidkey); out_revert_creds: revert_creds(saved_cred); kfree(sidstr); /* * Note that we return 0 here unconditionally. If the mapping * fails then we just fall back to using the ctx->linux_uid/linux_gid. */ got_valid_id: rc = 0; if (sidtype == SIDOWNER) fattr->cf_uid = fuid; else fattr->cf_gid = fgid; return rc; } int init_cifs_idmap(void) { struct cred *cred; struct key *keyring; int ret; cifs_dbg(FYI, "Registering the %s key type\n", cifs_idmap_key_type.name); /* create an override credential set with a special thread keyring in * which requests are cached * * this is used to prevent malicious redirections from being installed * with add_key(). 
*/ cred = prepare_kernel_cred(&init_task); if (!cred) return -ENOMEM; keyring = keyring_alloc(".cifs_idmap", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; } ret = register_key_type(&cifs_idmap_key_type); if (ret < 0) goto failed_put_key; /* instruct request_key() to use this special keyring as a cache for * the results it looks up */ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; root_cred = cred; cifs_dbg(FYI, "cifs idmap keyring: %d\n", key_serial(keyring)); return 0; failed_put_key: key_put(keyring); failed_put_cred: put_cred(cred); return ret; } void exit_cifs_idmap(void) { key_revoke(root_cred->thread_keyring); unregister_key_type(&cifs_idmap_key_type); put_cred(root_cred); cifs_dbg(FYI, "Unregistered %s key type\n", cifs_idmap_key_type.name); } /* copy ntsd, owner sid, and group sid from a security descriptor to another */ static __u32 copy_sec_desc(const struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd, __u32 sidsoffset, struct smb_sid *pownersid, struct smb_sid *pgrpsid) { struct smb_sid *owner_sid_ptr, *group_sid_ptr; struct smb_sid *nowner_sid_ptr, *ngroup_sid_ptr; /* copy security descriptor control portion */ pnntsd->revision = pntsd->revision; pnntsd->type = pntsd->type; pnntsd->dacloffset = cpu_to_le32(sizeof(struct smb_ntsd)); pnntsd->sacloffset = 0; pnntsd->osidoffset = cpu_to_le32(sidsoffset); pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct smb_sid)); /* copy owner sid */ if (pownersid) owner_sid_ptr = pownersid; else owner_sid_ptr = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); nowner_sid_ptr = (struct smb_sid *)((char *)pnntsd + sidsoffset); cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr); /* copy group sid */ if (pgrpsid) group_sid_ptr = pgrpsid; else group_sid_ptr = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); ngroup_sid_ptr = (struct smb_sid *)((char *)pnntsd + sidsoffset + sizeof(struct smb_sid)); cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr); return sidsoffset + (2 * sizeof(struct smb_sid)); } /* change posix mode to reflect permissions pmode is the existing mode (we only want to overwrite part of this bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007 */ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, umode_t *pdenied, umode_t mask) { __u32 flags = le32_to_cpu(ace_flags); /* * Do not assume "preferred" or "canonical" order. * The first DENY or ALLOW ACE which matches perfectly is * the permission to be used. Once allowed or denied, same * permission in later ACEs do not matter. 
*/ /* If not already allowed, deny these bits */ if (type == ACCESS_DENIED) { if (flags & GENERIC_ALL && !(*pmode & mask & 0777)) *pdenied |= mask & 0777; if (((flags & GENERIC_WRITE) || ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) && !(*pmode & mask & 0222)) *pdenied |= mask & 0222; if (((flags & GENERIC_READ) || ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) && !(*pmode & mask & 0444)) *pdenied |= mask & 0444; if (((flags & GENERIC_EXECUTE) || ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) && !(*pmode & mask & 0111)) *pdenied |= mask & 0111; return; } else if (type != ACCESS_ALLOWED) { cifs_dbg(VFS, "unknown access control type %d\n", type); return; } /* else ACCESS_ALLOWED type */ if ((flags & GENERIC_ALL) && !(*pdenied & mask & 0777)) { *pmode |= mask & 0777; cifs_dbg(NOISY, "all perms\n"); return; } if (((flags & GENERIC_WRITE) || ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) && !(*pdenied & mask & 0222)) *pmode |= mask & 0222; if (((flags & GENERIC_READ) || ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) && !(*pdenied & mask & 0444)) *pmode |= mask & 0444; if (((flags & GENERIC_EXECUTE) || ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) && !(*pdenied & mask & 0111)) *pmode |= mask & 0111; /* If DELETE_CHILD is set only on an owner ACE, set sticky bit */ if (flags & FILE_DELETE_CHILD) { if (mask == ACL_OWNER_MASK) { if (!(*pdenied & 01000)) *pmode |= 01000; } else if (!(*pdenied & 01000)) { *pmode &= ~01000; *pdenied |= 01000; } } cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode); return; } /* Generate access flags to reflect permissions mode is the existing mode. This function is called for every ACE in the DACL whose SID matches with either owner or group or everyone. */ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use, __u32 *pace_flags) { /* reset access mask */ *pace_flags = 0x0; /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */ mode &= bits_to_use; /* check for R/W/X UGO since we do not know whose flags is this but we have cleared all the bits sans RWX for either user or group or other as per bits_to_use */ if (mode & S_IRUGO) *pace_flags |= SET_FILE_READ_RIGHTS; if (mode & S_IWUGO) *pace_flags |= SET_FILE_WRITE_RIGHTS; if (mode & S_IXUGO) *pace_flags |= SET_FILE_EXEC_RIGHTS; cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n", mode, *pace_flags); return; } static __u16 cifs_copy_ace(struct smb_ace *dst, struct smb_ace *src, struct smb_sid *psid) { __u16 size = 1 + 1 + 2 + 4; dst->type = src->type; dst->flags = src->flags; dst->access_req = src->access_req; /* Check if there's a replacement sid specified */ if (psid) size += cifs_copy_sid(&dst->sid, psid); else size += cifs_copy_sid(&dst->sid, &src->sid); dst->size = cpu_to_le16(size); return size; } static __u16 fill_ace_for_sid(struct smb_ace *pntace, const struct smb_sid *psid, __u64 nmode, umode_t bits, __u8 access_type, bool allow_delete_child) { int i; __u16 size = 0; __u32 access_req = 0; pntace->type = access_type; pntace->flags = 0x0; mode_to_access_flags(nmode, bits, &access_req); if (access_type == ACCESS_ALLOWED && allow_delete_child) access_req |= FILE_DELETE_CHILD; if (access_type == ACCESS_ALLOWED && !access_req) access_req = SET_MINIMUM_RIGHTS; else if (access_type == ACCESS_DENIED) access_req &= ~SET_MINIMUM_RIGHTS; pntace->access_req = cpu_to_le32(access_req); pntace->sid.revision = psid->revision; pntace->sid.num_subauth = psid->num_subauth; for (i = 0; i < NUM_AUTHS; i++) pntace->sid.authority[i] = psid->authority[i]; for (i = 0; i < 
psid->num_subauth; i++) pntace->sid.sub_auth[i] = psid->sub_auth[i]; size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4); pntace->size = cpu_to_le16(size); return size; } #ifdef CONFIG_CIFS_DEBUG2 static void dump_ace(struct smb_ace *pace, char *end_of_acl) { int num_subauth; /* validate that we do not go past end of acl */ if (le16_to_cpu(pace->size) < 16) { cifs_dbg(VFS, "ACE too small %d\n", le16_to_cpu(pace->size)); return; } if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) { cifs_dbg(VFS, "ACL too small to parse ACE\n"); return; } num_subauth = pace->sid.num_subauth; if (num_subauth) { int i; cifs_dbg(FYI, "ACE revision %d num_auth %d type %d flags %d size %d\n", pace->sid.revision, pace->sid.num_subauth, pace->type, pace->flags, le16_to_cpu(pace->size)); for (i = 0; i < num_subauth; ++i) { cifs_dbg(FYI, "ACE sub_auth[%d]: 0x%x\n", i, le32_to_cpu(pace->sid.sub_auth[i])); } /* BB add length check to make sure that we do not have huge num auths and therefore go off the end */ } return; } #endif static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, struct smb_sid *pownersid, struct smb_sid *pgrpsid, struct cifs_fattr *fattr, bool mode_from_special_sid) { int i; u16 num_aces = 0; int acl_size; char *acl_base; struct smb_ace **ppace; /* BB need to add parm so we can store the SID BB */ if (!pdacl) { /* no DACL in the security descriptor, set all the permissions for user/group/other */ fattr->cf_mode |= 0777; return; } /* validate that we do not go past end of acl */ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) || end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { cifs_dbg(VFS, "ACL too small to parse DACL\n"); return; } cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n", le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), le16_to_cpu(pdacl->num_aces)); /* reset rwx permissions for user/group/other. Also, if num_aces is 0 i.e. 
DACL has no ACEs, user/group/other have no permissions */ fattr->cf_mode &= ~(0777); acl_base = (char *)pdacl; acl_size = sizeof(struct smb_acl); num_aces = le16_to_cpu(pdacl->num_aces); if (num_aces > 0) { umode_t denied_mode = 0; if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) / (offsetof(struct smb_ace, sid) + offsetof(struct smb_sid, sub_auth) + sizeof(__le16))) return; ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL); if (!ppace) return; for (i = 0; i < num_aces; ++i) { if (end_of_acl - acl_base < acl_size) break; ppace[i] = (struct smb_ace *) (acl_base + acl_size); acl_base = (char *)ppace[i]; acl_size = offsetof(struct smb_ace, sid) + offsetof(struct smb_sid, sub_auth); if (end_of_acl - acl_base < acl_size || ppace[i]->sid.num_subauth == 0 || ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES || (end_of_acl - acl_base < acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) || (le16_to_cpu(ppace[i]->size) < acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth)) break; #ifdef CONFIG_CIFS_DEBUG2 dump_ace(ppace[i], end_of_acl); #endif if (mode_from_special_sid && (compare_sids(&(ppace[i]->sid), &sid_unix_NFS_mode) == 0)) { /* * Full permissions are: * 07777 = S_ISUID | S_ISGID | S_ISVTX | * S_IRWXU | S_IRWXG | S_IRWXO */ fattr->cf_mode &= ~07777; fattr->cf_mode |= le32_to_cpu(ppace[i]->sid.sub_auth[2]); break; } else { if (compare_sids(&(ppace[i]->sid), pownersid) == 0) { access_flags_to_mode(ppace[i]->access_req, ppace[i]->type, &fattr->cf_mode, &denied_mode, ACL_OWNER_MASK); } else if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0) { access_flags_to_mode(ppace[i]->access_req, ppace[i]->type, &fattr->cf_mode, &denied_mode, ACL_GROUP_MASK); } else if ((compare_sids(&(ppace[i]->sid), &sid_everyone) == 0) || (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)) { access_flags_to_mode(ppace[i]->access_req, ppace[i]->type, &fattr->cf_mode, &denied_mode, ACL_EVERYONE_MASK); } } /* memcpy((void *)(&(cifscred->aces[i])), (void *)ppace[i], sizeof(struct smb_ace)); */ acl_size = le16_to_cpu(ppace[i]->size); } kfree(ppace); } return; } unsigned int setup_authusers_ACE(struct smb_ace *pntace) { int i; unsigned int ace_size = 20; pntace->type = ACCESS_ALLOWED_ACE_TYPE; pntace->flags = 0x0; pntace->access_req = cpu_to_le32(GENERIC_ALL); pntace->sid.num_subauth = 1; pntace->sid.revision = 1; for (i = 0; i < NUM_AUTHS; i++) pntace->sid.authority[i] = sid_authusers.authority[i]; pntace->sid.sub_auth[0] = sid_authusers.sub_auth[0]; /* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */ pntace->size = cpu_to_le16(ace_size); return ace_size; } /* * Fill in the special SID based on the mode. 
See * https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ unsigned int setup_special_mode_ACE(struct smb_ace *pntace, bool posix, __u64 nmode) { int i; unsigned int ace_size = 28; if (posix) pntace->type = ACCESS_ALLOWED_ACE_TYPE; else pntace->type = ACCESS_DENIED_ACE_TYPE; pntace->flags = 0x0; pntace->access_req = 0; pntace->sid.num_subauth = 3; pntace->sid.revision = 1; for (i = 0; i < NUM_AUTHS; i++) pntace->sid.authority[i] = sid_unix_NFS_mode.authority[i]; pntace->sid.sub_auth[0] = sid_unix_NFS_mode.sub_auth[0]; pntace->sid.sub_auth[1] = sid_unix_NFS_mode.sub_auth[1]; pntace->sid.sub_auth[2] = cpu_to_le32(nmode & 07777); /* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */ pntace->size = cpu_to_le16(ace_size); return ace_size; } unsigned int setup_special_user_owner_ACE(struct smb_ace *pntace) { int i; unsigned int ace_size = 28; pntace->type = ACCESS_ALLOWED_ACE_TYPE; pntace->flags = 0x0; pntace->access_req = cpu_to_le32(GENERIC_ALL); pntace->sid.num_subauth = 3; pntace->sid.revision = 1; for (i = 0; i < NUM_AUTHS; i++) pntace->sid.authority[i] = sid_unix_NFS_users.authority[i]; pntace->sid.sub_auth[0] = sid_unix_NFS_users.sub_auth[0]; pntace->sid.sub_auth[1] = sid_unix_NFS_users.sub_auth[1]; pntace->sid.sub_auth[2] = cpu_to_le32(current_fsgid().val); /* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */ pntace->size = cpu_to_le16(ace_size); return ace_size; } static void populate_new_aces(char *nacl_base, struct smb_sid *pownersid, struct smb_sid *pgrpsid, __u64 *pnmode, u16 *pnum_aces, u16 *pnsize, bool modefromsid, bool posix) { __u64 nmode; u16 num_aces = 0; u16 nsize = 0; __u64 user_mode; __u64 group_mode; __u64 other_mode; __u64 deny_user_mode = 0; __u64 deny_group_mode = 0; bool sticky_set = false; struct smb_ace *pnntace = NULL; nmode = *pnmode; num_aces = *pnum_aces; nsize = *pnsize; if (modefromsid || posix) { pnntace = (struct smb_ace *) (nacl_base + nsize); nsize += setup_special_mode_ACE(pnntace, posix, nmode); num_aces++; if (modefromsid) { pnntace = (struct smb_ace *) (nacl_base + nsize); nsize += setup_authusers_ACE(pnntace); num_aces++; } goto set_size; } /* * We'll try to keep the mode as requested by the user. * But in cases where we cannot meaningfully convert that * into ACL, return back the updated mode, so that it is * updated in the inode. */ if (!memcmp(pownersid, pgrpsid, sizeof(struct smb_sid))) { /* * Case when owner and group SIDs are the same. * Set the more restrictive of the two modes. */ user_mode = nmode & (nmode << 3) & 0700; group_mode = nmode & (nmode >> 3) & 0070; } else { user_mode = nmode & 0700; group_mode = nmode & 0070; } other_mode = nmode & 0007; /* We need DENY ACE when the perm is more restrictive than the next sets. */ deny_user_mode = ~(user_mode) & ((group_mode << 3) | (other_mode << 6)) & 0700; deny_group_mode = ~(group_mode) & (other_mode << 3) & 0070; *pnmode = user_mode | group_mode | other_mode | (nmode & ~0777); /* This tells if we should allow delete child for group and everyone. */ if (nmode & 01000) sticky_set = true; if (deny_user_mode) { pnntace = (struct smb_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pownersid, deny_user_mode, 0700, ACCESS_DENIED, false); num_aces++; } /* Group DENY ACE does not conflict with owner ALLOW ACE. 
Keep in preferred order*/ if (deny_group_mode && !(deny_group_mode & (user_mode >> 3))) { pnntace = (struct smb_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode, 0070, ACCESS_DENIED, false); num_aces++; } pnntace = (struct smb_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pownersid, user_mode, 0700, ACCESS_ALLOWED, true); num_aces++; /* Group DENY ACE conflicts with owner ALLOW ACE. So keep it after. */ if (deny_group_mode && (deny_group_mode & (user_mode >> 3))) { pnntace = (struct smb_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode, 0070, ACCESS_DENIED, false); num_aces++; } pnntace = (struct smb_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, pgrpsid, group_mode, 0070, ACCESS_ALLOWED, !sticky_set); num_aces++; pnntace = (struct smb_ace *) (nacl_base + nsize); nsize += fill_ace_for_sid(pnntace, &sid_everyone, other_mode, 0007, ACCESS_ALLOWED, !sticky_set); num_aces++; set_size: *pnum_aces = num_aces; *pnsize = nsize; } static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *pndacl, struct smb_sid *pownersid, struct smb_sid *pgrpsid, struct smb_sid *pnownersid, struct smb_sid *pngrpsid) { int i; u16 size = 0; struct smb_ace *pntace = NULL; char *acl_base = NULL; u16 src_num_aces = 0; u16 nsize = 0; struct smb_ace *pnntace = NULL; char *nacl_base = NULL; u16 ace_size = 0; acl_base = (char *)pdacl; size = sizeof(struct smb_acl); src_num_aces = le16_to_cpu(pdacl->num_aces); nacl_base = (char *)pndacl; nsize = sizeof(struct smb_acl); /* Go through all the ACEs */ for (i = 0; i < src_num_aces; ++i) { pntace = (struct smb_ace *) (acl_base + size); pnntace = (struct smb_ace *) (nacl_base + nsize); if (pnownersid && compare_sids(&pntace->sid, pownersid) == 0) ace_size = cifs_copy_ace(pnntace, pntace, pnownersid); else if (pngrpsid && compare_sids(&pntace->sid, pgrpsid) == 0) ace_size = cifs_copy_ace(pnntace, pntace, pngrpsid); else ace_size = cifs_copy_ace(pnntace, pntace, NULL); size += le16_to_cpu(pntace->size); nsize += ace_size; } return nsize; } static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl, struct smb_sid *pownersid, struct smb_sid *pgrpsid, __u64 *pnmode, bool mode_from_sid, bool posix) { int i; u16 size = 0; struct smb_ace *pntace = NULL; char *acl_base = NULL; u16 src_num_aces = 0; u16 nsize = 0; struct smb_ace *pnntace = NULL; char *nacl_base = NULL; u16 num_aces = 0; bool new_aces_set = false; /* Assuming that pndacl and pnmode are never NULL */ nacl_base = (char *)pndacl; nsize = sizeof(struct smb_acl); /* If pdacl is NULL, we don't have a src. Simply populate new ACL. */ if (!pdacl || posix) { populate_new_aces(nacl_base, pownersid, pgrpsid, pnmode, &num_aces, &nsize, mode_from_sid, posix); goto finalize_dacl; } acl_base = (char *)pdacl; size = sizeof(struct smb_acl); src_num_aces = le16_to_cpu(pdacl->num_aces); /* Retain old ACEs which we can retain */ for (i = 0; i < src_num_aces; ++i) { pntace = (struct smb_ace *) (acl_base + size); if (!new_aces_set && (pntace->flags & INHERITED_ACE)) { /* Place the new ACEs in between existing explicit and inherited */ populate_new_aces(nacl_base, pownersid, pgrpsid, pnmode, &num_aces, &nsize, mode_from_sid, posix); new_aces_set = true; } /* If it's any one of the ACE we're replacing, skip! 
*/ if (((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) || (compare_sids(&pntace->sid, pownersid) == 0) || (compare_sids(&pntace->sid, pgrpsid) == 0) || (compare_sids(&pntace->sid, &sid_everyone) == 0) || (compare_sids(&pntace->sid, &sid_authusers) == 0))) { goto next_ace; } /* update the pointer to the next ACE to populate*/ pnntace = (struct smb_ace *) (nacl_base + nsize); nsize += cifs_copy_ace(pnntace, pntace, NULL); num_aces++; next_ace: size += le16_to_cpu(pntace->size); } /* If inherited ACEs are not present, place the new ones at the tail */ if (!new_aces_set) { populate_new_aces(nacl_base, pownersid, pgrpsid, pnmode, &num_aces, &nsize, mode_from_sid, posix); new_aces_set = true; } finalize_dacl: pndacl->num_aces = cpu_to_le16(num_aces); pndacl->size = cpu_to_le16(nsize); return 0; } static int parse_sid(struct smb_sid *psid, char *end_of_acl) { /* BB need to add parm so we can store the SID BB */ /* validate that we do not go past end of ACL - sid must be at least 8 bytes long (assuming no sub-auths - e.g. the null SID */ if (end_of_acl < (char *)psid + 8) { cifs_dbg(VFS, "ACL too small to parse SID %p\n", psid); return -EINVAL; } #ifdef CONFIG_CIFS_DEBUG2 if (psid->num_subauth) { int i; cifs_dbg(FYI, "SID revision %d num_auth %d\n", psid->revision, psid->num_subauth); for (i = 0; i < psid->num_subauth; i++) { cifs_dbg(FYI, "SID sub_auth[%d]: 0x%x\n", i, le32_to_cpu(psid->sub_auth[i])); } /* BB add length check to make sure that we do not have huge num auths and therefore go off the end */ cifs_dbg(FYI, "RID 0x%x\n", le32_to_cpu(psid->sub_auth[psid->num_subauth-1])); } #endif return 0; } /* Convert CIFS ACL to POSIX form */ static int parse_sec_desc(struct cifs_sb_info *cifs_sb, struct smb_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr, bool get_mode_from_special_sid) { int rc = 0; struct smb_sid *owner_sid_ptr, *group_sid_ptr; struct smb_acl *dacl_ptr; /* no need for SACL ptr */ char *end_of_acl = ((char *)pntsd) + acl_len; __u32 dacloffset; if (pntsd == NULL) return -EIO; owner_sid_ptr = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); group_sid_ptr = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); dacloffset = le32_to_cpu(pntsd->dacloffset); dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset); cifs_dbg(NOISY, "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n", pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset), le32_to_cpu(pntsd->gsidoffset), le32_to_cpu(pntsd->sacloffset), dacloffset); /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */ rc = parse_sid(owner_sid_ptr, end_of_acl); if (rc) { cifs_dbg(FYI, "%s: Error %d parsing Owner SID\n", __func__, rc); return rc; } rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER); if (rc) { cifs_dbg(FYI, "%s: Error %d mapping Owner SID to uid\n", __func__, rc); return rc; } rc = parse_sid(group_sid_ptr, end_of_acl); if (rc) { cifs_dbg(FYI, "%s: Error %d mapping Owner SID to gid\n", __func__, rc); return rc; } rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP); if (rc) { cifs_dbg(FYI, "%s: Error %d mapping Group SID to gid\n", __func__, rc); return rc; } if (dacloffset) parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr, group_sid_ptr, fattr, get_mode_from_special_sid); else cifs_dbg(FYI, "no ACL\n"); /* BB grant all or default perms? 
*/ return rc; } /* Convert permission bits from mode to equivalent CIFS ACL */ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd, __u32 secdesclen, __u32 *pnsecdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid, bool mode_from_sid, bool id_from_sid, bool posix, int *aclflag) { int rc = 0; __u32 dacloffset; __u32 ndacloffset; __u32 sidsoffset; struct smb_sid *owner_sid_ptr, *group_sid_ptr; struct smb_sid *nowner_sid_ptr = NULL, *ngroup_sid_ptr = NULL; struct smb_acl *dacl_ptr = NULL; /* no need for SACL ptr */ struct smb_acl *ndacl_ptr = NULL; /* no need for SACL ptr */ char *end_of_acl = ((char *)pntsd) + secdesclen; u16 size = 0; dacloffset = le32_to_cpu(pntsd->dacloffset); if (dacloffset) { dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset); if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) { cifs_dbg(VFS, "Server returned illegal ACL size\n"); return -EINVAL; } } owner_sid_ptr = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); group_sid_ptr = (struct smb_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */ ndacloffset = sizeof(struct smb_ntsd); ndacl_ptr = (struct smb_acl *)((char *)pnntsd + ndacloffset); ndacl_ptr->revision = dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION); ndacl_ptr->size = cpu_to_le16(0); ndacl_ptr->num_aces = cpu_to_le16(0); rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr, pnmode, mode_from_sid, posix); sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size); /* copy the non-dacl portion of secdesc */ *pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset, NULL, NULL); *aclflag |= CIFS_ACL_DACL; } else { ndacloffset = sizeof(struct smb_ntsd); ndacl_ptr = (struct smb_acl *)((char *)pnntsd + ndacloffset); ndacl_ptr->revision = dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION); ndacl_ptr->num_aces = dacl_ptr ? 
dacl_ptr->num_aces : 0; if (uid_valid(uid)) { /* chown */ uid_t id; nowner_sid_ptr = kzalloc(sizeof(struct smb_sid), GFP_KERNEL); if (!nowner_sid_ptr) { rc = -ENOMEM; goto chown_chgrp_exit; } id = from_kuid(&init_user_ns, uid); if (id_from_sid) { struct owner_sid *osid = (struct owner_sid *)nowner_sid_ptr; /* Populate the user ownership fields S-1-5-88-1 */ osid->Revision = 1; osid->NumAuth = 3; osid->Authority[5] = 5; osid->SubAuthorities[0] = cpu_to_le32(88); osid->SubAuthorities[1] = cpu_to_le32(1); osid->SubAuthorities[2] = cpu_to_le32(id); } else { /* lookup sid with upcall */ rc = id_to_sid(id, SIDOWNER, nowner_sid_ptr); if (rc) { cifs_dbg(FYI, "%s: Mapping error %d for owner id %d\n", __func__, rc, id); goto chown_chgrp_exit; } } *aclflag |= CIFS_ACL_OWNER; } if (gid_valid(gid)) { /* chgrp */ gid_t id; ngroup_sid_ptr = kzalloc(sizeof(struct smb_sid), GFP_KERNEL); if (!ngroup_sid_ptr) { rc = -ENOMEM; goto chown_chgrp_exit; } id = from_kgid(&init_user_ns, gid); if (id_from_sid) { struct owner_sid *gsid = (struct owner_sid *)ngroup_sid_ptr; /* Populate the group ownership fields S-1-5-88-2 */ gsid->Revision = 1; gsid->NumAuth = 3; gsid->Authority[5] = 5; gsid->SubAuthorities[0] = cpu_to_le32(88); gsid->SubAuthorities[1] = cpu_to_le32(2); gsid->SubAuthorities[2] = cpu_to_le32(id); } else { /* lookup sid with upcall */ rc = id_to_sid(id, SIDGROUP, ngroup_sid_ptr); if (rc) { cifs_dbg(FYI, "%s: Mapping error %d for group id %d\n", __func__, rc, id); goto chown_chgrp_exit; } } *aclflag |= CIFS_ACL_GROUP; } if (dacloffset) { /* Replace ACEs for old owner with new one */ size = replace_sids_and_copy_aces(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr, nowner_sid_ptr, ngroup_sid_ptr); ndacl_ptr->size = cpu_to_le16(size); } sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size); /* copy the non-dacl portion of secdesc */ *pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset, nowner_sid_ptr, ngroup_sid_ptr); chown_chgrp_exit: /* errors could jump here. 
So make sure we return soon after this */ kfree(nowner_sid_ptr); kfree(ngroup_sid_ptr); } return rc; } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, const struct cifs_fid *cifsfid, u32 *pacllen, u32 info) { struct smb_ntsd *pntsd = NULL; unsigned int xid; int rc; struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return ERR_CAST(tlink); xid = get_xid(); rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd, pacllen, info); free_xid(xid); cifs_put_tlink(tlink); cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen); if (rc) return ERR_PTR(rc); return pntsd; } static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, u32 *pacllen, u32 info) { struct smb_ntsd *pntsd = NULL; int oplock = 0; unsigned int xid; int rc; struct cifs_tcon *tcon; struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); struct cifs_fid fid; struct cifs_open_parms oparms; if (IS_ERR(tlink)) return ERR_CAST(tlink); tcon = tlink_tcon(tlink); xid = get_xid(); oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = READ_CONTROL, .create_options = cifs_create_options(cifs_sb, 0), .disposition = FILE_OPEN, .path = path, .fid = &fid, }; if (info & SACL_SECINFO) oparms.desired_access |= SYSTEM_SECURITY; rc = CIFS_open(xid, &oparms, &oplock, NULL); if (!rc) { rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen, info); CIFSSMBClose(xid, tcon, fid.netfid); } cifs_put_tlink(tlink); free_xid(xid); cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen); if (rc) return ERR_PTR(rc); return pntsd; } /* Retrieve an ACL from the server */ struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb, struct inode *inode, const char *path, u32 *pacllen, u32 info) { struct smb_ntsd *pntsd = NULL; struct cifsFileInfo *open_file = NULL; if (inode) open_file = find_readable_file(CIFS_I(inode), true); if (!open_file) return get_cifs_acl_by_path(cifs_sb, path, pacllen, info); pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info); cifsFileInfo_put(open_file); return pntsd; } /* Set an ACL on the server */ int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen, struct inode *inode, const char *path, int aclflag) { int oplock = 0; unsigned int xid; int rc, access_flags = 0; struct cifs_tcon *tcon; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); struct cifs_fid fid; struct cifs_open_parms oparms; if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); xid = get_xid(); if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP) access_flags |= WRITE_OWNER; if (aclflag & CIFS_ACL_SACL) access_flags |= SYSTEM_SECURITY; if (aclflag & CIFS_ACL_DACL) access_flags |= WRITE_DAC; oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, .desired_access = access_flags, .create_options = cifs_create_options(cifs_sb, 0), .disposition = FILE_OPEN, .path = path, .fid = &fid, }; rc = CIFS_open(xid, &oparms, &oplock, NULL); if (rc) { cifs_dbg(VFS, "Unable to open file to set ACL\n"); goto out; } rc = CIFSSMBSetCIFSACL(xid, tcon, fid.netfid, pnntsd, acllen, aclflag); cifs_dbg(NOISY, "SetCIFSACL rc = %d\n", rc); CIFSSMBClose(xid, tcon, fid.netfid); out: free_xid(xid); cifs_put_tlink(tlink); return rc; } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr 
*fattr, struct inode *inode, bool mode_from_special_sid, const char *path, const struct cifs_fid *pfid) { struct smb_ntsd *pntsd = NULL; u32 acllen = 0; int rc = 0; struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); struct smb_version_operations *ops; const u32 info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO; cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); if (IS_ERR(tlink)) return PTR_ERR(tlink); ops = tlink_tcon(tlink)->ses->server->ops; if (pfid && (ops->get_acl_by_fid)) pntsd = ops->get_acl_by_fid(cifs_sb, pfid, &acllen, info); else if (ops->get_acl) pntsd = ops->get_acl(cifs_sb, inode, path, &acllen, info); else { cifs_put_tlink(tlink); return -EOPNOTSUPP; } /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ if (IS_ERR(pntsd)) { rc = PTR_ERR(pntsd); cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); } else if (mode_from_special_sid) { rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true); kfree(pntsd); } else { /* get approximated mode from ACL */ rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false); kfree(pntsd); if (rc) cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc); } cifs_put_tlink(tlink); return rc; } /* Convert mode bits to an ACL so we can update the ACL on the server */ int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode, kuid_t uid, kgid_t gid) { int rc = 0; int aclflag = CIFS_ACL_DACL; /* default flag to set */ __u32 secdesclen = 0; __u32 nsecdesclen = 0; __u32 dacloffset = 0; struct smb_acl *dacl_ptr = NULL; struct smb_ntsd *pntsd = NULL; /* acl obtained from server */ struct smb_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink; struct smb_version_operations *ops; bool mode_from_sid, id_from_sid; const u32 info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO; bool posix; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); posix = tlink_tcon(tlink)->posix_extensions; ops = tlink_tcon(tlink)->ses->server->ops; cifs_dbg(NOISY, "set ACL from mode for %s\n", path); /* Get the security descriptor */ if (ops->get_acl == NULL) { cifs_put_tlink(tlink); return -EOPNOTSUPP; } pntsd = ops->get_acl(cifs_sb, inode, path, &secdesclen, info); if (IS_ERR(pntsd)) { rc = PTR_ERR(pntsd); cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); cifs_put_tlink(tlink); return rc; } if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) mode_from_sid = true; else mode_from_sid = false; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) id_from_sid = true; else id_from_sid = false; /* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */ if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */ if (posix) nsecdesclen = 1 * sizeof(struct smb_ace); else if (mode_from_sid) nsecdesclen = secdesclen + (2 * sizeof(struct smb_ace)); else /* cifsacl */ nsecdesclen = secdesclen + (5 * sizeof(struct smb_ace)); } else { /* chown */ /* When ownership changes, changes new owner sid length could be different */ nsecdesclen = sizeof(struct smb_ntsd) + (sizeof(struct smb_sid) * 2); dacloffset = le32_to_cpu(pntsd->dacloffset); if (dacloffset) { dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset); if (mode_from_sid) nsecdesclen += le16_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace); else /* cifsacl */ nsecdesclen += le16_to_cpu(dacl_ptr->size); } } /* * Add three ACEs for owner, group, everyone getting rid of other ACEs * as chmod disables ACEs and set the security descriptor. 
Allocate * memory for the smb header, set security descriptor request security * descriptor parameters, and security descriptor itself */ nsecdesclen = max_t(u32, nsecdesclen, DEFAULT_SEC_DESC_LEN); pnntsd = kmalloc(nsecdesclen, GFP_KERNEL); if (!pnntsd) { kfree(pntsd); cifs_put_tlink(tlink); return -ENOMEM; } rc = build_sec_desc(pntsd, pnntsd, secdesclen, &nsecdesclen, pnmode, uid, gid, mode_from_sid, id_from_sid, posix, &aclflag); cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc); if (ops->set_acl == NULL) rc = -EOPNOTSUPP; if (!rc) { /* Set the security descriptor */ rc = ops->set_acl(pnntsd, nsecdesclen, inode, path, aclflag); cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc); } cifs_put_tlink(tlink); kfree(pnntsd); kfree(pntsd); return rc; } struct posix_acl *cifs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, int type) { #if defined(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) && defined(CONFIG_CIFS_POSIX) struct posix_acl *acl = NULL; ssize_t rc = -EOPNOTSUPP; unsigned int xid; struct super_block *sb = dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; const char *full_path; void *page; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return ERR_CAST(tlink); pTcon = tlink_tcon(tlink); xid = get_xid(); page = alloc_dentry_path(); full_path = build_path_from_dentry(dentry, page); if (IS_ERR(full_path)) { acl = ERR_CAST(full_path); goto out; } /* return alt name if available as pseudo attr */ switch (type) { case ACL_TYPE_ACCESS: if (sb->s_flags & SB_POSIXACL) rc = cifs_do_get_acl(xid, pTcon, full_path, &acl, ACL_TYPE_ACCESS, cifs_sb->local_nls, cifs_remap(cifs_sb)); break; case ACL_TYPE_DEFAULT: if (sb->s_flags & SB_POSIXACL) rc = cifs_do_get_acl(xid, pTcon, full_path, &acl, ACL_TYPE_DEFAULT, cifs_sb->local_nls, cifs_remap(cifs_sb)); break; } if (rc < 0) { if (rc == -EINVAL) acl = ERR_PTR(-EOPNOTSUPP); else acl = ERR_PTR(rc); } out: free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); return acl; #else return ERR_PTR(-EOPNOTSUPP); #endif } int cifs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, struct posix_acl *acl, int type) { #if defined(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) && defined(CONFIG_CIFS_POSIX) int rc = -EOPNOTSUPP; unsigned int xid; struct super_block *sb = dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; const char *full_path; void *page; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); pTcon = tlink_tcon(tlink); xid = get_xid(); page = alloc_dentry_path(); full_path = build_path_from_dentry(dentry, page); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto out; } if (!acl) goto out; /* return dos attributes as pseudo xattr */ /* return alt name if available as pseudo attr */ /* if proc/fs/cifs/streamstoxattr is set then search server for EAs or streams to returns as xattrs */ if (posix_acl_xattr_size(acl->a_count) > CIFSMaxBufSize) { cifs_dbg(FYI, "size of EA value too large\n"); rc = -EOPNOTSUPP; goto out; } switch (type) { case ACL_TYPE_ACCESS: if (sb->s_flags & SB_POSIXACL) rc = cifs_do_set_acl(xid, pTcon, full_path, acl, ACL_TYPE_ACCESS, cifs_sb->local_nls, cifs_remap(cifs_sb)); break; case ACL_TYPE_DEFAULT: if (sb->s_flags & SB_POSIXACL) rc = cifs_do_set_acl(xid, pTcon, full_path, acl, ACL_TYPE_DEFAULT, cifs_sb->local_nls, cifs_remap(cifs_sb)); break; } out: free_dentry_path(page); free_xid(xid); cifs_put_tlink(tlink); return rc; #else return -EOPNOTSUPP; #endif } |
// SPDX-License-Identifier: GPL-2.0
/*
 * USB-ACPI glue code
 *
 * Copyright 2012 Red Hat <mjg@redhat.com>
 */
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/usb/hcd.h>

#include "hub.h"

/**
 * usb_acpi_power_manageable - check whether usb port has
 * acpi power resource.
 * @hdev: USB device belonging to the usb hub
 * @index: port index based zero
 *
 * Return true if the port has acpi power resource and false if no.
 */
bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
{
	acpi_handle port_handle;
	int port1 = index + 1;

	port_handle = usb_get_hub_port_acpi_handle(hdev, port1);
	if (port_handle)
		return acpi_bus_power_manageable(port_handle);
	else
		return false;
}
EXPORT_SYMBOL_GPL(usb_acpi_power_manageable);

#define UUID_USB_CONTROLLER_DSM	"ce2ee385-00e6-48cb-9f05-2edb927c4899"
#define USB_DSM_DISABLE_U1_U2_FOR_PORT	5

/**
 * usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port.
 * @hdev: USB device belonging to the usb hub
 * @index: zero based port index
 *
 * Some USB3 ports may not support USB3 link power management U1/U2 states
 * due to different retimer setup. ACPI provides _DSM method which returns 0x01
 * if U1 and U2 states should be disabled.
Evaluate _DSM with: * Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899 * Arg1: Revision ID = 0 * Arg2: Function Index = 5 * Arg3: (empty) * * Return 1 if USB3 port is LPM incapable, negative on error, otherwise 0 */ int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index) { union acpi_object *obj; acpi_handle port_handle; int port1 = index + 1; guid_t guid; int ret; ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid); if (ret) return ret; port_handle = usb_get_hub_port_acpi_handle(hdev, port1); if (!port_handle) { dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1); return -ENODEV; } if (!acpi_check_dsm(port_handle, &guid, 0, BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) { dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n", port1, USB_DSM_DISABLE_U1_U2_FOR_PORT); return -ENODEV; } obj = acpi_evaluate_dsm_typed(port_handle, &guid, 0, USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL, ACPI_TYPE_INTEGER); if (!obj) { dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1); return -EINVAL; } if (obj->integer.value == 0x01) ret = 1; ACPI_FREE(obj); return ret; } EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable); /** * usb_acpi_set_power_state - control usb port's power via acpi power * resource * @hdev: USB device belonging to the usb hub * @index: port index based zero * @enable: power state expected to be set * * Notice to use usb_acpi_power_manageable() to check whether the usb port * has acpi power resource before invoking this function. * * Returns 0 on success, else negative errno. */ int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); struct usb_port *port_dev; acpi_handle port_handle; unsigned char state; int port1 = index + 1; int error = -EINVAL; if (!hub) return -ENODEV; port_dev = hub->ports[port1 - 1]; port_handle = (acpi_handle) usb_get_hub_port_acpi_handle(hdev, port1); if (!port_handle) return error; if (enable) state = ACPI_STATE_D0; else state = ACPI_STATE_D3_COLD; error = acpi_bus_set_power(port_handle, state); if (!error) dev_dbg(&port_dev->dev, "acpi: power was set to %d\n", enable); else dev_dbg(&port_dev->dev, "acpi: power failed to be set\n"); return error; } EXPORT_SYMBOL_GPL(usb_acpi_set_power_state); /** * usb_acpi_add_usb4_devlink - add device link to USB4 Host Interface for tunneled USB3 devices * * @udev: Tunneled USB3 device connected to a roothub. * * Adds a device link between a tunneled USB3 device and the USB4 Host Interface * device to ensure correct runtime PM suspend and resume order. This function * should only be called for tunneled USB3 devices. * The USB4 Host Interface this tunneled device depends on is found from the roothub * port ACPI device specific data _DSD entry. 
* * Return: negative error code on failure, 0 otherwise */ static int usb_acpi_add_usb4_devlink(struct usb_device *udev) { const struct device_link *link; struct usb_port *port_dev; struct usb_hub *hub; if (!udev->parent || udev->parent->parent) return 0; hub = usb_hub_to_struct_hub(udev->parent); port_dev = hub->ports[udev->portnum - 1]; struct fwnode_handle *nhi_fwnode __free(fwnode_handle) = fwnode_find_reference(dev_fwnode(&port_dev->dev), "usb4-host-interface", 0); if (IS_ERR(nhi_fwnode) || !nhi_fwnode->dev) return 0; link = device_link_add(&port_dev->child->dev, nhi_fwnode->dev, DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE | DL_FLAG_PM_RUNTIME); if (!link) { dev_err(&port_dev->dev, "Failed to created device link from %s to %s\n", dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev)); return -EINVAL; } dev_dbg(&port_dev->dev, "Created device link from %s to %s\n", dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev)); return 0; } /* * Private to usb-acpi, all the core needs to know is that * port_dev->location is non-zero when it has been set by the firmware. */ #define USB_ACPI_LOCATION_VALID (1 << 31) static void usb_acpi_get_connect_type(struct usb_port *port_dev, acpi_handle *handle) { enum usb_port_connect_type connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *upc = NULL; struct acpi_pld_info *pld = NULL; acpi_status status; /* * According to 9.14 in ACPI Spec 6.2. _PLD indicates whether usb port * is user visible and _UPC indicates whether it is connectable. If * the port was visible and connectable, it could be freely connected * and disconnected with USB devices. If no visible and connectable, * a usb device is directly hard-wired to the port. If no visible and * no connectable, the port would be not used. */ if (acpi_get_physical_device_location(handle, &pld) && pld) port_dev->location = USB_ACPI_LOCATION_VALID | pld->group_token << 8 | pld->group_position; status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer); if (ACPI_FAILURE(status)) goto out; upc = buffer.pointer; if (!upc || (upc->type != ACPI_TYPE_PACKAGE) || upc->package.count != 4) goto out; /* UPC states port is connectable */ if (upc->package.elements[0].integer.value) if (!pld) ; /* keep connect_type as unknown */ else if (pld->user_visible) connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG; else connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED; else connect_type = USB_PORT_NOT_USED; out: port_dev->connect_type = connect_type; kfree(upc); ACPI_FREE(pld); } static struct acpi_device * usb_acpi_get_companion_for_port(struct usb_port *port_dev) { struct usb_device *udev; struct acpi_device *adev; acpi_handle *parent_handle; int port1; /* Get the struct usb_device point of port's hub */ udev = to_usb_device(port_dev->dev.parent->parent); /* * The root hub ports' parent is the root hub. The non-root-hub * ports' parent is the parent hub port which the hub is * connected to. 
*/ if (!udev->parent) { adev = ACPI_COMPANION(&udev->dev); port1 = usb_hcd_find_raw_port_number(bus_to_hcd(udev->bus), port_dev->portnum); } else { parent_handle = usb_get_hub_port_acpi_handle(udev->parent, udev->portnum); if (!parent_handle) return NULL; adev = acpi_fetch_acpi_dev(parent_handle); port1 = port_dev->portnum; } return acpi_find_child_by_adr(adev, port1); } static struct acpi_device * usb_acpi_find_companion_for_port(struct usb_port *port_dev) { struct acpi_device *adev; adev = usb_acpi_get_companion_for_port(port_dev); if (!adev) return NULL; usb_acpi_get_connect_type(port_dev, adev->handle); return adev; } static struct acpi_device * usb_acpi_find_companion_for_device(struct usb_device *udev) { struct acpi_device *adev; struct usb_port *port_dev; struct usb_hub *hub; if (!udev->parent) { /* * root hub is only child (_ADR=0) under its parent, the HC. * sysdev pointer is the HC as seen from firmware. */ adev = ACPI_COMPANION(udev->bus->sysdev); return acpi_find_child_device(adev, 0, false); } hub = usb_hub_to_struct_hub(udev->parent); if (!hub) return NULL; /* Tunneled USB3 devices depend on USB4 Host Interface, set device link to it */ if (udev->speed >= USB_SPEED_SUPER && udev->tunnel_mode != USB_LINK_NATIVE) usb_acpi_add_usb4_devlink(udev); /* * This is an embedded USB device connected to a port and such * devices share port's ACPI companion. */ port_dev = hub->ports[udev->portnum - 1]; return usb_acpi_get_companion_for_port(port_dev); } static struct acpi_device *usb_acpi_find_companion(struct device *dev) { /* * The USB hierarchy like following: * * Device (EHC1) * Device (HUBN) * Device (PR01) * Device (PR11) * Device (PR12) * Device (FN12) * Device (FN13) * Device (PR13) * ... * where HUBN is root hub, and PRNN are USB ports and devices * connected to them, and FNNN are individualk functions for * connected composite USB devices. PRNN and FNNN may contain * _CRS and other methods describing sideband resources for * the connected device. * * On the kernel side both root hub and embedded USB devices are * represented as instances of usb_device structure, and ports * are represented as usb_port structures, so the whole process * is split into 2 parts: finding companions for devices and * finding companions for ports. * * Note that we do not handle individual functions of composite * devices yet, for that we would need to assign companions to * devices corresponding to USB interfaces. */ if (is_usb_device(dev)) return usb_acpi_find_companion_for_device(to_usb_device(dev)); else if (is_usb_port(dev)) return usb_acpi_find_companion_for_port(to_usb_port(dev)); return NULL; } static bool usb_acpi_bus_match(struct device *dev) { return is_usb_device(dev) || is_usb_port(dev); } static struct acpi_bus_type usb_acpi_bus = { .name = "USB", .match = usb_acpi_bus_match, .find_companion = usb_acpi_find_companion, }; int usb_acpi_register(void) { return register_acpi_bus_type(&usb_acpi_bus); } void usb_acpi_unregister(void) { unregister_acpi_bus_type(&usb_acpi_bus); } |
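/*
 * Stand-alone sketch (simplified re-statement, not kernel code) of the
 * _UPC/_PLD decision table that usb_acpi_get_connect_type() above applies:
 * a connectable, user-visible port is hot-pluggable, a connectable but
 * hidden port is hard-wired, and a non-connectable port is unused. The
 * local enum only mirrors usb_port_connect_type for this example.
 */
#include <stdbool.h>
#include <stdio.h>

enum port_connect_type { UNKNOWN, HOT_PLUG, HARD_WIRED, NOT_USED };

static enum port_connect_type classify_port(bool upc_connectable,
					    bool have_pld, bool pld_visible)
{
	if (!upc_connectable)
		return NOT_USED;	/* _UPC: nothing can be attached */
	if (!have_pld)
		return UNKNOWN;		/* connectable but no location data */
	return pld_visible ? HOT_PLUG : HARD_WIRED;
}

int main(void)
{
	printf("connectable + visible -> %d (hot-plug)\n",
	       classify_port(true, true, true));
	printf("connectable + hidden  -> %d (hard-wired)\n",
	       classify_port(true, true, false));
	printf("not connectable       -> %d (not used)\n",
	       classify_port(false, true, true));
	return 0;
}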
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>
#include <linux/atomic.h>

#include <asm/asm.h>

typedef struct {
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))

static inline void local_inc(local_t *l)
{
	asm volatile(_ASM_INC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
	asm volatile(_ASM_DEC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
	asm volatile(_ASM_ADD "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
	asm volatile(_ASM_SUB "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_sub_and_test(long i, local_t *l)
{
	return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i);
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool local_dec_and_test(local_t *l)
{
	return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e);
}

/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_inc_and_test(local_t *l)
{
	return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e);
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
*/ static inline bool local_add_negative(long i, local_t *l) { return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i); } /** * local_add_return - add and return * @i: integer value to add * @l: pointer to type local_t * * Atomically adds @i to @l and returns @i + @l */ static inline long local_add_return(long i, local_t *l) { long __i = i; asm volatile(_ASM_XADD "%0, %1;" : "+r" (i), "+m" (l->a.counter) : : "memory"); return i + __i; } static inline long local_sub_return(long i, local_t *l) { return local_add_return(-i, l); } #define local_inc_return(l) (local_add_return(1, l)) #define local_dec_return(l) (local_sub_return(1, l)) static inline long local_cmpxchg(local_t *l, long old, long new) { return cmpxchg_local(&l->a.counter, old, new); } static inline bool local_try_cmpxchg(local_t *l, long *old, long new) { return try_cmpxchg_local(&l->a.counter, (typeof(l->a.counter) *) old, new); } /* * Implement local_xchg using CMPXCHG instruction without the LOCK prefix. * XCHG is expensive due to the implied LOCK prefix. The processor * cannot prefetch cachelines if XCHG is used. */ static __always_inline long local_xchg(local_t *l, long n) { long c = local_read(l); do { } while (!local_try_cmpxchg(l, &c, n)); return c; } /** * local_add_unless - add unless the number is already a given value * @l: pointer of type local_t * @a: the amount to add to l... * @u: ...unless l is equal to u. * * Atomically adds @a to @l, if @v was not already @u. * Returns true if the addition was done. */ static __always_inline bool local_add_unless(local_t *l, long a, long u) { long c = local_read(l); do { if (unlikely(c == u)) return false; } while (!local_try_cmpxchg(l, &c, c + a)); return true; } #define local_inc_not_zero(l) local_add_unless((l), 1, 0) /* On x86_32, these are no better than the atomic variants. * On x86-64 these are better than the atomic variants on SMP kernels * because they dont use a lock prefix. */ #define __local_inc(l) local_inc(l) #define __local_dec(l) local_dec(l) #define __local_add(i, l) local_add((i), (l)) #define __local_sub(i, l) local_sub((i), (l)) #endif /* _ASM_X86_LOCAL_H */ |
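/*
 * Illustration only: the read-then-try_cmpxchg retry loop used by
 * local_xchg() and local_add_unless() above, re-expressed with portable
 * C11 atomics so it can be built and run outside the kernel. The name
 * add_unless() is made up for this sketch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool add_unless(atomic_long *v, long a, long u)
{
	long c = atomic_load(v);

	do {
		if (c == u)	/* already at the excluded value */
			return false;
		/* on failure, c is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(v, &c, c + a));
	return true;
}

int main(void)
{
	atomic_long v;
	bool done;

	atomic_init(&v, 0);
	done = add_unless(&v, 1, 10);	/* succeeds: v becomes 1 */
	printf("%d %ld\n", done, atomic_load(&v));
	done = add_unless(&v, 1, 1);	/* refused: v already equals 1 */
	printf("%d %ld\n", done, atomic_load(&v));
	return 0;
}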
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 | /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef JPEG_H #define JPEG_H 1 /* * Insert a JPEG header at start of frame * * This module is used by the gspca subdrivers. * A special case is done for Conexant webcams. * * Copyright (C) Jean-Francois Moine (http://moinejf.free.fr) */ /* * generation options * CONEX_CAM Conexant if present */ /* JPEG header */ static const u8 jpeg_head[] = { 0xff, 0xd8, /* jpeg */ /* quantization table quality 50% */ 0xff, 0xdb, 0x00, 0x84, /* DQT */ 0, #define JPEG_QT0_OFFSET 7 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, 1, #define JPEG_QT1_OFFSET 72 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, /* huffman table */ 0xff, 0xc4, 0x01, 0xa2, 0x00, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x01, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x10, 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0x11, 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xa1, 
0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, #ifdef CONEX_CAM /* the Conexant frames start with SOF0 */ #define JPEG_HDR_SZ 556 #else 0xff, 0xc0, 0x00, 0x11, /* SOF0 (start of frame 0 */ 0x08, /* data precision */ #define JPEG_HEIGHT_OFFSET 561 0x01, 0xe0, /* height */ 0x02, 0x80, /* width */ 0x03, /* component number */ 0x01, 0x21, /* samples Y */ 0x00, /* quant Y */ 0x02, 0x11, 0x01, /* samples CbCr - quant CbCr */ 0x03, 0x11, 0x01, 0xff, 0xda, 0x00, 0x0c, /* SOS (start of scan) */ 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00 #define JPEG_HDR_SZ 589 #endif }; /* define the JPEG header */ static void jpeg_define(u8 *jpeg_hdr, int height, int width, int samplesY) { memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head); #ifndef CONEX_CAM jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8; jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height; jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8; jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width; jpeg_hdr[JPEG_HEIGHT_OFFSET + 6] = samplesY; #endif } /* set the JPEG quality */ static void jpeg_set_qual(u8 *jpeg_hdr, int quality) { int i, sc; if (quality <= 0) sc = 5000; else if (quality < 50) sc = 5000 / quality; else sc = 200 - quality * 2; for (i = 0; i < 64; i++) { jpeg_hdr[JPEG_QT0_OFFSET + i] = (jpeg_head[JPEG_QT0_OFFSET + i] * sc + 50) / 100; jpeg_hdr[JPEG_QT1_OFFSET + i] = (jpeg_head[JPEG_QT1_OFFSET + i] * sc + 50) / 100; } } #endif |
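/*
 * Small stand-alone demonstration (sketch, not part of jpeg.h) of the
 * quality-to-scale mapping used by jpeg_set_qual() above: quality < 50
 * scales by 5000/quality, quality >= 50 by 200 - 2*quality, and every
 * quantization entry becomes (base * sc + 50) / 100, so quality 50 leaves
 * the 50% base tables unchanged.
 */
#include <stdio.h>

static int jpeg_scale(int quality)
{
	if (quality <= 0)
		return 5000;
	if (quality < 50)
		return 5000 / quality;
	return 200 - quality * 2;
}

int main(void)
{
	const int base = 0x10;	/* first luma entry of jpeg_head's DQT */
	int q;

	for (q = 10; q <= 90; q += 20)
		printf("quality %2d: sc = %3d, entry 0x10 -> %d\n",
		       q, jpeg_scale(q), (base * jpeg_scale(q) + 50) / 100);
	return 0;
}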
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_lsm.h>
#include <linux/bpf_verifier.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

/*
 * cgroup bpf destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup bpf
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_bpf_destroy_wq;

static int __init cgroup_bpf_wq_init(void)
{
	cgroup_bpf_destroy_wq = alloc_workqueue("cgroup_bpf_destroy", 0, 1);
	if (!cgroup_bpf_destroy_wq)
		panic("Failed to alloc workqueue for cgroup bpf destroy.\n");
	return 0;
}
core_initcall(cgroup_bpf_wq_init);

/* __always_inline is necessary to prevent indirect call through run_prog
 * function pointer.
*/ static __always_inline int bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp, enum cgroup_bpf_attach_type atype, const void *ctx, bpf_prog_run_fn run_prog, int retval, u32 *ret_flags) { const struct bpf_prog_array_item *item; const struct bpf_prog *prog; const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx; struct bpf_cg_run_ctx run_ctx; u32 func_ret; run_ctx.retval = retval; migrate_disable(); rcu_read_lock(); array = rcu_dereference(cgrp->effective[atype]); item = &array->items[0]; old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; func_ret = run_prog(prog, ctx); if (ret_flags) { *(ret_flags) |= (func_ret >> 1); func_ret &= 1; } if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval)) run_ctx.retval = -EPERM; item++; } bpf_reset_run_ctx(old_run_ctx); rcu_read_unlock(); migrate_enable(); return run_ctx.retval; } unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx, const struct bpf_insn *insn) { const struct bpf_prog *shim_prog; struct sock *sk; struct cgroup *cgrp; int ret = 0; u64 *args; args = (u64 *)ctx; sk = (void *)(unsigned long)args[0]; /*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/ shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi)); cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); if (likely(cgrp)) ret = bpf_prog_run_array_cg(&cgrp->bpf, shim_prog->aux->cgroup_atype, ctx, bpf_prog_run, 0, NULL); return ret; } unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx, const struct bpf_insn *insn) { const struct bpf_prog *shim_prog; struct socket *sock; struct cgroup *cgrp; int ret = 0; u64 *args; args = (u64 *)ctx; sock = (void *)(unsigned long)args[0]; /*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/ shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi)); cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data); if (likely(cgrp)) ret = bpf_prog_run_array_cg(&cgrp->bpf, shim_prog->aux->cgroup_atype, ctx, bpf_prog_run, 0, NULL); return ret; } unsigned int __cgroup_bpf_run_lsm_current(const void *ctx, const struct bpf_insn *insn) { const struct bpf_prog *shim_prog; struct cgroup *cgrp; int ret = 0; /*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/ shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi)); /* We rely on trampoline's __bpf_prog_enter_lsm_cgroup to grab RCU read lock. 
*/ cgrp = task_dfl_cgroup(current); if (likely(cgrp)) ret = bpf_prog_run_array_cg(&cgrp->bpf, shim_prog->aux->cgroup_atype, ctx, bpf_prog_run, 0, NULL); return ret; } #ifdef CONFIG_BPF_LSM struct cgroup_lsm_atype { u32 attach_btf_id; int refcnt; }; static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM]; static enum cgroup_bpf_attach_type bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id) { int i; lockdep_assert_held(&cgroup_mutex); if (attach_type != BPF_LSM_CGROUP) return to_cgroup_bpf_attach_type(attach_type); for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++) if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id) return CGROUP_LSM_START + i; for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++) if (cgroup_lsm_atype[i].attach_btf_id == 0) return CGROUP_LSM_START + i; return -E2BIG; } void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) { int i = cgroup_atype - CGROUP_LSM_START; lockdep_assert_held(&cgroup_mutex); WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id && cgroup_lsm_atype[i].attach_btf_id != attach_btf_id); cgroup_lsm_atype[i].attach_btf_id = attach_btf_id; cgroup_lsm_atype[i].refcnt++; } void bpf_cgroup_atype_put(int cgroup_atype) { int i = cgroup_atype - CGROUP_LSM_START; cgroup_lock(); if (--cgroup_lsm_atype[i].refcnt <= 0) cgroup_lsm_atype[i].attach_btf_id = 0; WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0); cgroup_unlock(); } #else static enum cgroup_bpf_attach_type bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id) { if (attach_type != BPF_LSM_CGROUP) return to_cgroup_bpf_attach_type(attach_type); return -EOPNOTSUPP; } #endif /* CONFIG_BPF_LSM */ void cgroup_bpf_offline(struct cgroup *cgrp) { cgroup_get(cgrp); percpu_ref_kill(&cgrp->bpf.refcnt); } static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[]) { enum bpf_cgroup_storage_type stype; for_each_cgroup_storage_type(stype) bpf_cgroup_storage_free(storages[stype]); } static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[], struct bpf_cgroup_storage *new_storages[], enum bpf_attach_type type, struct bpf_prog *prog, struct cgroup *cgrp) { enum bpf_cgroup_storage_type stype; struct bpf_cgroup_storage_key key; struct bpf_map *map; key.cgroup_inode_id = cgroup_id(cgrp); key.attach_type = type; for_each_cgroup_storage_type(stype) { map = prog->aux->cgroup_storage[stype]; if (!map) continue; storages[stype] = cgroup_storage_lookup((void *)map, &key, false); if (storages[stype]) continue; storages[stype] = bpf_cgroup_storage_alloc(prog, stype); if (IS_ERR(storages[stype])) { bpf_cgroup_storages_free(new_storages); return -ENOMEM; } new_storages[stype] = storages[stype]; } return 0; } static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[], struct bpf_cgroup_storage *src[]) { enum bpf_cgroup_storage_type stype; for_each_cgroup_storage_type(stype) dst[stype] = src[stype]; } static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[], struct cgroup *cgrp, enum bpf_attach_type attach_type) { enum bpf_cgroup_storage_type stype; for_each_cgroup_storage_type(stype) bpf_cgroup_storage_link(storages[stype], cgrp, attach_type); } /* Called when bpf_cgroup_link is auto-detached from dying cgroup. * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It * doesn't free link memory, which will eventually be done by bpf_link's * release() callback, when its last FD is closed. 
*/ static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link) { cgroup_put(link->cgroup); link->cgroup = NULL; } /** * cgroup_bpf_release() - put references of all bpf programs and * release all cgroup bpf data * @work: work structure embedded into the cgroup to modify */ static void cgroup_bpf_release(struct work_struct *work) { struct cgroup *p, *cgrp = container_of(work, struct cgroup, bpf.release_work); struct bpf_prog_array *old_array; struct list_head *storages = &cgrp->bpf.storages; struct bpf_cgroup_storage *storage, *stmp; unsigned int atype; cgroup_lock(); for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) { struct hlist_head *progs = &cgrp->bpf.progs[atype]; struct bpf_prog_list *pl; struct hlist_node *pltmp; hlist_for_each_entry_safe(pl, pltmp, progs, node) { hlist_del(&pl->node); if (pl->prog) { if (pl->prog->expected_attach_type == BPF_LSM_CGROUP) bpf_trampoline_unlink_cgroup_shim(pl->prog); bpf_prog_put(pl->prog); } if (pl->link) { if (pl->link->link.prog->expected_attach_type == BPF_LSM_CGROUP) bpf_trampoline_unlink_cgroup_shim(pl->link->link.prog); bpf_cgroup_link_auto_detach(pl->link); } kfree(pl); static_branch_dec(&cgroup_bpf_enabled_key[atype]); } old_array = rcu_dereference_protected( cgrp->bpf.effective[atype], lockdep_is_held(&cgroup_mutex)); bpf_prog_array_free(old_array); } list_for_each_entry_safe(storage, stmp, storages, list_cg) { bpf_cgroup_storage_unlink(storage); bpf_cgroup_storage_free(storage); } cgroup_unlock(); for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) cgroup_bpf_put(p); percpu_ref_exit(&cgrp->bpf.refcnt); cgroup_put(cgrp); } /** * cgroup_bpf_release_fn() - callback used to schedule releasing * of bpf cgroup data * @ref: percpu ref counter structure */ static void cgroup_bpf_release_fn(struct percpu_ref *ref) { struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt); INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release); queue_work(cgroup_bpf_destroy_wq, &cgrp->bpf.release_work); } /* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through * link or direct prog. */ static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl) { if (pl->prog) return pl->prog; if (pl->link) return pl->link->link.prog; return NULL; } /* count number of elements in the list. * it's slow but the list cannot be long */ static u32 prog_list_length(struct hlist_head *head, int *preorder_cnt) { struct bpf_prog_list *pl; u32 cnt = 0; hlist_for_each_entry(pl, head, node) { if (!prog_list_prog(pl)) continue; if (preorder_cnt && (pl->flags & BPF_F_PREORDER)) (*preorder_cnt)++; cnt++; } return cnt; } /* if parent has non-overridable prog attached, * disallow attaching new programs to the descendent cgroup. * if parent has overridable or multi-prog, allow attaching */ static bool hierarchy_allows_attach(struct cgroup *cgrp, enum cgroup_bpf_attach_type atype) { struct cgroup *p; p = cgroup_parent(cgrp); if (!p) return true; do { u32 flags = p->bpf.flags[atype]; u32 cnt; if (flags & BPF_F_ALLOW_MULTI) return true; cnt = prog_list_length(&p->bpf.progs[atype], NULL); WARN_ON_ONCE(cnt > 1); if (cnt == 1) return !!(flags & BPF_F_ALLOW_OVERRIDE); p = cgroup_parent(p); } while (p); return true; } /* compute a chain of effective programs for a given cgroup: * start from the list of programs in this cgroup and add * all parent programs. 
* Note that parent's F_ALLOW_OVERRIDE-type program is yielding * to programs in this cgroup */ static int compute_effective_progs(struct cgroup *cgrp, enum cgroup_bpf_attach_type atype, struct bpf_prog_array **array) { struct bpf_prog_array_item *item; struct bpf_prog_array *progs; struct bpf_prog_list *pl; struct cgroup *p = cgrp; int i, j, cnt = 0, preorder_cnt = 0, fstart, bstart, init_bstart; /* count number of effective programs by walking parents */ do { if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) cnt += prog_list_length(&p->bpf.progs[atype], &preorder_cnt); p = cgroup_parent(p); } while (p); progs = bpf_prog_array_alloc(cnt, GFP_KERNEL); if (!progs) return -ENOMEM; /* populate the array with effective progs */ cnt = 0; p = cgrp; fstart = preorder_cnt; bstart = preorder_cnt - 1; do { if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) continue; init_bstart = bstart; hlist_for_each_entry(pl, &p->bpf.progs[atype], node) { if (!prog_list_prog(pl)) continue; if (pl->flags & BPF_F_PREORDER) { item = &progs->items[bstart]; bstart--; } else { item = &progs->items[fstart]; fstart++; } item->prog = prog_list_prog(pl); bpf_cgroup_storages_assign(item->cgroup_storage, pl->storage); cnt++; } /* reverse pre-ordering progs at this cgroup level */ for (i = bstart + 1, j = init_bstart; i < j; i++, j--) swap(progs->items[i], progs->items[j]); } while ((p = cgroup_parent(p))); *array = progs; return 0; } static void activate_effective_progs(struct cgroup *cgrp, enum cgroup_bpf_attach_type atype, struct bpf_prog_array *old_array) { old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array, lockdep_is_held(&cgroup_mutex)); /* free prog array after grace period, since __cgroup_bpf_run_*() * might be still walking the array */ bpf_prog_array_free(old_array); } /** * cgroup_bpf_inherit() - inherit effective programs from parent * @cgrp: the cgroup to modify */ int cgroup_bpf_inherit(struct cgroup *cgrp) { /* has to use marco instead of const int, since compiler thinks * that array below is variable length */ #define NR ARRAY_SIZE(cgrp->bpf.effective) struct bpf_prog_array *arrays[NR] = {}; struct cgroup *p; int ret, i; ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0, GFP_KERNEL); if (ret) return ret; for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) cgroup_bpf_get(p); for (i = 0; i < NR; i++) INIT_HLIST_HEAD(&cgrp->bpf.progs[i]); INIT_LIST_HEAD(&cgrp->bpf.storages); for (i = 0; i < NR; i++) if (compute_effective_progs(cgrp, i, &arrays[i])) goto cleanup; for (i = 0; i < NR; i++) activate_effective_progs(cgrp, i, arrays[i]); return 0; cleanup: for (i = 0; i < NR; i++) bpf_prog_array_free(arrays[i]); for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) cgroup_bpf_put(p); percpu_ref_exit(&cgrp->bpf.refcnt); return -ENOMEM; } static int update_effective_progs(struct cgroup *cgrp, enum cgroup_bpf_attach_type atype) { struct cgroup_subsys_state *css; int err; /* allocate and recompute effective prog arrays */ css_for_each_descendant_pre(css, &cgrp->self) { struct cgroup *desc = container_of(css, struct cgroup, self); if (percpu_ref_is_zero(&desc->bpf.refcnt)) continue; err = compute_effective_progs(desc, atype, &desc->bpf.inactive); if (err) goto cleanup; } /* all allocations were successful. 
Activate all prog arrays */ css_for_each_descendant_pre(css, &cgrp->self) { struct cgroup *desc = container_of(css, struct cgroup, self); if (percpu_ref_is_zero(&desc->bpf.refcnt)) { if (unlikely(desc->bpf.inactive)) { bpf_prog_array_free(desc->bpf.inactive); desc->bpf.inactive = NULL; } continue; } activate_effective_progs(desc, atype, desc->bpf.inactive); desc->bpf.inactive = NULL; } return 0; cleanup: /* oom while computing effective. Free all computed effective arrays * since they were not activated */ css_for_each_descendant_pre(css, &cgrp->self) { struct cgroup *desc = container_of(css, struct cgroup, self); bpf_prog_array_free(desc->bpf.inactive); desc->bpf.inactive = NULL; } return err; } #define BPF_CGROUP_MAX_PROGS 64 static struct bpf_prog_list *find_attach_entry(struct hlist_head *progs, struct bpf_prog *prog, struct bpf_cgroup_link *link, struct bpf_prog *replace_prog, bool allow_multi) { struct bpf_prog_list *pl; /* single-attach case */ if (!allow_multi) { if (hlist_empty(progs)) return NULL; return hlist_entry(progs->first, typeof(*pl), node); } hlist_for_each_entry(pl, progs, node) { if (prog && pl->prog == prog && prog != replace_prog) /* disallow attaching the same prog twice */ return ERR_PTR(-EINVAL); if (link && pl->link == link) /* disallow attaching the same link twice */ return ERR_PTR(-EINVAL); } /* direct prog multi-attach w/ replacement case */ if (replace_prog) { hlist_for_each_entry(pl, progs, node) { if (pl->prog == replace_prog) /* a match found */ return pl; } /* prog to replace not found for cgroup */ return ERR_PTR(-ENOENT); } return NULL; } /** * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and * propagate the change to descendants * @cgrp: The cgroup which descendants to traverse * @prog: A program to attach * @link: A link to attach * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set * @type: Type of attach operation * @flags: Option flags * * Exactly one of @prog or @link can be non-null. * Must be called with cgroup_mutex held. */ static int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, struct bpf_prog *replace_prog, struct bpf_cgroup_link *link, enum bpf_attach_type type, u32 flags) { u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)); struct bpf_prog *old_prog = NULL; struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; struct bpf_prog *new_prog = prog ? : link->link.prog; enum cgroup_bpf_attach_type atype; struct bpf_prog_list *pl; struct hlist_head *progs; int err; if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) || ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI))) /* invalid combination */ return -EINVAL; if (link && (prog || replace_prog)) /* only either link or prog/replace_prog can be specified */ return -EINVAL; if (!!replace_prog != !!(flags & BPF_F_REPLACE)) /* replace_prog implies BPF_F_REPLACE, and vice versa */ return -EINVAL; atype = bpf_cgroup_atype_find(type, new_prog->aux->attach_btf_id); if (atype < 0) return -EINVAL; progs = &cgrp->bpf.progs[atype]; if (!hierarchy_allows_attach(cgrp, atype)) return -EPERM; if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags) /* Disallow attaching non-overridable on top * of existing overridable in this cgroup. 
* Disallow attaching multi-prog if overridable or none */ return -EPERM; if (prog_list_length(progs, NULL) >= BPF_CGROUP_MAX_PROGS) return -E2BIG; pl = find_attach_entry(progs, prog, link, replace_prog, flags & BPF_F_ALLOW_MULTI); if (IS_ERR(pl)) return PTR_ERR(pl); if (bpf_cgroup_storages_alloc(storage, new_storage, type, prog ? : link->link.prog, cgrp)) return -ENOMEM; if (pl) { old_prog = pl->prog; } else { struct hlist_node *last = NULL; pl = kmalloc(sizeof(*pl), GFP_KERNEL); if (!pl) { bpf_cgroup_storages_free(new_storage); return -ENOMEM; } if (hlist_empty(progs)) hlist_add_head(&pl->node, progs); else hlist_for_each(last, progs) { if (last->next) continue; hlist_add_behind(&pl->node, last); break; } } pl->prog = prog; pl->link = link; pl->flags = flags; bpf_cgroup_storages_assign(pl->storage, storage); cgrp->bpf.flags[atype] = saved_flags; if (type == BPF_LSM_CGROUP) { err = bpf_trampoline_link_cgroup_shim(new_prog, atype); if (err) goto cleanup; } err = update_effective_progs(cgrp, atype); if (err) goto cleanup_trampoline; if (old_prog) { if (type == BPF_LSM_CGROUP) bpf_trampoline_unlink_cgroup_shim(old_prog); bpf_prog_put(old_prog); } else { static_branch_inc(&cgroup_bpf_enabled_key[atype]); } bpf_cgroup_storages_link(new_storage, cgrp, type); return 0; cleanup_trampoline: if (type == BPF_LSM_CGROUP) bpf_trampoline_unlink_cgroup_shim(new_prog); cleanup: if (old_prog) { pl->prog = old_prog; pl->link = NULL; } bpf_cgroup_storages_free(new_storage); if (!old_prog) { hlist_del(&pl->node); kfree(pl); } return err; } static int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, struct bpf_prog *replace_prog, struct bpf_cgroup_link *link, enum bpf_attach_type type, u32 flags) { int ret; cgroup_lock(); ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags); cgroup_unlock(); return ret; } /* Swap updated BPF program for given link in effective program arrays across * all descendant cgroups. This function is guaranteed to succeed. */ static void replace_effective_prog(struct cgroup *cgrp, enum cgroup_bpf_attach_type atype, struct bpf_cgroup_link *link) { struct bpf_prog_array_item *item; struct cgroup_subsys_state *css; struct bpf_prog_array *progs; struct bpf_prog_list *pl; struct hlist_head *head; struct cgroup *cg; int pos; css_for_each_descendant_pre(css, &cgrp->self) { struct cgroup *desc = container_of(css, struct cgroup, self); if (percpu_ref_is_zero(&desc->bpf.refcnt)) continue; /* find position of link in effective progs array */ for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) { if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) continue; head = &cg->bpf.progs[atype]; hlist_for_each_entry(pl, head, node) { if (!prog_list_prog(pl)) continue; if (pl->link == link) goto found; pos++; } } found: BUG_ON(!cg); progs = rcu_dereference_protected( desc->bpf.effective[atype], lockdep_is_held(&cgroup_mutex)); item = &progs->items[pos]; WRITE_ONCE(item->prog, link->link.prog); } } /** * __cgroup_bpf_replace() - Replace link's program and propagate the change * to descendants * @cgrp: The cgroup which descendants to traverse * @link: A link for which to replace BPF program * @new_prog: &struct bpf_prog for the target BPF program with its refcnt * incremented * * Must be called with cgroup_mutex held. 
*/ static int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link, struct bpf_prog *new_prog) { enum cgroup_bpf_attach_type atype; struct bpf_prog *old_prog; struct bpf_prog_list *pl; struct hlist_head *progs; bool found = false; atype = bpf_cgroup_atype_find(link->type, new_prog->aux->attach_btf_id); if (atype < 0) return -EINVAL; progs = &cgrp->bpf.progs[atype]; if (link->link.prog->type != new_prog->type) return -EINVAL; hlist_for_each_entry(pl, progs, node) { if (pl->link == link) { found = true; break; } } if (!found) return -ENOENT; old_prog = xchg(&link->link.prog, new_prog); replace_effective_prog(cgrp, atype, link); bpf_prog_put(old_prog); return 0; } static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog) { struct bpf_cgroup_link *cg_link; int ret; cg_link = container_of(link, struct bpf_cgroup_link, link); cgroup_lock(); /* link might have been auto-released by dying cgroup, so fail */ if (!cg_link->cgroup) { ret = -ENOLINK; goto out_unlock; } if (old_prog && link->prog != old_prog) { ret = -EPERM; goto out_unlock; } ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog); out_unlock: cgroup_unlock(); return ret; } static struct bpf_prog_list *find_detach_entry(struct hlist_head *progs, struct bpf_prog *prog, struct bpf_cgroup_link *link, bool allow_multi) { struct bpf_prog_list *pl; if (!allow_multi) { if (hlist_empty(progs)) /* report error when trying to detach and nothing is attached */ return ERR_PTR(-ENOENT); /* to maintain backward compatibility NONE and OVERRIDE cgroups * allow detaching with invalid FD (prog==NULL) in legacy mode */ return hlist_entry(progs->first, typeof(*pl), node); } if (!prog && !link) /* to detach MULTI prog the user has to specify valid FD * of the program or link to be detached */ return ERR_PTR(-EINVAL); /* find the prog or link and detach it */ hlist_for_each_entry(pl, progs, node) { if (pl->prog == prog && pl->link == link) return pl; } return ERR_PTR(-ENOENT); } /** * purge_effective_progs() - After compute_effective_progs fails to alloc new * cgrp->bpf.inactive table we can recover by * recomputing the array in place. 
* * @cgrp: The cgroup which descendants to travers * @prog: A program to detach or NULL * @link: A link to detach or NULL * @atype: Type of detach operation */ static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog, struct bpf_cgroup_link *link, enum cgroup_bpf_attach_type atype) { struct cgroup_subsys_state *css; struct bpf_prog_array *progs; struct bpf_prog_list *pl; struct hlist_head *head; struct cgroup *cg; int pos; /* recompute effective prog array in place */ css_for_each_descendant_pre(css, &cgrp->self) { struct cgroup *desc = container_of(css, struct cgroup, self); if (percpu_ref_is_zero(&desc->bpf.refcnt)) continue; /* find position of link or prog in effective progs array */ for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) { if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) continue; head = &cg->bpf.progs[atype]; hlist_for_each_entry(pl, head, node) { if (!prog_list_prog(pl)) continue; if (pl->prog == prog && pl->link == link) goto found; pos++; } } /* no link or prog match, skip the cgroup of this layer */ continue; found: progs = rcu_dereference_protected( desc->bpf.effective[atype], lockdep_is_held(&cgroup_mutex)); /* Remove the program from the array */ WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos), "Failed to purge a prog from array at index %d", pos); } } /** * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and * propagate the change to descendants * @cgrp: The cgroup which descendants to traverse * @prog: A program to detach or NULL * @link: A link to detach or NULL * @type: Type of detach operation * * At most one of @prog or @link can be non-NULL. * Must be called with cgroup_mutex held. */ static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, struct bpf_cgroup_link *link, enum bpf_attach_type type) { enum cgroup_bpf_attach_type atype; struct bpf_prog *old_prog; struct bpf_prog_list *pl; struct hlist_head *progs; u32 attach_btf_id = 0; u32 flags; if (prog) attach_btf_id = prog->aux->attach_btf_id; if (link) attach_btf_id = link->link.prog->aux->attach_btf_id; atype = bpf_cgroup_atype_find(type, attach_btf_id); if (atype < 0) return -EINVAL; progs = &cgrp->bpf.progs[atype]; flags = cgrp->bpf.flags[atype]; if (prog && link) /* only one of prog or link can be specified */ return -EINVAL; pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI); if (IS_ERR(pl)) return PTR_ERR(pl); /* mark it deleted, so it's ignored while recomputing effective */ old_prog = pl->prog; pl->prog = NULL; pl->link = NULL; if (update_effective_progs(cgrp, atype)) { /* if update effective array failed replace the prog with a dummy prog*/ pl->prog = old_prog; pl->link = link; purge_effective_progs(cgrp, old_prog, link, atype); } /* now can actually delete it from this cgroup list */ hlist_del(&pl->node); kfree(pl); if (hlist_empty(progs)) /* last program was detached, reset flags to zero */ cgrp->bpf.flags[atype] = 0; if (old_prog) { if (type == BPF_LSM_CGROUP) bpf_trampoline_unlink_cgroup_shim(old_prog); bpf_prog_put(old_prog); } static_branch_dec(&cgroup_bpf_enabled_key[atype]); return 0; } static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type) { int ret; cgroup_lock(); ret = __cgroup_bpf_detach(cgrp, prog, NULL, type); cgroup_unlock(); return ret; } /* Must be called with cgroup_mutex held to avoid races. 
*/ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr) { __u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags); bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE; __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); enum bpf_attach_type type = attr->query.attach_type; enum cgroup_bpf_attach_type from_atype, to_atype; enum cgroup_bpf_attach_type atype; struct bpf_prog_array *effective; int cnt, ret = 0, i; int total_cnt = 0; u32 flags; if (effective_query && prog_attach_flags) return -EINVAL; if (type == BPF_LSM_CGROUP) { if (!effective_query && attr->query.prog_cnt && prog_ids && !prog_attach_flags) return -EINVAL; from_atype = CGROUP_LSM_START; to_atype = CGROUP_LSM_END; flags = 0; } else { from_atype = to_cgroup_bpf_attach_type(type); if (from_atype < 0) return -EINVAL; to_atype = from_atype; flags = cgrp->bpf.flags[from_atype]; } for (atype = from_atype; atype <= to_atype; atype++) { if (effective_query) { effective = rcu_dereference_protected(cgrp->bpf.effective[atype], lockdep_is_held(&cgroup_mutex)); total_cnt += bpf_prog_array_length(effective); } else { total_cnt += prog_list_length(&cgrp->bpf.progs[atype], NULL); } } /* always output uattr->query.attach_flags as 0 during effective query */ flags = effective_query ? 0 : flags; if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) return -EFAULT; if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt))) return -EFAULT; if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt) /* return early if user requested only program count + flags */ return 0; if (attr->query.prog_cnt < total_cnt) { total_cnt = attr->query.prog_cnt; ret = -ENOSPC; } for (atype = from_atype; atype <= to_atype && total_cnt; atype++) { if (effective_query) { effective = rcu_dereference_protected(cgrp->bpf.effective[atype], lockdep_is_held(&cgroup_mutex)); cnt = min_t(int, bpf_prog_array_length(effective), total_cnt); ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt); } else { struct hlist_head *progs; struct bpf_prog_list *pl; struct bpf_prog *prog; u32 id; progs = &cgrp->bpf.progs[atype]; cnt = min_t(int, prog_list_length(progs, NULL), total_cnt); i = 0; hlist_for_each_entry(pl, progs, node) { prog = prog_list_prog(pl); id = prog->aux->id; if (copy_to_user(prog_ids + i, &id, sizeof(id))) return -EFAULT; if (++i == cnt) break; } if (prog_attach_flags) { flags = cgrp->bpf.flags[atype]; for (i = 0; i < cnt; i++) if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags))) return -EFAULT; prog_attach_flags += cnt; } } prog_ids += cnt; total_cnt -= cnt; } return ret; } static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr) { int ret; cgroup_lock(); ret = __cgroup_bpf_query(cgrp, attr, uattr); cgroup_unlock(); return ret; } int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog) { struct bpf_prog *replace_prog = NULL; struct cgroup *cgrp; int ret; cgrp = cgroup_get_from_fd(attr->target_fd); if (IS_ERR(cgrp)) return PTR_ERR(cgrp); if ((attr->attach_flags & BPF_F_ALLOW_MULTI) && (attr->attach_flags & BPF_F_REPLACE)) { replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype); if (IS_ERR(replace_prog)) { cgroup_put(cgrp); return PTR_ERR(replace_prog); } } ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL, attr->attach_type, attr->attach_flags); if (replace_prog) bpf_prog_put(replace_prog); cgroup_put(cgrp); 
return ret; } int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) { struct bpf_prog *prog; struct cgroup *cgrp; int ret; cgrp = cgroup_get_from_fd(attr->target_fd); if (IS_ERR(cgrp)) return PTR_ERR(cgrp); prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); if (IS_ERR(prog)) prog = NULL; ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type); if (prog) bpf_prog_put(prog); cgroup_put(cgrp); return ret; } static void bpf_cgroup_link_release(struct bpf_link *link) { struct bpf_cgroup_link *cg_link = container_of(link, struct bpf_cgroup_link, link); struct cgroup *cg; /* link might have been auto-detached by dying cgroup already, * in that case our work is done here */ if (!cg_link->cgroup) return; cgroup_lock(); /* re-check cgroup under lock again */ if (!cg_link->cgroup) { cgroup_unlock(); return; } WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link, cg_link->type)); if (cg_link->type == BPF_LSM_CGROUP) bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog); cg = cg_link->cgroup; cg_link->cgroup = NULL; cgroup_unlock(); cgroup_put(cg); } static void bpf_cgroup_link_dealloc(struct bpf_link *link) { struct bpf_cgroup_link *cg_link = container_of(link, struct bpf_cgroup_link, link); kfree(cg_link); } static int bpf_cgroup_link_detach(struct bpf_link *link) { bpf_cgroup_link_release(link); return 0; } static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link, struct seq_file *seq) { struct bpf_cgroup_link *cg_link = container_of(link, struct bpf_cgroup_link, link); u64 cg_id = 0; cgroup_lock(); if (cg_link->cgroup) cg_id = cgroup_id(cg_link->cgroup); cgroup_unlock(); seq_printf(seq, "cgroup_id:\t%llu\n" "attach_type:\t%d\n", cg_id, cg_link->type); } static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link, struct bpf_link_info *info) { struct bpf_cgroup_link *cg_link = container_of(link, struct bpf_cgroup_link, link); u64 cg_id = 0; cgroup_lock(); if (cg_link->cgroup) cg_id = cgroup_id(cg_link->cgroup); cgroup_unlock(); info->cgroup.cgroup_id = cg_id; info->cgroup.attach_type = cg_link->type; return 0; } static const struct bpf_link_ops bpf_cgroup_link_lops = { .release = bpf_cgroup_link_release, .dealloc = bpf_cgroup_link_dealloc, .detach = bpf_cgroup_link_detach, .update_prog = cgroup_bpf_replace, .show_fdinfo = bpf_cgroup_link_show_fdinfo, .fill_link_info = bpf_cgroup_link_fill_link_info, }; int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { struct bpf_link_primer link_primer; struct bpf_cgroup_link *link; struct cgroup *cgrp; int err; if (attr->link_create.flags) return -EINVAL; cgrp = cgroup_get_from_fd(attr->link_create.target_fd); if (IS_ERR(cgrp)) return PTR_ERR(cgrp); link = kzalloc(sizeof(*link), GFP_USER); if (!link) { err = -ENOMEM; goto out_put_cgroup; } bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops, prog); link->cgroup = cgrp; link->type = attr->link_create.attach_type; err = bpf_link_prime(&link->link, &link_primer); if (err) { kfree(link); goto out_put_cgroup; } err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type, BPF_F_ALLOW_MULTI); if (err) { bpf_link_cleanup(&link_primer); goto out_put_cgroup; } return bpf_link_settle(&link_primer); out_put_cgroup: cgroup_put(cgrp); return err; } int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { struct cgroup *cgrp; int ret; cgrp = cgroup_get_from_fd(attr->query.target_fd); if (IS_ERR(cgrp)) return PTR_ERR(cgrp); ret = cgroup_bpf_query(cgrp, attr, uattr); cgroup_put(cgrp); return ret; } 
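/*
 * Editor's note: a minimal user-space sketch (not part of this file) of the
 * attach path serviced by cgroup_bpf_link_attach() above.  It assumes an
 * already-loaded BPF_PROG_TYPE_CGROUP_SKB program fd and an open cgroup v2
 * directory fd; the function name and the chosen attach type are
 * illustrative only.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int cgroup_link_attach_example(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = cgroup_fd;
	attr.link_create.attach_type = BPF_CGROUP_INET_EGRESS;

	/*
	 * Returns a link fd on success; closing it (or the cgroup dying)
	 * triggers the detach/release paths implemented above.
	 */
	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}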
/** * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering * @sk: The socket sending or receiving traffic * @skb: The skb that is being sent or received * @atype: The type of program to be executed * * If no socket is passed, or the socket is not of type INET or INET6, * this function does nothing and returns 0. * * The program type passed in via @type must be suitable for network * filtering. No further check is performed to assert that. * * For egress packets, this function can return: * NET_XMIT_SUCCESS (0) - continue with packet output * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr * NET_XMIT_CN (2) - continue with packet output and notify TCP * to call cwr * -err - drop packet * * For ingress packets, this function will return -EPERM if any * attached program was found and if it returned != 1 during execution. * Otherwise 0 is returned. */ int __cgroup_bpf_run_filter_skb(struct sock *sk, struct sk_buff *skb, enum cgroup_bpf_attach_type atype) { unsigned int offset = -skb_network_offset(skb); struct sock *save_sk; void *saved_data_end; struct cgroup *cgrp; int ret; if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6) return 0; cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); save_sk = skb->sk; skb->sk = sk; __skb_push(skb, offset); /* compute pointers for the bpf prog */ bpf_compute_and_save_data_end(skb, &saved_data_end); if (atype == CGROUP_INET_EGRESS) { u32 flags = 0; bool cn; ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb, __bpf_prog_run_save_cb, 0, &flags); /* Return values of CGROUP EGRESS BPF programs are: * 0: drop packet * 1: keep packet * 2: drop packet and cn * 3: keep packet and cn * * The returned value is then converted to one of the NET_XMIT * or an error code that is then interpreted as drop packet * (and no cn): * 0: NET_XMIT_SUCCESS skb should be transmitted * 1: NET_XMIT_DROP skb should be dropped and cn * 2: NET_XMIT_CN skb should be transmitted and cn * 3: -err skb should be dropped */ cn = flags & BPF_RET_SET_CN; if (ret && !IS_ERR_VALUE((long)ret)) ret = -EFAULT; if (!ret) ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); else ret = (cn ? NET_XMIT_DROP : ret); } else { ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb, __bpf_prog_run_save_cb, 0, NULL); if (ret && !IS_ERR_VALUE((long)ret)) ret = -EFAULT; } bpf_restore_data_end(skb, saved_data_end); __skb_pull(skb, offset); skb->sk = save_sk; return ret; } EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb); /** * __cgroup_bpf_run_filter_sk() - Run a program on a sock * @sk: sock structure to manipulate * @atype: The type of program to be executed * * socket is passed is expected to be of type INET or INET6. * * The program type passed in via @type must be suitable for sock * filtering. No further check is performed to assert that. * * This function will return %-EPERM if any if an attached program was found * and if it returned != 1 during execution. In all other cases, 0 is returned. */ int __cgroup_bpf_run_filter_sk(struct sock *sk, enum cgroup_bpf_attach_type atype) { struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0, NULL); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk); /** * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and * provided by user sockaddr * @sk: sock struct that will use sockaddr * @uaddr: sockaddr struct provided by user * @uaddrlen: Pointer to the size of the sockaddr struct provided by user. It is * read-only for AF_INET[6] uaddr but can be modified for AF_UNIX * uaddr. 
* @atype: The type of program to be executed * @t_ctx: Pointer to attach type specific context * @flags: Pointer to u32 which contains higher bits of BPF program * return value (OR'ed together). * * socket is expected to be of type INET, INET6 or UNIX. * * This function will return %-EPERM if an attached program is found and * returned value != 1 during execution. In all other cases, 0 is returned. */ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, struct sockaddr *uaddr, int *uaddrlen, enum cgroup_bpf_attach_type atype, void *t_ctx, u32 *flags) { struct bpf_sock_addr_kern ctx = { .sk = sk, .uaddr = uaddr, .t_ctx = t_ctx, }; struct sockaddr_storage unspec; struct cgroup *cgrp; int ret; /* Check socket family since not all sockets represent network * endpoint (e.g. AF_UNIX). */ if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6 && sk->sk_family != AF_UNIX) return 0; if (!ctx.uaddr) { memset(&unspec, 0, sizeof(unspec)); ctx.uaddr = (struct sockaddr *)&unspec; ctx.uaddrlen = 0; } else { ctx.uaddrlen = *uaddrlen; } cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0, flags); if (!ret && uaddr) *uaddrlen = ctx.uaddrlen; return ret; } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr); /** * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock * @sk: socket to get cgroup from * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains * sk with connection information (IP addresses, etc.) May not contain * cgroup info if it is a req sock. * @atype: The type of program to be executed * * socket passed is expected to be of type INET or INET6. * * The program type passed in via @type must be suitable for sock_ops * filtering. No further check is performed to assert that. * * This function will return %-EPERM if any if an attached program was found * and if it returned != 1 during execution. In all other cases, 0 is returned. */ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, struct bpf_sock_ops_kern *sock_ops, enum cgroup_bpf_attach_type atype) { struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run, 0, NULL); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops); int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, short access, enum cgroup_bpf_attach_type atype) { struct cgroup *cgrp; struct bpf_cgroup_dev_ctx ctx = { .access_type = (access << 16) | dev_type, .major = major, .minor = minor, }; int ret; rcu_read_lock(); cgrp = task_dfl_cgroup(current); ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0, NULL); rcu_read_unlock(); return ret; } BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) { /* flags argument is not used now, * but provides an ability to extend the API. * verifier checks that its value is correct. 
*/ enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); struct bpf_cgroup_storage *storage; struct bpf_cg_run_ctx *ctx; void *ptr; /* get current cgroup storage from BPF run context */ ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx); storage = ctx->prog_item->cgroup_storage[stype]; if (stype == BPF_CGROUP_STORAGE_SHARED) ptr = &READ_ONCE(storage->buf)->data[0]; else ptr = this_cpu_ptr(storage->percpu_buf); return (unsigned long)ptr; } const struct bpf_func_proto bpf_get_local_storage_proto = { .func = bpf_get_local_storage, .gpl_only = false, .ret_type = RET_PTR_TO_MAP_VALUE, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_ANYTHING, }; BPF_CALL_0(bpf_get_retval) { struct bpf_cg_run_ctx *ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx); return ctx->retval; } const struct bpf_func_proto bpf_get_retval_proto = { .func = bpf_get_retval, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_1(bpf_set_retval, int, retval) { struct bpf_cg_run_ctx *ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx); ctx->retval = retval; return 0; } const struct bpf_func_proto bpf_set_retval_proto = { .func = bpf_set_retval, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, }; static const struct bpf_func_proto * cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { const struct bpf_func_proto *func_proto; func_proto = cgroup_common_func_proto(func_id, prog); if (func_proto) return func_proto; func_proto = cgroup_current_func_proto(func_id, prog); if (func_proto) return func_proto; switch (func_id) { case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; default: return bpf_base_func_proto(func_id, prog); } } static bool cgroup_dev_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (type == BPF_WRITE) return false; if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx)) return false; /* The verifier guarantees that size > 0. */ if (off % size != 0) return false; switch (off) { case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type): bpf_ctx_record_field_size(info, size_default); if (!bpf_ctx_narrow_access_ok(off, size, size_default)) return false; break; default: if (size != size_default) return false; } return true; } const struct bpf_prog_ops cg_dev_prog_ops = { }; const struct bpf_verifier_ops cg_dev_verifier_ops = { .get_func_proto = cgroup_dev_func_proto, .is_valid_access = cgroup_dev_is_valid_access, }; /** * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl * * @head: sysctl table header * @table: sysctl table * @write: sysctl is being read (= 0) or written (= 1) * @buf: pointer to buffer (in and out) * @pcount: value-result argument: value is size of buffer pointed to by @buf, * result is size of @new_buf if program set new value, initial value * otherwise * @ppos: value-result argument: value is position at which read from or write * to sysctl is happening, result is new position if program overrode it, * initial value otherwise * @atype: type of program to be executed * * Program is run when sysctl is being accessed, either read or written, and * can allow or deny such access. * * This function will return %-EPERM if an attached program is found and * returned value != 1 during execution. In all other cases 0 is returned. 
*/ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, const struct ctl_table *table, int write, char **buf, size_t *pcount, loff_t *ppos, enum cgroup_bpf_attach_type atype) { struct bpf_sysctl_kern ctx = { .head = head, .table = table, .write = write, .ppos = ppos, .cur_val = NULL, .cur_len = PAGE_SIZE, .new_val = NULL, .new_len = 0, .new_updated = 0, }; struct cgroup *cgrp; loff_t pos = 0; int ret; ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL); if (!ctx.cur_val || table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) { /* Let BPF program decide how to proceed. */ ctx.cur_len = 0; } if (write && *buf && *pcount) { /* BPF program should be able to override new value with a * buffer bigger than provided by user. */ ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL); ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount); if (ctx.new_val) { memcpy(ctx.new_val, *buf, ctx.new_len); } else { /* Let BPF program decide how to proceed. */ ctx.new_len = 0; } } rcu_read_lock(); cgrp = task_dfl_cgroup(current); ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0, NULL); rcu_read_unlock(); kfree(ctx.cur_val); if (ret == 1 && ctx.new_updated) { kfree(*buf); *buf = ctx.new_val; *pcount = ctx.new_len; } else { kfree(ctx.new_val); } return ret; } #ifdef CONFIG_NET static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen, struct bpf_sockopt_buf *buf) { if (unlikely(max_optlen < 0)) return -EINVAL; if (unlikely(max_optlen > PAGE_SIZE)) { /* We don't expose optvals that are greater than PAGE_SIZE * to the BPF program. */ max_optlen = PAGE_SIZE; } if (max_optlen <= sizeof(buf->data)) { /* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE * bytes avoid the cost of kzalloc. */ ctx->optval = buf->data; ctx->optval_end = ctx->optval + max_optlen; return max_optlen; } ctx->optval = kzalloc(max_optlen, GFP_USER); if (!ctx->optval) return -ENOMEM; ctx->optval_end = ctx->optval + max_optlen; return max_optlen; } static void sockopt_free_buf(struct bpf_sockopt_kern *ctx, struct bpf_sockopt_buf *buf) { if (ctx->optval == buf->data) return; kfree(ctx->optval); } static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx, struct bpf_sockopt_buf *buf) { return ctx->optval != buf->data; } int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, int *optname, sockptr_t optval, int *optlen, char **kernel_optval) { struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); struct bpf_sockopt_buf buf = {}; struct bpf_sockopt_kern ctx = { .sk = sk, .level = *level, .optname = *optname, }; int ret, max_optlen; /* Allocate a bit more than the initial user buffer for * BPF program. The canonical use case is overriding * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic). 
*/ max_optlen = max_t(int, 16, *optlen); max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf); if (max_optlen < 0) return max_optlen; ctx.optlen = *optlen; if (copy_from_sockptr(ctx.optval, optval, min(*optlen, max_optlen))) { ret = -EFAULT; goto out; } lock_sock(sk); ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT, &ctx, bpf_prog_run, 0, NULL); release_sock(sk); if (ret) goto out; if (ctx.optlen == -1) { /* optlen set to -1, bypass kernel */ ret = 1; } else if (ctx.optlen > max_optlen || ctx.optlen < -1) { /* optlen is out of bounds */ if (*optlen > PAGE_SIZE && ctx.optlen >= 0) { pr_info_once("bpf setsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n", ctx.optlen, max_optlen); ret = 0; goto out; } ret = -EFAULT; } else { /* optlen within bounds, run kernel handler */ ret = 0; /* export any potential modifications */ *level = ctx.level; *optname = ctx.optname; /* optlen == 0 from BPF indicates that we should * use original userspace data. */ if (ctx.optlen != 0) { *optlen = ctx.optlen; /* We've used bpf_sockopt_kern->buf as an intermediary * storage, but the BPF program indicates that we need * to pass this data to the kernel setsockopt handler. * No way to export on-stack buf, have to allocate a * new buffer. */ if (!sockopt_buf_allocated(&ctx, &buf)) { void *p = kmalloc(ctx.optlen, GFP_USER); if (!p) { ret = -ENOMEM; goto out; } memcpy(p, ctx.optval, ctx.optlen); *kernel_optval = p; } else { *kernel_optval = ctx.optval; } /* export and don't free sockopt buf */ return 0; } } out: sockopt_free_buf(&ctx, &buf); return ret; } int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, int optname, sockptr_t optval, sockptr_t optlen, int max_optlen, int retval) { struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); struct bpf_sockopt_buf buf = {}; struct bpf_sockopt_kern ctx = { .sk = sk, .level = level, .optname = optname, .current_task = current, }; int orig_optlen; int ret; orig_optlen = max_optlen; ctx.optlen = max_optlen; max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf); if (max_optlen < 0) return max_optlen; if (!retval) { /* If kernel getsockopt finished successfully, * copy whatever was returned to the user back * into our temporary buffer. Set optlen to the * one that kernel returned as well to let * BPF programs inspect the value. 
*/ if (copy_from_sockptr(&ctx.optlen, optlen, sizeof(ctx.optlen))) { ret = -EFAULT; goto out; } if (ctx.optlen < 0) { ret = -EFAULT; goto out; } orig_optlen = ctx.optlen; if (copy_from_sockptr(ctx.optval, optval, min(ctx.optlen, max_optlen))) { ret = -EFAULT; goto out; } } lock_sock(sk); ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT, &ctx, bpf_prog_run, retval, NULL); release_sock(sk); if (ret < 0) goto out; if (!sockptr_is_null(optval) && (ctx.optlen > max_optlen || ctx.optlen < 0)) { if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) { pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n", ctx.optlen, max_optlen); ret = retval; goto out; } ret = -EFAULT; goto out; } if (ctx.optlen != 0) { if (!sockptr_is_null(optval) && copy_to_sockptr(optval, ctx.optval, ctx.optlen)) { ret = -EFAULT; goto out; } if (copy_to_sockptr(optlen, &ctx.optlen, sizeof(ctx.optlen))) { ret = -EFAULT; goto out; } } out: sockopt_free_buf(&ctx, &buf); return ret; } int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, int optname, void *optval, int *optlen, int retval) { struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); struct bpf_sockopt_kern ctx = { .sk = sk, .level = level, .optname = optname, .optlen = *optlen, .optval = optval, .optval_end = optval + *optlen, .current_task = current, }; int ret; /* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy * user data back into BPF buffer when reval != 0. This is * done as an optimization to avoid extra copy, assuming * kernel won't populate the data in case of an error. * Here we always pass the data and memset() should * be called if that data shouldn't be "exported". */ ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT, &ctx, bpf_prog_run, retval, NULL); if (ret < 0) return ret; if (ctx.optlen > *optlen) return -EFAULT; /* BPF programs can shrink the buffer, export the modifications. */ if (ctx.optlen != 0) *optlen = ctx.optlen; return ret; } #endif static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp, size_t *lenp) { ssize_t tmp_ret = 0, ret; if (dir->header.parent) { tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp); if (tmp_ret < 0) return tmp_ret; } ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp); if (ret < 0) return ret; *bufp += ret; *lenp -= ret; ret += tmp_ret; /* Avoid leading slash. */ if (!ret) return ret; tmp_ret = strscpy(*bufp, "/", *lenp); if (tmp_ret < 0) return tmp_ret; *bufp += tmp_ret; *lenp -= tmp_ret; return ret + tmp_ret; } BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf, size_t, buf_len, u64, flags) { ssize_t tmp_ret = 0, ret; if (!buf) return -EINVAL; if (!(flags & BPF_F_SYSCTL_BASE_NAME)) { if (!ctx->head) return -EINVAL; tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len); if (tmp_ret < 0) return tmp_ret; } ret = strscpy(buf, ctx->table->procname, buf_len); return ret < 0 ? 
ret : tmp_ret + ret; } static const struct bpf_func_proto bpf_sysctl_get_name_proto = { .func = bpf_sysctl_get_name, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; static int copy_sysctl_value(char *dst, size_t dst_len, char *src, size_t src_len) { if (!dst) return -EINVAL; if (!dst_len) return -E2BIG; if (!src || !src_len) { memset(dst, 0, dst_len); return -EINVAL; } memcpy(dst, src, min(dst_len, src_len)); if (dst_len > src_len) { memset(dst + src_len, '\0', dst_len - src_len); return src_len; } dst[dst_len - 1] = '\0'; return -E2BIG; } BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx, char *, buf, size_t, buf_len) { return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len); } static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = { .func = bpf_sysctl_get_current_value, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg3_type = ARG_CONST_SIZE, }; BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf, size_t, buf_len) { if (!ctx->write) { if (buf && buf_len) memset(buf, '\0', buf_len); return -EINVAL; } return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len); } static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = { .func = bpf_sysctl_get_new_value, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg3_type = ARG_CONST_SIZE, }; BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx, const char *, buf, size_t, buf_len) { if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len) return -EINVAL; if (buf_len > PAGE_SIZE - 1) return -E2BIG; memcpy(ctx->new_val, buf, buf_len); ctx->new_len = buf_len; ctx->new_updated = 1; return 0; } static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = { .func = bpf_sysctl_set_new_value, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, }; static const struct bpf_func_proto * sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { const struct bpf_func_proto *func_proto; func_proto = cgroup_common_func_proto(func_id, prog); if (func_proto) return func_proto; func_proto = cgroup_current_func_proto(func_id, prog); if (func_proto) return func_proto; switch (func_id) { case BPF_FUNC_sysctl_get_name: return &bpf_sysctl_get_name_proto; case BPF_FUNC_sysctl_get_current_value: return &bpf_sysctl_get_current_value_proto; case BPF_FUNC_sysctl_get_new_value: return &bpf_sysctl_get_new_value_proto; case BPF_FUNC_sysctl_set_new_value: return &bpf_sysctl_set_new_value_proto; case BPF_FUNC_ktime_get_coarse_ns: return &bpf_ktime_get_coarse_ns_proto; case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; default: return bpf_base_func_proto(func_id, prog); } } static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size) return false; switch (off) { case bpf_ctx_range(struct bpf_sysctl, write): if (type != BPF_READ) return false; bpf_ctx_record_field_size(info, size_default); return bpf_ctx_narrow_access_ok(off, size, size_default); case bpf_ctx_range(struct bpf_sysctl, file_pos): if (type == 
BPF_READ) { bpf_ctx_record_field_size(info, size_default); return bpf_ctx_narrow_access_ok(off, size, size_default); } else { return size == size_default; } default: return false; } } static u32 sysctl_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; u32 read_size; switch (si->off) { case offsetof(struct bpf_sysctl, write): *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct bpf_sysctl_kern, write, sizeof_field(struct bpf_sysctl_kern, write), target_size)); break; case offsetof(struct bpf_sysctl, file_pos): /* ppos is a pointer so it should be accessed via indirect * loads and stores. Also for stores additional temporary * register is used since neither src_reg nor dst_reg can be * overridden. */ if (type == BPF_WRITE) { int treg = BPF_REG_9; if (si->src_reg == treg || si->dst_reg == treg) --treg; if (si->src_reg == treg || si->dst_reg == treg) --treg; *insn++ = BPF_STX_MEM( BPF_DW, si->dst_reg, treg, offsetof(struct bpf_sysctl_kern, tmp_reg)); *insn++ = BPF_LDX_MEM( BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos), treg, si->dst_reg, offsetof(struct bpf_sysctl_kern, ppos)); *insn++ = BPF_RAW_INSN( BPF_CLASS(si->code) | BPF_MEM | BPF_SIZEOF(u32), treg, si->src_reg, bpf_ctx_narrow_access_offset( 0, sizeof(u32), sizeof(loff_t)), si->imm); *insn++ = BPF_LDX_MEM( BPF_DW, treg, si->dst_reg, offsetof(struct bpf_sysctl_kern, tmp_reg)); } else { *insn++ = BPF_LDX_MEM( BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos), si->dst_reg, si->src_reg, offsetof(struct bpf_sysctl_kern, ppos)); read_size = bpf_size_to_bytes(BPF_SIZE(si->code)); *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->dst_reg, bpf_ctx_narrow_access_offset( 0, read_size, sizeof(loff_t))); } *target_size = sizeof(u32); break; } return insn - insn_buf; } const struct bpf_verifier_ops cg_sysctl_verifier_ops = { .get_func_proto = sysctl_func_proto, .is_valid_access = sysctl_is_valid_access, .convert_ctx_access = sysctl_convert_ctx_access, }; const struct bpf_prog_ops cg_sysctl_prog_ops = { }; #ifdef CONFIG_NET BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx) { const struct net *net = ctx ? 
sock_net(ctx->sk) : &init_net; return net->net_cookie; } static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = { .func = bpf_get_netns_cookie_sockopt, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX_OR_NULL, }; #endif static const struct bpf_func_proto * cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { const struct bpf_func_proto *func_proto; func_proto = cgroup_common_func_proto(func_id, prog); if (func_proto) return func_proto; func_proto = cgroup_current_func_proto(func_id, prog); if (func_proto) return func_proto; switch (func_id) { #ifdef CONFIG_NET case BPF_FUNC_get_netns_cookie: return &bpf_get_netns_cookie_sockopt_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; case BPF_FUNC_setsockopt: if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT) return &bpf_sk_setsockopt_proto; return NULL; case BPF_FUNC_getsockopt: if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT) return &bpf_sk_getsockopt_proto; return NULL; #endif #ifdef CONFIG_INET case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; #endif case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; default: return bpf_base_func_proto(func_id, prog); } } static bool cg_sockopt_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct bpf_sockopt)) return false; if (off % size != 0) return false; if (type == BPF_WRITE) { switch (off) { case offsetof(struct bpf_sockopt, retval): if (size != size_default) return false; return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT; case offsetof(struct bpf_sockopt, optname): fallthrough; case offsetof(struct bpf_sockopt, level): if (size != size_default) return false; return prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT; case offsetof(struct bpf_sockopt, optlen): return size == size_default; default: return false; } } switch (off) { case offsetof(struct bpf_sockopt, sk): if (size != sizeof(__u64)) return false; info->reg_type = PTR_TO_SOCKET; break; case offsetof(struct bpf_sockopt, optval): if (size != sizeof(__u64)) return false; info->reg_type = PTR_TO_PACKET; break; case offsetof(struct bpf_sockopt, optval_end): if (size != sizeof(__u64)) return false; info->reg_type = PTR_TO_PACKET_END; break; case offsetof(struct bpf_sockopt, retval): if (size != size_default) return false; return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT; default: if (size != size_default) return false; break; } return true; } #define CG_SOCKOPT_READ_FIELD(F) \ BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \ si->dst_reg, si->src_reg, \ offsetof(struct bpf_sockopt_kern, F)) #define CG_SOCKOPT_WRITE_FIELD(F) \ BPF_RAW_INSN((BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F) | \ BPF_MEM | BPF_CLASS(si->code)), \ si->dst_reg, si->src_reg, \ offsetof(struct bpf_sockopt_kern, F), \ si->imm) static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; switch (si->off) { case offsetof(struct bpf_sockopt, sk): *insn++ = CG_SOCKOPT_READ_FIELD(sk); break; case offsetof(struct bpf_sockopt, level): if (type == BPF_WRITE) *insn++ = CG_SOCKOPT_WRITE_FIELD(level); else *insn++ = CG_SOCKOPT_READ_FIELD(level); break; case offsetof(struct bpf_sockopt, 
optname): if (type == BPF_WRITE) *insn++ = CG_SOCKOPT_WRITE_FIELD(optname); else *insn++ = CG_SOCKOPT_READ_FIELD(optname); break; case offsetof(struct bpf_sockopt, optlen): if (type == BPF_WRITE) *insn++ = CG_SOCKOPT_WRITE_FIELD(optlen); else *insn++ = CG_SOCKOPT_READ_FIELD(optlen); break; case offsetof(struct bpf_sockopt, retval): BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0); if (type == BPF_WRITE) { int treg = BPF_REG_9; if (si->src_reg == treg || si->dst_reg == treg) --treg; if (si->src_reg == treg || si->dst_reg == treg) --treg; *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg, offsetof(struct bpf_sockopt_kern, tmp_reg)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task), treg, si->dst_reg, offsetof(struct bpf_sockopt_kern, current_task)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx), treg, treg, offsetof(struct task_struct, bpf_ctx)); *insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_MEM | BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval), treg, si->src_reg, offsetof(struct bpf_cg_run_ctx, retval), si->imm); *insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg, offsetof(struct bpf_sockopt_kern, tmp_reg)); } else { *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task), si->dst_reg, si->src_reg, offsetof(struct bpf_sockopt_kern, current_task)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx), si->dst_reg, si->dst_reg, offsetof(struct task_struct, bpf_ctx)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval), si->dst_reg, si->dst_reg, offsetof(struct bpf_cg_run_ctx, retval)); } break; case offsetof(struct bpf_sockopt, optval): *insn++ = CG_SOCKOPT_READ_FIELD(optval); break; case offsetof(struct bpf_sockopt, optval_end): *insn++ = CG_SOCKOPT_READ_FIELD(optval_end); break; } return insn - insn_buf; } static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog) { /* Nothing to do for sockopt argument. The data is kzalloc'ated. */ return 0; } const struct bpf_verifier_ops cg_sockopt_verifier_ops = { .get_func_proto = cg_sockopt_func_proto, .is_valid_access = cg_sockopt_is_valid_access, .convert_ctx_access = cg_sockopt_convert_ctx_access, .gen_prologue = cg_sockopt_get_prologue, }; const struct bpf_prog_ops cg_sockopt_prog_ops = { }; /* Common helpers for cgroup hooks. 
 */
const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_retval:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
		case BPF_CGROUP_SOCK_OPS:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
		case BPF_CGROUP_UNIX_RECVMSG:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_UNIX_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UNIX_GETSOCKNAME:
			return NULL;
		default:
			return &bpf_get_retval_proto;
		}
	case BPF_FUNC_set_retval:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
		case BPF_CGROUP_SOCK_OPS:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
		case BPF_CGROUP_UNIX_RECVMSG:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_UNIX_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UNIX_GETSOCKNAME:
			return NULL;
		default:
			return &bpf_set_retval_proto;
		}
	default:
		return NULL;
	}
}

/* Common helpers for cgroup hooks with valid process context. */
const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
#ifdef CONFIG_CGROUP_NET_CLASSID
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_curr_proto;
#endif
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	default:
		return NULL;
	}
}
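/*
 * Editor's note: an illustrative BPF-side sketch (not from this file) of a
 * cgroup setsockopt hook, exercising helpers whose availability is decided by
 * cgroup_common_func_proto() and cgroup_current_func_proto() above.  The
 * policy (refusing SO_MARK for unprivileged callers) and all names are
 * invented for the example; it is written against libbpf's bpf_helpers.h.
 */
#include <linux/bpf.h>
#include <linux/errno.h>
#include <bpf/bpf_helpers.h>

#ifndef SOL_SOCKET
#define SOL_SOCKET	1	/* value from include/linux/socket.h */
#endif
#ifndef SO_MARK
#define SO_MARK		36	/* value from include/uapi/asm-generic/socket.h */
#endif

SEC("cgroup/setsockopt")
int deny_so_mark(struct bpf_sockopt *ctx)
{
	__u32 uid = (__u32)bpf_get_current_uid_gid();

	if (ctx->level == SOL_SOCKET && ctx->optname == SO_MARK && uid != 0) {
		bpf_set_retval(-EPERM);	/* errno surfaced to the caller */
		return 0;		/* 0 = reject this setsockopt() */
	}
	return 1;			/* 1 = let the kernel proceed */
}

char _license[] SEC("license") = "GPL";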
// SPDX-License-Identifier: GPL-2.0-only
/* dummy.c: a dummy net driver

	The purpose of this driver is to provide a device to point a
	route through, but not to actually transmit packets.

	Why?  If you have a machine whose only connection is an occasional
	PPP/SLIP/PLIP link, you can only connect to your own hostname
	when the link is up.  Otherwise you have to use localhost.
	This isn't very consistent.

	One solution is to set up a dummy link using PPP/SLIP/PLIP,
	but this seems (to me) too much overhead for too little gain.
	This driver provides a small alternative. Thus you can do

	[when not running slip]
		ifconfig dummy slip.addr.ess.here up
	[to go to slip]
		ifconfig dummy down
		dip whatever

	This was written by looking at Donald Becker's skeleton driver
	and the loopback driver.  I then threw away anything that didn't
	apply!  Thanks to Alan Cox for the key clue on what to do with
	misguided packets.

			Nick Holloway, 27th May 1994
	[I tweaked this explanation a little but that's all]
			Alan Cox, 30th May 1994
*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <linux/net_tstamp.h>
#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>

#define DRV_NAME	"dummy"

static int numdummies = 1;

/* fake multicast ability */
static void set_multicast_list(struct net_device *dev)
{
}

static void dummy_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	dev_lstats_read(dev, &stats->tx_packets, &stats->tx_bytes);
}

static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_lstats_add(dev, skb->len);

	skb_tx_timestamp(skb);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int dummy_dev_init(struct net_device *dev)
{
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;

	netdev_lockdep_set_classes(dev);

	return 0;
}

static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops dummy_netdev_ops = {
	.ndo_init		= dummy_dev_init,
	.ndo_start_xmit		= dummy_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= dummy_get_stats64,
	.ndo_change_carrier	= dummy_change_carrier,
};

static const struct ethtool_ops dummy_ethtool_ops = {
	.get_ts_info		= ethtool_op_get_ts_info,
};

static void dummy_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &dummy_netdev_ops;
	dev->ethtool_ops = &dummy_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->request_ops_lock = true;

	/* Fill in device structure with ethernet-generic values.
	 */
	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	dev->lltx = true;
	dev->features	|= NETIF_F_SG | NETIF_F_FRAGLIST;
	dev->features	|= NETIF_F_GSO_SOFTWARE;
	dev->features	|= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
	dev->features	|= NETIF_F_GSO_ENCAP_ALL;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	eth_hw_addr_random(dev);

	dev->min_mtu = 0;
	dev->max_mtu = 0;
}

static int dummy_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops dummy_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.setup		= dummy_setup,
	.validate	= dummy_validate,
};

/* Number of dummy devices to be set up by this module. */
module_param(numdummies, int, 0);
MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");

static int __init dummy_init_one(void)
{
	struct net_device *dev_dummy;
	int err;

	dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_ENUM, dummy_setup);
	if (!dev_dummy)
		return -ENOMEM;

	dev_dummy->rtnl_link_ops = &dummy_link_ops;
	err = register_netdevice(dev_dummy);
	if (err < 0)
		goto err;
	return 0;

err:
	free_netdev(dev_dummy);
	return err;
}

static int __init dummy_init_module(void)
{
	int i, err = 0;

	err = rtnl_link_register(&dummy_link_ops);
	if (err < 0)
		return err;

	rtnl_net_lock(&init_net);

	for (i = 0; i < numdummies && !err; i++) {
		err = dummy_init_one();
		cond_resched();
	}

	rtnl_net_unlock(&init_net);
	if (err < 0)
		rtnl_link_unregister(&dummy_link_ops);

	return err;
}

static void __exit dummy_cleanup_module(void)
{
	rtnl_link_unregister(&dummy_link_ops);
}

module_init(dummy_init_module);
module_exit(dummy_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dummy netdevice driver which discards all packets sent to it");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
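/*
 * Editor's note: a rough user-space sketch (not part of dummy.c) of what
 * "ip link add dummy0 type dummy" sends over rtnetlink; the request reaches
 * dummy_link_ops above via IFLA_INFO_KIND == "dummy".  The buffer sizing and
 * the put_attr() helper are ad hoc for the example, and a real tool would
 * also read back the netlink ACK (libnl/libmnl or iproute2 do this properly).
 */
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void put_attr(struct nlmsghdr *nlh, unsigned short type,
		     const void *data, int len)
{
	struct rtattr *rta = (struct rtattr *)((char *)nlh +
					       NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	if (len)
		memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

static int add_dummy_example(const char *name)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifi;	/* left zeroed: AF_UNSPEC, no ifindex */
		char attrs[256];
	} req = { 0 };
	struct nlmsghdr *nlh = &req.nlh;
	struct rtattr *linkinfo;
	int fd, ret;

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	nlh->nlmsg_type = RTM_NEWLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;

	put_attr(nlh, IFLA_IFNAME, name, strlen(name) + 1);

	/* nested IFLA_LINKINFO { IFLA_INFO_KIND = "dummy" } selects the driver */
	linkinfo = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));
	put_attr(nlh, IFLA_LINKINFO, NULL, 0);
	put_attr(nlh, IFLA_INFO_KIND, "dummy", strlen("dummy") + 1);
	linkinfo->rta_len = (unsigned short)((char *)nlh +
			     NLMSG_ALIGN(nlh->nlmsg_len) - (char *)linkinfo);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = sendto(fd, nlh, nlh->nlmsg_len, 0,
		     (struct sockaddr *)&kernel, sizeof(kernel)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}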
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Abilis Systems Single DVB-T Receiver
 * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
 * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
 */

#include <media/dvb_frontend.h>
#include "as102_fe.h"

struct as102_state {
	struct dvb_frontend frontend;
	struct as10x_demod_stats demod_stats;
	const struct as102_fe_ops *ops;
	void *priv;
	uint8_t elna_cfg;

	/* signal strength */
	uint16_t signal_strength;
	/* bit error rate */
	uint32_t ber;
};

static uint8_t as102_fe_get_code_rate(enum fe_code_rate arg)
{
	uint8_t c;

	switch (arg) {
	case FEC_1_2:
		c = CODE_RATE_1_2;
		break;
	case FEC_2_3:
		c = CODE_RATE_2_3;
		break;
	case FEC_3_4:
		c = CODE_RATE_3_4;
		break;
	case FEC_5_6:
		c = CODE_RATE_5_6;
		break;
	case FEC_7_8:
		c = CODE_RATE_7_8;
		break;
	default:
		c = CODE_RATE_UNKNOWN;
		break;
	}

	return c;
}

static int as102_fe_set_frontend(struct dvb_frontend *fe)
{
	struct as102_state *state = fe->demodulator_priv;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	struct as10x_tune_args tune_args = { 0 };

	/* set frequency */
	tune_args.freq = c->frequency / 1000;

	/* fix interleaving_mode */
	tune_args.interleaving_mode = INTLV_NATIVE;

	switch (c->bandwidth_hz) {
	case 8000000:
		tune_args.bandwidth = BW_8_MHZ;
		break;
	case 7000000:
		tune_args.bandwidth = BW_7_MHZ;
		break;
	case 6000000:
		tune_args.bandwidth = BW_6_MHZ;
		break;
	default:
		tune_args.bandwidth = BW_8_MHZ;
	}

	switch (c->guard_interval) {
	case GUARD_INTERVAL_1_32:
		tune_args.guard_interval = GUARD_INT_1_32;
		break;
	case GUARD_INTERVAL_1_16:
		tune_args.guard_interval = GUARD_INT_1_16;
		break;
	case GUARD_INTERVAL_1_8:
		tune_args.guard_interval = GUARD_INT_1_8;
		break;
	case GUARD_INTERVAL_1_4:
		tune_args.guard_interval = GUARD_INT_1_4;
		break;
	case
GUARD_INTERVAL_AUTO: default: tune_args.guard_interval = GUARD_UNKNOWN; break; } switch (c->modulation) { case QPSK: tune_args.modulation = CONST_QPSK; break; case QAM_16: tune_args.modulation = CONST_QAM16; break; case QAM_64: tune_args.modulation = CONST_QAM64; break; default: tune_args.modulation = CONST_UNKNOWN; break; } switch (c->transmission_mode) { case TRANSMISSION_MODE_2K: tune_args.transmission_mode = TRANS_MODE_2K; break; case TRANSMISSION_MODE_8K: tune_args.transmission_mode = TRANS_MODE_8K; break; default: tune_args.transmission_mode = TRANS_MODE_UNKNOWN; } switch (c->hierarchy) { case HIERARCHY_NONE: tune_args.hierarchy = HIER_NONE; break; case HIERARCHY_1: tune_args.hierarchy = HIER_ALPHA_1; break; case HIERARCHY_2: tune_args.hierarchy = HIER_ALPHA_2; break; case HIERARCHY_4: tune_args.hierarchy = HIER_ALPHA_4; break; case HIERARCHY_AUTO: tune_args.hierarchy = HIER_UNKNOWN; break; } pr_debug("as102: tuner parameters: freq: %d bw: 0x%02x gi: 0x%02x\n", c->frequency, tune_args.bandwidth, tune_args.guard_interval); /* * Detect a hierarchy selection * if HP/LP are both set to FEC_NONE, HP will be selected. */ if ((tune_args.hierarchy != HIER_NONE) && ((c->code_rate_LP == FEC_NONE) || (c->code_rate_HP == FEC_NONE))) { if (c->code_rate_LP == FEC_NONE) { tune_args.hier_select = HIER_HIGH_PRIORITY; tune_args.code_rate = as102_fe_get_code_rate(c->code_rate_HP); } if (c->code_rate_HP == FEC_NONE) { tune_args.hier_select = HIER_LOW_PRIORITY; tune_args.code_rate = as102_fe_get_code_rate(c->code_rate_LP); } pr_debug("as102: \thierarchy: 0x%02x selected: %s code_rate_%s: 0x%02x\n", tune_args.hierarchy, tune_args.hier_select == HIER_HIGH_PRIORITY ? "HP" : "LP", tune_args.hier_select == HIER_HIGH_PRIORITY ? "HP" : "LP", tune_args.code_rate); } else { tune_args.code_rate = as102_fe_get_code_rate(c->code_rate_HP); } /* Set frontend arguments */ return state->ops->set_tune(state->priv, &tune_args); } static int as102_fe_get_frontend(struct dvb_frontend *fe, struct dtv_frontend_properties *c) { struct as102_state *state = fe->demodulator_priv; int ret = 0; struct as10x_tps tps = { 0 }; /* send abilis command: GET_TPS */ ret = state->ops->get_tps(state->priv, &tps); if (ret < 0) return ret; /* extract constellation */ switch (tps.modulation) { case CONST_QPSK: c->modulation = QPSK; break; case CONST_QAM16: c->modulation = QAM_16; break; case CONST_QAM64: c->modulation = QAM_64; break; } /* extract hierarchy */ switch (tps.hierarchy) { case HIER_NONE: c->hierarchy = HIERARCHY_NONE; break; case HIER_ALPHA_1: c->hierarchy = HIERARCHY_1; break; case HIER_ALPHA_2: c->hierarchy = HIERARCHY_2; break; case HIER_ALPHA_4: c->hierarchy = HIERARCHY_4; break; } /* extract code rate HP */ switch (tps.code_rate_HP) { case CODE_RATE_1_2: c->code_rate_HP = FEC_1_2; break; case CODE_RATE_2_3: c->code_rate_HP = FEC_2_3; break; case CODE_RATE_3_4: c->code_rate_HP = FEC_3_4; break; case CODE_RATE_5_6: c->code_rate_HP = FEC_5_6; break; case CODE_RATE_7_8: c->code_rate_HP = FEC_7_8; break; } /* extract code rate LP */ switch (tps.code_rate_LP) { case CODE_RATE_1_2: c->code_rate_LP = FEC_1_2; break; case CODE_RATE_2_3: c->code_rate_LP = FEC_2_3; break; case CODE_RATE_3_4: c->code_rate_LP = FEC_3_4; break; case CODE_RATE_5_6: c->code_rate_LP = FEC_5_6; break; case CODE_RATE_7_8: c->code_rate_LP = FEC_7_8; break; } /* extract guard interval */ switch (tps.guard_interval) { case GUARD_INT_1_32: c->guard_interval = GUARD_INTERVAL_1_32; break; case GUARD_INT_1_16: c->guard_interval = GUARD_INTERVAL_1_16; break; case 
GUARD_INT_1_8: c->guard_interval = GUARD_INTERVAL_1_8; break; case GUARD_INT_1_4: c->guard_interval = GUARD_INTERVAL_1_4; break; } /* extract transmission mode */ switch (tps.transmission_mode) { case TRANS_MODE_2K: c->transmission_mode = TRANSMISSION_MODE_2K; break; case TRANS_MODE_8K: c->transmission_mode = TRANSMISSION_MODE_8K; break; } return 0; } static int as102_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *settings) { settings->min_delay_ms = 1000; return 0; } static int as102_fe_read_status(struct dvb_frontend *fe, enum fe_status *status) { int ret = 0; struct as102_state *state = fe->demodulator_priv; struct as10x_tune_status tstate = { 0 }; /* send abilis command: GET_TUNE_STATUS */ ret = state->ops->get_status(state->priv, &tstate); if (ret < 0) return ret; state->signal_strength = tstate.signal_strength; state->ber = tstate.BER; switch (tstate.tune_state) { case TUNE_STATUS_SIGNAL_DVB_OK: *status = FE_HAS_SIGNAL | FE_HAS_CARRIER; break; case TUNE_STATUS_STREAM_DETECTED: *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC | FE_HAS_VITERBI; break; case TUNE_STATUS_STREAM_TUNED: *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC | FE_HAS_LOCK | FE_HAS_VITERBI; break; default: *status = TUNE_STATUS_NOT_TUNED; } pr_debug("as102: tuner status: 0x%02x, strength %d, per: %d, ber: %d\n", tstate.tune_state, tstate.signal_strength, tstate.PER, tstate.BER); if (!(*status & FE_HAS_LOCK)) { memset(&state->demod_stats, 0, sizeof(state->demod_stats)); return 0; } ret = state->ops->get_stats(state->priv, &state->demod_stats); if (ret < 0) memset(&state->demod_stats, 0, sizeof(state->demod_stats)); return ret; } /* * Note: * - in AS102 SNR=MER * - the SNR will be returned in linear terms, i.e. not in dB * - the accuracy equals ±2dB for a SNR range from 4dB to 30dB * - the accuracy is >2dB for SNR values outside this range */ static int as102_fe_read_snr(struct dvb_frontend *fe, u16 *snr) { struct as102_state *state = fe->demodulator_priv; *snr = state->demod_stats.mer; return 0; } static int as102_fe_read_ber(struct dvb_frontend *fe, u32 *ber) { struct as102_state *state = fe->demodulator_priv; *ber = state->ber; return 0; } static int as102_fe_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct as102_state *state = fe->demodulator_priv; *strength = (((0xffff * 400) * state->signal_strength + 41000) * 2); return 0; } static int as102_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct as102_state *state = fe->demodulator_priv; if (state->demod_stats.has_started) *ucblocks = state->demod_stats.bad_frame_count; else *ucblocks = 0; return 0; } static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire) { struct as102_state *state = fe->demodulator_priv; return state->ops->stream_ctrl(state->priv, acquire, state->elna_cfg); } static void as102_fe_release(struct dvb_frontend *fe) { struct as102_state *state = fe->demodulator_priv; kfree(state); } static const struct dvb_frontend_ops as102_fe_ops = { .delsys = { SYS_DVBT }, .info = { .name = "Abilis AS102 DVB-T", .frequency_min_hz = 174 * MHz, .frequency_max_hz = 862 * MHz, .frequency_stepsize_hz = 166667, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QPSK | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS }, .set_frontend = as102_fe_set_frontend, 
.get_frontend = as102_fe_get_frontend, .get_tune_settings = as102_fe_get_tune_settings, .read_status = as102_fe_read_status, .read_snr = as102_fe_read_snr, .read_ber = as102_fe_read_ber, .read_signal_strength = as102_fe_read_signal_strength, .read_ucblocks = as102_fe_read_ucblocks, .ts_bus_ctrl = as102_fe_ts_bus_ctrl, .release = as102_fe_release, }; struct dvb_frontend *as102_attach(const char *name, const struct as102_fe_ops *ops, void *priv, uint8_t elna_cfg) { struct as102_state *state; struct dvb_frontend *fe; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return NULL; fe = &state->frontend; fe->demodulator_priv = state; state->ops = ops; state->priv = priv; state->elna_cfg = elna_cfg; /* init frontend callback ops */ memcpy(&fe->ops, &as102_fe_ops, sizeof(struct dvb_frontend_ops)); strscpy(fe->ops.info.name, name, sizeof(fe->ops.info.name)); return fe; } EXPORT_SYMBOL_GPL(as102_attach); MODULE_DESCRIPTION("as102-fe"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pierrick Hascoet <pierrick.hascoet@abilis.com>");
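/*
 * Illustration only (not part of the driver): a minimal sketch of how a
 * bus-glue driver would be expected to use as102_attach() above, assuming
 * the usual DVB core registration call dvb_register_frontend().  The ops
 * table, priv pointer and adapter passed in are hypothetical placeholders
 * supplied by the caller.
 */
#if 0 /* example only */
static int example_register_as102_fe(struct dvb_adapter *dvb_adap,
				     const struct as102_fe_ops *bus_ops,
				     void *bus_priv, uint8_t elna_cfg)
{
	struct dvb_frontend *fe;

	/* allocate the frontend state and hook up the as102_fe_ops table */
	fe = as102_attach("Abilis AS102 DVB-T", bus_ops, bus_priv, elna_cfg);
	if (!fe)
		return -ENOMEM;

	/* hand the frontend over to the DVB core */
	return dvb_register_frontend(dvb_adap, fe);
}
#endif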
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2020 Intel Corporation */ #include <linux/mutex.h> #include <linux/list.h> #include "adf_cfg.h" #include "adf_common_drv.h" static LIST_HEAD(accel_table); static LIST_HEAD(vfs_table); static DEFINE_MUTEX(table_lock); static u32 num_devices; static u8 id_map[ADF_MAX_DEVICES]; struct vf_id_map { u32 bdf; u32 id; u32 fake_id; bool attached; struct list_head list; }; static int adf_get_vf_id(struct adf_accel_dev *vf) { return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) + PCI_FUNC(accel_to_pci_dev(vf)->devfn) + (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)); } static int adf_get_vf_num(struct adf_accel_dev *vf) { return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf); } static struct vf_id_map *adf_find_vf(u32 bdf) { struct list_head *itr; list_for_each(itr, &vfs_table) { struct vf_id_map *ptr = list_entry(itr, struct vf_id_map, list); if (ptr->bdf == bdf) return ptr; } return NULL; } static int adf_get_vf_real_id(u32 fake) { struct list_head *itr; list_for_each(itr, &vfs_table) { struct vf_id_map *ptr = list_entry(itr, struct vf_id_map, list); if (ptr->fake_id == fake) return ptr->id; } return -1; } /** * adf_clean_vf_map() - Cleans VF id mappings * @vf: flag indicating whether mappings are cleaned * for vfs only or for vfs and pfs * * Function cleans internal ids for virtual functions.
*/ void adf_clean_vf_map(bool vf) { struct vf_id_map *map; struct list_head *ptr, *tmp; mutex_lock(&table_lock); list_for_each_safe(ptr, tmp, &vfs_table) { map = list_entry(ptr, struct vf_id_map, list); if (map->bdf != -1) { id_map[map->id] = 0; num_devices--; } if (vf && map->bdf == -1) continue; list_del(ptr); kfree(map); } mutex_unlock(&table_lock); } EXPORT_SYMBOL_GPL(adf_clean_vf_map); /** * adf_devmgr_update_class_index() - Update internal index * @hw_data: Pointer to internal device data. * * Function updates internal dev index for VFs */ void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data) { struct adf_hw_device_class *class = hw_data->dev_class; struct list_head *itr; int i = 0; list_for_each(itr, &accel_table) { struct adf_accel_dev *ptr = list_entry(itr, struct adf_accel_dev, list); if (ptr->hw_device->dev_class == class) ptr->hw_device->instance_id = i++; if (i == class->instances) break; } } EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index); static unsigned int adf_find_free_id(void) { unsigned int i; for (i = 0; i < ADF_MAX_DEVICES; i++) { if (!id_map[i]) { id_map[i] = 1; return i; } } return ADF_MAX_DEVICES + 1; } /** * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework * @accel_dev: Pointer to acceleration device. * @pf: Corresponding PF if the accel_dev is a VF * * Function adds acceleration device to the acceleration framework. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. */ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf) { struct list_head *itr; int ret = 0; if (num_devices == ADF_MAX_DEVICES) { dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n", ADF_MAX_DEVICES); return -EFAULT; } mutex_lock(&table_lock); atomic_set(&accel_dev->ref_count, 0); /* PF on host or VF on guest - optimized to remove redundant is_vf */ if (!accel_dev->is_vf || !pf) { struct vf_id_map *map; list_for_each(itr, &accel_table) { struct adf_accel_dev *ptr = list_entry(itr, struct adf_accel_dev, list); if (ptr == accel_dev) { ret = -EEXIST; goto unlock; } } list_add_tail(&accel_dev->list, &accel_table); accel_dev->accel_id = adf_find_free_id(); if (accel_dev->accel_id > ADF_MAX_DEVICES) { ret = -EFAULT; goto unlock; } num_devices++; map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) { ret = -ENOMEM; goto unlock; } map->bdf = ~0; map->id = accel_dev->accel_id; map->fake_id = map->id; map->attached = true; list_add_tail(&map->list, &vfs_table); } else if (accel_dev->is_vf && pf) { /* VF on host */ struct vf_id_map *map; map = adf_find_vf(adf_get_vf_num(accel_dev)); if (map) { struct vf_id_map *next; accel_dev->accel_id = map->id; list_add_tail(&accel_dev->list, &accel_table); map->fake_id++; map->attached = true; next = list_next_entry(map, list); while (next && &next->list != &vfs_table) { next->fake_id++; next = list_next_entry(next, list); } ret = 0; goto unlock; } map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) { ret = -ENOMEM; goto unlock; } accel_dev->accel_id = adf_find_free_id(); if (accel_dev->accel_id > ADF_MAX_DEVICES) { kfree(map); ret = -EFAULT; goto unlock; } num_devices++; list_add_tail(&accel_dev->list, &accel_table); map->bdf = adf_get_vf_num(accel_dev); map->id = accel_dev->accel_id; map->fake_id = map->id; map->attached = true; list_add_tail(&map->list, &vfs_table); } mutex_init(&accel_dev->state_lock); unlock: mutex_unlock(&table_lock); return ret; } EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); struct list_head *adf_devmgr_get_head(void) { return &accel_table; } 
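/*
 * Illustration only: a stand-alone user-space model of the first-free-slot
 * allocation that adf_find_free_id() performs over id_map[] above.  The
 * table size and names below are arbitrary stand-ins, not part of the
 * driver.
 */
#if 0 /* example only */
#include <stdio.h>

#define EXAMPLE_MAX_DEVICES 4

static unsigned char example_id_map[EXAMPLE_MAX_DEVICES];

static unsigned int example_find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < EXAMPLE_MAX_DEVICES; i++) {
		if (!example_id_map[i]) {
			example_id_map[i] = 1;	/* claim the first free slot */
			return i;
		}
	}
	return EXAMPLE_MAX_DEVICES + 1;	/* full: caller treats this as an error */
}

int main(void)
{
	unsigned int a = example_find_free_id();	/* 0 */
	unsigned int b = example_find_free_id();	/* 1 */

	example_id_map[a] = 0;				/* device removed, slot freed */
	printf("%u %u %u\n", a, b, example_find_free_id());	/* prints "0 1 0" */
	return 0;
}
#endif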
/** * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. * @accel_dev: Pointer to acceleration device. * @pf: Corresponding PF if the accel_dev is a VF * * Function removes acceleration device from the acceleration framework. * To be used by QAT device specific drivers. * * Return: void */ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf) { mutex_lock(&table_lock); /* PF on host or VF on guest - optimized to remove redundant is_vf */ if (!accel_dev->is_vf || !pf) { id_map[accel_dev->accel_id] = 0; num_devices--; } else if (accel_dev->is_vf && pf) { struct vf_id_map *map, *next; map = adf_find_vf(adf_get_vf_num(accel_dev)); if (!map) { dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n"); goto unlock; } map->fake_id--; map->attached = false; next = list_next_entry(map, list); while (next && &next->list != &vfs_table) { next->fake_id--; next = list_next_entry(next, list); } } unlock: mutex_destroy(&accel_dev->state_lock); list_del(&accel_dev->list); mutex_unlock(&table_lock); } EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); /** * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev. * @pci_dev: Pointer to PCI device. * * Function returns acceleration device associated with the given PCI device. * To be used by QAT device specific drivers. * * Return: pointer to accel_dev or NULL if not found. */ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev) { struct list_head *itr; mutex_lock(&table_lock); list_for_each(itr, &accel_table) { struct adf_accel_dev *ptr = list_entry(itr, struct adf_accel_dev, list); if (ptr->accel_pci_dev.pci_dev == pci_dev) { mutex_unlock(&table_lock); return ptr; } } mutex_unlock(&table_lock); return NULL; } EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id) { struct list_head *itr; int real_id; mutex_lock(&table_lock); real_id = adf_get_vf_real_id(id); if (real_id < 0) goto unlock; id = real_id; list_for_each(itr, &accel_table) { struct adf_accel_dev *ptr = list_entry(itr, struct adf_accel_dev, list); if (ptr->accel_id == id) { mutex_unlock(&table_lock); return ptr; } } unlock: mutex_unlock(&table_lock); return NULL; } int adf_devmgr_verify_id(u32 id) { if (id == ADF_CFG_ALL_DEVICES) return 0; if (adf_devmgr_get_dev_by_id(id)) return 0; return -ENODEV; } static int adf_get_num_dettached_vfs(void) { struct list_head *itr; int vfs = 0; mutex_lock(&table_lock); list_for_each(itr, &vfs_table) { struct vf_id_map *ptr = list_entry(itr, struct vf_id_map, list); if (ptr->bdf != ~0 && !ptr->attached) vfs++; } mutex_unlock(&table_lock); return vfs; } void adf_devmgr_get_num_dev(u32 *num) { *num = num_devices - adf_get_num_dettached_vfs(); } /** * adf_dev_in_use() - Check whether accel_dev is currently in use * @accel_dev: Pointer to acceleration device. * * To be used by QAT device specific drivers. * * Return: 1 when device is in use, 0 otherwise. */ int adf_dev_in_use(struct adf_accel_dev *accel_dev) { return atomic_read(&accel_dev->ref_count) != 0; } EXPORT_SYMBOL_GPL(adf_dev_in_use); /** * adf_dev_get() - Increment accel_dev reference count * @accel_dev: Pointer to acceleration device. * * Increment the accel_dev refcount and if this is the first time * incrementing it during this period the accel_dev is in use, * increment the module refcount too. * To be used by QAT device specific drivers. 
* * Return: 0 when successful, EFAULT when it fails to bump the module refcount */ int adf_dev_get(struct adf_accel_dev *accel_dev) { if (atomic_add_return(1, &accel_dev->ref_count) == 1) if (!try_module_get(accel_dev->owner)) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(adf_dev_get); /** * adf_dev_put() - Decrement accel_dev reference count * @accel_dev: Pointer to acceleration device. * * Decrement the accel_dev refcount and if this is the last time * decrementing it during this period the accel_dev is in use, * decrement the module refcount too. * To be used by QAT device specific drivers. * * Return: void */ void adf_dev_put(struct adf_accel_dev *accel_dev) { if (atomic_sub_return(1, &accel_dev->ref_count) == 0) module_put(accel_dev->owner); } EXPORT_SYMBOL_GPL(adf_dev_put); /** * adf_devmgr_in_reset() - Check whether device is in reset * @accel_dev: Pointer to acceleration device. * * To be used by QAT device specific drivers. * * Return: 1 when the device is being reset, 0 otherwise. */ int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) { return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); } EXPORT_SYMBOL_GPL(adf_devmgr_in_reset); /** * adf_dev_started() - Check whether device has started * @accel_dev: Pointer to acceleration device. * * To be used by QAT device specific drivers. * * Return: 1 when the device has started, 0 otherwise */ int adf_dev_started(struct adf_accel_dev *accel_dev) { return test_bit(ADF_STATUS_STARTED, &accel_dev->status); } EXPORT_SYMBOL_GPL(adf_dev_started);
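/*
 * Illustration only: the calling pattern the helpers above are designed
 * for.  A QAT sub-driver pins the device (and, on the first reference,
 * this module) for the duration of an operation and releases it when
 * done.  example_do_work() is a hypothetical placeholder.
 */
#if 0 /* example only */
static int example_use_accel_dev(struct adf_accel_dev *accel_dev)
{
	int ret;

	if (!adf_dev_started(accel_dev))
		return -EAGAIN;

	ret = adf_dev_get(accel_dev);	/* ref_count++, may pin the module */
	if (ret)
		return ret;

	ret = example_do_work(accel_dev);

	adf_dev_put(accel_dev);		/* ref_count--, may unpin the module */
	return ret;
}
#endif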
// SPDX-License-Identifier: GPL-2.0-only /* DVB USB framework compliant Linux driver for the * DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101, * TeVii S421, S480, S482, S600, S630, S632, S650, S660, S662, * Prof 1100, 7500, * Geniatech SU3000, T220, * TechnoTrend S2-4600, * Terratec Cinergy S2 cards * Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include <media/dvb-usb-ids.h> #include "dw2102.h" #include "si21xx.h" #include "stv0299.h" #include "z0194a.h" #include "stv0288.h" #include "stb6000.h" #include "eds1547.h" #include "cx24116.h" #include "tda1002x.h" #include "mt312.h" #include "zl10039.h" #include "ts2020.h" #include "ds3000.h" #include "stv0900.h" #include "stv6110.h" #include "stb6100.h" #include "stb6100_proc.h" #include "m88rs2000.h" #include "tda18271.h" #include "cxd2820r.h" #include "m88ds3103.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 #define DW210X_READ_MSG 0 #define DW210X_WRITE_MSG 1 #define REG_1F_SYMBOLRATE_BYTE0 0x1f #define REG_20_SYMBOLRATE_BYTE1 0x20 #define REG_21_SYMBOLRATE_BYTE2 0x21 /* on my own*/ #define DW2102_VOLTAGE_CTRL (0x1800) #define SU3000_STREAM_CTRL (0x1900) #define DW2102_RC_QUERY (0x1a00) #define DW2102_LED_CTRL (0x1b00) #define DW2101_FIRMWARE "dvb-usb-dw2101.fw" #define DW2102_FIRMWARE "dvb-usb-dw2102.fw" #define DW2104_FIRMWARE "dvb-usb-dw2104.fw" #define DW3101_FIRMWARE "dvb-usb-dw3101.fw" #define S630_FIRMWARE "dvb-usb-s630.fw" #define S660_FIRMWARE "dvb-usb-s660.fw" #define P1100_FIRMWARE "dvb-usb-p1100.fw" #define P7500_FIRMWARE "dvb-usb-p7500.fw" #define err_str "did not find the firmware file '%s'.
You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware" struct dw2102_state { u8 initialized; u8 last_lock; u8 data[MAX_XFER_SIZE + 4]; struct i2c_client *i2c_client_demod; struct i2c_client *i2c_client_tuner; /* fe hook functions*/ int (*old_set_voltage)(struct dvb_frontend *f, enum fe_sec_voltage v); int (*fe_read_status)(struct dvb_frontend *fe, enum fe_status *status); }; /* debug */ static int dvb_usb_dw2102_debug; module_param_named(debug, dvb_usb_dw2102_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))." DVB_USB_DEBUG_STATUS); /* demod probe */ static int demod_probe = 1; module_param_named(demod, demod_probe, int, 0644); MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 4=stv0903+stb6100(or-able))."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value, u16 index, u8 *data, u16 len, int flags) { int ret; u8 *u8buf; unsigned int pipe = (flags == DW210X_READ_MSG) ? usb_rcvctrlpipe(dev, 0) : usb_sndctrlpipe(dev, 0); u8 request_type = (flags == DW210X_READ_MSG) ? USB_DIR_IN : USB_DIR_OUT; u8buf = kmalloc(len, GFP_KERNEL); if (!u8buf) return -ENOMEM; if (flags == DW210X_WRITE_MSG) memcpy(u8buf, data, len); ret = usb_control_msg(dev, pipe, request, request_type | USB_TYPE_VENDOR, value, index, u8buf, len, 2000); if (flags == DW210X_READ_MSG) memcpy(data, u8buf, len); kfree(u8buf); return ret; } /* I2C */ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i = 0; u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0}; u16 value; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: if (msg[0].len < 1) { num = -EOPNOTSUPP; break; } /* read stv0299 register */ value = msg[0].buf[0];/* register */ for (i = 0; i < msg[1].len; i++) { dw210x_op_rw(d->udev, 0xb5, value + i, 0, buf6, 2, DW210X_READ_MSG); msg[1].buf[i] = buf6[0]; } break; case 1: switch (msg[0].addr) { case 0x68: if (msg[0].len < 2) { num = -EOPNOTSUPP; break; } /* write to stv0299 register */ buf6[0] = 0x2a; buf6[1] = msg[0].buf[0]; buf6[2] = msg[0].buf[1]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 3, DW210X_WRITE_MSG); break; case 0x60: if (msg[0].flags == 0) { if (msg[0].len < 4) { num = -EOPNOTSUPP; break; } /* write to tuner pll */ buf6[0] = 0x2c; buf6[1] = 5; buf6[2] = 0xc0; buf6[3] = msg[0].buf[0]; buf6[4] = msg[0].buf[1]; buf6[5] = msg[0].buf[2]; buf6[6] = msg[0].buf[3]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 7, DW210X_WRITE_MSG); } else { if (msg[0].len < 1) { num = -EOPNOTSUPP; break; } /* read from tuner */ dw210x_op_rw(d->udev, 0xb5, 0, 0, buf6, 1, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; } break; case (DW2102_RC_QUERY): if (msg[0].len < 2) { num = -EOPNOTSUPP; break; } dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case (DW2102_VOLTAGE_CTRL): if (msg[0].len < 1) { num = -EOPNOTSUPP; break; } buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); u8 buf6[] = {0, 0, 0, 0, 0, 0, 0}; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: if (msg[0].len != 1) { warn("i2c 
rd: len=%d is not 1!\n", msg[0].len); num = -EOPNOTSUPP; break; } if (2 + msg[1].len > sizeof(buf6)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); num = -EOPNOTSUPP; break; } /* read si2109 register by number */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; buf6[2] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); /* read si2109 register */ dw210x_op_rw(d->udev, 0xc3, 0xd0, 0, buf6, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, buf6 + 2, msg[1].len); break; case 1: switch (msg[0].addr) { case 0x68: if (2 + msg[0].len > sizeof(buf6)) { warn("i2c wr: len=%d is too big!\n", msg[0].len); num = -EOPNOTSUPP; break; } /* write to si2109 register */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; memcpy(buf6 + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); break; case(DW2102_RC_QUERY): dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case(DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[MAX_XFER_SIZE], obuf[3]; if (2 + msg[0].len != sizeof(obuf)) { warn("i2c rd: len=%d is not 1!\n", msg[0].len); ret = -EOPNOTSUPP; goto unlock; } if (2 + msg[1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ dw210x_op_rw(d->udev, 0xc3, 0xd1, 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x68: { /* write to register */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case 0x61: { /* write to tuner */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf, 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } } break; } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int len, i, j, ret; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case(DW2102_RC_QUERY): { u8 
ibuf[2]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf, 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[j].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /* case 0x55: cx24116 * case 0x6a: stv0903 * case 0x68: ds3000, stv0903 * case 0x60: ts2020, stv6110, stb6100 */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } dw210x_op_rw(d->udev, 0xc3, (msg[j].addr << 1) + 1, 0, ibuf, msg[j].len + 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 2, msg[j].len); mdelay(10); } else if (((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) || ((msg[j].buf[0] == 0xf7) && (msg[j].addr == 0x55))) { /* write firmware */ u8 obuf[19]; obuf[0] = msg[j].addr << 1; obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len); obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else { /* write registers */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[j].addr << 1; obuf[1] = msg[j].len; memcpy(obuf + 2, msg[j].buf, msg[j].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); } break; } } } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret; int i; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[MAX_XFER_SIZE], obuf[3]; if (2 + msg[0].len != sizeof(obuf)) { warn("i2c rd: len=%d is not 1!\n", msg[0].len); ret = -EOPNOTSUPP; goto unlock; } if (2 + msg[1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ dw210x_op_rw(d->udev, 0xc3, 0x19, 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x60: case 0x0c: { /* write to register */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[0].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf, 2); break; } } break; } for (i = 0; i < num; i++) { deb_xfer("%02x:%02x: %s ", i, msg[i].addr, msg[i].flags == 0 ? 
">>>" : "<<<"); debug_dump(msg[i].buf, msg[i].len, deb_xfer); } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct usb_device *udev; int len, i, j, ret; if (!d) return -ENODEV; udev = d->udev; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case (DW2102_RC_QUERY): { u8 ibuf[5]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 5, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 3, 2); break; } case (DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 1; obuf[1] = msg[j].buf[1];/* off-on */ dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); obuf[0] = 3; obuf[1] = msg[j].buf[0];/* 13v-18v */ dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } case (DW2102_LED_CTRL): { u8 obuf[2]; obuf[0] = 5; obuf[1] = msg[j].buf[0]; dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /* case 0x55: cx24116 * case 0x6a: stv0903 * case 0x68: ds3000, stv0903, rs2000 * case 0x60: ts2020, stv6110, stb6100 * case 0xa0: eeprom */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[MAX_XFER_SIZE]; if (msg[j].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } dw210x_op_rw(d->udev, 0x91, 0, 0, ibuf, msg[j].len, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf, msg[j].len); break; } else if ((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) { /* write firmware */ u8 obuf[19]; obuf[0] = (msg[j].len > 16 ? 18 : msg[j].len + 1); obuf[1] = msg[j].addr << 1; obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else if (j < (num - 1)) { /* write register addr before read */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[j + 1].len; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); dw210x_op_rw(d->udev, le16_to_cpu(udev->descriptor.idProduct) == 0x7500 ? 
0x92 : 0x90, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } else { /* write registers */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[j].len + 1; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } break; } } } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct dw2102_state *state; int j; if (!d) return -ENODEV; state = d->priv; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; if (mutex_lock_interruptible(&d->data_mutex) < 0) { mutex_unlock(&d->i2c_mutex); return -EAGAIN; } j = 0; while (j < num) { switch (msg[j].addr) { case SU3000_STREAM_CTRL: state->data[0] = msg[j].buf[0] + 0x36; state->data[1] = 3; state->data[2] = 0; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 0, 0) < 0) err("i2c transfer failed."); break; case DW2102_RC_QUERY: state->data[0] = 0x10; if (dvb_usb_generic_rw(d, state->data, 1, state->data, 2, 0) < 0) err("i2c transfer failed."); msg[j].buf[1] = state->data[0]; msg[j].buf[0] = state->data[1]; break; default: /* if the current write msg is followed by a another * read msg to/from the same address */ if ((j + 1 < num) && (msg[j + 1].flags & I2C_M_RD) && (msg[j].addr == msg[j + 1].addr)) { /* join both i2c msgs to one usb read command */ if (4 + msg[j].len > sizeof(state->data)) { warn("i2c combined wr/rd: write len=%d is too big!\n", msg[j].len); num = -EOPNOTSUPP; break; } if (1 + msg[j + 1].len > sizeof(state->data)) { warn("i2c combined wr/rd: read len=%d is too big!\n", msg[j + 1].len); num = -EOPNOTSUPP; break; } state->data[0] = 0x09; state->data[1] = msg[j].len; state->data[2] = msg[j + 1].len; state->data[3] = msg[j].addr; memcpy(&state->data[4], msg[j].buf, msg[j].len); if (dvb_usb_generic_rw(d, state->data, msg[j].len + 4, state->data, msg[j + 1].len + 1, 0) < 0) err("i2c transfer failed."); memcpy(msg[j + 1].buf, &state->data[1], msg[j + 1].len); j++; break; } if (msg[j].flags & I2C_M_RD) { /* single read */ if (4 + msg[j].len > sizeof(state->data)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); num = -EOPNOTSUPP; break; } state->data[0] = 0x09; state->data[1] = 0; state->data[2] = msg[j].len; state->data[3] = msg[j].addr; memcpy(&state->data[4], msg[j].buf, msg[j].len); if (dvb_usb_generic_rw(d, state->data, 4, state->data, msg[j].len + 1, 0) < 0) err("i2c transfer failed."); memcpy(msg[j].buf, &state->data[1], msg[j].len); break; } /* single write */ if (3 + msg[j].len > sizeof(state->data)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); num = -EOPNOTSUPP; break; } state->data[0] = 0x08; state->data[1] = msg[j].addr; state->data[2] = msg[j].len; memcpy(&state->data[3], msg[j].buf, msg[j].len); if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3, state->data, 1, 0) < 0) err("i2c transfer failed."); } // switch j++; } // while mutex_unlock(&d->data_mutex); mutex_unlock(&d->i2c_mutex); return num; } static u32 dw210x_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static const struct i2c_algorithm dw2102_i2c_algo = { .master_xfer = dw2102_i2c_transfer, .functionality = dw210x_i2c_func, }; static const struct i2c_algorithm dw2102_serit_i2c_algo = { .master_xfer = dw2102_serit_i2c_transfer, .functionality = dw210x_i2c_func, }; 
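/*
 * Illustration only: a minimal sketch of how the dw210x_op_rw() vendor
 * control-message helper defined above is typically wrapped, mirroring
 * the 0xb5 register read used by dw2102_i2c_transfer().  The helper name
 * is hypothetical and not part of the driver.
 */
#if 0 /* example only */
static int example_read_demod_reg(struct dvb_usb_device *d, u8 reg, u8 *val)
{
	u8 buf[2];
	int ret;

	ret = dw210x_op_rw(d->udev, 0xb5, reg, 0, buf, 2, DW210X_READ_MSG);
	if (ret < 0)
		return ret;

	*val = buf[0];	/* the first returned byte carries the register value */
	return 0;
}
#endif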
static const struct i2c_algorithm dw2102_earda_i2c_algo = { .master_xfer = dw2102_earda_i2c_transfer, .functionality = dw210x_i2c_func, }; static const struct i2c_algorithm dw2104_i2c_algo = { .master_xfer = dw2104_i2c_transfer, .functionality = dw210x_i2c_func, }; static const struct i2c_algorithm dw3101_i2c_algo = { .master_xfer = dw3101_i2c_transfer, .functionality = dw210x_i2c_func, }; static const struct i2c_algorithm s6x0_i2c_algo = { .master_xfer = s6x0_i2c_transfer, .functionality = dw210x_i2c_func, }; static const struct i2c_algorithm su3000_i2c_algo = { .master_xfer = su3000_i2c_transfer, .functionality = dw210x_i2c_func, }; static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i; u8 ibuf[] = {0, 0}; u8 eeprom[256], eepromline[16]; for (i = 0; i < 256; i++) { if (dw210x_op_rw(d->udev, 0xb6, 0xa0, i, ibuf, 2, DW210X_READ_MSG) < 0) { err("read eeprom failed."); return -EIO; } else { eepromline[i % 16] = ibuf[0]; eeprom[i] = ibuf[0]; } if ((i % 16) == 15) { deb_xfer("%02x: ", i - 15); debug_dump(eepromline, 16, deb_xfer); } } memcpy(mac, eeprom + 8, 6); return 0; }; static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i, ret; u8 ibuf[] = { 0 }, obuf[] = { 0 }; u8 eeprom[256], eepromline[16]; struct i2c_msg msg[] = { { .addr = 0xa0 >> 1, .flags = 0, .buf = obuf, .len = 1, }, { .addr = 0xa0 >> 1, .flags = I2C_M_RD, .buf = ibuf, .len = 1, } }; for (i = 0; i < 256; i++) { obuf[0] = i; ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2); if (ret != 2) { err("read eeprom failed."); return -EIO; } else { eepromline[i % 16] = ibuf[0]; eeprom[i] = ibuf[0]; } if ((i % 16) == 15) { deb_xfer("%02x: ", i - 15); debug_dump(eepromline, 16, deb_xfer); } } memcpy(mac, eeprom + 16, 6); return 0; }; static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { static u8 command_start[] = {0x00}; static u8 command_stop[] = {0x01}; struct i2c_msg msg = { .addr = SU3000_STREAM_CTRL, .flags = 0, .buf = onoff ? 
command_start : command_stop, .len = 1 }; i2c_transfer(&adap->dev->i2c_adap, &msg, 1); return 0; } static int su3000_power_ctrl(struct dvb_usb_device *d, int i) { struct dw2102_state *state = d->priv; int ret = 0; info("%s: %d, initialized %d", __func__, i, state->initialized); if (i && !state->initialized) { mutex_lock(&d->data_mutex); state->data[0] = 0xde; state->data[1] = 0; state->initialized = 1; /* reset board */ ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0); mutex_unlock(&d->data_mutex); } return ret; } static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i; u8 obuf[] = { 0x1f, 0xf0 }; u8 ibuf[] = { 0 }; struct i2c_msg msg[] = { { .addr = 0x51, .flags = 0, .buf = obuf, .len = 2, }, { .addr = 0x51, .flags = I2C_M_RD, .buf = ibuf, .len = 1, } }; for (i = 0; i < 6; i++) { obuf[1] = 0xf0 + i; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) return -EIO; else mac[i] = ibuf[0]; } return 0; } static int su3000_identify_state(struct usb_device *udev, const struct dvb_usb_device_properties *props, const struct dvb_usb_device_description **desc, int *cold) { *cold = 0; return 0; } static int dw210x_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { static u8 command_13v[] = {0x00, 0x01}; static u8 command_18v[] = {0x01, 0x01}; static u8 command_off[] = {0x00, 0x00}; struct i2c_msg msg = { .addr = DW2102_VOLTAGE_CTRL, .flags = 0, .buf = command_off, .len = 2, }; struct dvb_usb_adapter *udev_adap = fe->dvb->priv; if (voltage == SEC_VOLTAGE_18) msg.buf = command_18v; else if (voltage == SEC_VOLTAGE_13) msg.buf = command_13v; i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); return 0; } static int s660_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { struct dvb_usb_adapter *d = fe->dvb->priv; struct dw2102_state *st = d->dev->priv; dw210x_set_voltage(fe, voltage); if (st->old_set_voltage) st->old_set_voltage(fe, voltage); return 0; } static void dw210x_led_ctrl(struct dvb_frontend *fe, int offon) { static u8 led_off[] = { 0 }; static u8 led_on[] = { 1 }; struct i2c_msg msg = { .addr = DW2102_LED_CTRL, .flags = 0, .buf = led_off, .len = 1 }; struct dvb_usb_adapter *udev_adap = fe->dvb->priv; if (offon) msg.buf = led_on; i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); } static int tt_s2_4600_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct dvb_usb_adapter *d = fe->dvb->priv; struct dw2102_state *st = d->dev->priv; int ret; ret = st->fe_read_status(fe, status); /* resync slave fifo when signal change from unlock to lock */ if ((*status & FE_HAS_LOCK) && (!st->last_lock)) su3000_streaming_ctrl(d, 1); st->last_lock = (*status & FE_HAS_LOCK) ? 
1 : 0; return ret; } static struct stv0299_config sharp_z0194a_config = { .demod_address = 0x68, .inittab = sharp_z0194a_inittab, .mclk = 88000000UL, .invert = 1, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = sharp_z0194a_set_symbol_rate, }; static struct cx24116_config dw2104_config = { .demod_address = 0x55, .mpg_clk_pos_pol = 0x01, }; static struct si21xx_config serit_sp1511lhb_config = { .demod_address = 0x68, .min_delay_ms = 100, }; static struct tda10023_config dw3101_tda10023_config = { .demod_address = 0x0c, .invert = 1, }; static struct mt312_config zl313_config = { .demod_address = 0x0e, }; static struct ds3000_config dw2104_ds3000_config = { .demod_address = 0x68, }; static struct ts2020_config dw2104_ts2020_config = { .tuner_address = 0x60, .clk_out_div = 1, .frequency_div = 1060000, }; static struct ds3000_config s660_ds3000_config = { .demod_address = 0x68, .ci_mode = 1, .set_lock_led = dw210x_led_ctrl, }; static struct ts2020_config s660_ts2020_config = { .tuner_address = 0x60, .clk_out_div = 1, .frequency_div = 1146000, }; static struct stv0900_config dw2104a_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, }; static struct stb6100_config dw2104a_stb6100_config = { .tuner_address = 0x60, .refclock = 27000000, }; static struct stv0900_config dw2104_stv0900_config = { .demod_address = 0x68, .demod_mode = 0, .xtal = 8000000, .clkmode = 3, .diseqc_mode = 2, .tun1_maddress = 0, .tun1_adc = 1,/* 1 Vpp */ .path1_mode = 3, }; static struct stv6110_config dw2104_stv6110_config = { .i2c_address = 0x60, .mclk = 16000000, .clk_div = 1, }; static struct stv0900_config prof_7500_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, .tun1_type = 3, .set_lock_led = dw210x_led_ctrl, }; static struct ds3000_config su3000_ds3000_config = { .demod_address = 0x68, .ci_mode = 1, .set_lock_led = dw210x_led_ctrl, }; static struct cxd2820r_config cxd2820r_config = { .i2c_address = 0x6c, /* (0xd8 >> 1) */ .ts_mode = 0x38, .ts_clock_inv = 1, }; static struct tda18271_config tda18271_config = { .output_opt = TDA18271_OUTPUT_LT_OFF, .gate = TDA18271_GATE_DIGITAL, }; static u8 m88rs2000_inittab[] = { DEMOD_WRITE, 0x9a, 0x30, DEMOD_WRITE, 0x00, 0x01, WRITE_DELAY, 0x19, 0x00, DEMOD_WRITE, 0x00, 0x00, DEMOD_WRITE, 0x9a, 0xb0, DEMOD_WRITE, 0x81, 0xc1, DEMOD_WRITE, 0x81, 0x81, DEMOD_WRITE, 0x86, 0xc6, DEMOD_WRITE, 0x9a, 0x30, DEMOD_WRITE, 0xf0, 0x80, DEMOD_WRITE, 0xf1, 0xbf, DEMOD_WRITE, 0xb0, 0x45, DEMOD_WRITE, 0xb2, 0x01, DEMOD_WRITE, 0x9a, 0xb0, 0xff, 0xaa, 0xff }; static struct m88rs2000_config s421_m88rs2000_config = { .demod_addr = 0x68, .inittab = m88rs2000_inittab, }; static int dw2104_frontend_attach(struct dvb_usb_adapter *d) { struct dvb_tuner_ops *tuner_ops = NULL; if (demod_probe & 4) { d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe_adap[0].fe) { if (dvb_attach(stb6100_attach, d->fe_adap[0].fe, &dw2104a_stb6100_config, &d->dev->i2c_adap)) { tuner_ops = &d->fe_adap[0].fe->ops.tuner_ops; tuner_ops->set_frequency = stb6100_set_freq; tuner_ops->get_frequency = stb6100_get_freq; tuner_ops->set_bandwidth = 
stb6100_set_bandw; tuner_ops->get_bandwidth = stb6100_get_bandw; d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STB6100!"); return 0; } } } if (demod_probe & 2) { d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe_adap[0].fe) { if (dvb_attach(stv6110_attach, d->fe_adap[0].fe, &dw2104_stv6110_config, &d->dev->i2c_adap)) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STV6110A!"); return 0; } } } if (demod_probe & 1) { d->fe_adap[0].fe = dvb_attach(cx24116_attach, &dw2104_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached cx24116!"); return 0; } } d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe) { dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config, &d->dev->i2c_adap); d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached DS3000!"); return 0; } return -EIO; } static struct dvb_usb_device_properties dw2102_properties; static struct dvb_usb_device_properties dw2104_properties; static struct dvb_usb_device_properties s6x0_properties; static int dw2102_frontend_attach(struct dvb_usb_adapter *d) { if (dw2102_properties.i2c_algo == &dw2102_serit_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = NULL;*/ d->fe_adap[0].fe = dvb_attach(si21xx_attach, &serit_sp1511lhb_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached si21xx!"); return 0; } } if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) { d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe) { if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61, &d->dev->i2c_adap)) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0288!"); return 0; } } } if (dw2102_properties.i2c_algo == &dw2102_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = dw2102_tuner_attach;*/ d->fe_adap[0].fe = dvb_attach(stv0299_attach, &sharp_z0194a_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0299!"); return 0; } } return -EIO; } static int dw3101_frontend_attach(struct dvb_usb_adapter *d) { d->fe_adap[0].fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config, &d->dev->i2c_adap, 0x48); if (d->fe_adap[0].fe) { info("Attached tda10023!"); return 0; } return -EIO; } static int zl100313_frontend_attach(struct dvb_usb_adapter *d) { d->fe_adap[0].fe = dvb_attach(mt312_attach, &zl313_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe) { if (dvb_attach(zl10039_attach, d->fe_adap[0].fe, 0x60, &d->dev->i2c_adap)) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached zl100313+zl10039!"); return 0; } } return -EIO; } static int stv0288_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[] = {7, 1}; d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (!d->fe_adap[0].fe) return -EIO; if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61, &d->dev->i2c_adap) == NULL) return -EIO; d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); info("Attached stv0288+stb6000!"); return 0; } static int ds3000_frontend_attach(struct dvb_usb_adapter *d) { struct dw2102_state *st = d->dev->priv; u8 obuf[] = {7, 1}; d->fe_adap[0].fe = dvb_attach(ds3000_attach, 
&s660_ds3000_config, &d->dev->i2c_adap); if (!d->fe_adap[0].fe) return -EIO; dvb_attach(ts2020_attach, d->fe_adap[0].fe, &s660_ts2020_config, &d->dev->i2c_adap); st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage; d->fe_adap[0].fe->ops.set_voltage = s660_set_voltage; dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); info("Attached ds3000+ts2020!"); return 0; } static int prof_7500_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[] = {7, 1}; d->fe_adap[0].fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config, &d->dev->i2c_adap, 0); if (!d->fe_adap[0].fe) return -EIO; d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); info("Attached STV0900+STB6100A!"); return 0; } static int su3000_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; struct dw2102_state *state = d->priv; mutex_lock(&d->data_mutex); state->data[0] = 0xe; state->data[1] = 0x80; state->data[2] = 0; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); state->data[0] = 0xe; state->data[1] = 0x02; state->data[2] = 1; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(300); state->data[0] = 0xe; state->data[1] = 0x83; state->data[2] = 0; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); state->data[0] = 0xe; state->data[1] = 0x83; state->data[2] = 1; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); state->data[0] = 0x51; if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) err("command 0x51 transfer failed."); mutex_unlock(&d->data_mutex); adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, &d->i2c_adap); if (!adap->fe_adap[0].fe) return -EIO; if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe, &dw2104_ts2020_config, &d->i2c_adap)) { info("Attached DS3000/TS2020!"); return 0; } info("Failed to attach DS3000/TS2020!"); return -EIO; } static int t220_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; struct dw2102_state *state = d->priv; mutex_lock(&d->data_mutex); state->data[0] = 0xe; state->data[1] = 0x87; state->data[2] = 0x0; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); state->data[0] = 0xe; state->data[1] = 0x86; state->data[2] = 1; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); state->data[0] = 0xe; state->data[1] = 0x80; state->data[2] = 0; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(50); state->data[0] = 0xe; state->data[1] = 0x80; state->data[2] = 1; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); state->data[0] = 0x51; if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) err("command 0x51 transfer failed."); mutex_unlock(&d->data_mutex); adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, &d->i2c_adap, NULL); if (adap->fe_adap[0].fe) { if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60, &d->i2c_adap, &tda18271_config)) { info("Attached TDA18271HD/CXD2820R!"); return 0; } } info("Failed to attach TDA18271HD/CXD2820R!"); return -EIO; } static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; 
struct dw2102_state *state = d->priv; mutex_lock(&d->data_mutex); state->data[0] = 0x51; if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) err("command 0x51 transfer failed."); mutex_unlock(&d->data_mutex); adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config, &d->i2c_adap); if (!adap->fe_adap[0].fe) return -EIO; if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe, &dw2104_ts2020_config, &d->i2c_adap)) { info("Attached RS2000/TS2020!"); return 0; } info("Failed to attach RS2000/TS2020!"); return -EIO; } static int tt_s2_4600_frontend_attach_probe_demod(struct dvb_usb_device *d, const int probe_addr) { struct dw2102_state *state = d->priv; state->data[0] = 0x9; state->data[1] = 0x1; state->data[2] = 0x1; state->data[3] = probe_addr; state->data[4] = 0x0; if (dvb_usb_generic_rw(d, state->data, 5, state->data, 2, 0) < 0) { err("i2c probe for address 0x%x failed.", probe_addr); return 0; } if (state->data[0] != 8) /* fail(7) or error, no device at address */ return 0; /* probing successful */ return 1; } static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; struct dw2102_state *state = d->priv; struct i2c_adapter *i2c_adapter; struct i2c_client *client; struct i2c_board_info board_info; struct m88ds3103_platform_data m88ds3103_pdata = {}; struct ts2020_config ts2020_config = {}; int demod_addr; mutex_lock(&d->data_mutex); state->data[0] = 0xe; state->data[1] = 0x80; state->data[2] = 0x0; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); state->data[0] = 0xe; state->data[1] = 0x02; state->data[2] = 1; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(300); state->data[0] = 0xe; state->data[1] = 0x83; state->data[2] = 0; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); state->data[0] = 0xe; state->data[1] = 0x83; state->data[2] = 1; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) err("command 0x0e transfer failed."); state->data[0] = 0x51; if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) err("command 0x51 transfer failed."); /* probe for demodulator i2c address */ demod_addr = -1; if (tt_s2_4600_frontend_attach_probe_demod(d, 0x68)) demod_addr = 0x68; else if (tt_s2_4600_frontend_attach_probe_demod(d, 0x69)) demod_addr = 0x69; else if (tt_s2_4600_frontend_attach_probe_demod(d, 0x6a)) demod_addr = 0x6a; mutex_unlock(&d->data_mutex); if (demod_addr < 0) { err("probing for demodulator failed. 
Is the external power switched on?"); return -ENODEV; } /* attach demod */ m88ds3103_pdata.clk = 27000000; m88ds3103_pdata.i2c_wr_max = 33; m88ds3103_pdata.ts_mode = M88DS3103_TS_CI; m88ds3103_pdata.ts_clk = 16000; m88ds3103_pdata.ts_clk_pol = 0; m88ds3103_pdata.spec_inv = 0; m88ds3103_pdata.agc = 0x99; m88ds3103_pdata.agc_inv = 0; m88ds3103_pdata.clk_out = M88DS3103_CLOCK_OUT_ENABLED; m88ds3103_pdata.envelope_mode = 0; m88ds3103_pdata.lnb_hv_pol = 1; m88ds3103_pdata.lnb_en_pol = 0; memset(&board_info, 0, sizeof(board_info)); if (demod_addr == 0x6a) strscpy(board_info.type, "m88ds3103b", I2C_NAME_SIZE); else strscpy(board_info.type, "m88ds3103", I2C_NAME_SIZE); board_info.addr = demod_addr; board_info.platform_data = &m88ds3103_pdata; request_module("m88ds3103"); client = i2c_new_client_device(&d->i2c_adap, &board_info); if (!i2c_client_has_driver(client)) return -ENODEV; if (!try_module_get(client->dev.driver->owner)) { i2c_unregister_device(client); return -ENODEV; } adap->fe_adap[0].fe = m88ds3103_pdata.get_dvb_frontend(client); i2c_adapter = m88ds3103_pdata.get_i2c_adapter(client); state->i2c_client_demod = client; /* attach tuner */ ts2020_config.fe = adap->fe_adap[0].fe; memset(&board_info, 0, sizeof(board_info)); strscpy(board_info.type, "ts2022", I2C_NAME_SIZE); board_info.addr = 0x60; board_info.platform_data = &ts2020_config; request_module("ts2020"); client = i2c_new_client_device(i2c_adapter, &board_info); if (!i2c_client_has_driver(client)) { dvb_frontend_detach(adap->fe_adap[0].fe); return -ENODEV; } if (!try_module_get(client->dev.driver->owner)) { i2c_unregister_device(client); dvb_frontend_detach(adap->fe_adap[0].fe); return -ENODEV; } /* delegate signal strength measurement to tuner */ adap->fe_adap[0].fe->ops.read_signal_strength = adap->fe_adap[0].fe->ops.tuner_ops.get_rf_strength; state->i2c_client_tuner = client; /* hook fe: need to resync the slave fifo when signal locks */ state->fe_read_status = adap->fe_adap[0].fe->ops.read_status; adap->fe_adap[0].fe->ops.read_status = tt_s2_4600_read_status; state->last_lock = 0; return 0; } static int dw2102_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_OPERA1); return 0; } static int dw3101_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_TUA6034); return 0; } static int dw2102_rc_query(struct dvb_usb_device *d) { u8 key[2]; struct i2c_msg msg = { .addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, .len = 2 }; if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { if (msg.buf[0] != 0xff) { deb_rc("%s: rc code: %x, %x\n", __func__, key[0], key[1]); rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, key[0], 0); } } return 0; } static int prof_rc_query(struct dvb_usb_device *d) { u8 key[2]; struct i2c_msg msg = { .addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, .len = 2 }; if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { if (msg.buf[0] != 0xff) { deb_rc("%s: rc code: %x, %x\n", __func__, key[0], key[1]); rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, key[0] ^ 0xff, 0); } } return 0; } static int su3000_rc_query(struct dvb_usb_device *d) { u8 key[2]; struct i2c_msg msg = { .addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, .len = 2 }; if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { if (msg.buf[0] != 0xff) { deb_rc("%s: rc code: %x, %x\n", __func__, key[0], key[1]); rc_keydown(d->rc_dev, RC_PROTO_RC5, RC_SCANCODE_RC5(key[1], 
key[0]), 0); } } return 0; } enum dw2102_table_entry { CYPRESS_DW2102, CYPRESS_DW2101, CYPRESS_DW2104, TEVII_S650, TERRATEC_CINERGY_S, CYPRESS_DW3101, TEVII_S630, PROF_1100, TEVII_S660, PROF_7500, GENIATECH_SU3000, HAUPPAUGE_MAX_S2, TERRATEC_CINERGY_S2_R1, TEVII_S480_1, TEVII_S480_2, GENIATECH_X3M_SPC1400HD, TEVII_S421, TEVII_S632, TERRATEC_CINERGY_S2_R2, TERRATEC_CINERGY_S2_R3, TERRATEC_CINERGY_S2_R4, TERRATEC_CINERGY_S2_1, TERRATEC_CINERGY_S2_2, GOTVIEW_SAT_HD, GENIATECH_T220, TECHNOTREND_CONNECT_S2_4600, TEVII_S482_1, TEVII_S482_2, TEVII_S662 }; static const struct usb_device_id dw2102_table[] = { DVB_USB_DEV(CYPRESS, CYPRESS_DW2102), DVB_USB_DEV(CYPRESS, CYPRESS_DW2101), DVB_USB_DEV(CYPRESS, CYPRESS_DW2104), DVB_USB_DEV(TEVII, TEVII_S650), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S), DVB_USB_DEV(CYPRESS, CYPRESS_DW3101), DVB_USB_DEV(TEVII, TEVII_S630), DVB_USB_DEV(PROF_1, PROF_1100), DVB_USB_DEV(TEVII, TEVII_S660), DVB_USB_DEV(PROF_2, PROF_7500), DVB_USB_DEV(GTEK, GENIATECH_SU3000), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_MAX_S2), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S2_R1), DVB_USB_DEV(TEVII, TEVII_S480_1), DVB_USB_DEV(TEVII, TEVII_S480_2), DVB_USB_DEV(GTEK, GENIATECH_X3M_SPC1400HD), DVB_USB_DEV(TEVII, TEVII_S421), DVB_USB_DEV(TEVII, TEVII_S632), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S2_R2), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S2_R3), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S2_R4), DVB_USB_DEV(TERRATEC_2, TERRATEC_CINERGY_S2_1), DVB_USB_DEV(TERRATEC_2, TERRATEC_CINERGY_S2_2), DVB_USB_DEV(GOTVIEW, GOTVIEW_SAT_HD), DVB_USB_DEV(GTEK, GENIATECH_T220), DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2_4600), DVB_USB_DEV(TEVII, TEVII_S482_1), DVB_USB_DEV(TEVII, TEVII_S482_2), DVB_USB_DEV(TEVII, TEVII_S662), { } }; MODULE_DEVICE_TABLE(usb, dw2102_table); static int dw2102_load_firmware(struct usb_device *dev, const struct firmware *frmwr) { u8 *b, *p; int ret = 0, i; u8 reset; u8 reset16[] = {0, 0, 0, 0, 0, 0, 0}; const struct firmware *fw; switch (le16_to_cpu(dev->descriptor.idProduct)) { case 0x2101: ret = request_firmware(&fw, DW2101_FIRMWARE, &dev->dev); if (ret != 0) { err(err_str, DW2101_FIRMWARE); return ret; } break; default: fw = frmwr; break; } info("start downloading DW210X firmware"); p = kmalloc(fw->size, GFP_KERNEL); reset = 1; /*stop the CPU*/ dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG); if (p) { memcpy(p, fw->data, fw->size); for (i = 0; i < fw->size; i += 0x40) { b = (u8 *)p + i; if (dw210x_op_rw(dev, 0xa0, i, 0, b, 0x40, DW210X_WRITE_MSG) != 0x40) { err("error while transferring firmware"); ret = -EINVAL; break; } } /* restart the CPU */ reset = 0; if (ret || dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } if (ret || dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } /* init registers */ switch (le16_to_cpu(dev->descriptor.idProduct)) { case USB_PID_TEVII_S650: dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC; fallthrough; case USB_PID_CYPRESS_DW2104: reset = 1; dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1, DW210X_WRITE_MSG); fallthrough; case USB_PID_CYPRESS_DW3101: reset = 0; dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, DW210X_WRITE_MSG); break; case USB_PID_TERRATEC_CINERGY_S: case USB_PID_CYPRESS_DW2102: dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, 
&reset16[0], 2, DW210X_READ_MSG); /* check STV0299 frontend */ dw210x_op_rw(dev, 0xb5, 0, 0, &reset16[0], 2, DW210X_READ_MSG); if ((reset16[0] == 0xa1) || (reset16[0] == 0x80)) { dw2102_properties.i2c_algo = &dw2102_i2c_algo; dw2102_properties.adapter->fe[0].tuner_attach = &dw2102_tuner_attach; break; } /* check STV0288 frontend */ reset16[0] = 0xd0; reset16[1] = 1; reset16[2] = 0; dw210x_op_rw(dev, 0xc2, 0, 0, &reset16[0], 3, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xc3, 0xd1, 0, &reset16[0], 3, DW210X_READ_MSG); if (reset16[2] == 0x11) { dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo; break; } fallthrough; case 0x2101: dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2, DW210X_READ_MSG); break; } msleep(100); kfree(p); } if (le16_to_cpu(dev->descriptor.idProduct) == 0x2101) release_firmware(fw); return ret; } static struct dvb_usb_device_properties dw2102_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = DW2102_FIRMWARE, .no_reconnect = 1, .i2c_algo = &dw2102_serit_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_DM1105_NEC, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = dw2102_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 3, .devices = { {"DVBWorld DVB-S 2102 USB2.0", {&dw2102_table[CYPRESS_DW2102], NULL}, {NULL}, }, {"DVBWorld DVB-S 2101 USB2.0", {&dw2102_table[CYPRESS_DW2101], NULL}, {NULL}, }, {"TerraTec Cinergy S USB", {&dw2102_table[TERRATEC_CINERGY_S], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw2104_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = DW2104_FIRMWARE, .no_reconnect = 1, .i2c_algo = &dw2104_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_DM1105_NEC, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = dw2104_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 2, .devices = { { "DVBWorld DW2104 USB2.0", {&dw2102_table[CYPRESS_DW2104], NULL}, {NULL}, }, { "TeVii S650 USB2.0", {&dw2102_table[TEVII_S650], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw3101_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = DW3101_FIRMWARE, .no_reconnect = 1, .i2c_algo = &dw3101_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_DM1105_NEC, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, 
.adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = dw3101_frontend_attach, .tuner_attach = dw3101_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 1, .devices = { { "DVBWorld DVB-C 3101 USB2.0", {&dw2102_table[CYPRESS_DW3101], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties s6x0_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .firmware = S630_FIRMWARE, .no_reconnect = 1, .i2c_algo = &s6x0_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_TEVII_NEC, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = s6x0_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = zl100313_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 1, .devices = { {"TeVii S630 USB", {&dw2102_table[TEVII_S630], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties p1100_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .firmware = P1100_FIRMWARE, .no_reconnect = 1, .i2c_algo = &s6x0_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_TBS_NEC, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = prof_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = s6x0_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = stv0288_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, } }, .num_device_descs = 1, .devices = { {"Prof 1100 USB ", {&dw2102_table[PROF_1100], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties s660_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .firmware = S660_FIRMWARE, .no_reconnect = 1, .i2c_algo = &s6x0_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_TEVII_NEC, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = s6x0_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = ds3000_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, } }, .num_device_descs = 3, .devices = { {"TeVii S660 USB", {&dw2102_table[TEVII_S660], NULL}, {NULL}, }, {"TeVii S480.1 USB", {&dw2102_table[TEVII_S480_1], NULL}, {NULL}, }, {"TeVii S480.2 USB", {&dw2102_table[TEVII_S480_2], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties p7500_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .firmware = P7500_FIRMWARE, .no_reconnect = 1, .i2c_algo = &s6x0_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_TBS_NEC, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_NEC, .rc_query = prof_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = 
s6x0_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = prof_7500_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, } }, .num_device_descs = 1, .devices = { {"Prof 7500 USB DVB-S2", {&dw2102_table[PROF_7500], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties su3000_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .power_ctrl = su3000_power_ctrl, .num_adapters = 1, .identify_state = su3000_identify_state, .i2c_algo = &su3000_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_SU3000, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_RC5, .rc_query = su3000_rc_query, }, .read_mac_address = su3000_read_mac_address, .generic_bulk_ctrl_endpoint = 0x01, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = su3000_streaming_ctrl, .frontend_attach = su3000_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } } }}, } }, .num_device_descs = 9, .devices = { { "SU3000HD DVB-S USB2.0", { &dw2102_table[GENIATECH_SU3000], NULL }, { NULL }, }, { "Hauppauge MAX S2 or WinTV NOVA HD USB2.0", { &dw2102_table[HAUPPAUGE_MAX_S2], NULL }, { NULL }, }, { "Terratec Cinergy S2 USB HD", { &dw2102_table[TERRATEC_CINERGY_S2_R1], NULL }, { NULL }, }, { "X3M TV SPC1400HD PCI", { &dw2102_table[GENIATECH_X3M_SPC1400HD], NULL }, { NULL }, }, { "Terratec Cinergy S2 USB HD Rev.2", { &dw2102_table[TERRATEC_CINERGY_S2_R2], NULL }, { NULL }, }, { "Terratec Cinergy S2 USB HD Rev.3", { &dw2102_table[TERRATEC_CINERGY_S2_R3], NULL }, { NULL }, }, { "Terratec Cinergy S2 PCIe Dual Port 1", { &dw2102_table[TERRATEC_CINERGY_S2_1], NULL }, { NULL }, }, { "Terratec Cinergy S2 PCIe Dual Port 2", { &dw2102_table[TERRATEC_CINERGY_S2_2], NULL }, { NULL }, }, { "GOTVIEW Satellite HD", { &dw2102_table[GOTVIEW_SAT_HD], NULL }, { NULL }, }, } }; static struct dvb_usb_device_properties s421_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .power_ctrl = su3000_power_ctrl, .num_adapters = 1, .identify_state = su3000_identify_state, .i2c_algo = &su3000_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_SU3000, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_RC5, .rc_query = su3000_rc_query, }, .read_mac_address = su3000_read_mac_address, .generic_bulk_ctrl_endpoint = 0x01, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = su3000_streaming_ctrl, .frontend_attach = m88rs2000_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } } } }, } }, .num_device_descs = 2, .devices = { { "TeVii S421 PCI", { &dw2102_table[TEVII_S421], NULL }, { NULL }, }, { "TeVii S632 USB", { &dw2102_table[TEVII_S632], NULL }, { NULL }, }, } }; static struct dvb_usb_device_properties t220_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .power_ctrl = su3000_power_ctrl, .num_adapters = 1, .identify_state = su3000_identify_state, .i2c_algo = &su3000_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_SU3000, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_RC5, .rc_query = su3000_rc_query, }, .read_mac_address = su3000_read_mac_address, .generic_bulk_ctrl_endpoint = 0x01, .adapter = { { .num_frontends = 1, .fe = { { .streaming_ctrl = 
su3000_streaming_ctrl, .frontend_attach = t220_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } } } }, } }, .num_device_descs = 1, .devices = { { "Geniatech T220 DVB-T/T2 USB2.0", { &dw2102_table[GENIATECH_T220], NULL }, { NULL }, }, } }; static struct dvb_usb_device_properties tt_s2_4600_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .power_ctrl = su3000_power_ctrl, .num_adapters = 1, .identify_state = su3000_identify_state, .i2c_algo = &su3000_i2c_algo, .rc.core = { .rc_interval = 250, .rc_codes = RC_MAP_TT_1500, .module_name = "dw2102", .allowed_protos = RC_PROTO_BIT_RC5, .rc_query = su3000_rc_query, }, .read_mac_address = su3000_read_mac_address, .generic_bulk_ctrl_endpoint = 0x01, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = su3000_streaming_ctrl, .frontend_attach = tt_s2_4600_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } } } }, } }, .num_device_descs = 5, .devices = { { "TechnoTrend TT-connect S2-4600", { &dw2102_table[TECHNOTREND_CONNECT_S2_4600], NULL }, { NULL }, }, { "TeVii S482 (tuner 1)", { &dw2102_table[TEVII_S482_1], NULL }, { NULL }, }, { "TeVii S482 (tuner 2)", { &dw2102_table[TEVII_S482_2], NULL }, { NULL }, }, { "Terratec Cinergy S2 USB BOX", { &dw2102_table[TERRATEC_CINERGY_S2_R4], NULL }, { NULL }, }, { "TeVii S662", { &dw2102_table[TEVII_S662], NULL }, { NULL }, }, } }; static int dw2102_probe(struct usb_interface *intf, const struct usb_device_id *id) { if (!(dvb_usb_device_init(intf, &dw2102_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &dw2104_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &dw3101_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &s6x0_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &p1100_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &s660_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &p7500_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &s421_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &su3000_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &t220_properties, THIS_MODULE, NULL, adapter_nr) && dvb_usb_device_init(intf, &tt_s2_4600_properties, THIS_MODULE, NULL, adapter_nr))) { return 0; } return -ENODEV; } static void dw2102_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); struct dw2102_state *st = d->priv; struct i2c_client *client; /* remove I2C client for tuner */ client = st->i2c_client_tuner; if (client) { module_put(client->dev.driver->owner); i2c_unregister_device(client); } /* remove I2C client for demodulator */ client = st->i2c_client_demod; if (client) { module_put(client->dev.driver->owner); i2c_unregister_device(client); } dvb_usb_device_exit(intf); } static struct usb_driver dw2102_driver = { .name = "dw2102", .probe = dw2102_probe, .disconnect = dw2102_disconnect, .id_table = dw2102_table, }; module_usb_driver(dw2102_driver); MODULE_AUTHOR("Igor M. 
Liplianin (c) liplianin@me.by"); MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101 USB2.0, TeVii S421, S480, S482, S600, S630, S632, S650, TeVii S660, S662, Prof 1100, 7500 USB2.0, Geniatech SU3000, T220, TechnoTrend S2-4600, Terratec Cinergy S2 devices"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(DW2101_FIRMWARE); MODULE_FIRMWARE(DW2102_FIRMWARE); MODULE_FIRMWARE(DW2104_FIRMWARE); MODULE_FIRMWARE(DW3101_FIRMWARE); MODULE_FIRMWARE(S630_FIRMWARE); MODULE_FIRMWARE(S660_FIRMWARE); MODULE_FIRMWARE(P1100_FIRMWARE); MODULE_FIRMWARE(P7500_FIRMWARE);
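/*
 * The chained '&&' in dw2102_probe() above relies on dvb_usb_device_init()
 * returning 0 on success, so evaluation stops at the first properties table
 * that accepts the interface. A minimal, hypothetical table-driven sketch of
 * the same logic (dw2102_props_list and dw2102_probe_alt are illustrative
 * names, not part of this driver):
 *
 *	static struct dvb_usb_device_properties *const dw2102_props_list[] = {
 *		&dw2102_properties, &dw2104_properties, &dw3101_properties,
 *		&s6x0_properties, &p1100_properties, &s660_properties,
 *		&p7500_properties, &s421_properties, &su3000_properties,
 *		&t220_properties, &tt_s2_4600_properties,
 *	};
 *
 *	static int dw2102_probe_alt(struct usb_interface *intf,
 *				    const struct usb_device_id *id)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(dw2102_props_list); i++)
 *			if (!dvb_usb_device_init(intf, dw2102_props_list[i],
 *						 THIS_MODULE, NULL, adapter_nr))
 *				return 0;
 *		return -ENODEV;
 *	}
 */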
/* SPDX-License-Identifier: GPL-2.0 */ /* * Device core Trace Support * Copyright (C) 2021, Intel Corporation * * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> */ #undef TRACE_SYSTEM #define TRACE_SYSTEM dev #if !defined(__DEV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) #define __DEV_TRACE_H #include <linux/device.h> #include <linux/tracepoint.h> #include <linux/types.h> DECLARE_EVENT_CLASS(devres, TP_PROTO(struct device *dev, const char *op, void *node, const char *name, size_t size), TP_ARGS(dev, op, node, name, size), TP_STRUCT__entry( __string(devname, dev_name(dev)) __field(struct device *, dev) __field(const char *, op) __field(void *, node) __string(name, name) __field(size_t, size) ), TP_fast_assign( __assign_str(devname); __entry->op = op; __entry->node = node; __assign_str(name); __entry->size = size; ), TP_printk("%s %3s %p %s (%zu bytes)", __get_str(devname), __entry->op, __entry->node, __get_str(name), __entry->size) ); DEFINE_EVENT(devres, devres_log, TP_PROTO(struct device *dev, const char *op, void *node, const char *name, size_t size), TP_ARGS(dev, op, node, name, size) ); #endif /* __DEV_TRACE_H */ /* this part has to be here */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace #include <trace/define_trace.h>
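/*
 * DEFINE_EVENT(devres, devres_log, ...) above generates a trace_devres_log()
 * call site whose signature matches the TP_PROTO of the devres event class.
 * A hedged, illustrative caller (the helper name and the "ADD" op string are
 * assumptions for the example, not defined in this header):
 *
 *	static void example_devres_trace(struct device *dev, void *node,
 *					 const char *name, size_t size)
 *	{
 *		trace_devres_log(dev, "ADD", node, name, size);
 *	}
 *
 * With the TP_printk() format "%s %3s %p %s (%zu bytes)", such an event is
 * rendered in the trace buffer as "<devname> ADD <node pointer> <name>
 * (<size> bytes)".
 */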
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2011 Patrick McHardy <kaber@trash.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/skbuff.h> #include <linux/gfp.h> #include <net/xfrm.h> #include <linux/siphash.h> #include <linux/rtnetlink.h> #include <net/netfilter/nf_conntrack_bpf.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_seqadj.h> #include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #include <uapi/linux/netfilter/nf_nat.h> #include "nf_internals.h" #define NF_NAT_MAX_ATTEMPTS 128 #define NF_NAT_HARDER_THRESH (NF_NAT_MAX_ATTEMPTS / 4) static spinlock_t nf_nat_locks[CONNTRACK_LOCKS]; static DEFINE_MUTEX(nf_nat_proto_mutex);
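/*
 * Worked example (not part of the original source): with the values above,
 * NF_NAT_HARDER_THRESH = NF_NAT_MAX_ATTEMPTS / 4 = 128 / 4 = 32. In
 * nf_nat_l4proto_unique_tuple() below, one search round makes at most
 * min(range_size, NF_NAT_MAX_ATTEMPTS) attempts, and
 * nf_nat_used_tuple_harder() only considers evicting a colliding entry
 * (TCP only, while the new conntrack is still in SYN_SENT) once
 * attempts_left <= NF_NAT_HARDER_THRESH, i.e. during the last 32 attempts
 * of a full 128-attempt round.
 */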
static unsigned int nat_net_id __read_mostly; static struct hlist_head *nf_nat_bysource __read_mostly; static unsigned int nf_nat_htable_size __read_mostly; static siphash_aligned_key_t nf_nat_hash_rnd; struct nf_nat_lookup_hook_priv { struct nf_hook_entries __rcu *entries; struct rcu_head rcu_head; }; struct nf_nat_hooks_net { struct nf_hook_ops *nat_hook_ops; unsigned int users; }; struct nat_net { struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO]; }; #ifdef CONFIG_XFRM static void nf_nat_ipv4_decode_session(struct sk_buff *skb, const struct nf_conn *ct, enum ip_conntrack_dir dir, unsigned long statusbit, struct flowi *fl) { const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple; struct flowi4 *fl4 = &fl->u.ip4; if (ct->status & statusbit) { fl4->daddr = t->dst.u3.ip; if (t->dst.protonum == IPPROTO_TCP || t->dst.protonum == IPPROTO_UDP || t->dst.protonum == IPPROTO_UDPLITE || t->dst.protonum == IPPROTO_DCCP || t->dst.protonum == IPPROTO_SCTP) fl4->fl4_dport = t->dst.u.all; } statusbit ^= IPS_NAT_MASK; if (ct->status & statusbit) { fl4->saddr = t->src.u3.ip; if (t->dst.protonum == IPPROTO_TCP || t->dst.protonum == IPPROTO_UDP || t->dst.protonum == IPPROTO_UDPLITE || t->dst.protonum == IPPROTO_DCCP || t->dst.protonum == IPPROTO_SCTP) fl4->fl4_sport = t->src.u.all; } } static void nf_nat_ipv6_decode_session(struct sk_buff *skb, const struct nf_conn *ct, enum ip_conntrack_dir dir, unsigned long statusbit, struct flowi *fl) { #if IS_ENABLED(CONFIG_IPV6) const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple; struct flowi6 *fl6 = &fl->u.ip6; if (ct->status & statusbit) { fl6->daddr = t->dst.u3.in6; if (t->dst.protonum == IPPROTO_TCP || t->dst.protonum == IPPROTO_UDP || t->dst.protonum == IPPROTO_UDPLITE || t->dst.protonum == IPPROTO_DCCP || t->dst.protonum == IPPROTO_SCTP) fl6->fl6_dport = t->dst.u.all; } statusbit ^= IPS_NAT_MASK; if (ct->status & statusbit) { fl6->saddr = t->src.u3.in6; if (t->dst.protonum == IPPROTO_TCP || t->dst.protonum == IPPROTO_UDP || t->dst.protonum == IPPROTO_UDPLITE || t->dst.protonum == IPPROTO_DCCP || t->dst.protonum == IPPROTO_SCTP) fl6->fl6_sport = t->src.u.all; } #endif } static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl) { const struct nf_conn *ct; enum ip_conntrack_info ctinfo; enum ip_conntrack_dir dir; unsigned long statusbit; u8 family; ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) return; family = nf_ct_l3num(ct); dir = CTINFO2DIR(ctinfo); if (dir == IP_CT_DIR_ORIGINAL) statusbit = IPS_DST_NAT; else statusbit = IPS_SRC_NAT; switch (family) { case NFPROTO_IPV4: nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl); return; case NFPROTO_IPV6: nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl); return; } } #endif /* CONFIG_XFRM */ /* We keep an extra hash for each conntrack, for fast searching. */ static unsigned int hash_by_src(const struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple) { unsigned int hash; struct { struct nf_conntrack_man src; u32 net_mix; u32 protonum; u32 zone; } __aligned(SIPHASH_ALIGNMENT) combined; get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd)); memset(&combined, 0, sizeof(combined)); /* Original src, to ensure we map it consistently if poss. 
*/ combined.src = tuple->src; combined.net_mix = net_hash_mix(net); combined.protonum = tuple->dst.protonum; /* Zone ID can be used provided it's valid for both directions */ if (zone->dir == NF_CT_DEFAULT_ZONE_DIR) combined.zone = zone->id; hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd); return reciprocal_scale(hash, nf_nat_htable_size); } /** * nf_nat_used_tuple - check if proposed nat tuple clashes with existing entry * @tuple: proposed NAT binding * @ignored_conntrack: our (unconfirmed) conntrack entry * * A conntrack entry can be inserted into the connection tracking table * if there is no existing entry with an identical tuple in either direction. * * Example: * INITIATOR -> NAT/PAT -> RESPONDER * * INITIATOR passes through NAT/PAT ("us") and SNAT is done (saddr rewrite). * Then, later, NAT/PAT itself also connects to RESPONDER. * * This will not work if the SNAT done earlier has same IP:PORT source pair. * * Conntrack table has: * ORIGINAL: $IP_INITIATOR:$SPORT -> $IP_RESPONDER:$DPORT * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT * * and new locally originating connection wants: * ORIGINAL: $IP_NAT:$SPORT -> $IP_RESPONDER:$DPORT * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT * * ... which would mean incoming packets cannot be distinguished between * the existing and the newly added entry (identical IP_CT_DIR_REPLY tuple). * * @return: true if the proposed NAT mapping collides with an existing entry. */ static int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack) { /* Conntrack tracking doesn't keep track of outgoing tuples; only * incoming ones. NAT means they don't have a fixed mapping, * so we invert the tuple and look for the incoming reply. * * We could keep a separate hash if this proves too slow. */ struct nf_conntrack_tuple reply; nf_ct_invert_tuple(&reply, tuple); return nf_conntrack_tuple_taken(&reply, ignored_conntrack); } static bool nf_nat_allow_clash(const struct nf_conn *ct) { return nf_ct_l4proto_find(nf_ct_protonum(ct))->allow_clash; } /** * nf_nat_used_tuple_new - check if to-be-inserted conntrack collides with existing entry * @tuple: proposed NAT binding * @ignored_ct: our (unconfirmed) conntrack entry * * Same as nf_nat_used_tuple, but also check for rare clash in reverse * direction. Should be called only when @tuple has not been altered, i.e. * @ignored_ct will not be subject to NAT. * * @return: true if the proposed NAT mapping collides with existing entry. */ static noinline bool nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_ct) { static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT; const struct nf_conntrack_tuple_hash *thash; const struct nf_conntrack_zone *zone; struct nf_conn *ct; bool taken = true; struct net *net; if (!nf_nat_used_tuple(tuple, ignored_ct)) return false; if (!nf_nat_allow_clash(ignored_ct)) return true; /* Initial choice clashes with existing conntrack. * Check for (rare) reverse collision. * * This can happen when new packets are received in both directions * at the exact same time on different CPUs. * * Without SMP, first packet creates new conntrack entry and second * packet is resolved as established reply packet. * * With parallel processing, both packets could be picked up as * new and both get their own ct entry allocated. * * If ignored_conntrack and colliding ct are not subject to NAT then * pretend the tuple is available and let later clash resolution * handle this at insertion time.
* * Without it, the 'reply' packet has its source port rewritten * by nat engine. */ if (READ_ONCE(ignored_ct->status) & uses_nat) return true; net = nf_ct_net(ignored_ct); zone = nf_ct_zone(ignored_ct); thash = nf_conntrack_find_get(net, zone, tuple); if (unlikely(!thash)) /* clashing entry went away */ return false; ct = nf_ct_tuplehash_to_ctrack(thash); /* NB: IP_CT_DIR_ORIGINAL should be impossible because * nf_nat_used_tuple() handles origin collisions. * * Handle remote chance other CPU confirmed its ct right after. */ if (thash->tuple.dst.dir != IP_CT_DIR_REPLY) goto out; /* clashing connection subject to NAT? Retry with new tuple. */ if (READ_ONCE(ct->status) & uses_nat) goto out; if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple) && nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, &ignored_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) { taken = false; goto out; } out: nf_ct_put(ct); return taken; } static bool nf_nat_may_kill(struct nf_conn *ct, unsigned long flags) { static const unsigned long flags_refuse = IPS_FIXED_TIMEOUT | IPS_DYING; static const unsigned long flags_needed = IPS_SRC_NAT; enum tcp_conntrack old_state; old_state = READ_ONCE(ct->proto.tcp.state); if (old_state < TCP_CONNTRACK_TIME_WAIT) return false; if (flags & flags_refuse) return false; return (flags & flags_needed) == flags_needed; } /* reverse direction will send packets to new source, so * make sure such packets are invalid. */ static bool nf_seq_has_advanced(const struct nf_conn *old, const struct nf_conn *new) { return (__s32)(new->proto.tcp.seen[0].td_end - old->proto.tcp.seen[0].td_end) > 0; } static int nf_nat_used_tuple_harder(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack, unsigned int attempts_left) { static const unsigned long flags_offload = IPS_OFFLOAD | IPS_HW_OFFLOAD; struct nf_conntrack_tuple_hash *thash; const struct nf_conntrack_zone *zone; struct nf_conntrack_tuple reply; unsigned long flags; struct nf_conn *ct; bool taken = true; struct net *net; nf_ct_invert_tuple(&reply, tuple); if (attempts_left > NF_NAT_HARDER_THRESH || tuple->dst.protonum != IPPROTO_TCP || ignored_conntrack->proto.tcp.state != TCP_CONNTRACK_SYN_SENT) return nf_conntrack_tuple_taken(&reply, ignored_conntrack); /* Last few attempts to find a free tcp port. Destructive * action: evict the colliding entry if it's in timewait state and the * tcp sequence number has advanced past the one used by the * old entry. */ net = nf_ct_net(ignored_conntrack); zone = nf_ct_zone(ignored_conntrack); thash = nf_conntrack_find_get(net, zone, &reply); if (!thash) return false; ct = nf_ct_tuplehash_to_ctrack(thash); if (thash->tuple.dst.dir == IP_CT_DIR_ORIGINAL) goto out; if (WARN_ON_ONCE(ct == ignored_conntrack)) goto out; flags = READ_ONCE(ct->status); if (!nf_nat_may_kill(ct, flags)) goto out; if (!nf_seq_has_advanced(ct, ignored_conntrack)) goto out; /* Even if we can evict do not reuse if entry is offloaded. */ if (nf_ct_kill(ct)) taken = flags & flags_offload; out: nf_ct_put(ct); return taken; } static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t, const struct nf_nat_range2 *range) { if (t->src.l3num == NFPROTO_IPV4) return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) && ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip); return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 && ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0; } /* Is the manipulable part of the tuple between min and max incl?
*/ static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype, const union nf_conntrack_man_proto *min, const union nf_conntrack_man_proto *max) { __be16 port; switch (tuple->dst.protonum) { case IPPROTO_ICMP: case IPPROTO_ICMPV6: return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) && ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id); case IPPROTO_GRE: /* all fall through */ case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_UDPLITE: case IPPROTO_DCCP: case IPPROTO_SCTP: if (maniptype == NF_NAT_MANIP_SRC) port = tuple->src.u.all; else port = tuple->dst.u.all; return ntohs(port) >= ntohs(min->all) && ntohs(port) <= ntohs(max->all); default: return true; } } /* If we source map this tuple so reply looks like reply_tuple, will * that meet the constraints of range? */ static int nf_in_range(const struct nf_conntrack_tuple *tuple, const struct nf_nat_range2 *range) { /* If we are supposed to map IPs, then we must be in the * range specified, otherwise let this drag us onto a new src IP. */ if (range->flags & NF_NAT_RANGE_MAP_IPS && !nf_nat_inet_in_range(tuple, range)) return 0; if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) return 1; return l4proto_in_range(tuple, NF_NAT_MANIP_SRC, &range->min_proto, &range->max_proto); } static inline int same_src(const struct nf_conn *ct, const struct nf_conntrack_tuple *tuple) { const struct nf_conntrack_tuple *t; t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; return (t->dst.protonum == tuple->dst.protonum && nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) && t->src.u.all == tuple->src.u.all); } /* Only called for SRC manip */ static int find_appropriate_src(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_tuple *result, const struct nf_nat_range2 *range) { unsigned int h = hash_by_src(net, zone, tuple); const struct nf_conn *ct; hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) { if (same_src(ct, tuple) && net_eq(net, nf_ct_net(ct)) && nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) { /* Copy source part from reply tuple. */ nf_ct_invert_tuple(result, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); result->dst = tuple->dst; if (nf_in_range(result, range)) return 1; } } return 0; } /* For [FUTURE] fragmentation handling, we want the least-used * src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports * 1-65535, we don't do pro-rata allocation based on ports; we choose * the ip with the lowest src-ip/dst-ip/proto usage. */ static void find_best_ips_proto(const struct nf_conntrack_zone *zone, struct nf_conntrack_tuple *tuple, const struct nf_nat_range2 *range, const struct nf_conn *ct, enum nf_nat_manip_type maniptype) { union nf_inet_addr *var_ipp; unsigned int i, max; /* Host order */ u32 minip, maxip, j, dist; bool full_range; /* No IP mapping? Do nothing. */ if (!(range->flags & NF_NAT_RANGE_MAP_IPS)) return; if (maniptype == NF_NAT_MANIP_SRC) var_ipp = &tuple->src.u3; else var_ipp = &tuple->dst.u3; /* Fast path: only one choice. */ if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) { *var_ipp = range->min_addr; return; } if (nf_ct_l3num(ct) == NFPROTO_IPV4) max = sizeof(var_ipp->ip) / sizeof(u32) - 1; else max = sizeof(var_ipp->ip6) / sizeof(u32) - 1; /* Hashing source and destination IPs gives a fairly even * spread in practice (if there are a small number of IPs * involved, there usually aren't that many connections * anyway.
The consistency means that servers see the same * client coming from the same IP (some Internet Banking sites * like this), even across reboots. */ j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32), range->flags & NF_NAT_RANGE_PERSISTENT ? 0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id); full_range = false; for (i = 0; i <= max; i++) { /* If first bytes of the address are at the maximum, use the * distance. Otherwise use the full range. */ if (!full_range) { minip = ntohl((__force __be32)range->min_addr.all[i]); maxip = ntohl((__force __be32)range->max_addr.all[i]); dist = maxip - minip + 1; } else { minip = 0; dist = ~0; } var_ipp->all[i] = (__force __u32) htonl(minip + reciprocal_scale(j, dist)); if (var_ipp->all[i] != range->max_addr.all[i]) full_range = true; if (!(range->flags & NF_NAT_RANGE_PERSISTENT)) j ^= (__force u32)tuple->dst.u3.all[i]; } } /* Alter the per-proto part of the tuple (depending on maniptype), to * give a unique tuple in the given range if possible. * * Per-protocol part of tuple is initialized to the incoming packet. */ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range2 *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { unsigned int range_size, min, max, i, attempts; __be16 *keyptr; u16 off; switch (tuple->dst.protonum) { case IPPROTO_ICMP: case IPPROTO_ICMPV6: /* id is same for either direction... */ keyptr = &tuple->src.u.icmp.id; if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { min = 0; range_size = 65536; } else { min = ntohs(range->min_proto.icmp.id); range_size = ntohs(range->max_proto.icmp.id) - ntohs(range->min_proto.icmp.id) + 1; } goto find_free_id; #if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE) case IPPROTO_GRE: /* If there is no master conntrack we are not PPTP, do not change tuples */ if (!ct->master) return; if (maniptype == NF_NAT_MANIP_SRC) keyptr = &tuple->src.u.gre.key; else keyptr = &tuple->dst.u.gre.key; if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { min = 1; range_size = 65535; } else { min = ntohs(range->min_proto.gre.key); range_size = ntohs(range->max_proto.gre.key) - min + 1; } goto find_free_id; #endif case IPPROTO_UDP: case IPPROTO_UDPLITE: case IPPROTO_TCP: case IPPROTO_SCTP: case IPPROTO_DCCP: if (maniptype == NF_NAT_MANIP_SRC) keyptr = &tuple->src.u.all; else keyptr = &tuple->dst.u.all; break; default: return; } /* If no range specified... */ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { /* If it's dst rewrite, can't change port */ if (maniptype == NF_NAT_MANIP_DST) return; if (ntohs(*keyptr) < 1024) { /* Loose convention: >> 512 is credential passing */ if (ntohs(*keyptr) < 512) { min = 1; range_size = 511 - min + 1; } else { min = 600; range_size = 1023 - min + 1; } } else { min = 1024; range_size = 65535 - 1024 + 1; } } else { min = ntohs(range->min_proto.all); max = ntohs(range->max_proto.all); if (unlikely(max < min)) swap(max, min); range_size = max - min + 1; } find_free_id: if (range->flags & NF_NAT_RANGE_PROTO_OFFSET) off = (ntohs(*keyptr) - ntohs(range->base_proto.all)); else if ((range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL) || maniptype != NF_NAT_MANIP_DST) off = get_random_u16(); else off = 0; attempts = range_size; if (attempts > NF_NAT_MAX_ATTEMPTS) attempts = NF_NAT_MAX_ATTEMPTS; /* We are in softirq; doing a search of the entire range risks * soft lockup when all tuples are already used. * * If we can't find any free port from first offset, pick a new * one and try again, with ever smaller search window. 
*/ another_round: for (i = 0; i < attempts; i++, off++) { *keyptr = htons(min + off % range_size); if (!nf_nat_used_tuple_harder(tuple, ct, attempts - i)) return; } if (attempts >= range_size || attempts < 16) return; attempts /= 2; off = get_random_u16(); goto another_round; } /* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING, * we change the source to map into the range. For NF_INET_PRE_ROUTING * and NF_INET_LOCAL_OUT, we change the destination to map into the * range. It might not be possible to get a unique tuple, but we try. * At worst (or if we race), we will end up with a final duplicate in * __nf_conntrack_confirm and drop the packet. */ static void get_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig_tuple, const struct nf_nat_range2 *range, struct nf_conn *ct, enum nf_nat_manip_type maniptype) { const struct nf_conntrack_zone *zone; struct net *net = nf_ct_net(ct); zone = nf_ct_zone(ct); /* 1) If this srcip/proto/src-proto-part is currently mapped, * and that same mapping gives a unique tuple within the given * range, use that. * * This is only required for source (ie. NAT/masq) mappings. * So far, we don't do local source mappings, so multiple * manips not an issue. */ if (maniptype == NF_NAT_MANIP_SRC && !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) { /* try the original tuple first */ if (nf_in_range(orig_tuple, range)) { if (!nf_nat_used_tuple_new(orig_tuple, ct)) { *tuple = *orig_tuple; return; } } else if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) { pr_debug("get_unique_tuple: Found current src map\n"); if (!nf_nat_used_tuple(tuple, ct)) return; } } /* 2) Select the least-used IP/proto combination in the given range */ *tuple = *orig_tuple; find_best_ips_proto(zone, tuple, range, ct, maniptype); /* 3) The per-protocol part of the manip is made to map into * the range to make a unique tuple. */ /* Only bother mapping if it's not already in range and unique */ if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) { if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) { if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) && l4proto_in_range(tuple, maniptype, &range->min_proto, &range->max_proto) && (range->min_proto.all == range->max_proto.all || !nf_nat_used_tuple(tuple, ct))) return; } else if (!nf_nat_used_tuple(tuple, ct)) { return; } } /* Last chance: get protocol to try to obtain unique tuple. */ nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct); } struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct) { struct nf_conn_nat *nat = nfct_nat(ct); if (nat) return nat; if (!nf_ct_is_confirmed(ct)) nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC); return nat; } EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add); unsigned int nf_nat_setup_info(struct nf_conn *ct, const struct nf_nat_range2 *range, enum nf_nat_manip_type maniptype) { struct net *net = nf_ct_net(ct); struct nf_conntrack_tuple curr_tuple, new_tuple; /* Can't setup nat info for confirmed ct. */ if (nf_ct_is_confirmed(ct)) return NF_ACCEPT; WARN_ON(maniptype != NF_NAT_MANIP_SRC && maniptype != NF_NAT_MANIP_DST); if (WARN_ON(nf_nat_initialized(ct, maniptype))) return NF_DROP; /* What we've got will look like inverse of reply. 
Normally * this is what is in the conntrack, except for prior * manipulations (future optimization: if num_manips == 0, * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */ nf_ct_invert_tuple(&curr_tuple, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype); if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) { struct nf_conntrack_tuple reply; /* Alter conntrack table so it will recognize replies. */ nf_ct_invert_tuple(&reply, &new_tuple); nf_conntrack_alter_reply(ct, &reply); /* Non-atomic: we own this at the moment. */ if (maniptype == NF_NAT_MANIP_SRC) ct->status |= IPS_SRC_NAT; else ct->status |= IPS_DST_NAT; if (nfct_help(ct) && !nfct_seqadj(ct)) if (!nfct_seqadj_ext_add(ct)) return NF_DROP; } if (maniptype == NF_NAT_MANIP_SRC) { unsigned int srchash; spinlock_t *lock; srchash = hash_by_src(net, nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS]; spin_lock_bh(lock); hlist_add_head_rcu(&ct->nat_bysource, &nf_nat_bysource[srchash]); spin_unlock_bh(lock); } /* It's done. */ if (maniptype == NF_NAT_MANIP_DST) ct->status |= IPS_DST_NAT_DONE; else ct->status |= IPS_SRC_NAT_DONE; return NF_ACCEPT; } EXPORT_SYMBOL(nf_nat_setup_info); static unsigned int __nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip) { /* Force range to this IP; let proto decide mapping for * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). * Use reply in case it's already been mangled (eg local packet). */ union nf_inet_addr ip = (manip == NF_NAT_MANIP_SRC ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3); struct nf_nat_range2 range = { .flags = NF_NAT_RANGE_MAP_IPS, .min_addr = ip, .max_addr = ip, }; return nf_nat_setup_info(ct, &range, manip); } unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) { return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum)); } EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding); /* Do packet manipulations according to nf_nat_setup_info. */ unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum, struct sk_buff *skb) { enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); unsigned int verdict = NF_ACCEPT; unsigned long statusbit; if (mtype == NF_NAT_MANIP_SRC) statusbit = IPS_SRC_NAT; else statusbit = IPS_DST_NAT; /* Invert if this is reply dir. */ if (dir == IP_CT_DIR_REPLY) statusbit ^= IPS_NAT_MASK; /* Non-atomic: these bits don't change. */ if (ct->status & statusbit) verdict = nf_nat_manip_pkt(skb, ct, mtype, dir); return verdict; } EXPORT_SYMBOL_GPL(nf_nat_packet); static bool in_vrf_postrouting(const struct nf_hook_state *state) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) if (state->hook == NF_INET_POST_ROUTING && netif_is_l3_master(state->out)) return true; #endif return false; } unsigned int nf_nat_inet_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; struct nf_conn_nat *nat; /* maniptype == SRC for postrouting. */ enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook); ct = nf_ct_get(skb, &ctinfo); /* Can't track? It's not due to stress, or conntrack would * have dropped it. Hence it's the user's responsibility to * packet filter it out, or implement conntrack/NAT for that * protocol.
8) --RR */ if (!ct || in_vrf_postrouting(state)) return NF_ACCEPT; nat = nfct_nat(ct); switch (ctinfo) { case IP_CT_RELATED: case IP_CT_RELATED_REPLY: /* Only ICMPs can be IP_CT_IS_REPLY. Fallthrough */ case IP_CT_NEW: /* Seen it before? This can happen for loopback, retrans, * or local packets. */ if (!nf_nat_initialized(ct, maniptype)) { struct nf_nat_lookup_hook_priv *lpriv = priv; struct nf_hook_entries *e = rcu_dereference(lpriv->entries); unsigned int ret; int i; if (!e) goto null_bind; for (i = 0; i < e->num_hook_entries; i++) { ret = e->hooks[i].hook(e->hooks[i].priv, skb, state); if (ret != NF_ACCEPT) return ret; if (nf_nat_initialized(ct, maniptype)) goto do_nat; } null_bind: ret = nf_nat_alloc_null_binding(ct, state->hook); if (ret != NF_ACCEPT) return ret; } else { pr_debug("Already setup manip %s for ct %p (status bits 0x%lx)\n", maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", ct, ct->status); if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out)) goto oif_changed; } break; default: /* ESTABLISHED */ WARN_ON(ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY); if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out)) goto oif_changed; } do_nat: return nf_nat_packet(ct, ctinfo, state->hook, skb); oif_changed: nf_ct_kill_acct(ct, ctinfo, skb); return NF_DROP; } EXPORT_SYMBOL_GPL(nf_nat_inet_fn); struct nf_nat_proto_clean { u8 l3proto; u8 l4proto; }; /* kill conntracks with affected NAT section */ static int nf_nat_proto_remove(struct nf_conn *i, void *data) { const struct nf_nat_proto_clean *clean = data; if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) || (clean->l4proto && nf_ct_protonum(i) != clean->l4proto)) return 0; return i->status & IPS_NAT_MASK ? 1 : 0; } static void nf_nat_cleanup_conntrack(struct nf_conn *ct) { unsigned int h; h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); hlist_del_rcu(&ct->nat_bysource); spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); } static int nf_nat_proto_clean(struct nf_conn *ct, void *data) { if (nf_nat_proto_remove(ct, data)) return 1; /* This module is being removed and conntrack has nat null binding. * Remove it from bysource hash, as the table will be freed soon. * * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack() * will delete the entry from the already-freed table. */ if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status)) nf_nat_cleanup_conntrack(ct); /* don't delete conntrack. Although that would make things a lot * simpler, we'd end up flushing all conntracks on nat rmmod.
*/ return 0; } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, }; static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range2 *range) { if (tb[CTA_PROTONAT_PORT_MIN]) { range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]); range->max_proto.all = range->min_proto.all; range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } if (tb[CTA_PROTONAT_PORT_MAX]) { range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]); range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } return 0; } static int nfnetlink_parse_nat_proto(struct nlattr *attr, const struct nf_conn *ct, struct nf_nat_range2 *range) { struct nlattr *tb[CTA_PROTONAT_MAX+1]; int err; err = nla_parse_nested_deprecated(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy, NULL); if (err < 0) return err; return nf_nat_l4proto_nlattr_to_range(tb, range); } static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = { [CTA_NAT_V4_MINIP] = { .type = NLA_U32 }, [CTA_NAT_V4_MAXIP] = { .type = NLA_U32 }, [CTA_NAT_V6_MINIP] = { .len = sizeof(struct in6_addr) }, [CTA_NAT_V6_MAXIP] = { .len = sizeof(struct in6_addr) }, [CTA_NAT_PROTO] = { .type = NLA_NESTED }, }; static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range2 *range) { if (tb[CTA_NAT_V4_MINIP]) { range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]); range->flags |= NF_NAT_RANGE_MAP_IPS; } range->max_addr.ip = nla_get_be32_default(tb[CTA_NAT_V4_MAXIP], range->min_addr.ip); return 0; } static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range2 *range) { if (tb[CTA_NAT_V6_MINIP]) { nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP], sizeof(struct in6_addr)); range->flags |= NF_NAT_RANGE_MAP_IPS; } if (tb[CTA_NAT_V6_MAXIP]) nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP], sizeof(struct in6_addr)); else range->max_addr = range->min_addr; return 0; } static int nfnetlink_parse_nat(const struct nlattr *nat, const struct nf_conn *ct, struct nf_nat_range2 *range) { struct nlattr *tb[CTA_NAT_MAX+1]; int err; memset(range, 0, sizeof(*range)); err = nla_parse_nested_deprecated(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL); if (err < 0) return err; switch (nf_ct_l3num(ct)) { case NFPROTO_IPV4: err = nf_nat_ipv4_nlattr_to_range(tb, range); break; case NFPROTO_IPV6: err = nf_nat_ipv6_nlattr_to_range(tb, range); break; default: err = -EPROTONOSUPPORT; break; } if (err) return err; if (!tb[CTA_NAT_PROTO]) return 0; return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); } /* This function is called under rcu_read_lock() */ static int nfnetlink_parse_nat_setup(struct nf_conn *ct, enum nf_nat_manip_type manip, const struct nlattr *attr) { struct nf_nat_range2 range; int err; /* Should not happen, restricted to creating new conntracks * via ctnetlink. */ if (WARN_ON_ONCE(nf_nat_initialized(ct, manip))) return -EEXIST; /* No NAT information has been passed, allocate the null-binding */ if (attr == NULL) return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0; err = nfnetlink_parse_nat(attr, ct, &range); if (err < 0) return err; return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? 
-ENOMEM : 0; } #else static int nfnetlink_parse_nat_setup(struct nf_conn *ct, enum nf_nat_manip_type manip, const struct nlattr *attr) { return -EOPNOTSUPP; } #endif static struct nf_ct_helper_expectfn follow_master_nat = { .name = "nat-follow-master", .expectfn = nf_nat_follow_master, }; int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops, const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count) { struct nat_net *nat_net = net_generic(net, nat_net_id); struct nf_nat_hooks_net *nat_proto_net; struct nf_nat_lookup_hook_priv *priv; unsigned int hooknum = ops->hooknum; struct nf_hook_ops *nat_ops; int i, ret; if (WARN_ON_ONCE(pf >= ARRAY_SIZE(nat_net->nat_proto_net))) return -EINVAL; nat_proto_net = &nat_net->nat_proto_net[pf]; for (i = 0; i < ops_count; i++) { if (orig_nat_ops[i].hooknum == hooknum) { hooknum = i; break; } } if (WARN_ON_ONCE(i == ops_count)) return -EINVAL; mutex_lock(&nf_nat_proto_mutex); if (!nat_proto_net->nat_hook_ops) { WARN_ON(nat_proto_net->users != 0); nat_ops = kmemdup_array(orig_nat_ops, ops_count, sizeof(*orig_nat_ops), GFP_KERNEL); if (!nat_ops) { mutex_unlock(&nf_nat_proto_mutex); return -ENOMEM; } for (i = 0; i < ops_count; i++) { priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv) { nat_ops[i].priv = priv; continue; } mutex_unlock(&nf_nat_proto_mutex); while (i) kfree(nat_ops[--i].priv); kfree(nat_ops); return -ENOMEM; } ret = nf_register_net_hooks(net, nat_ops, ops_count); if (ret < 0) { mutex_unlock(&nf_nat_proto_mutex); for (i = 0; i < ops_count; i++) kfree(nat_ops[i].priv); kfree(nat_ops); return ret; } nat_proto_net->nat_hook_ops = nat_ops; } nat_ops = nat_proto_net->nat_hook_ops; priv = nat_ops[hooknum].priv; if (WARN_ON_ONCE(!priv)) { mutex_unlock(&nf_nat_proto_mutex); return -EOPNOTSUPP; } ret = nf_hook_entries_insert_raw(&priv->entries, ops); if (ret == 0) nat_proto_net->users++; mutex_unlock(&nf_nat_proto_mutex); return ret; } void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops, unsigned int ops_count) { struct nat_net *nat_net = net_generic(net, nat_net_id); struct nf_nat_hooks_net *nat_proto_net; struct nf_nat_lookup_hook_priv *priv; struct nf_hook_ops *nat_ops; int hooknum = ops->hooknum; int i; if (pf >= ARRAY_SIZE(nat_net->nat_proto_net)) return; nat_proto_net = &nat_net->nat_proto_net[pf]; mutex_lock(&nf_nat_proto_mutex); if (WARN_ON(nat_proto_net->users == 0)) goto unlock; nat_proto_net->users--; nat_ops = nat_proto_net->nat_hook_ops; for (i = 0; i < ops_count; i++) { if (nat_ops[i].hooknum == hooknum) { hooknum = i; break; } } if (WARN_ON_ONCE(i == ops_count)) goto unlock; priv = nat_ops[hooknum].priv; nf_hook_entries_delete_raw(&priv->entries, ops); if (nat_proto_net->users == 0) { nf_unregister_net_hooks(net, nat_ops, ops_count); for (i = 0; i < ops_count; i++) { priv = nat_ops[i].priv; kfree_rcu(priv, rcu_head); } nat_proto_net->nat_hook_ops = NULL; kfree(nat_ops); } unlock: mutex_unlock(&nf_nat_proto_mutex); } static struct pernet_operations nat_net_ops = { .id = &nat_net_id, .size = sizeof(struct nat_net), }; static const struct nf_nat_hook nat_hook = { .parse_nat_setup = nfnetlink_parse_nat_setup, #ifdef CONFIG_XFRM .decode_session = __nf_nat_decode_session, #endif .remove_nat_bysrc = nf_nat_cleanup_conntrack, }; static int __init nf_nat_init(void) { int ret, i; /* Leave them the same for the moment. 
*/ nf_nat_htable_size = nf_conntrack_htable_size; if (nf_nat_htable_size < CONNTRACK_LOCKS) nf_nat_htable_size = CONNTRACK_LOCKS; nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0); if (!nf_nat_bysource) return -ENOMEM; for (i = 0; i < CONNTRACK_LOCKS; i++) spin_lock_init(&nf_nat_locks[i]); ret = register_pernet_subsys(&nat_net_ops); if (ret < 0) { kvfree(nf_nat_bysource); return ret; } nf_ct_helper_expectfn_register(&follow_master_nat); WARN_ON(nf_nat_hook != NULL); RCU_INIT_POINTER(nf_nat_hook, &nat_hook); ret = register_nf_nat_bpf(); if (ret < 0) { RCU_INIT_POINTER(nf_nat_hook, NULL); nf_ct_helper_expectfn_unregister(&follow_master_nat); synchronize_net(); unregister_pernet_subsys(&nat_net_ops); kvfree(nf_nat_bysource); } return ret; } static void __exit nf_nat_cleanup(void) { struct nf_nat_proto_clean clean = {}; nf_ct_iterate_destroy(nf_nat_proto_clean, &clean); nf_ct_helper_expectfn_unregister(&follow_master_nat); RCU_INIT_POINTER(nf_nat_hook, NULL); synchronize_net(); kvfree(nf_nat_bysource); unregister_pernet_subsys(&nat_net_ops); } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Network address translation core"); module_init(nf_nat_init); module_exit(nf_nat_cleanup); |
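/*
 * A minimal usage sketch (not part of nf_nat_core.c): how a NAT expression
 * running before the conntrack entry is confirmed might request "SNAT to
 * 192.0.2.1, source ports 40000-49999" through the nf_nat_range2 /
 * nf_nat_setup_info() interface above.  The helper name and the hard-coded
 * address are assumptions for illustration only.
 */
static unsigned int example_snat_to_single_ip(struct nf_conn *ct)
{
	struct nf_nat_range2 range = {
		/* map the address and pin the L4 (port) range */
		.flags		= NF_NAT_RANGE_MAP_IPS |
				  NF_NAT_RANGE_PROTO_SPECIFIED,
		.min_addr.ip	= htonl(0xc0000201),	/* 192.0.2.1 */
		.max_addr.ip	= htonl(0xc0000201),
		.min_proto.all	= htons(40000),
		.max_proto.all	= htons(49999),
	};

	/* get_unique_tuple() will pick a free port inside the range */
	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}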
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'.
* * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org> */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/list.h> #include <linux/fs.h> #include <linux/mtd/blktrans.h> #include <linux/mtd/mtd.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> #include <linux/blkpg.h> #include <linux/spinlock.h> #include <linux/hdreg.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include "mtdcore.h" static LIST_HEAD(blktrans_majors); static void blktrans_dev_release(struct kref *kref) { struct mtd_blktrans_dev *dev = container_of(kref, struct mtd_blktrans_dev, ref); put_disk(dev->disk); blk_mq_free_tag_set(dev->tag_set); kfree(dev->tag_set); list_del(&dev->list); kfree(dev); } static void blktrans_dev_put(struct mtd_blktrans_dev *dev) { kref_put(&dev->ref, blktrans_dev_release); } static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr, struct mtd_blktrans_dev *dev, struct request *req) { struct req_iterator iter; struct bio_vec bvec; unsigned long block, nsect; char *buf; block = blk_rq_pos(req) << 9 >> tr->blkshift; nsect = blk_rq_cur_bytes(req) >> tr->blkshift; switch (req_op(req)) { case REQ_OP_FLUSH: if (tr->flush(dev)) return BLK_STS_IOERR; return BLK_STS_OK; case REQ_OP_DISCARD: if (tr->discard(dev, block, nsect)) return BLK_STS_IOERR; return BLK_STS_OK; case REQ_OP_READ: buf = kmap(bio_page(req->bio)) + bio_offset(req->bio); for (; nsect > 0; nsect--, block++, buf += tr->blksize) { if (tr->readsect(dev, block, buf)) { kunmap(bio_page(req->bio)); return BLK_STS_IOERR; } } kunmap(bio_page(req->bio)); rq_for_each_segment(bvec, req, iter) flush_dcache_page(bvec.bv_page); return BLK_STS_OK; case REQ_OP_WRITE: if (!tr->writesect) return BLK_STS_IOERR; rq_for_each_segment(bvec, req, iter) flush_dcache_page(bvec.bv_page); buf = kmap(bio_page(req->bio)) + bio_offset(req->bio); for (; nsect > 0; nsect--, block++, buf += tr->blksize) { if (tr->writesect(dev, block, buf)) { kunmap(bio_page(req->bio)); return BLK_STS_IOERR; } } kunmap(bio_page(req->bio)); return BLK_STS_OK; default: return BLK_STS_IOERR; } } int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) { return dev->bg_stop; } EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); static struct request *mtd_next_request(struct mtd_blktrans_dev *dev) { struct request *rq; rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist); if (rq) { list_del_init(&rq->queuelist); blk_mq_start_request(rq); return rq; } return NULL; } static void mtd_blktrans_work(struct mtd_blktrans_dev *dev) __releases(&dev->queue_lock) __acquires(&dev->queue_lock) { struct mtd_blktrans_ops *tr = dev->tr; struct request *req = NULL; int background_done = 0; while (1) { blk_status_t res; dev->bg_stop = false; if (!req && !(req = mtd_next_request(dev))) { if (tr->background && !background_done) { spin_unlock_irq(&dev->queue_lock); mutex_lock(&dev->lock); tr->background(dev); mutex_unlock(&dev->lock); spin_lock_irq(&dev->queue_lock); /* * Do background processing just once per idle * period. 
*/ background_done = !dev->bg_stop; continue; } break; } spin_unlock_irq(&dev->queue_lock); mutex_lock(&dev->lock); res = do_blktrans_request(dev->tr, dev, req); mutex_unlock(&dev->lock); if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) { __blk_mq_end_request(req, res); req = NULL; } background_done = 0; cond_resched(); spin_lock_irq(&dev->queue_lock); } } static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct mtd_blktrans_dev *dev; dev = hctx->queue->queuedata; if (!dev) { blk_mq_start_request(bd->rq); return BLK_STS_IOERR; } spin_lock_irq(&dev->queue_lock); list_add_tail(&bd->rq->queuelist, &dev->rq_list); mtd_blktrans_work(dev); spin_unlock_irq(&dev->queue_lock); return BLK_STS_OK; } static int blktrans_open(struct gendisk *disk, blk_mode_t mode) { struct mtd_blktrans_dev *dev = disk->private_data; int ret = 0; kref_get(&dev->ref); mutex_lock(&dev->lock); if (dev->open) goto unlock; __module_get(dev->tr->owner); if (!dev->mtd) goto unlock; if (dev->tr->open) { ret = dev->tr->open(dev); if (ret) goto error_put; } ret = __get_mtd_device(dev->mtd); if (ret) goto error_release; dev->writable = mode & BLK_OPEN_WRITE; unlock: dev->open++; mutex_unlock(&dev->lock); return ret; error_release: if (dev->tr->release) dev->tr->release(dev); error_put: module_put(dev->tr->owner); mutex_unlock(&dev->lock); blktrans_dev_put(dev); return ret; } static void blktrans_release(struct gendisk *disk) { struct mtd_blktrans_dev *dev = disk->private_data; mutex_lock(&dev->lock); if (--dev->open) goto unlock; module_put(dev->tr->owner); if (dev->mtd) { if (dev->tr->release) dev->tr->release(dev); __put_mtd_device(dev->mtd); } unlock: mutex_unlock(&dev->lock); blktrans_dev_put(dev); } static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; int ret = -ENXIO; mutex_lock(&dev->lock); if (!dev->mtd) goto unlock; ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY; unlock: mutex_unlock(&dev->lock); return ret; } static const struct block_device_operations mtd_block_ops = { .owner = THIS_MODULE, .open = blktrans_open, .release = blktrans_release, .getgeo = blktrans_getgeo, }; static const struct blk_mq_ops mtd_mq_ops = { .queue_rq = mtd_queue_rq, }; int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) { struct mtd_blktrans_ops *tr = new->tr; struct mtd_blktrans_dev *d; struct queue_limits lim = { }; int last_devnum = -1; struct gendisk *gd; int ret; lockdep_assert_held(&mtd_table_mutex); list_for_each_entry(d, &tr->devs, list) { if (new->devnum == -1) { /* Use first free number */ if (d->devnum != last_devnum+1) { /* Found a free devnum. Plug it in here */ new->devnum = last_devnum+1; list_add_tail(&new->list, &d->list); goto added; } } else if (d->devnum == new->devnum) { /* Required number taken */ return -EBUSY; } else if (d->devnum > new->devnum) { /* Required number was free */ list_add_tail(&new->list, &d->list); goto added; } last_devnum = d->devnum; } ret = -EBUSY; if (new->devnum == -1) new->devnum = last_devnum+1; /* Check that the device and any partitions will get valid * minor numbers and that the disk naming code below can cope * with this number. 
*/ if (new->devnum > (MINORMASK >> tr->part_bits) || (tr->part_bits && new->devnum >= 27 * 26)) return ret; list_add_tail(&new->list, &tr->devs); added: mutex_init(&new->lock); kref_init(&new->ref); if (!tr->writesect) new->readonly = 1; ret = -ENOMEM; new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL); if (!new->tag_set) goto out_list_del; ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2, BLK_MQ_F_BLOCKING); if (ret) goto out_kfree_tag_set; lim.logical_block_size = tr->blksize; if (tr->discard) lim.max_hw_discard_sectors = UINT_MAX; if (tr->flush) lim.features |= BLK_FEAT_WRITE_CACHE; /* Create gendisk */ gd = blk_mq_alloc_disk(new->tag_set, &lim, new); if (IS_ERR(gd)) { ret = PTR_ERR(gd); goto out_free_tag_set; } new->disk = gd; new->rq = new->disk->queue; gd->private_data = new; gd->major = tr->major; gd->first_minor = (new->devnum) << tr->part_bits; gd->minors = 1 << tr->part_bits; gd->fops = &mtd_block_ops; if (tr->part_bits) { if (new->devnum < 26) snprintf(gd->disk_name, sizeof(gd->disk_name), "%s%c", tr->name, 'a' + new->devnum); else snprintf(gd->disk_name, sizeof(gd->disk_name), "%s%c%c", tr->name, 'a' - 1 + new->devnum / 26, 'a' + new->devnum % 26); } else { snprintf(gd->disk_name, sizeof(gd->disk_name), "%s%d", tr->name, new->devnum); gd->flags |= GENHD_FL_NO_PART; } set_capacity(gd, ((u64)new->size * tr->blksize) >> 9); /* Create the request queue */ spin_lock_init(&new->queue_lock); INIT_LIST_HEAD(&new->rq_list); gd->queue = new->rq; if (new->readonly) set_disk_ro(gd, 1); ret = device_add_disk(&new->mtd->dev, gd, NULL); if (ret) goto out_cleanup_disk; if (new->disk_attributes) { ret = sysfs_create_group(&disk_to_dev(gd)->kobj, new->disk_attributes); WARN_ON(ret); } return 0; out_cleanup_disk: put_disk(new->disk); out_free_tag_set: blk_mq_free_tag_set(new->tag_set); out_kfree_tag_set: kfree(new->tag_set); out_list_del: list_del(&new->list); return ret; } int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) { unsigned long flags; unsigned int memflags; lockdep_assert_held(&mtd_table_mutex); if (old->disk_attributes) sysfs_remove_group(&disk_to_dev(old->disk)->kobj, old->disk_attributes); /* Stop new requests to arrive */ del_gendisk(old->disk); /* Kill current requests */ spin_lock_irqsave(&old->queue_lock, flags); old->rq->queuedata = NULL; spin_unlock_irqrestore(&old->queue_lock, flags); /* freeze+quiesce queue to ensure all requests are flushed */ memflags = blk_mq_freeze_queue(old->rq); blk_mq_quiesce_queue(old->rq); blk_mq_unquiesce_queue(old->rq); blk_mq_unfreeze_queue(old->rq, memflags); /* If the device is currently open, tell trans driver to close it, then put mtd device, and don't touch it again */ mutex_lock(&old->lock); if (old->open) { if (old->tr->release) old->tr->release(old); __put_mtd_device(old->mtd); } old->mtd = NULL; mutex_unlock(&old->lock); blktrans_dev_put(old); return 0; } static void blktrans_notify_remove(struct mtd_info *mtd) { struct mtd_blktrans_ops *tr; struct mtd_blktrans_dev *dev, *next; list_for_each_entry(tr, &blktrans_majors, list) list_for_each_entry_safe(dev, next, &tr->devs, list) if (dev->mtd == mtd) tr->remove_dev(dev); } static void blktrans_notify_add(struct mtd_info *mtd) { struct mtd_blktrans_ops *tr; if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME) return; list_for_each_entry(tr, &blktrans_majors, list) tr->add_mtd(tr, mtd); } static struct mtd_notifier blktrans_notifier = { .add = blktrans_notify_add, .remove = blktrans_notify_remove, }; int register_mtd_blktrans(struct mtd_blktrans_ops *tr) { struct 
mtd_info *mtd; int ret; /* Register the notifier if/when the first device type is registered, to prevent the link/init ordering from fucking us over. */ if (!blktrans_notifier.list.next) register_mtd_user(&blktrans_notifier); ret = register_blkdev(tr->major, tr->name); if (ret < 0) { printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", tr->name, tr->major, ret); return ret; } if (ret) tr->major = ret; tr->blkshift = ffs(tr->blksize) - 1; INIT_LIST_HEAD(&tr->devs); mutex_lock(&mtd_table_mutex); list_add(&tr->list, &blktrans_majors); mtd_for_each_device(mtd) if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME) tr->add_mtd(tr, mtd); mutex_unlock(&mtd_table_mutex); return 0; } int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr) { struct mtd_blktrans_dev *dev, *next; mutex_lock(&mtd_table_mutex); /* Remove it from the list of active majors */ list_del(&tr->list); list_for_each_entry_safe(dev, next, &tr->devs, list) tr->remove_dev(dev); mutex_unlock(&mtd_table_mutex); unregister_blkdev(tr->major, tr->name); BUG_ON(!list_empty(&tr->devs)); return 0; } static void __exit mtd_blktrans_exit(void) { /* No race here -- if someone's currently in register_mtd_blktrans we're screwed anyway. */ if (blktrans_notifier.list.next) unregister_mtd_user(&blktrans_notifier); } module_exit(mtd_blktrans_exit); EXPORT_SYMBOL_GPL(register_mtd_blktrans); EXPORT_SYMBOL_GPL(deregister_mtd_blktrans); EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev); EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'"); |
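/*
 * A minimal registration sketch (not part of mtd_blkdevs.c): the skeleton a
 * translation layer would provide on top of the framework above.  All names
 * here (example_*, "exblk") are invented for illustration; real users are
 * drivers such as mtdblock, ftl or nftl.  Leaving .writesect unset makes the
 * resulting disk read-only, as add_mtd_blktrans_dev() shows above.
 */
static int example_readsect(struct mtd_blktrans_dev *dev,
			    unsigned long block, char *buf)
{
	size_t retlen;

	/* one "block" is tr->blksize bytes, here 512 */
	return mtd_read(dev->mtd, (loff_t)block * dev->tr->blksize,
			dev->tr->blksize, &retlen, (u_char *)buf);
}

static void example_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mtd = mtd;
	dev->devnum = -1;		/* let the core pick the first free number */
	dev->size = mtd->size >> 9;	/* capacity in 512-byte blocks */
	dev->tr = tr;

	if (add_mtd_blktrans_dev(dev))
		kfree(dev);
}

static void example_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
}

static struct mtd_blktrans_ops example_tr = {
	.name		= "exblk",
	.major		= 0,		/* dynamic major */
	.part_bits	= 0,
	.blksize	= 512,
	.readsect	= example_readsect,
	.add_mtd	= example_add_mtd,
	.remove_dev	= example_remove_dev,
	.owner		= THIS_MODULE,
};

/* a module init function would then call register_mtd_blktrans(&example_tr) */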
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/types.h>
#include <net/pkt_sched.h>

#include "sch_mqprio_lib.h"

/* Returns true if the intervals [a, b) and [c, d) overlap. */
static bool intervals_overlap(int a, int b, int c, int d)
{
	int left = max(a, c), right = min(b, d);

	return left < right;
}

static int mqprio_validate_queue_counts(struct net_device *dev,
					const struct tc_mqprio_qopt *qopt,
					bool allow_overlapping_txqs,
					struct netlink_ext_ack *extack)
{
	int i, j;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		if (!qopt->count[i]) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "No queues for TC %d",
					       i);
			return -EINVAL;
		}

		/* Verify the queue range lies within the TX queues; a range
		 * ending exactly at real_num_tx_queues means the last queue
		 * is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Queues %d:%d for TC %d exceed the %d TX queues available",
					       qopt->count[i], qopt->offset[i],
					       i, dev->real_num_tx_queues);
			return -EINVAL;
		}

		if (allow_overlapping_txqs)
			continue;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (intervals_overlap(qopt->offset[i], last,
					      qopt->offset[j],
					      qopt->offset[j] +
					      qopt->count[j])) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "TC %d queues %d@%d overlap with TC %d queues %d@%d",
						       i, qopt->count[i], qopt->offset[i],
						       j, qopt->count[j], qopt->offset[j]);
				return -EINVAL;
			}
		}
	}

	return 0;
}

int mqprio_validate_qopt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
			 bool validate_queue_counts,
			 bool allow_overlapping_txqs,
			 struct netlink_ext_ack *extack)
{
	int i, err;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack,
			       "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i <= TC_BITMASK; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack,
				       "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	if (validate_queue_counts) {
		err = mqprio_validate_queue_counts(dev, qopt,
						   allow_overlapping_txqs,
						   extack);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mqprio_validate_qopt);

void mqprio_qopt_reconstruct(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int tc, num_tc = netdev_get_num_tc(dev);

	qopt->num_tc = num_tc;
	memcpy(qopt->prio_tc_map, dev->prio_tc_map, sizeof(qopt->prio_tc_map));

	for (tc = 0; tc < num_tc; tc++) {
		qopt->count[tc] = dev->tc_to_txq[tc].count;
		qopt->offset[tc] = dev->tc_to_txq[tc].offset;
	}
}
EXPORT_SYMBOL_GPL(mqprio_qopt_reconstruct);

void mqprio_fp_to_offload(u32 fp[TC_QOPT_MAX_QUEUE],
			  struct tc_mqprio_qopt_offload *mqprio)
{
	unsigned long preemptible_tcs = 0;
	int tc;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		if (fp[tc] == TC_FP_PREEMPTIBLE)
			preemptible_tcs |= BIT(tc);

	mqprio->preemptible_tcs = preemptible_tcs;
}
EXPORT_SYMBOL_GPL(mqprio_fp_to_offload);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Shared mqprio qdisc code currently between taprio and mqprio"); |
// SPDX-License-Identifier: GPL-2.0-only
/* ipv6header match - matches IPv6 packets based
   on whether they contain certain headers */

/* Original idea: Brad Chapman
 * Rewritten by: Andras Kis-Szabo <kisza@sch.bme.hu>
 */
/* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu> */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <net/checksum.h>
#include <net/ipv6.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_ipv6/ip6t_ipv6header.h>

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: IPv6 header types match");
MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");

static bool
ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct ip6t_ipv6header_info *info = par->matchinfo;
	unsigned int temp;
	int len;
	u8 nexthdr;
	unsigned int ptr;

	/* Make sure this isn't an evil packet */

	/* type of the 1st exthdr */
	nexthdr = ipv6_hdr(skb)->nexthdr;
	/* pointer to the 1st exthdr */
	ptr = sizeof(struct ipv6hdr);
	/* available length */
	len = skb->len - ptr;
	temp = 0;

	while (nf_ip6_ext_hdr(nexthdr)) {
		const struct ipv6_opt_hdr *hp;
		struct ipv6_opt_hdr _hdr;
		int hdrlen;

		/* No more exthdr -> evaluate */
		if (nexthdr == NEXTHDR_NONE) {
			temp |= MASK_NONE;
			break;
		}

		/* Is there enough space for the next ext header?
*/ if (len < (int)sizeof(struct ipv6_opt_hdr)) return false; /* ESP -> evaluate */ if (nexthdr == NEXTHDR_ESP) { temp |= MASK_ESP; break; } hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); if (!hp) { par->hotdrop = true; return false; } /* Calculate the header length */ if (nexthdr == NEXTHDR_FRAGMENT) hdrlen = 8; else if (nexthdr == NEXTHDR_AUTH) hdrlen = ipv6_authlen(hp); else hdrlen = ipv6_optlen(hp); /* set the flag */ switch (nexthdr) { case NEXTHDR_HOP: temp |= MASK_HOPOPTS; break; case NEXTHDR_ROUTING: temp |= MASK_ROUTING; break; case NEXTHDR_FRAGMENT: temp |= MASK_FRAGMENT; break; case NEXTHDR_AUTH: temp |= MASK_AH; break; case NEXTHDR_DEST: temp |= MASK_DSTOPTS; break; default: return false; } nexthdr = hp->nexthdr; len -= hdrlen; ptr += hdrlen; if (ptr > skb->len) break; } if (nexthdr != NEXTHDR_NONE && nexthdr != NEXTHDR_ESP) temp |= MASK_PROTO; if (info->modeflag) return !((temp ^ info->matchflags ^ info->invflags) & info->matchflags); else { if (info->invflags) return temp != info->matchflags; else return temp == info->matchflags; } } static int ipv6header_mt6_check(const struct xt_mtchk_param *par) { const struct ip6t_ipv6header_info *info = par->matchinfo; /* invflags is 0 or 0xff in hard mode */ if ((!info->modeflag) && info->invflags != 0x00 && info->invflags != 0xFF) return -EINVAL; return 0; } static struct xt_match ipv6header_mt6_reg __read_mostly = { .name = "ipv6header", .family = NFPROTO_IPV6, .match = ipv6header_mt6, .matchsize = sizeof(struct ip6t_ipv6header_info), .checkentry = ipv6header_mt6_check, .destroy = NULL, .me = THIS_MODULE, }; static int __init ipv6header_mt6_init(void) { return xt_register_match(&ipv6header_mt6_reg); } static void __exit ipv6header_mt6_exit(void) { xt_unregister_match(&ipv6header_mt6_reg); } module_init(ipv6header_mt6_init); module_exit(ipv6header_mt6_exit); |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>
#include <linux/sockptr.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/llist.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/l3mdev.h>
#include <uapi/linux/socket.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
*/ typedef struct { spinlock_t slock; int owned; wait_queue_head_t wq; /* * We express the mutex-alike socket_lock semantics * to the lock validator by explicitly managing * the slock as a lock variant (in addition to * the slock itself): */ #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif } socket_lock_t; struct sock; struct proto; struct net; typedef __u32 __bitwise __portpair; typedef __u64 __bitwise __addrpair; /** * struct sock_common - minimal network layer representation of sockets * @skc_daddr: Foreign IPv4 addr * @skc_rcv_saddr: Bound local IPv4 addr * @skc_addrpair: 8-byte-aligned __u64 union of @skc_daddr & @skc_rcv_saddr * @skc_hash: hash value used with various protocol lookup tables * @skc_u16hashes: two u16 hash values used by UDP lookup tables * @skc_dport: placeholder for inet_dport/tw_dport * @skc_num: placeholder for inet_num/tw_num * @skc_portpair: __u32 union of @skc_dport & @skc_num * @skc_family: network address family * @skc_state: Connection state * @skc_reuse: %SO_REUSEADDR setting * @skc_reuseport: %SO_REUSEPORT setting * @skc_ipv6only: socket is IPV6 only * @skc_net_refcnt: socket is using net ref counting * @skc_bound_dev_if: bound device index if != 0 * @skc_bind_node: bind hash linkage for various protocol lookup tables * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol * @skc_prot: protocol handlers inside a network family * @skc_net: reference to the network namespace of this socket * @skc_v6_daddr: IPV6 destination address * @skc_v6_rcv_saddr: IPV6 source address * @skc_cookie: socket's cookie value * @skc_node: main hash linkage for various protocol lookup tables * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol * @skc_tx_queue_mapping: tx queue number for this connection * @skc_rx_queue_mapping: rx queue number for this connection * @skc_flags: place holder for sk_flags * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings * @skc_listener: connection request listener socket (aka rsk_listener) * [union with @skc_flags] * @skc_tw_dr: (aka tw_dr) ptr to &struct inet_timewait_death_row * [union with @skc_flags] * @skc_incoming_cpu: record/match cpu processing incoming packets * @skc_rcv_wnd: (aka rsk_rcv_wnd) TCP receive window size (possibly scaled) * [union with @skc_incoming_cpu] * @skc_tw_rcv_nxt: (aka tw_rcv_nxt) TCP window next expected seq number * [union with @skc_incoming_cpu] * @skc_refcnt: reference count * * This is the minimal network layer representation of sockets, the header * for struct sock and struct inet_timewait_sock. */ struct sock_common { union { __addrpair skc_addrpair; struct { __be32 skc_daddr; __be32 skc_rcv_saddr; }; }; union { unsigned int skc_hash; __u16 skc_u16hashes[2]; }; /* skc_dport && skc_num must be grouped as well */ union { __portpair skc_portpair; struct { __be16 skc_dport; __u16 skc_num; }; }; unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse:4; unsigned char skc_reuseport:1; unsigned char skc_ipv6only:1; unsigned char skc_net_refcnt:1; int skc_bound_dev_if; union { struct hlist_node skc_bind_node; struct hlist_node skc_portaddr_node; }; struct proto *skc_prot; possible_net_t skc_net; #if IS_ENABLED(CONFIG_IPV6) struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; #endif atomic64_t skc_cookie; /* following fields are padding to force * offset(struct sock, sk_refcnt) == 128 on 64bit arches * assuming IPV6 is enabled. 
We use this padding differently * for different kind of 'sockets' */ union { unsigned long skc_flags; struct sock *skc_listener; /* request_sock */ struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */ }; /* * fields between dontcopy_begin/dontcopy_end * are not copied in sock_copy() */ /* private: */ int skc_dontcopy_begin[0]; /* public: */ union { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; }; unsigned short skc_tx_queue_mapping; #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING unsigned short skc_rx_queue_mapping; #endif union { int skc_incoming_cpu; u32 skc_rcv_wnd; u32 skc_tw_rcv_nxt; /* struct tcp_timewait_sock */ }; refcount_t skc_refcnt; /* private: */ int skc_dontcopy_end[0]; union { u32 skc_rxhash; u32 skc_window_clamp; u32 skc_tw_snd_nxt; /* struct tcp_timewait_sock */ }; /* public: */ }; struct bpf_local_storage; struct sk_filter; /** * struct sock - network layer representation of sockets * @__sk_common: shared layout with inet_timewait_sock * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings * @sk_lock: synchronizer * @sk_kern_sock: True if sock is using kernel lock classes * @sk_rcvbuf: size of receive buffer in bytes * @sk_wq: sock wait queue and async head * @sk_rx_dst: receive input route used by early demux * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst * @sk_rx_dst_cookie: cookie for @sk_rx_dst * @sk_dst_cache: destination cache * @sk_dst_pending_confirm: need to confirm neighbour * @sk_policy: flow policy * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed * @sk_tsq_flags: TCP Small Queues flags * @sk_write_queue: Packet sending queue * @sk_omem_alloc: "o" is "option" or "other" * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward * @sk_reserved_mem: space reserved and non-reclaimable for the socket * @sk_napi_id: id of the last napi context to receive data for sk * @sk_ll_usec: usecs to busypoll when there is no data * @sk_allocation: allocation mode * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) * @sk_pacing_status: Pacing status (requested, handled by sch_fq) * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) * @sk_sndbuf: size of send buffer in bytes * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets * @sk_no_check_rx: allow zero checksum in RX packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) * @sk_gso_disabled: if set, NETIF_F_GSO_MASK is forbidden. * @sk_gso_type: GSO type (e.g. 
%SKB_GSO_TCPV4) * @sk_gso_max_size: Maximum GSO segment size to build * @sk_gso_max_segs: Maximum number of GSO segments * @sk_pacing_shift: scaling factor for TCP Small Queues * @sk_lingertime: %SO_LINGER l_linger setting * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct * @sk_error_queue: rarely used * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, * IPV6_ADDRFORM for instance) * @sk_err: last error * @sk_err_soft: errors that don't cause failure but are the cause of a * persistent failure not just 'timed out' * @sk_drops: raw/udp drops counter * @sk_ack_backlog: current listen backlog * @sk_max_ack_backlog: listen backlog set in listen() * @sk_uid: user id of owner * @sk_prefer_busy_poll: prefer busypolling over softirq processing * @sk_busy_poll_budget: napi processing budget when busypolling * @sk_priority: %SO_PRIORITY setting * @sk_type: socket type (%SOCK_STREAM, etc) * @sk_protocol: which protocol this socket belongs in this network family * @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred * @sk_peer_pid: &struct pid for this socket's peer * @sk_peer_cred: %SO_PEERCRED setting * @sk_rcvlowat: %SO_RCVLOWAT setting * @sk_rcvtimeo: %SO_RCVTIMEO setting * @sk_sndtimeo: %SO_SNDTIMEO setting * @sk_txhash: computed flow hash for use on transmit * @sk_txrehash: enable TX hash rethink * @sk_filter: socket filtering instructions * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only * @sk_tsflags: SO_TIMESTAMPING flags * @sk_bpf_cb_flags: used in bpf_setsockopt() * @sk_use_task_frag: allow sk_page_frag() to use current->task_frag. * Sockets that can be used under memory reclaim should * set this to false. * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock * for timestamping * @sk_tskey: counter to disambiguate concurrent tstamp requests * @sk_zckey: counter to order MSG_ZEROCOPY notifications * @sk_socket: Identd and reporting IO signals * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock. * @sk_frag: cached page frag * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head] * @sk_security: used by security modules * @sk_mark: generic packet mark * @sk_cgrp_data: cgroup data for this cgroup * @sk_memcg: this socket's memory cgroup association * @sk_write_pending: a write to stream socket waits to start * @sk_disconnects: number of disconnect operations performed on this sock * @sk_state_change: callback to indicate change in the state of the sock * @sk_data_ready: callback to indicate there is data to be processed * @sk_write_space: callback to indicate there is bf sending space available * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE) * @sk_backlog_rcv: callback to process the backlog * @sk_validate_xmit_skb: ptr to an optional validate function * @sk_destruct: called at sock freeing time, i.e. 
when all refcnt == 0 * @sk_reuseport_cb: reuseport group container * @sk_bpf_storage: ptr to cache and control for bpf_sk_storage * @sk_rcu: used during RCU grace period * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME) * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME * @sk_txtime_report_errors: set report errors mode for SO_TXTIME * @sk_txtime_unused: unused txtime flags * @ns_tracker: tracker for netns reference * @sk_user_frags: xarray of pages the user is holding a reference on. * @sk_owner: reference to the real owner of the socket that calls * sock_lock_init_class_and_name(). */ struct sock { /* * Now struct inet_timewait_sock also uses sock_common, so please just * don't add nothing before this first member (__sk_common) --acme */ struct sock_common __sk_common; #define sk_node __sk_common.skc_node #define sk_nulls_node __sk_common.skc_nulls_node #define sk_refcnt __sk_common.skc_refcnt #define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING #define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping #endif #define sk_dontcopy_begin __sk_common.skc_dontcopy_begin #define sk_dontcopy_end __sk_common.skc_dontcopy_end #define sk_hash __sk_common.skc_hash #define sk_portpair __sk_common.skc_portpair #define sk_num __sk_common.skc_num #define sk_dport __sk_common.skc_dport #define sk_addrpair __sk_common.skc_addrpair #define sk_daddr __sk_common.skc_daddr #define sk_rcv_saddr __sk_common.skc_rcv_saddr #define sk_family __sk_common.skc_family #define sk_state __sk_common.skc_state #define sk_reuse __sk_common.skc_reuse #define sk_reuseport __sk_common.skc_reuseport #define sk_ipv6only __sk_common.skc_ipv6only #define sk_net_refcnt __sk_common.skc_net_refcnt #define sk_bound_dev_if __sk_common.skc_bound_dev_if #define sk_bind_node __sk_common.skc_bind_node #define sk_prot __sk_common.skc_prot #define sk_net __sk_common.skc_net #define sk_v6_daddr __sk_common.skc_v6_daddr #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr #define sk_cookie __sk_common.skc_cookie #define sk_incoming_cpu __sk_common.skc_incoming_cpu #define sk_flags __sk_common.skc_flags #define sk_rxhash __sk_common.skc_rxhash __cacheline_group_begin(sock_write_rx); atomic_t sk_drops; __s32 sk_peek_off; struct sk_buff_head sk_error_queue; struct sk_buff_head sk_receive_queue; /* * The backlog queue is special, it is always used with * the per-socket spinlock held and requires low latency * access. Therefore we special case it's implementation. * Note : rmem_alloc is in this structure to fill a hole * on 64bit arches, not because its logically part of * backlog. 
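 *
 * A rough sketch of who touches the backlog (illustrative only, not a
 * verbatim call chain; "limit" is whatever the protocol passes in)::
 *
 *	softirq, bh_lock_sock() held        process, lock_sock() held
 *	----------------------------        --------------------------
 *	sk_add_backlog(sk, skb, limit)      release_sock(sk)
 *	                                      -> __release_sock(sk)
 *	                                        -> sk_backlog_rcv(sk, skb)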
*/ struct { atomic_t rmem_alloc; int len; struct sk_buff *head; struct sk_buff *tail; } sk_backlog; #define sk_rmem_alloc sk_backlog.rmem_alloc __cacheline_group_end(sock_write_rx); __cacheline_group_begin(sock_read_rx); /* early demux fields */ struct dst_entry __rcu *sk_rx_dst; int sk_rx_dst_ifindex; u32 sk_rx_dst_cookie; #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sk_ll_usec; unsigned int sk_napi_id; u16 sk_busy_poll_budget; u8 sk_prefer_busy_poll; #endif u8 sk_userlocks; int sk_rcvbuf; struct sk_filter __rcu *sk_filter; union { struct socket_wq __rcu *sk_wq; /* private: */ struct socket_wq *sk_wq_raw; /* public: */ }; void (*sk_data_ready)(struct sock *sk); long sk_rcvtimeo; int sk_rcvlowat; __cacheline_group_end(sock_read_rx); __cacheline_group_begin(sock_read_rxtx); int sk_err; struct socket *sk_socket; struct mem_cgroup *sk_memcg; #ifdef CONFIG_XFRM struct xfrm_policy __rcu *sk_policy[2]; #endif __cacheline_group_end(sock_read_rxtx); __cacheline_group_begin(sock_write_rxtx); socket_lock_t sk_lock; u32 sk_reserved_mem; int sk_forward_alloc; u32 sk_tsflags; __cacheline_group_end(sock_write_rxtx); __cacheline_group_begin(sock_write_tx); int sk_write_pending; atomic_t sk_omem_alloc; int sk_sndbuf; int sk_wmem_queued; refcount_t sk_wmem_alloc; unsigned long sk_tsq_flags; union { struct sk_buff *sk_send_head; struct rb_root tcp_rtx_queue; }; struct sk_buff_head sk_write_queue; u32 sk_dst_pending_confirm; u32 sk_pacing_status; /* see enum sk_pacing */ struct page_frag sk_frag; struct timer_list sk_timer; unsigned long sk_pacing_rate; /* bytes per second */ atomic_t sk_zckey; atomic_t sk_tskey; __cacheline_group_end(sock_write_tx); __cacheline_group_begin(sock_read_tx); unsigned long sk_max_pacing_rate; long sk_sndtimeo; u32 sk_priority; u32 sk_mark; struct dst_entry __rcu *sk_dst_cache; netdev_features_t sk_route_caps; #ifdef CONFIG_SOCK_VALIDATE_XMIT struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk, struct net_device *dev, struct sk_buff *skb); #endif u16 sk_gso_type; u16 sk_gso_max_segs; unsigned int sk_gso_max_size; gfp_t sk_allocation; u32 sk_txhash; u8 sk_pacing_shift; bool sk_use_task_frag; __cacheline_group_end(sock_read_tx); /* * Because of non atomicity rules, all * changes are protected by socket lock. 
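 *
 * A minimal sketch of that rule (hypothetical setter, process context),
 * since writing one bitfield rewrites the whole byte::
 *
 *	lock_sock(sk);
 *	sk->sk_no_check_tx = 1;
 *	release_sock(sk);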
*/ u8 sk_gso_disabled : 1, sk_kern_sock : 1, sk_no_check_tx : 1, sk_no_check_rx : 1; u8 sk_shutdown; u16 sk_type; u16 sk_protocol; unsigned long sk_lingertime; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err_soft; u32 sk_ack_backlog; u32 sk_max_ack_backlog; kuid_t sk_uid; spinlock_t sk_peer_lock; int sk_bind_phc; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; ktime_t sk_stamp; #if BITS_PER_LONG==32 seqlock_t sk_stamp_seq; #endif int sk_disconnects; u8 sk_txrehash; u8 sk_clockid; u8 sk_txtime_deadline_mode : 1, sk_txtime_report_errors : 1, sk_txtime_unused : 6; #define SK_BPF_CB_FLAG_TEST(SK, FLAG) ((SK)->sk_bpf_cb_flags & (FLAG)) u8 sk_bpf_cb_flags; void *sk_user_data; #ifdef CONFIG_SECURITY void *sk_security; #endif struct sock_cgroup_data sk_cgrp_data; void (*sk_state_change)(struct sock *sk); void (*sk_write_space)(struct sock *sk); void (*sk_error_report)(struct sock *sk); int (*sk_backlog_rcv)(struct sock *sk, struct sk_buff *skb); void (*sk_destruct)(struct sock *sk); struct sock_reuseport __rcu *sk_reuseport_cb; #ifdef CONFIG_BPF_SYSCALL struct bpf_local_storage __rcu *sk_bpf_storage; #endif struct rcu_head sk_rcu; netns_tracker ns_tracker; struct xarray sk_user_frags; #if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES) struct module *sk_owner; #endif }; struct sock_bh_locked { struct sock *sock; local_lock_t bh_lock; }; enum sk_pacing { SK_PACING_NONE = 0, SK_PACING_NEEDED = 1, SK_PACING_FQ = 2, }; /* flag bits in sk_user_data * * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might * not be suitable for copying when cloning the socket. For instance, * it can point to a reference counted object. sk_user_data bottom * bit is set if pointer must not be copied. * * - SK_USER_DATA_BPF: Mark whether sk_user_data field is * managed/owned by a BPF reuseport array. This bit should be set * when sk_user_data's sk is added to the bpf's reuseport_array. * * - SK_USER_DATA_PSOCK: Mark whether pointer stored in * sk_user_data points to psock type. This bit should be set * when sk_user_data is assigned to a psock object. */ #define SK_USER_DATA_NOCOPY 1UL #define SK_USER_DATA_BPF 2UL #define SK_USER_DATA_PSOCK 4UL #define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\ SK_USER_DATA_PSOCK) /** * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied * @sk: socket */ static inline bool sk_user_data_is_nocopy(const struct sock *sk) { return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY); } #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) /** * __locked_read_sk_user_data_with_flags - return the pointer * only if argument flags all has been set in sk_user_data. Otherwise * return NULL * * @sk: socket * @flags: flag bits * * The caller must be holding sk->sk_callback_lock. */ static inline void * __locked_read_sk_user_data_with_flags(const struct sock *sk, uintptr_t flags) { uintptr_t sk_user_data = (uintptr_t)rcu_dereference_check(__sk_user_data(sk), lockdep_is_held(&sk->sk_callback_lock)); WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK); if ((sk_user_data & flags) == flags) return (void *)(sk_user_data & SK_USER_DATA_PTRMASK); return NULL; } /** * __rcu_dereference_sk_user_data_with_flags - return the pointer * only if argument flags all has been set in sk_user_data. 
Otherwise * return NULL * * @sk: socket * @flags: flag bits */ static inline void * __rcu_dereference_sk_user_data_with_flags(const struct sock *sk, uintptr_t flags) { uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk)); WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK); if ((sk_user_data & flags) == flags) return (void *)(sk_user_data & SK_USER_DATA_PTRMASK); return NULL; } #define rcu_dereference_sk_user_data(sk) \ __rcu_dereference_sk_user_data_with_flags(sk, 0) #define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \ ({ \ uintptr_t __tmp1 = (uintptr_t)(ptr), \ __tmp2 = (uintptr_t)(flags); \ WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \ WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \ rcu_assign_pointer(__sk_user_data((sk)), \ __tmp1 | __tmp2); \ }) #define rcu_assign_sk_user_data(sk, ptr) \ __rcu_assign_sk_user_data_with_flags(sk, ptr, 0) static inline struct net *sock_net(const struct sock *sk) { return read_pnet(&sk->sk_net); } static inline void sock_net_set(struct sock *sk, struct net *net) { write_pnet(&sk->sk_net, net); } /* * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK * or not whether his port will be reused by someone else. SK_FORCE_REUSE * on a socket means that the socket will reuse everybody else's port * without looking at the other's sk_reuse value. */ #define SK_NO_REUSE 0 #define SK_CAN_REUSE 1 #define SK_FORCE_REUSE 2 int sk_set_peek_off(struct sock *sk, int val); static inline int sk_peek_offset(const struct sock *sk, int flags) { if (unlikely(flags & MSG_PEEK)) { return READ_ONCE(sk->sk_peek_off); } return 0; } static inline void sk_peek_offset_bwd(struct sock *sk, int val) { s32 off = READ_ONCE(sk->sk_peek_off); if (unlikely(off >= 0)) { off = max_t(s32, off - val, 0); WRITE_ONCE(sk->sk_peek_off, off); } } static inline void sk_peek_offset_fwd(struct sock *sk, int val) { sk_peek_offset_bwd(sk, -val); } /* * Hashed lists helper routines */ static inline struct sock *sk_entry(const struct hlist_node *node) { return hlist_entry(node, struct sock, sk_node); } static inline struct sock *__sk_head(const struct hlist_head *head) { return hlist_entry(head->first, struct sock, sk_node); } static inline struct sock *sk_head(const struct hlist_head *head) { return hlist_empty(head) ? NULL : __sk_head(head); } static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head) { return hlist_nulls_entry(head->first, struct sock, sk_nulls_node); } static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head) { return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head); } static inline struct sock *sk_next(const struct sock *sk) { return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node); } static inline struct sock *sk_nulls_next(const struct sock *sk) { return (!is_a_nulls(sk->sk_nulls_node.next)) ? hlist_nulls_entry(sk->sk_nulls_node.next, struct sock, sk_nulls_node) : NULL; } static inline bool sk_unhashed(const struct sock *sk) { return hlist_unhashed(&sk->sk_node); } static inline bool sk_hashed(const struct sock *sk) { return !sk_unhashed(sk); } static inline void sk_node_init(struct hlist_node *node) { node->pprev = NULL; } static inline void __sk_del_node(struct sock *sk) { __hlist_del(&sk->sk_node); } /* NB: equivalent to hlist_del_init_rcu */ static inline bool __sk_del_node_init(struct sock *sk) { if (sk_hashed(sk)) { __sk_del_node(sk); sk_node_init(&sk->sk_node); return true; } return false; } /* Grab socket reference count. This operation is valid only when sk is ALREADY grabbed f.e. 
it is found in hash table or a list and the lookup is made under lock preventing hash table modifications. */ static __always_inline void sock_hold(struct sock *sk) { refcount_inc(&sk->sk_refcnt); } /* Ungrab socket in the context, which assumes that socket refcnt cannot hit zero, f.e. it is true in context of any socketcall. */ static __always_inline void __sock_put(struct sock *sk) { refcount_dec(&sk->sk_refcnt); } static inline bool sk_del_node_init(struct sock *sk) { bool rc = __sk_del_node_init(sk); if (rc) { /* paranoid for a while -acme */ WARN_ON(refcount_read(&sk->sk_refcnt) == 1); __sock_put(sk); } return rc; } #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) { if (sk_hashed(sk)) { hlist_nulls_del_init_rcu(&sk->sk_nulls_node); return true; } return false; } static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) { bool rc = __sk_nulls_del_node_init_rcu(sk); if (rc) { /* paranoid for a while -acme */ WARN_ON(refcount_read(&sk->sk_refcnt) == 1); __sock_put(sk); } return rc; } static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) { hlist_add_head(&sk->sk_node, list); } static inline void sk_add_node(struct sock *sk, struct hlist_head *list) { sock_hold(sk); __sk_add_node(sk, list); } static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) { sock_hold(sk); if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && sk->sk_family == AF_INET6) hlist_add_tail_rcu(&sk->sk_node, list); else hlist_add_head_rcu(&sk->sk_node, list); } static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list) { sock_hold(sk); hlist_add_tail_rcu(&sk->sk_node, list); } static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) { hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); } static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list) { hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); } static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) { sock_hold(sk); __sk_nulls_add_node_rcu(sk, list); } static inline void __sk_del_bind_node(struct sock *sk) { __hlist_del(&sk->sk_bind_node); } static inline void sk_add_bind_node(struct sock *sk, struct hlist_head *list) { hlist_add_head(&sk->sk_bind_node, list); } #define sk_for_each(__sk, list) \ hlist_for_each_entry(__sk, list, sk_node) #define sk_for_each_rcu(__sk, list) \ hlist_for_each_entry_rcu(__sk, list, sk_node) #define sk_nulls_for_each(__sk, node, list) \ hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node) #define sk_nulls_for_each_rcu(__sk, node, list) \ hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node) #define sk_for_each_from(__sk) \ hlist_for_each_entry_from(__sk, sk_node) #define sk_nulls_for_each_from(__sk, node) \ if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \ hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node) #define sk_for_each_safe(__sk, tmp, list) \ hlist_for_each_entry_safe(__sk, tmp, list, sk_node) #define sk_for_each_bound(__sk, list) \ hlist_for_each_entry(__sk, list, sk_bind_node) #define sk_for_each_bound_safe(__sk, tmp, list) \ hlist_for_each_entry_safe(__sk, tmp, list, sk_bind_node) /** * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_node to use as a loop cursor. * @head: the head for your list. * @offset: offset of hlist_node within the struct. 
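 *
 * A minimal lookup sketch (hypothetical hash walk; "head" and "hash" are
 * assumed, the caller is inside rcu_read_lock() and takes a reference
 * before leaving it)::
 *
 *	struct hlist_node *node;
 *	struct sock *sk;
 *
 *	sk_for_each_entry_offset_rcu(sk, node, head, offsetof(struct sock, sk_node))
 *		if (sk->sk_hash == hash &&
 *		    refcount_inc_not_zero(&sk->sk_refcnt))
 *			return sk;
 *	return NULL;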
* */ #define sk_for_each_entry_offset_rcu(tpos, pos, head, offset) \ for (pos = rcu_dereference(hlist_first_rcu(head)); \ pos != NULL && \ ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \ pos = rcu_dereference(hlist_next_rcu(pos))) static inline struct user_namespace *sk_user_ns(const struct sock *sk) { /* Careful only use this in a context where these parameters * can not change and must all be valid, such as recvmsg from * userspace. */ return sk->sk_socket->file->f_cred->user_ns; } /* Sock flags */ enum sock_flags { SOCK_DEAD, SOCK_DONE, SOCK_URGINLINE, SOCK_KEEPOPEN, SOCK_LINGER, SOCK_DESTROY, SOCK_BROADCAST, SOCK_TIMESTAMP, SOCK_ZAPPED, SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */ SOCK_DBG, /* %SO_DEBUG setting */ SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */ SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */ SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ SOCK_MEMALLOC, /* VM depends on this socket for swapping */ SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */ SOCK_FASYNC, /* fasync() active */ SOCK_RXQ_OVFL, SOCK_ZEROCOPY, /* buffers from userspace */ SOCK_WIFI_STATUS, /* push wifi status to userspace */ SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS. * Will use last 4 bytes of packet sent from * user-space instead. */ SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */ SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */ SOCK_TXTIME, SOCK_XDP, /* XDP is attached */ SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */ SOCK_RCVMARK, /* Receive SO_MARK ancillary data with packet */ SOCK_RCVPRIORITY, /* Receive SO_PRIORITY ancillary data with packet */ SOCK_TIMESTAMPING_ANY, /* Copy of sk_tsflags & TSFLAGS_ANY */ }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) /* * The highest bit of sk_tsflags is reserved for kernel-internal * SOCKCM_FLAG_TS_OPT_ID. 
There is a check in core/sock.c to control that * SOF_TIMESTAMPING* values do not reach this reserved area */ #define SOCKCM_FLAG_TS_OPT_ID BIT(31) static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk) { nsk->sk_flags = osk->sk_flags; } static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) { __set_bit(flag, &sk->sk_flags); } static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) { __clear_bit(flag, &sk->sk_flags); } static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, int valbool) { if (valbool) sock_set_flag(sk, bit); else sock_reset_flag(sk, bit); } static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) { return test_bit(flag, &sk->sk_flags); } #ifdef CONFIG_NET DECLARE_STATIC_KEY_FALSE(memalloc_socks_key); static inline int sk_memalloc_socks(void) { return static_branch_unlikely(&memalloc_socks_key); } void __receive_sock(struct file *file); #else static inline int sk_memalloc_socks(void) { return 0; } static inline void __receive_sock(struct file *file) { } #endif static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) { return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC); } static inline void sk_acceptq_removed(struct sock *sk) { WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1); } static inline void sk_acceptq_added(struct sock *sk) { WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1); } /* Note: If you think the test should be: * return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog); * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.") */ static inline bool sk_acceptq_is_full(const struct sock *sk) { return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog); } /* * Compute minimal free write space needed to queue new packets. */ static inline int sk_stream_min_wspace(const struct sock *sk) { return READ_ONCE(sk->sk_wmem_queued) >> 1; } static inline int sk_stream_wspace(const struct sock *sk) { return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued); } static inline void sk_wmem_queued_add(struct sock *sk, int val) { WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val); } static inline void sk_forward_alloc_add(struct sock *sk, int val) { /* Paired with lockless reads of sk->sk_forward_alloc */ WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val); } void sk_stream_write_space(struct sock *sk); /* OOB backlog add */ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) { /* dont let skb dst not refcounted, we are going to leave rcu lock */ skb_dst_force(skb); if (!sk->sk_backlog.tail) WRITE_ONCE(sk->sk_backlog.head, skb); else sk->sk_backlog.tail->next = skb; WRITE_ONCE(sk->sk_backlog.tail, skb); skb->next = NULL; } /* * Take into account size of receive queue and backlog queue * Do not take into account this skb truesize, * to allow even a single big packet to come. */ static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit) { unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); return qsize > limit; } /* The per-socket spinlock must be held here. 
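 *
 * A minimal caller sketch (softirq context; using the receive buffer
 * size as the limit is only an example, protocols pick their own)::
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
 *		kfree_skb(skb);
 *	bh_unlock_sock(sk);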
*/ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, unsigned int limit) { if (sk_rcvqueues_full(sk, limit)) return -ENOBUFS; /* * If the skb was allocated from pfmemalloc reserves, only * allow SOCK_MEMALLOC sockets to use it as this socket is * helping free memory */ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) return -ENOMEM; __sk_add_backlog(sk, skb); sk->sk_backlog.len += skb->truesize; return 0; } int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb); INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)); static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) { if (sk_memalloc_socks() && skb_pfmemalloc(skb)) return __sk_backlog_rcv(sk, skb); return INDIRECT_CALL_INET(sk->sk_backlog_rcv, tcp_v6_do_rcv, tcp_v4_do_rcv, sk, skb); } static inline void sk_incoming_cpu_update(struct sock *sk) { int cpu = raw_smp_processor_id(); if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu)) WRITE_ONCE(sk->sk_incoming_cpu, cpu); } static inline void sock_rps_save_rxhash(struct sock *sk, const struct sk_buff *skb) { #ifdef CONFIG_RPS /* The following WRITE_ONCE() is paired with the READ_ONCE() * here, and another one in sock_rps_record_flow(). */ if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash)) WRITE_ONCE(sk->sk_rxhash, skb->hash); #endif } static inline void sock_rps_reset_rxhash(struct sock *sk) { #ifdef CONFIG_RPS /* Paired with READ_ONCE() in sock_rps_record_flow() */ WRITE_ONCE(sk->sk_rxhash, 0); #endif } #define sk_wait_event(__sk, __timeo, __condition, __wait) \ ({ int __rc, __dis = __sk->sk_disconnects; \ release_sock(__sk); \ __rc = __condition; \ if (!__rc) { \ *(__timeo) = wait_woken(__wait, \ TASK_INTERRUPTIBLE, \ *(__timeo)); \ } \ sched_annotate_sleep(); \ lock_sock(__sk); \ __rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \ __rc; \ }) int sk_stream_wait_connect(struct sock *sk, long *timeo_p); int sk_stream_wait_memory(struct sock *sk, long *timeo_p); void sk_stream_wait_close(struct sock *sk, long timeo_p); int sk_stream_error(struct sock *sk, int flags, int err); void sk_stream_kill_queues(struct sock *sk); void sk_set_memalloc(struct sock *sk); void sk_clear_memalloc(struct sock *sk); void __sk_flush_backlog(struct sock *sk); static inline bool sk_flush_backlog(struct sock *sk) { if (unlikely(READ_ONCE(sk->sk_backlog.tail))) { __sk_flush_backlog(sk); return true; } return false; } int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb); struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; struct smc_hashinfo; struct module; struct sk_psock; /* * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes * un-modified. Special care is taken when initializing object to zero. */ static inline void sk_prot_clear_nulls(struct sock *sk, int size) { if (offsetof(struct sock, sk_node.next) != 0) memset(sk, 0, offsetof(struct sock, sk_node.next)); memset(&sk->sk_node.pprev, 0, size - offsetof(struct sock, sk_node.pprev)); } struct proto_accept_arg { int flags; int err; int is_empty; bool kern; }; /* Networking protocol blocks we attach to sockets. 
* socket layer -> transport layer interface */ struct proto { void (*close)(struct sock *sk, long timeout); int (*pre_connect)(struct sock *sk, struct sockaddr *uaddr, int addr_len); int (*connect)(struct sock *sk, struct sockaddr *uaddr, int addr_len); int (*disconnect)(struct sock *sk, int flags); struct sock * (*accept)(struct sock *sk, struct proto_accept_arg *arg); int (*ioctl)(struct sock *sk, int cmd, int *karg); int (*init)(struct sock *sk); void (*destroy)(struct sock *sk); void (*shutdown)(struct sock *sk, int how); int (*setsockopt)(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *option); void (*keepalive)(struct sock *sk, int valbool); #ifdef CONFIG_COMPAT int (*compat_ioctl)(struct sock *sk, unsigned int cmd, unsigned long arg); #endif int (*sendmsg)(struct sock *sk, struct msghdr *msg, size_t len); int (*recvmsg)(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len); void (*splice_eof)(struct socket *sock); int (*bind)(struct sock *sk, struct sockaddr *addr, int addr_len); int (*bind_add)(struct sock *sk, struct sockaddr *addr, int addr_len); int (*backlog_rcv) (struct sock *sk, struct sk_buff *skb); bool (*bpf_bypass_getsockopt)(int level, int optname); void (*release_cb)(struct sock *sk); /* Keeping track of sk's, looking them up, and port selection methods. */ int (*hash)(struct sock *sk); void (*unhash)(struct sock *sk); void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); void (*put_port)(struct sock *sk); #ifdef CONFIG_BPF_SYSCALL int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock, bool restore); #endif /* Keeping track of sockets in use */ #ifdef CONFIG_PROC_FS unsigned int inuse_idx; #endif bool (*stream_memory_free)(const struct sock *sk, int wake); bool (*sock_is_readable)(struct sock *sk); /* Memory pressure */ void (*enter_memory_pressure)(struct sock *sk); void (*leave_memory_pressure)(struct sock *sk); atomic_long_t *memory_allocated; /* Current allocated memory. */ int __percpu *per_cpu_fw_alloc; struct percpu_counter *sockets_allocated; /* Current number of sockets. */ /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes. * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. 
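 *
 * Roughly how a protocol is expected to wire the memory knobs up
 * (hypothetical "foo" protocol; only these fields shown)::
 *
 *	static atomic_long_t foo_memory_allocated;
 *	static DEFINE_PER_CPU(int, foo_memory_per_cpu_fw_alloc);
 *	static unsigned long foo_memory_pressure;
 *	static long foo_sysctl_mem[3];
 *
 *	static struct proto foo_prot = {
 *		.memory_allocated	= &foo_memory_allocated,
 *		.per_cpu_fw_alloc	= &foo_memory_per_cpu_fw_alloc,
 *		.memory_pressure	= &foo_memory_pressure,
 *		.sysctl_mem		= foo_sysctl_mem,
 *	};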
*/ unsigned long *memory_pressure; long *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; u32 sysctl_wmem_offset; u32 sysctl_rmem_offset; int max_header; bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; unsigned int ipv6_pinfo_offset; slab_flags_t slab_flags; unsigned int useroffset; /* Usercopy region offset */ unsigned int usersize; /* Usercopy region size */ unsigned int __percpu *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; union { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; struct smc_hashinfo *smc_hash; } h; struct module *owner; char name[32]; struct list_head node; int (*diag_destroy)(struct sock *sk, int err); } __randomize_layout; int proto_register(struct proto *prot, int alloc_slab); void proto_unregister(struct proto *prot); int sock_load_diag_module(int family, int protocol); INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake)); static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) { if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf)) return false; return sk->sk_prot->stream_memory_free ? INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free, tcp_stream_memory_free, sk, wake) : true; } static inline bool sk_stream_memory_free(const struct sock *sk) { return __sk_stream_memory_free(sk, 0); } static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake) { return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && __sk_stream_memory_free(sk, wake); } static inline bool sk_stream_is_writeable(const struct sock *sk) { return __sk_stream_is_writeable(sk, 0); } static inline int sk_under_cgroup_hierarchy(struct sock *sk, struct cgroup *ancestor) { #ifdef CONFIG_SOCK_CGROUP_DATA return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), ancestor); #else return -ENOTSUPP; #endif } #define SK_ALLOC_PERCPU_COUNTER_BATCH 16 static inline void sk_sockets_allocated_dec(struct sock *sk) { percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1, SK_ALLOC_PERCPU_COUNTER_BATCH); } static inline void sk_sockets_allocated_inc(struct sock *sk) { percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1, SK_ALLOC_PERCPU_COUNTER_BATCH); } static inline u64 sk_sockets_allocated_read_positive(struct sock *sk) { return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); } static inline int proto_sockets_allocated_sum_positive(struct proto *prot) { return percpu_counter_sum_positive(prot->sockets_allocated); } #ifdef CONFIG_PROC_FS #define PROTO_INUSE_NR 64 /* should be enough for the first time */ struct prot_inuse { int all; int val[PROTO_INUSE_NR]; }; static inline void sock_prot_inuse_add(const struct net *net, const struct proto *prot, int val) { this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val); } static inline void sock_inuse_add(const struct net *net, int val) { this_cpu_add(net->core.prot_inuse->all, val); } int sock_prot_inuse_get(struct net *net, struct proto *proto); int sock_inuse_get(struct net *net); #else static inline void sock_prot_inuse_add(const struct net *net, const struct proto *prot, int val) { } static inline void sock_inuse_add(const struct net *net, int val) { } #endif /* With per-bucket locks this operation is not-atomic, so that * this version is not worse. 
*/ static inline int __sk_prot_rehash(struct sock *sk) { sk->sk_prot->unhash(sk); return sk->sk_prot->hash(sk); } /* About 10 seconds */ #define SOCK_DESTROY_TIME (10*HZ) /* Sockets 0-1023 can't be bound to unless you are superuser */ #define PROT_SOCK 1024 #define SHUTDOWN_MASK 3 #define RCV_SHUTDOWN 1 #define SEND_SHUTDOWN 2 #define SOCK_BINDADDR_LOCK 4 #define SOCK_BINDPORT_LOCK 8 struct socket_alloc { struct socket socket; struct inode vfs_inode; }; static inline struct socket *SOCKET_I(struct inode *inode) { return &container_of(inode, struct socket_alloc, vfs_inode)->socket; } static inline struct inode *SOCK_INODE(struct socket *socket) { return &container_of(socket, struct socket_alloc, socket)->vfs_inode; } /* * Functions for memory accounting */ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind); int __sk_mem_schedule(struct sock *sk, int size, int kind); void __sk_mem_reduce_allocated(struct sock *sk, int amount); void __sk_mem_reclaim(struct sock *sk, int amount); #define SK_MEM_SEND 0 #define SK_MEM_RECV 1 /* sysctl_mem values are in pages */ static inline long sk_prot_mem_limits(const struct sock *sk, int index) { return READ_ONCE(sk->sk_prot->sysctl_mem[index]); } static inline int sk_mem_pages(int amt) { return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT; } static inline bool sk_has_account(struct sock *sk) { /* return true if protocol supports memory accounting */ return !!sk->sk_prot->memory_allocated; } static inline bool sk_wmem_schedule(struct sock *sk, int size) { int delta; if (!sk_has_account(sk)) return true; delta = size - sk->sk_forward_alloc; return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND); } static inline bool __sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc) { int delta; if (!sk_has_account(sk)) return true; delta = size - sk->sk_forward_alloc; return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) || pfmemalloc; } static inline bool sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) { return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb)); } static inline int sk_unused_reserved_mem(const struct sock *sk) { int unused_mem; if (likely(!sk->sk_reserved_mem)) return 0; unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued - atomic_read(&sk->sk_rmem_alloc); return unused_mem > 0 ? 
unused_mem : 0; } static inline void sk_mem_reclaim(struct sock *sk) { int reclaimable; if (!sk_has_account(sk)) return; reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); if (reclaimable >= (int)PAGE_SIZE) __sk_mem_reclaim(sk, reclaimable); } static inline void sk_mem_reclaim_final(struct sock *sk) { sk->sk_reserved_mem = 0; sk_mem_reclaim(sk); } static inline void sk_mem_charge(struct sock *sk, int size) { if (!sk_has_account(sk)) return; sk_forward_alloc_add(sk, -size); } static inline void sk_mem_uncharge(struct sock *sk, int size) { if (!sk_has_account(sk)) return; sk_forward_alloc_add(sk, size); sk_mem_reclaim(sk); } #if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES) static inline void sk_owner_set(struct sock *sk, struct module *owner) { __module_get(owner); sk->sk_owner = owner; } static inline void sk_owner_clear(struct sock *sk) { sk->sk_owner = NULL; } static inline void sk_owner_put(struct sock *sk) { module_put(sk->sk_owner); } #else static inline void sk_owner_set(struct sock *sk, struct module *owner) { } static inline void sk_owner_clear(struct sock *sk) { } static inline void sk_owner_put(struct sock *sk) { } #endif /* * Macro so as to not evaluate some arguments when * lockdep is not enabled. * * Mark both the sk_lock and the sk_lock.slock as a * per-address-family lock class. */ #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ do { \ sk_owner_set(sk, THIS_MODULE); \ sk->sk_lock.owned = 0; \ init_waitqueue_head(&sk->sk_lock.wq); \ spin_lock_init(&(sk)->sk_lock.slock); \ debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ sizeof((sk)->sk_lock)); \ lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ (skey), (sname)); \ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ } while (0) static inline bool lockdep_sock_is_held(const struct sock *sk) { return lockdep_is_held(&sk->sk_lock) || lockdep_is_held(&sk->sk_lock.slock); } void lock_sock_nested(struct sock *sk, int subclass); static inline void lock_sock(struct sock *sk) { lock_sock_nested(sk, 0); } void __lock_sock(struct sock *sk); void __release_sock(struct sock *sk); void release_sock(struct sock *sk); /* BH context may only use the following locking interface. */ #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) #define bh_lock_sock_nested(__sk) \ spin_lock_nested(&((__sk)->sk_lock.slock), \ SINGLE_DEPTH_NESTING) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock); /** * lock_sock_fast - fast version of lock_sock * @sk: socket * * This version should be used for very small section, where process won't block * return false if fast path is taken: * * sk_lock.slock locked, owned = 0, BH disabled * * return true if slow path is taken: * * sk_lock.slock unlocked, owned = 1, BH enabled */ static inline bool lock_sock_fast(struct sock *sk) { /* The sk_lock has mutex_lock() semantics here. */ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); return __lock_sock_fast(sk); } /* fast socket lock variant for caller already holding a [different] socket lock */ static inline bool lock_sock_fast_nested(struct sock *sk) { mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_); return __lock_sock_fast(sk); } /** * unlock_sock_fast - complement of lock_sock_fast * @sk: socket * @slow: slow mode * * fast unlock socket for user context. 
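 *
 * Minimal pairing sketch (process context; the section must not sleep,
 * since the fast path keeps BH disabled)::
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short critical section ...
 *	unlock_sock_fast(sk, slow);
 *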
* If slow mode is on, we call regular release_sock() */ static inline void unlock_sock_fast(struct sock *sk, bool slow) __releases(&sk->sk_lock.slock) { if (slow) { release_sock(sk); __release(&sk->sk_lock.slock); } else { mutex_release(&sk->sk_lock.dep_map, _RET_IP_); spin_unlock_bh(&sk->sk_lock.slock); } } void sockopt_lock_sock(struct sock *sk); void sockopt_release_sock(struct sock *sk); bool sockopt_ns_capable(struct user_namespace *ns, int cap); bool sockopt_capable(int cap); /* Used by processes to "lock" a socket state, so that * interrupts and bottom half handlers won't change it * from under us. It essentially blocks any incoming * packets, so that we won't get any new data or any * packets that change the state of the socket. * * While locked, BH processing will add new packets to * the backlog queue. This queue is processed by the * owner of the socket lock right before it is released. * * Since ~2.3.5 it is also exclusive sleep lock serializing * accesses from user process context. */ static inline void sock_owned_by_me(const struct sock *sk) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks); #endif } static inline void sock_not_owned_by_me(const struct sock *sk) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks); #endif } static inline bool sock_owned_by_user(const struct sock *sk) { sock_owned_by_me(sk); return sk->sk_lock.owned; } static inline bool sock_owned_by_user_nocheck(const struct sock *sk) { return sk->sk_lock.owned; } static inline void sock_release_ownership(struct sock *sk) { DEBUG_NET_WARN_ON_ONCE(!sock_owned_by_user_nocheck(sk)); sk->sk_lock.owned = 0; /* The sk_lock has mutex_unlock() semantics: */ mutex_release(&sk->sk_lock.dep_map, _RET_IP_); } /* no reclassification while locks are held */ static inline bool sock_allow_reclassification(const struct sock *csk) { struct sock *sk = (struct sock *)csk; return !sock_owned_by_user_nocheck(sk) && !spin_is_locked(&sk->sk_lock.slock); } struct sock *sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern); void sk_free(struct sock *sk); void sk_net_refcnt_upgrade(struct sock *sk); void sk_destruct(struct sock *sk); struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); void sk_free_unlock_clone(struct sock *sk); struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority); void __sock_wfree(struct sk_buff *skb); void sock_wfree(struct sk_buff *skb); struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, gfp_t priority); void skb_orphan_partial(struct sk_buff *skb); void sock_rfree(struct sk_buff *skb); void sock_efree(struct sk_buff *skb); #ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); void sock_pfree(struct sk_buff *skb); static inline void skb_set_owner_edemux(struct sk_buff *skb, struct sock *sk) { skb_orphan(skb); if (refcount_inc_not_zero(&sk->sk_refcnt)) { skb->sk = sk; skb->destructor = sock_edemux; } } #else #define sock_edemux sock_efree #endif int sk_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int sock_setsockopt(struct socket *sock, int level, int op, sockptr_t optval, unsigned int optlen); int do_sock_setsockopt(struct socket *sock, bool compat, int level, int optname, sockptr_t optval, int optlen); int do_sock_getsockopt(struct socket *sock, bool compat, int level, int optname, sockptr_t optval, sockptr_t optlen); int sk_getsockopt(struct sock *sk, int level, int optname, sockptr_t optval, sockptr_t 
optlen); int sock_gettstamp(struct socket *sock, void __user *userstamp, bool timeval, bool time32); struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, unsigned long data_len, int noblock, int *errcode, int max_page_order); static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, int noblock, int *errcode) { return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); } void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); void *sock_kmemdup(struct sock *sk, const void *src, int size, gfp_t priority); void sock_kfree_s(struct sock *sk, void *mem, int size); void sock_kzfree_s(struct sock *sk, void *mem, int size); void sk_send_sigurg(struct sock *sk); static inline void sock_replace_proto(struct sock *sk, struct proto *proto) { if (sk->sk_socket) clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); WRITE_ONCE(sk->sk_prot, proto); } struct sockcm_cookie { u64 transmit_time; u32 mark; u32 tsflags; u32 ts_opt_id; u32 priority; }; static inline void sockcm_init(struct sockcm_cookie *sockc, const struct sock *sk) { *sockc = (struct sockcm_cookie) { .mark = READ_ONCE(sk->sk_mark), .tsflags = READ_ONCE(sk->sk_tsflags), .priority = READ_ONCE(sk->sk_priority), }; } int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, struct sockcm_cookie *sockc); int sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct sockcm_cookie *sockc); /* * Functions to fill in entries in struct proto_ops when a protocol * does not implement a particular function. */ int sock_no_bind(struct socket *, struct sockaddr *, int); int sock_no_connect(struct socket *, struct sockaddr *, int, int); int sock_no_socketpair(struct socket *, struct socket *); int sock_no_accept(struct socket *, struct socket *, struct proto_accept_arg *); int sock_no_getname(struct socket *, struct sockaddr *, int); int sock_no_ioctl(struct socket *, unsigned int, unsigned long); int sock_no_listen(struct socket *, int); int sock_no_shutdown(struct socket *, int); int sock_no_sendmsg(struct socket *, struct msghdr *, size_t); int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len); int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int); int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma); /* * Functions to fill in entries in struct proto_ops when a protocol * uses the inet style. */ int sock_common_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); int sock_common_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen); void sk_common_release(struct sock *sk); /* * Default socket callbacks and setup code */ /* Initialise core socket variables using an explicit uid. */ void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid); /* Initialise core socket variables. * Assumes struct socket *sock is embedded in a struct socket_alloc. */ void sock_init_data(struct socket *sock, struct sock *sk); /* * Socket reference counting postulates. * * * Each user of socket SHOULD hold a reference count. * * Each access point to socket (an hash table bucket, reference from a list, * running timer, skb in flight MUST hold a reference count. * * When reference count hits 0, it means it will never increase back. 
* * When reference count hits 0, it means that no references from * outside exist to this socket and current process on current CPU * is last user and may/should destroy this socket. * * sk_free is called from any context: process, BH, IRQ. When * it is called, socket has no references from outside -> sk_free * may release descendant resources allocated by the socket, but * to the time when it is called, socket is NOT referenced by any * hash tables, lists etc. * * Packets, delivered from outside (from network or from another process) * and enqueued on receive/error queues SHOULD NOT grab reference count, * when they sit in queue. Otherwise, packets will leak to hole, when * socket is looked up by one cpu and unhasing is made by another CPU. * It is true for udp/raw, netlink (leak to receive and error queues), tcp * (leak to backlog). Packet socket does all the processing inside * BR_NETPROTO_LOCK, so that it has not this race condition. UNIX sockets * use separate SMP lock, so that they are prone too. */ /* Ungrab socket and destroy it, if it was the last reference. */ static inline void sock_put(struct sock *sk) { if (refcount_dec_and_test(&sk->sk_refcnt)) sk_free(sk); } /* Generic version of sock_put(), dealing with all sockets * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...) */ void sock_gen_put(struct sock *sk); int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, unsigned int trim_cap, bool refcounted); static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) { return __sk_receive_skb(sk, skb, nested, 1, true); } static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) { /* sk_tx_queue_mapping accept only upto a 16-bit value */ if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX)) return; /* Paired with READ_ONCE() in sk_tx_queue_get() and * other WRITE_ONCE() because socket lock might be not held. */ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue); } #define NO_QUEUE_MAPPING USHRT_MAX static inline void sk_tx_queue_clear(struct sock *sk) { /* Paired with READ_ONCE() in sk_tx_queue_get() and * other WRITE_ONCE() because socket lock might be not held. */ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING); } static inline int sk_tx_queue_get(const struct sock *sk) { if (sk) { /* Paired with WRITE_ONCE() in sk_tx_queue_clear() * and sk_tx_queue_set(). 
*/ int val = READ_ONCE(sk->sk_tx_queue_mapping); if (val != NO_QUEUE_MAPPING) return val; } return -1; } static inline void __sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb, bool force_set) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING if (skb_rx_queue_recorded(skb)) { u16 rx_queue = skb_get_rx_queue(skb); if (force_set || unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); } #endif } static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) { __sk_rx_queue_set(sk, skb, true); } static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb) { __sk_rx_queue_set(sk, skb, false); } static inline void sk_rx_queue_clear(struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING); #endif } static inline int sk_rx_queue_get(const struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING if (sk) { int res = READ_ONCE(sk->sk_rx_queue_mapping); if (res != NO_QUEUE_MAPPING) return res; } #endif return -1; } static inline void sk_set_socket(struct sock *sk, struct socket *sock) { sk->sk_socket = sock; } static inline wait_queue_head_t *sk_sleep(struct sock *sk) { BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0); return &rcu_dereference_raw(sk->sk_wq)->wait; } /* Detach socket from process context. * Announce socket dead, detach it from wait queue and inode. * Note that parent inode held reference count on this struct sock, * we do not release it in this function, because protocol * probably wants some additional cleanups or even continuing * to work with this socket (TCP). */ static inline void sock_orphan(struct sock *sk) { write_lock_bh(&sk->sk_callback_lock); sock_set_flag(sk, SOCK_DEAD); sk_set_socket(sk, NULL); sk->sk_wq = NULL; write_unlock_bh(&sk->sk_callback_lock); } static inline void sock_graft(struct sock *sk, struct socket *parent) { WARN_ON(parent->sk); write_lock_bh(&sk->sk_callback_lock); rcu_assign_pointer(sk->sk_wq, &parent->wq); parent->sk = sk; sk_set_socket(sk, parent); sk->sk_uid = SOCK_INODE(parent)->i_uid; security_sock_graft(sk, parent); write_unlock_bh(&sk->sk_callback_lock); } kuid_t sock_i_uid(struct sock *sk); unsigned long __sock_i_ino(struct sock *sk); unsigned long sock_i_ino(struct sock *sk); static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk) { return sk ? 
sk->sk_uid : make_kuid(net->user_ns, 0); } static inline u32 net_tx_rndhash(void) { u32 v = get_random_u32(); return v ?: 1; } static inline void sk_set_txhash(struct sock *sk) { /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash()); } static inline bool sk_rethink_txhash(struct sock *sk) { if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) { sk_set_txhash(sk); return true; } return false; } static inline struct dst_entry * __sk_dst_get(const struct sock *sk) { return rcu_dereference_check(sk->sk_dst_cache, lockdep_sock_is_held(sk)); } static inline struct dst_entry * sk_dst_get(const struct sock *sk) { struct dst_entry *dst; rcu_read_lock(); dst = rcu_dereference(sk->sk_dst_cache); if (dst && !rcuref_get(&dst->__rcuref)) dst = NULL; rcu_read_unlock(); return dst; } static inline void __dst_negative_advice(struct sock *sk) { struct dst_entry *dst = __sk_dst_get(sk); if (dst && dst->ops->negative_advice) dst->ops->negative_advice(sk, dst); } static inline void dst_negative_advice(struct sock *sk) { sk_rethink_txhash(sk); __dst_negative_advice(sk); } static inline void __sk_dst_set(struct sock *sk, struct dst_entry *dst) { struct dst_entry *old_dst; sk_tx_queue_clear(sk); WRITE_ONCE(sk->sk_dst_pending_confirm, 0); old_dst = rcu_dereference_protected(sk->sk_dst_cache, lockdep_sock_is_held(sk)); rcu_assign_pointer(sk->sk_dst_cache, dst); dst_release(old_dst); } static inline void sk_dst_set(struct sock *sk, struct dst_entry *dst) { struct dst_entry *old_dst; sk_tx_queue_clear(sk); WRITE_ONCE(sk->sk_dst_pending_confirm, 0); old_dst = unrcu_pointer(xchg(&sk->sk_dst_cache, RCU_INITIALIZER(dst))); dst_release(old_dst); } static inline void __sk_dst_reset(struct sock *sk) { __sk_dst_set(sk, NULL); } static inline void sk_dst_reset(struct sock *sk) { sk_dst_set(sk, NULL); } struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); static inline void sk_dst_confirm(struct sock *sk) { if (!READ_ONCE(sk->sk_dst_pending_confirm)) WRITE_ONCE(sk->sk_dst_pending_confirm, 1); } static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n) { if (skb_get_dst_pending_confirm(skb)) { struct sock *sk = skb->sk; if (sk && READ_ONCE(sk->sk_dst_pending_confirm)) WRITE_ONCE(sk->sk_dst_pending_confirm, 0); neigh_confirm(n); } } bool sk_mc_loop(const struct sock *sk); static inline bool sk_can_gso(const struct sock *sk) { return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); } void sk_setup_caps(struct sock *sk, struct dst_entry *dst); static inline void sk_gso_disable(struct sock *sk) { sk->sk_gso_disabled = 1; sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, struct iov_iter *from, char *to, int copy, int offset) { if (skb->ip_summed == CHECKSUM_NONE) { __wsum csum = 0; if (!csum_and_copy_from_iter_full(to, copy, &csum, from)) return -EFAULT; skb->csum = csum_block_add(skb->csum, csum, offset); } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { if (!copy_from_iter_full_nocache(to, copy, from)) return -EFAULT; } else if (!copy_from_iter_full(to, copy, from)) return -EFAULT; return 0; } static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb, struct iov_iter *from, int copy) { int err, offset = skb->len; err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), copy, offset); if (err) __skb_trim(skb, offset); return err; } static inline int 
skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, struct sk_buff *skb, struct page *page, int off, int copy) { int err; err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, copy, skb->len); if (err) return err; skb_len_add(skb, copy); sk_wmem_queued_add(sk, copy); sk_mem_charge(sk, copy); return 0; } /** * sk_wmem_alloc_get - returns write allocations * @sk: socket * * Return: sk_wmem_alloc minus initial offset of one */ static inline int sk_wmem_alloc_get(const struct sock *sk) { return refcount_read(&sk->sk_wmem_alloc) - 1; } /** * sk_rmem_alloc_get - returns read allocations * @sk: socket * * Return: sk_rmem_alloc */ static inline int sk_rmem_alloc_get(const struct sock *sk) { return atomic_read(&sk->sk_rmem_alloc); } /** * sk_has_allocations - check if allocations are outstanding * @sk: socket * * Return: true if socket has write or read allocations */ static inline bool sk_has_allocations(const struct sock *sk) { return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); } /** * skwq_has_sleeper - check if there are any waiting processes * @wq: struct socket_wq * * Return: true if socket_wq has waiting processes * * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory * barrier call. They were added due to the race found within the tcp code. * * Consider following tcp code paths:: * * CPU1 CPU2 * sys_select receive packet * ... ... * __add_wait_queue update tp->rcv_nxt * ... ... * tp->rcv_nxt check sock_def_readable * ... { * schedule rcu_read_lock(); * wq = rcu_dereference(sk->sk_wq); * if (wq && waitqueue_active(&wq->wait)) * wake_up_interruptible(&wq->wait) * ... * } * * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay * in its cache, and so does the tp->rcv_nxt update on CPU2 side. The CPU1 * could then endup calling schedule and sleep forever if there are no more * data on the socket. * */ static inline bool skwq_has_sleeper(struct socket_wq *wq) { return wq && wq_has_sleeper(&wq->wait); } /** * sock_poll_wait - wrapper for the poll_wait call. * @filp: file * @sock: socket to wait on * @p: poll_table * * See the comments in the wq_has_sleeper function. */ static inline void sock_poll_wait(struct file *filp, struct socket *sock, poll_table *p) { /* Provides a barrier we need to be sure we are in sync * with the socket flags modification. * * This memory barrier is paired in the wq_has_sleeper. */ poll_wait(filp, &sock->wq.wait, p); } static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) { /* This pairs with WRITE_ONCE() in sk_set_txhash() */ u32 txhash = READ_ONCE(sk->sk_txhash); if (txhash) { skb->l4_hash = 1; skb->hash = txhash; } } void skb_set_owner_w(struct sk_buff *skb, struct sock *sk); /* * Queue a received datagram if it will fit. Stream and sequenced * protocols can't normally use this as they need to fit buffers in * and play with them. * * Inlined as it's very short and called for pretty much every * packet ever received. 
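 *
 * Roughly the enqueue step it is used for (see __sock_queue_rcv_skb()
 * for the real thing, which also checks limits and sets the dropcount)::
 *
 *	skb_set_owner_r(skb, sk);
 *	__skb_queue_tail(&sk->sk_receive_queue, skb);
 *	if (!sock_flag(sk, SOCK_DEAD))
 *		sk->sk_data_ready(sk);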
*/ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) { skb_orphan(skb); skb->sk = sk; skb->destructor = sock_rfree; atomic_add(skb->truesize, &sk->sk_rmem_alloc); sk_mem_charge(sk, skb->truesize); } static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk) { if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { skb_orphan(skb); skb->destructor = sock_efree; skb->sk = sk; return true; } return false; } static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk) { skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); if (skb) { if (sk_rmem_schedule(sk, skb, skb->truesize)) { skb_set_owner_r(skb, sk); return skb; } __kfree_skb(skb); } return NULL; } static inline void skb_prepare_for_gro(struct sk_buff *skb) { if (skb->destructor != sock_wfree) { skb_orphan(skb); return; } skb->slow_gro = 1; } void sk_reset_timer(struct sock *sk, struct timer_list *timer, unsigned long expires); void sk_stop_timer(struct sock *sk, struct timer_list *timer); void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer); int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue, struct sk_buff *skb, unsigned int flags, void (*destructor)(struct sock *sk, struct sk_buff *skb)); int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason *reason); static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { return sock_queue_rcv_skb_reason(sk, skb, NULL); } int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); struct sk_buff *sock_dequeue_err_skb(struct sock *sk); /* * Recover an error report and clear atomically */ static inline int sock_error(struct sock *sk) { int err; /* Avoid an atomic operation for the common case. * This is racy since another cpu/thread can change sk_err under us. */ if (likely(data_race(!sk->sk_err))) return 0; err = xchg(&sk->sk_err, 0); return -err; } void sk_error_report(struct sock *sk); static inline unsigned long sock_wspace(struct sock *sk) { int amt = 0; if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc); if (amt < 0) amt = 0; } return amt; } /* Note: * We use sk->sk_wq_raw, from contexts knowing this * pointer is not NULL and cannot disappear/change. */ static inline void sk_set_bit(int nr, struct sock *sk) { if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) && !sock_flag(sk, SOCK_FASYNC)) return; set_bit(nr, &sk->sk_wq_raw->flags); } static inline void sk_clear_bit(int nr, struct sock *sk) { if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) && !sock_flag(sk, SOCK_FASYNC)) return; clear_bit(nr, &sk->sk_wq_raw->flags); } static inline void sk_wake_async(const struct sock *sk, int how, int band) { if (sock_flag(sk, SOCK_FASYNC)) { rcu_read_lock(); sock_wake_async(rcu_dereference(sk->sk_wq), how, band); rcu_read_unlock(); } } static inline void sk_wake_async_rcu(const struct sock *sk, int how, int band) { if (unlikely(sock_flag(sk, SOCK_FASYNC))) sock_wake_async(rcu_dereference(sk->sk_wq), how, band); } /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak. * Note: for send buffers, TCP works better if we can build two skbs at * minimum. 
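 *
 * Rough numbers, assuming sizeof(struct sk_buff) of about 232 bytes and
 * 64 byte cache lines (both vary with the configuration)::
 *
 *	TCP_SKB_MIN_TRUESIZE ~= 2048 + 256 = 2304
 *	SOCK_MIN_SNDBUF      ~= 2 * 2304   = 4608
 *	SOCK_MIN_RCVBUF      ~= 2304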
*/ #define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff))) #define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2) #define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE static inline void sk_stream_moderate_sndbuf(struct sock *sk) { u32 val; if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) return; val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); val = max_t(u32, val, sk_unused_reserved_mem(sk)); WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF)); } /** * sk_page_frag - return an appropriate page_frag * @sk: socket * * Use the per task page_frag instead of the per socket one for * optimization when we know that we're in process context and own * everything that's associated with %current. * * Both direct reclaim and page faults can nest inside other * socket operations and end up recursing into sk_page_frag() * while it's already in use: explicitly avoid task page_frag * when users disable sk_use_task_frag. * * Return: a per task page_frag if context allows that, * otherwise a per socket one. */ static inline struct page_frag *sk_page_frag(struct sock *sk) { if (sk->sk_use_task_frag) return &current->task_frag; return &sk->sk_frag; } bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag); /* * Default write policy as shown to user space via poll/select/SIGIO */ static inline bool sock_writeable(const struct sock *sk) { return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1); } static inline gfp_t gfp_any(void) { return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; } static inline gfp_t gfp_memcg_charge(void) { return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; } static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) { return noblock ? 0 : sk->sk_rcvtimeo; } static inline long sock_sndtimeo(const struct sock *sk, bool noblock) { return noblock ? 0 : sk->sk_sndtimeo; } static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) { int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len); return v ?: 1; } /* Alas, with timeout socket operations are not restartable. * Compare this to poll(). */ static inline int sock_intr_errno(long timeo) { return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR; } struct sock_skb_cb { u32 dropcount; }; /* Store sock_skb_cb at the end of skb->cb[] so protocol families * using skb->cb[] would keep using it directly and utilize its * alignment guarantee. */ #define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \ sizeof(struct sock_skb_cb))) #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \ SOCK_SKB_CB_OFFSET)) #define sock_skb_cb_check_size(size) \ BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET) static inline void sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) { SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
atomic_read(&sk->sk_drops) : 0; } static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb) { int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs); atomic_add(segs, &sk->sk_drops); } static inline ktime_t sock_read_timestamp(struct sock *sk) { #if BITS_PER_LONG==32 unsigned int seq; ktime_t kt; do { seq = read_seqbegin(&sk->sk_stamp_seq); kt = sk->sk_stamp; } while (read_seqretry(&sk->sk_stamp_seq, seq)); return kt; #else return READ_ONCE(sk->sk_stamp); #endif } static inline void sock_write_timestamp(struct sock *sk, ktime_t kt) { #if BITS_PER_LONG==32 write_seqlock(&sk->sk_stamp_seq); sk->sk_stamp = kt; write_sequnlock(&sk->sk_stamp_seq); #else WRITE_ONCE(sk->sk_stamp, kt); #endif } void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); static inline void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); u32 tsflags = READ_ONCE(sk->sk_tsflags); ktime_t kt = skb->tstamp; /* * generate control messages if * - receive time stamping in software requested * - software time stamp available and wanted * - hardware time stamps available and wanted */ if (sock_flag(sk, SOCK_RCVTSTAMP) || (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) || (hwtstamps->hwtstamp && (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) __sock_recv_timestamp(msg, sk, skb); else sock_write_timestamp(sk, kt); if (sock_flag(sk, SOCK_WIFI_STATUS) && skb_wifi_acked_valid(skb)) __sock_recv_wifi_status(msg, sk, skb); } void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); #define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC) static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { #define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL) | \ (1UL << SOCK_RCVTSTAMP) | \ (1UL << SOCK_RCVMARK) | \ (1UL << SOCK_RCVPRIORITY) | \ (1UL << SOCK_TIMESTAMPING_ANY)) #define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \ SOF_TIMESTAMPING_RAW_HARDWARE) if (READ_ONCE(sk->sk_flags) & FLAGS_RECV_CMSGS) __sock_recv_cmsgs(msg, sk, skb); else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP))) sock_write_timestamp(sk, skb->tstamp); else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP)) sock_write_timestamp(sk, 0); } void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags); /** * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @sk: socket sending this packet * @sockc: pointer to socket cmsg cookie to get timestamping info * @tx_flags: completed with instructions for time stamping * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno) * * Note: callers should take care of initial ``*tx_flags`` value (usually 0) */ static inline void _sock_tx_timestamp(struct sock *sk, const struct sockcm_cookie *sockc, __u8 *tx_flags, __u32 *tskey) { __u32 tsflags = sockc->tsflags; if (unlikely(tsflags)) { __sock_tx_timestamp(tsflags, tx_flags); if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey && tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) { if (tsflags & SOCKCM_FLAG_TS_OPT_ID) *tskey = sockc->ts_opt_id; else *tskey = atomic_inc_return(&sk->sk_tskey) - 1; } } if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) *tx_flags |= SKBTX_WIFI_STATUS; } static inline void sock_tx_timestamp(struct sock *sk, const struct sockcm_cookie *sockc, __u8 *tx_flags) { _sock_tx_timestamp(sk, sockc, tx_flags, NULL); } static inline void 
skb_setup_tx_timestamp(struct sk_buff *skb, const struct sockcm_cookie *sockc) { _sock_tx_timestamp(skb->sk, sockc, &skb_shinfo(skb)->tx_flags, &skb_shinfo(skb)->tskey); } static inline bool sk_is_inet(const struct sock *sk) { int family = READ_ONCE(sk->sk_family); return family == AF_INET || family == AF_INET6; } static inline bool sk_is_tcp(const struct sock *sk) { return sk_is_inet(sk) && sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP; } static inline bool sk_is_udp(const struct sock *sk) { return sk_is_inet(sk) && sk->sk_type == SOCK_DGRAM && sk->sk_protocol == IPPROTO_UDP; } static inline bool sk_is_stream_unix(const struct sock *sk) { return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM; } static inline bool sk_is_vsock(const struct sock *sk) { return sk->sk_family == AF_VSOCK; } /** * sk_eat_skb - Release a skb if it is no longer needed * @sk: socket to eat this skb from * @skb: socket buffer to eat * * This routine must be called with interrupts disabled or with the socket * locked so that the sk_buff queue operation is ok. */ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); } static inline bool skb_sk_is_prefetched(struct sk_buff *skb) { #ifdef CONFIG_INET return skb->destructor == sock_pfree; #else return false; #endif /* CONFIG_INET */ } /* This helper checks if a socket is a full socket, * ie _not_ a timewait or request socket. */ static inline bool sk_fullsock(const struct sock *sk) { return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); } static inline bool sk_is_refcounted(struct sock *sk) { /* Only full sockets have sk->sk_flags. */ return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE); } /* Checks if this SKB belongs to an HW offloaded socket * and whether any SW fallbacks are required based on dev. * Check decrypted mark in case skb_orphan() cleared socket. */ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) { #ifdef CONFIG_SOCK_VALIDATE_XMIT struct sock *sk = skb->sk; if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { skb = sk->sk_validate_xmit_skb(sk, dev, skb); } else if (unlikely(skb_is_decrypted(skb))) { pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n"); kfree_skb(skb); skb = NULL; } #endif return skb; } /* This helper checks if a socket is a LISTEN or NEW_SYN_RECV * SYNACK messages can be attached to either ones (depending on SYNCOOKIE) */ static inline bool sk_listener(const struct sock *sk) { return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); } /* This helper checks if a socket is a LISTEN or NEW_SYN_RECV or TIME_WAIT * TCP SYNACK messages can be attached to LISTEN or NEW_SYN_RECV (depending on SYNCOOKIE) * TCP RST and ACK can be attached to TIME_WAIT. */ static inline bool sk_listener_or_tw(const struct sock *sk) { return (1 << READ_ONCE(sk->sk_state)) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV | TCPF_TIME_WAIT); } void sock_enable_timestamp(struct sock *sk, enum sock_flags flag); int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, int type); bool sk_ns_capable(const struct sock *sk, struct user_namespace *user_ns, int cap); bool sk_capable(const struct sock *sk, int cap); bool sk_net_capable(const struct sock *sk, int cap); void sk_get_meminfo(const struct sock *sk, u32 *meminfo); /* Take into consideration the size of the struct sk_buff overhead in the * determination of these values, since that is non-constant across * platforms. 
This makes socket queueing behavior and performance * not depend upon such differences. */ #define _SK_MEM_PACKETS 256 #define _SK_MEM_OVERHEAD SKB_TRUESIZE(256) #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; #define SKB_FRAG_PAGE_ORDER get_order(32768) DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_wmem ? */ if (proto->sysctl_wmem_offset) return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset)); return READ_ONCE(*proto->sysctl_wmem); } static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_rmem ? */ if (proto->sysctl_rmem_offset) return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset)); return READ_ONCE(*proto->sysctl_rmem); } /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10) * Some wifi drivers need to tweak it to get more chunks. * They can use this helper from their ndo_start_xmit() */ static inline void sk_pacing_shift_update(struct sock *sk, int val) { if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val) return; WRITE_ONCE(sk->sk_pacing_shift, val); } /* if a socket is bound to a device, check that the given device * index is either the same or that the socket is bound to an L3 * master device and the given device index is also enslaved to * that L3 master */ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif) { int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); int mdif; if (!bound_dev_if || bound_dev_if == dif) return true; mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif); if (mdif && mdif == bound_dev_if) return true; return false; } void sock_def_readable(struct sock *sk); int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk); void sock_set_timestamp(struct sock *sk, int optname, bool valbool); int sock_set_timestamping(struct sock *sk, int optname, struct so_timestamping timestamping); void sock_enable_timestamps(struct sock *sk); #if defined(CONFIG_CGROUP_BPF) void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op); #else static inline void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op) { } #endif void sock_no_linger(struct sock *sk); void sock_set_keepalive(struct sock *sk); void sock_set_priority(struct sock *sk, u32 priority); void sock_set_rcvbuf(struct sock *sk, int val); void sock_set_mark(struct sock *sk, u32 val); void sock_set_reuseaddr(struct sock *sk); void sock_set_reuseport(struct sock *sk); void sock_set_sndtimeo(struct sock *sk, s64 secs); int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len); int sock_get_timeout(long timeo, void *optval, bool old_timeval); int sock_copy_user_timeval(struct __kernel_sock_timeval *tv, sockptr_t optval, int optlen, bool old_timeval); int sock_ioctl_inout(struct sock *sk, unsigned int cmd, void __user *arg, void *karg, size_t size); int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); static inline bool sk_is_readable(struct sock *sk) { if (sk->sk_prot->sock_is_readable) return sk->sk_prot->sock_is_readable(sk); return false; } #endif /* _SOCK_H */
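/*
 * Illustrative sketch, not part of sock.h: roughly how a simple datagram
 * protocol's receive path might combine the helpers declared above.
 * pkt_example_rcv() and its drop accounting are assumptions made for this
 * example only; real protocols add locking, backlog handling and finer
 * grained error reporting.
 */
#include <linux/skbuff.h>
#include <net/sock.h>

static int pkt_example_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err;

	/* sock_queue_rcv_skb() charges sk_rmem_alloc for the skb (via
	 * skb_set_owner_r()) and wakes any reader blocked on the socket.
	 */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		/* Receive buffer full or memory pressure: record the drop so
		 * SOCK_RXQ_OVFL users can observe it later through
		 * SOCK_SKB_CB(skb)->dropcount on queued packets.
		 */
		sk_drops_add(sk, skb);
		kfree_skb(skb);
	}
	return err;
}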
/* * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include <drm/drm_atomic_helper.h> #include <drm/drm_client_event.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> /** * DOC: aux kms helpers * * This helper library contains various one-off functions which don't really fit * anywhere else in the DRM modeset helper library. */ /** * drm_helper_move_panel_connectors_to_head() - move panels to the front in the * connector list * @dev: drm device to operate on * * Some userspace presumes that the first connected connector is the main * display, where it's supposed to display e.g. the login screen. For * laptops, this should be the main panel. Use this function to sort all * (eDP/LVDS/DSI) panels to the front of the connector list, instead of * painstakingly trying to initialize them in the right order.
*/ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) { struct drm_connector *connector, *tmp; struct list_head panel_list; INIT_LIST_HEAD(&panel_list); spin_lock_irq(&dev->mode_config.connector_list_lock); list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || connector->connector_type == DRM_MODE_CONNECTOR_eDP || connector->connector_type == DRM_MODE_CONNECTOR_DSI) list_move_tail(&connector->head, &panel_list); } list_splice(&panel_list, &dev->mode_config.connector_list); spin_unlock_irq(&dev->mode_config.connector_list_lock); } EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); /** * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata * @dev: DRM device * @fb: drm_framebuffer object to fill out * @mode_cmd: metadata from the userspace fb creation request * * This helper can be used in a drivers fb_create callback to pre-fill the fb's * metadata fields. */ void drm_helper_mode_fill_fb_struct(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_mode_fb_cmd2 *mode_cmd) { int i; fb->dev = dev; fb->format = drm_get_format_info(dev, mode_cmd); fb->width = mode_cmd->width; fb->height = mode_cmd->height; for (i = 0; i < 4; i++) { fb->pitches[i] = mode_cmd->pitches[i]; fb->offsets[i] = mode_cmd->offsets[i]; } fb->modifier = mode_cmd->modifier[0]; fb->flags = mode_cmd->flags; } EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); /* * This is the minimal list of formats that seem to be safe for modeset use * with all current DRM drivers. Most hardware can actually support more * formats than this and drivers may specify a more accurate list when * creating the primary plane. */ static const uint32_t safe_modeset_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, }; static const struct drm_plane_funcs primary_plane_funcs = { DRM_PLANE_NON_ATOMIC_FUNCS, }; /** * drm_crtc_init - Legacy CRTC initialization function * @dev: DRM device * @crtc: CRTC object to init * @funcs: callbacks for the new CRTC * * Initialize a CRTC object with a default helper-provided primary plane and no * cursor plane. * * Note that we make some assumptions about hardware limitations that may not be * true for all hardware: * * 1. Primary plane cannot be repositioned. * 2. Primary plane cannot be scaled. * 3. Primary plane must cover the entire CRTC. * 4. Subpixel positioning is not supported. * 5. The primary plane must always be on if the CRTC is enabled. * * This is purely a backwards compatibility helper for old drivers. Drivers * should instead implement their own primary plane. Atomic drivers must do so. * Drivers with the above hardware restriction can look into using &struct * drm_simple_display_pipe, which encapsulates the above limitations into a nice * interface. * * Returns: * Zero on success, error code on failure. */ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs) { struct drm_plane *primary; int ret; /* possible_crtc's will be filled in later by crtc_init */ primary = __drm_universal_plane_alloc(dev, sizeof(*primary), 0, 0, &primary_plane_funcs, safe_modeset_formats, ARRAY_SIZE(safe_modeset_formats), NULL, DRM_PLANE_TYPE_PRIMARY, NULL); if (IS_ERR(primary)) return PTR_ERR(primary); /* * Remove the format_default field from drm_plane when dropping * this helper. 
*/ primary->format_default = true; ret = drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, NULL); if (ret) goto err_drm_plane_cleanup; return 0; err_drm_plane_cleanup: drm_plane_cleanup(primary); kfree(primary); return ret; } EXPORT_SYMBOL(drm_crtc_init); /** * drm_mode_config_helper_suspend - Modeset suspend helper * @dev: DRM device * * This helper function takes care of suspending the modeset side. It disables * output polling if initialized, suspends fbdev if used and finally calls * drm_atomic_helper_suspend(). * If suspending fails, fbdev and polling are re-enabled. * * Returns: * Zero on success, negative error code on error. * * See also: * drm_kms_helper_poll_disable() and drm_client_dev_suspend(). */ int drm_mode_config_helper_suspend(struct drm_device *dev) { struct drm_atomic_state *state; if (!dev) return 0; /* * Don't disable polling if it was never initialized */ if (dev->mode_config.poll_enabled) drm_kms_helper_poll_disable(dev); drm_client_dev_suspend(dev, false); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { drm_client_dev_resume(dev, false); /* * Don't enable polling if it was never initialized */ if (dev->mode_config.poll_enabled) drm_kms_helper_poll_enable(dev); return PTR_ERR(state); } dev->mode_config.suspend_state = state; return 0; } EXPORT_SYMBOL(drm_mode_config_helper_suspend); /** * drm_mode_config_helper_resume - Modeset resume helper * @dev: DRM device * * This helper function takes care of resuming the modeset side. It calls * drm_atomic_helper_resume(), resumes fbdev if used and enables output polling * if initialized. * * Returns: * Zero on success, negative error code on error. * * See also: * drm_client_dev_resume() and drm_kms_helper_poll_enable(). */ int drm_mode_config_helper_resume(struct drm_device *dev) { int ret; if (!dev) return 0; if (WARN_ON(!dev->mode_config.suspend_state)) return -EINVAL; ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); if (ret) DRM_ERROR("Failed to resume (%d)\n", ret); dev->mode_config.suspend_state = NULL; drm_client_dev_resume(dev, false); /* * Don't enable polling if it is not initialized */ if (dev->mode_config.poll_enabled) drm_kms_helper_poll_enable(dev); return ret; } EXPORT_SYMBOL(drm_mode_config_helper_resume);
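/*
 * Illustrative sketch, not part of drm_modeset_helper.c: a minimal driver
 * system-sleep hookup built on the two helpers above. The "foo" names and
 * the assumption that the struct drm_device pointer is stored as driver
 * data are made up for the example.
 */
#include <linux/device.h>
#include <linux/pm.h>
#include <drm/drm_modeset_helper.h>

static int foo_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* Stops output polling, suspends in-kernel clients such as fbdev
	 * and stashes the atomic state in drm->mode_config.suspend_state.
	 */
	return drm_mode_config_helper_suspend(drm);
}

static int foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* Restores the saved atomic state, resumes clients and re-enables
	 * output polling if it was initialized.
	 */
	return drm_mode_config_helper_resume(drm);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);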
// SPDX-License-Identifier: GPL-2.0-or-later /* * X.25 Packet Layer release 002 * * This is ALPHA test software. This code may break your machine, * randomly fail to work with new releases, misbehave and/or generally * screw up. It might even work. * * This code REQUIRES 2.1.15 or higher * * History * X.25 001 Jonathan Naylor Started coding. * X.25 002 Jonathan Naylor Centralised disconnect handling. * New timer architecture. * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant. * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of * facilities negotiation and increased * the throughput upper limit. * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups * 2000-09-04 Henner Eisen Set sock->state in x25_accept(). * Fixed x25_output() related skb leakage. * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket. * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation. * 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN * 2002-10-06 Arnaldo C.
Melo Get rid of cli/sti, move proc stuff to * x25_proc.c, using seq_file * 2005-04-02 Shaun Pereira Selective sub address matching * with call user data * 2005-04-15 Shaun Pereira Fast select with no restriction on * response */ #define pr_fmt(fmt) "X25: " fmt #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <net/tcp_states.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/notifier.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/ctype.h> #include <net/x25.h> #include <net/compat.h> int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20; int sysctl_x25_call_request_timeout = X25_DEFAULT_T21; int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22; int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23; int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2; int sysctl_x25_forward = 0; HLIST_HEAD(x25_list); DEFINE_RWLOCK(x25_list_lock); static const struct proto_ops x25_proto_ops; static const struct x25_address null_x25_address = {" "}; #ifdef CONFIG_COMPAT struct compat_x25_subscrip_struct { char device[200-sizeof(compat_ulong_t)]; compat_ulong_t global_facil_mask; compat_uint_t extended; }; #endif int x25_parse_address_block(struct sk_buff *skb, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned char len; int needed; int rc; if (!pskb_may_pull(skb, 1)) { /* packet has no address block */ rc = 0; goto empty; } len = *skb->data; needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2; if (!pskb_may_pull(skb, needed)) { /* packet is too short to hold the addresses it claims to hold */ rc = -1; goto empty; } return x25_addr_ntoa(skb->data, called_addr, calling_addr); empty: *called_addr->x25_addr = 0; *calling_addr->x25_addr = 0; return rc; } int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; unsigned int i; called_len = (*p >> 0) & 0x0F; calling_len = (*p >> 4) & 0x0F; called = called_addr->x25_addr; calling = calling_addr->x25_addr; p++; for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *called++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *called++ = ((*p >> 4) & 0x0F) + '0'; } } else { if (i % 2 != 0) { *calling++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *calling++ = ((*p >> 4) & 0x0F) + '0'; } } } *called = *calling = '\0'; return 1 + (called_len + calling_len + 1) / 2; } int x25_addr_aton(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; int i; called = called_addr->x25_addr; calling = calling_addr->x25_addr; called_len = strlen(called); calling_len = strlen(calling); *p++ = (calling_len << 4) | (called_len << 0); for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *p |= (*called++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*called++ - '0') << 4; } } else { if (i % 2 != 0) { *p |= (*calling++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*calling++ - '0') << 4; } } } return 1 + (called_len + calling_len + 1) / 2; } /* * Socket removal during an interrupt is now safe. 
*/ static void x25_remove_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_del_node_init(sk); write_unlock_bh(&x25_list_lock); } /* * Handle device status changes. */ static int x25_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct x25_neigh *nb; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->type == ARPHRD_X25) { switch (event) { case NETDEV_REGISTER: case NETDEV_POST_TYPE_CHANGE: x25_link_device_up(dev); break; case NETDEV_DOWN: nb = x25_get_neigh(dev); if (nb) { x25_link_terminated(nb); x25_neigh_put(nb); } x25_route_device_down(dev); break; case NETDEV_PRE_TYPE_CHANGE: case NETDEV_UNREGISTER: x25_link_device_down(dev); break; case NETDEV_CHANGE: if (!netif_carrier_ok(dev)) { nb = x25_get_neigh(dev); if (nb) { x25_link_terminated(nb); x25_neigh_put(nb); } } break; } } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. */ static void x25_insert_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_add_node(sk, &x25_list); write_unlock_bh(&x25_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. Check the full list for an address/cud match. * If no cuds match return the next_best thing, an address match. * Note: if a listening socket has cud set it must only get calls * with matching cud. */ static struct sock *x25_find_listener(struct x25_address *addr, struct sk_buff *skb) { struct sock *s; struct sock *next_best; read_lock_bh(&x25_list_lock); next_best = NULL; sk_for_each(s, &x25_list) if ((!strcmp(addr->x25_addr, x25_sk(s)->source_addr.x25_addr) || !strcmp(x25_sk(s)->source_addr.x25_addr, null_x25_address.x25_addr)) && s->sk_state == TCP_LISTEN) { /* * Found a listening socket, now check the incoming * call user data vs this sockets call user data */ if (x25_sk(s)->cudmatchlength > 0 && skb->len >= x25_sk(s)->cudmatchlength) { if((memcmp(x25_sk(s)->calluserdata.cuddata, skb->data, x25_sk(s)->cudmatchlength)) == 0) { sock_hold(s); goto found; } } else next_best = s; } if (next_best) { s = next_best; sock_hold(s); goto found; } s = NULL; found: read_unlock_bh(&x25_list_lock); return s; } /* * Find a connected X.25 socket given my LCI and neighbour. */ static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; sk_for_each(s, &x25_list) if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { sock_hold(s); goto found; } s = NULL; found: return s; } struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; read_lock_bh(&x25_list_lock); s = __x25_find_socket(lci, nb); read_unlock_bh(&x25_list_lock); return s; } /* * Find a unique LCI for a given device. */ static unsigned int x25_new_lci(struct x25_neigh *nb) { unsigned int lci = 1; struct sock *sk; while ((sk = x25_find_socket(lci, nb)) != NULL) { sock_put(sk); if (++lci == 4096) { lci = 0; break; } cond_resched(); } return lci; } /* * Deferred destroy. */ static void __x25_destroy_socket(struct sock *); /* * handler for deferred kills. */ static void x25_destroy_timer(struct timer_list *t) { struct sock *sk = from_timer(sk, t, sk_timer); x25_destroy_socket_from_timer(sk); } /* * This is called from user mode and the timers. Thus it protects itself * against interrupting users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. 
* Not static as it's used by the timer */ static void __x25_destroy_socket(struct sock *sk) { struct sk_buff *skb; x25_stop_heartbeat(sk); x25_stop_timer(sk); x25_remove_socket(sk); x25_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* * Queue the unaccepted socket for death */ skb->sk->sk_state = TCP_LISTEN; sock_set_flag(skb->sk, SOCK_DEAD); x25_start_heartbeat(skb->sk); x25_sk(skb->sk)->state = X25_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ sk->sk_timer.expires = jiffies + 10 * HZ; sk->sk_timer.function = x25_destroy_timer; add_timer(&sk->sk_timer); } else { /* drop last reference so sock_put will free */ __sock_put(sk); } } void x25_destroy_socket_from_timer(struct sock *sk) { sock_hold(sk); bh_lock_sock(sk); __x25_destroy_socket(sk); bh_unlock_sock(sk); sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * X.25 socket object. */ static int x25_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { int opt; struct sock *sk = sock->sk; int rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EINVAL; if (optlen < sizeof(int)) goto out; rc = -EFAULT; if (copy_from_sockptr(&opt, optval, sizeof(int))) goto out; if (opt) set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); else clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = 0; out: return rc; } static int x25_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int val, len, rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EFAULT; if (get_user(len, optlen)) goto out; rc = -EINVAL; if (len < 0) goto out; len = min_t(unsigned int, len, sizeof(int)); rc = -EFAULT; if (put_user(len, optlen)) goto out; val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = copy_to_user(optval, &val, len) ? 
-EFAULT : 0; out: return rc; } static int x25_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int rc = -EOPNOTSUPP; lock_sock(sk); if (sock->state != SS_UNCONNECTED) { rc = -EINVAL; release_sock(sk); return rc; } if (sk->sk_state != TCP_LISTEN) { memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; rc = 0; } release_sock(sk); return rc; } static struct proto x25_proto = { .name = "X25", .owner = THIS_MODULE, .obj_size = sizeof(struct x25_sock), }; static struct sock *x25_alloc_socket(struct net *net, int kern) { struct x25_sock *x25; struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, kern); if (!sk) goto out; sock_init_data(NULL, sk); x25 = x25_sk(sk); skb_queue_head_init(&x25->ack_queue); skb_queue_head_init(&x25->fragment_queue); skb_queue_head_init(&x25->interrupt_in_queue); skb_queue_head_init(&x25->interrupt_out_queue); out: return sk; } static int x25_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct x25_sock *x25; int rc = -EAFNOSUPPORT; if (!net_eq(net, &init_net)) goto out; rc = -ESOCKTNOSUPPORT; if (sock->type != SOCK_SEQPACKET) goto out; rc = -EINVAL; if (protocol) goto out; rc = -ENOMEM; if ((sk = x25_alloc_socket(net, kern)) == NULL) goto out; x25 = x25_sk(sk); sock_init_data(sock, sk); x25_init_timers(sk); sock->ops = &x25_proto_ops; sk->sk_protocol = protocol; sk->sk_backlog_rcv = x25_backlog_rcv; x25->t21 = sysctl_x25_call_request_timeout; x25->t22 = sysctl_x25_reset_request_timeout; x25->t23 = sysctl_x25_clear_request_timeout; x25->t2 = sysctl_x25_ack_holdback_timeout; x25->state = X25_STATE_0; x25->cudmatchlength = 0; set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */ /* on call accept */ x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE; x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; x25->facilities.throughput = 0; /* by default don't negotiate throughput */ x25->facilities.reverse = X25_DEFAULT_REVERSE; x25->dte_facilities.calling_len = 0; x25->dte_facilities.called_len = 0; memset(x25->dte_facilities.called_ae, '\0', sizeof(x25->dte_facilities.called_ae)); memset(x25->dte_facilities.calling_ae, '\0', sizeof(x25->dte_facilities.calling_ae)); rc = 0; out: return rc; } static struct sock *x25_make_new(struct sock *osk) { struct sock *sk = NULL; struct x25_sock *x25, *ox25; if (osk->sk_type != SOCK_SEQPACKET) goto out; if ((sk = x25_alloc_socket(sock_net(osk), 0)) == NULL) goto out; x25 = x25_sk(sk); sk->sk_type = osk->sk_type; sk->sk_priority = READ_ONCE(osk->sk_priority); sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sk->sk_backlog_rcv = osk->sk_backlog_rcv; sock_copy_flags(sk, osk); ox25 = x25_sk(osk); x25->t21 = ox25->t21; x25->t22 = ox25->t22; x25->t23 = ox25->t23; x25->t2 = ox25->t2; x25->flags = ox25->flags; x25->facilities = ox25->facilities; x25->dte_facilities = ox25->dte_facilities; x25->cudmatchlength = ox25->cudmatchlength; clear_bit(X25_INTERRUPT_FLAG, &x25->flags); x25_init_timers(sk); out: return sk; } static int x25_release(struct socket *sock) { struct sock *sk = sock->sk; struct x25_sock *x25; if (!sk) return 0; x25 = x25_sk(sk); sock_hold(sk); lock_sock(sk); switch (x25->state) { case X25_STATE_0: case X25_STATE_2: x25_disconnect(sk, 0, 0, 0); __x25_destroy_socket(sk); goto out; case X25_STATE_1: case 
X25_STATE_3: case X25_STATE_4: x25_clear_queues(sk); x25_write_internal(sk, X25_CLEAR_REQUEST); x25_start_t23timer(sk); x25->state = X25_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; case X25_STATE_5: x25_write_internal(sk, X25_CLEAR_REQUEST); x25_disconnect(sk, 0, 0, 0); __x25_destroy_socket(sk); goto out; } sock_orphan(sk); out: release_sock(sk); sock_put(sk); return 0; } static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; int len, i, rc = 0; if (addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25 || strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) { rc = -EINVAL; goto out; } /* check for the null_x25_address */ if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) { len = strlen(addr->sx25_addr.x25_addr); for (i = 0; i < len; i++) { if (!isdigit(addr->sx25_addr.x25_addr[i])) { rc = -EINVAL; goto out; } } } lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) { x25_sk(sk)->source_addr = addr->sx25_addr; x25_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); } else { rc = -EINVAL; } release_sock(sk); net_dbg_ratelimited("x25_bind: socket is bound\n"); out: return rc; } static int x25_wait_for_connection_establishment(struct sock *sk) { DECLARE_WAITQUEUE(wait, current); int rc; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = sock_error(sk); if (rc) { sk->sk_socket->state = SS_UNCONNECTED; break; } rc = -ENOTCONN; if (sk->sk_state == TCP_CLOSE) { sk->sk_socket->state = SS_UNCONNECTED; break; } rc = 0; if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); schedule(); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; } static int x25_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; struct x25_route *rt; int rc = 0; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; goto out; /* Connect completed during a ERESTARTSYS event */ } rc = -ECONNREFUSED; if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; goto out; } rc = -EISCONN; /* No reconnect on a seqpacket socket */ if (sk->sk_state == TCP_ESTABLISHED) goto out; rc = -EALREADY; /* Do nothing if call is already in progress */ if (sk->sk_state == TCP_SYN_SENT) goto out; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rc = -EINVAL; if (addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25 || strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) goto out; rc = -ENETUNREACH; rt = x25_get_route(&addr->sx25_addr); if (!rt) goto out; x25->neighbour = x25_get_neigh(rt->dev); if (!x25->neighbour) goto out_put_route; x25_limit_facilities(&x25->facilities, x25->neighbour); x25->lci = x25_new_lci(x25->neighbour); if (!x25->lci) goto out_put_neigh; rc = -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ goto out_put_neigh; if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr)) memset(&x25->source_addr, '\0', X25_ADDR_LEN); x25->dest_addr = addr->sx25_addr; /* Move to 
connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; x25->state = X25_STATE_1; x25_write_internal(sk, X25_CALL_REQUEST); x25_start_heartbeat(sk); x25_start_t21timer(sk); /* Now the loop */ rc = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) goto out; rc = x25_wait_for_connection_establishment(sk); if (rc) goto out_put_neigh; sock->state = SS_CONNECTED; rc = 0; out_put_neigh: if (rc && x25->neighbour) { read_lock_bh(&x25_list_lock); x25_neigh_put(x25->neighbour); x25->neighbour = NULL; read_unlock_bh(&x25_list_lock); x25->state = X25_STATE_0; } out_put_route: x25_route_put(rt); out: release_sock(sk); return rc; } static int x25_wait_for_data(struct sock *sk, long timeout) { DECLARE_WAITQUEUE(wait, current); int rc = 0; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (sk->sk_shutdown & RCV_SHUTDOWN) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = -EAGAIN; if (!timeout) break; rc = 0; if (skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; } static int x25_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { struct sock *sk = sock->sk; struct sock *newsk; struct sk_buff *skb; int rc = -EINVAL; if (!sk) goto out; rc = -EOPNOTSUPP; if (sk->sk_type != SOCK_SEQPACKET) goto out; lock_sock(sk); rc = -EINVAL; if (sk->sk_state != TCP_LISTEN) goto out2; rc = x25_wait_for_data(sk, sk->sk_rcvtimeo); if (rc) goto out2; skb = skb_dequeue(&sk->sk_receive_queue); rc = -EINVAL; if (!skb->sk) goto out2; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk_acceptq_removed(sk); newsock->state = SS_CONNECTED; rc = 0; out2: release_sock(sk); out: return rc; } static int x25_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr; struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); int rc = 0; if (peer) { if (sk->sk_state != TCP_ESTABLISHED) { rc = -ENOTCONN; goto out; } sx25->sx25_addr = x25->dest_addr; } else sx25->sx25_addr = x25->source_addr; sx25->sx25_family = AF_X25; rc = sizeof(*sx25); out: return rc; } int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, unsigned int lci) { struct sock *sk; struct sock *make; struct x25_sock *makex25; struct x25_address source_addr, dest_addr; struct x25_facilities facilities; struct x25_dte_facilities dte_facilities; int len, addr_len, rc; /* * Remove the LCI and frame type. */ skb_pull(skb, X25_STD_MIN_LEN); /* * Extract the X.25 addresses and convert them to ASCII strings, * and remove them. * * Address block is mandatory in call request packets */ addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); if (addr_len <= 0) goto out_clear_request; skb_pull(skb, addr_len); /* * Get the length of the facilities, skip past them for the moment * get the call user data because this is needed to determine * the correct listener * * Facilities length is mandatory in call request packets */ if (!pskb_may_pull(skb, 1)) goto out_clear_request; len = skb->data[0] + 1; if (!pskb_may_pull(skb, len)) goto out_clear_request; skb_pull(skb,len); /* * Ensure that the amount of call user data is valid. 
*/ if (skb->len > X25_MAX_CUD_LEN) goto out_clear_request; /* * Get all the call user data so it can be used in * x25_find_listener and skb_copy_from_linear_data up ahead. */ if (!pskb_may_pull(skb, skb->len)) goto out_clear_request; /* * Find a listener for the particular address/cud pair. */ sk = x25_find_listener(&source_addr,skb); skb_push(skb,len); if (sk != NULL && sk_acceptq_is_full(sk)) { goto out_sock_put; } /* * We dont have any listeners for this incoming call. * Try forwarding it. */ if (sk == NULL) { skb_push(skb, addr_len + X25_STD_MIN_LEN); if (sysctl_x25_forward && x25_forward_call(&dest_addr, nb, skb, lci) > 0) { /* Call was forwarded, dont process it any more */ kfree_skb(skb); rc = 1; goto out; } else { /* No listeners, can't forward, clear the call */ goto out_clear_request; } } /* * Try to reach a compromise on the requested facilities. */ len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities); if (len == -1) goto out_sock_put; /* * current neighbour/link might impose additional limits * on certain facilities */ x25_limit_facilities(&facilities, nb); /* * Try to create a new socket. */ make = x25_make_new(sk); if (!make) goto out_sock_put; /* * Remove the facilities */ skb_pull(skb, len); skb->sk = make; make->sk_state = TCP_ESTABLISHED; makex25 = x25_sk(make); makex25->lci = lci; makex25->dest_addr = dest_addr; makex25->source_addr = source_addr; x25_neigh_hold(nb); makex25->neighbour = nb; makex25->facilities = facilities; makex25->dte_facilities= dte_facilities; makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask; /* ensure no reverse facil on accept */ makex25->vc_facil_mask &= ~X25_MASK_REVERSE; /* ensure no calling address extension on accept */ makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE; makex25->cudmatchlength = x25_sk(sk)->cudmatchlength; /* Normally all calls are accepted immediately */ if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) { x25_write_internal(make, X25_CALL_ACCEPTED); makex25->state = X25_STATE_3; } else { makex25->state = X25_STATE_5; } /* * Incoming Call User Data. 
*/ skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len); makex25->calluserdata.cudlength = skb->len; sk_acceptq_added(sk); x25_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); x25_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); rc = 1; sock_put(sk); out: return rc; out_sock_put: sock_put(sk); out_clear_request: rc = 0; x25_transmit_clear_request(nb, lci, 0x01); goto out; } static int x25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); DECLARE_SOCKADDR(struct sockaddr_x25 *, usx25, msg->msg_name); struct sockaddr_x25 sx25; struct sk_buff *skb; unsigned char *asmptr; int noblock = msg->msg_flags & MSG_DONTWAIT; size_t size; int qbit = 0, rc = -EINVAL; lock_sock(sk); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT)) goto out; /* we currently don't support segmented records at the user interface */ if (!(msg->msg_flags & (MSG_EOR|MSG_OOB))) goto out; rc = -EADDRNOTAVAIL; if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); goto out; } rc = -ENETUNREACH; if (!x25->neighbour) goto out; if (usx25) { rc = -EINVAL; if (msg->msg_namelen < sizeof(sx25)) goto out; memcpy(&sx25, usx25, sizeof(sx25)); rc = -EISCONN; if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr)) goto out; rc = -EINVAL; if (sx25.sx25_family != AF_X25) goto out; } else { /* * FIXME 1003.1g - if the socket is like this because * it has become closed (not started closed) we ought * to SIGPIPE, EPIPE; */ rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; sx25.sx25_family = AF_X25; sx25.sx25_addr = x25->dest_addr; } /* Sanity check the packet size */ if (len > 65535) { rc = -EMSGSIZE; goto out; } net_dbg_ratelimited("x25_sendmsg: sendto: Addresses built.\n"); /* Build a packet */ net_dbg_ratelimited("x25_sendmsg: sendto: building packet.\n"); if ((msg->msg_flags & MSG_OOB) && len > 32) len = 32; size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); if (!skb) goto out; X25_SKB_CB(skb)->flags = msg->msg_flags; skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN); /* * Put the data on the end */ net_dbg_ratelimited("x25_sendmsg: Copying user data\n"); skb_reset_transport_header(skb); skb_put(skb, len); rc = memcpy_from_msg(skb_transport_header(skb), msg, len); if (rc) goto out_kfree_skb; /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. 
*/ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { if (!pskb_may_pull(skb, 1)) goto out_kfree_skb; qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the X.25 header */ net_dbg_ratelimited("x25_sendmsg: Building X.25 Header.\n"); if (msg->msg_flags & MSG_OOB) { if (x25->neighbour->extended) { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } else { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } } else { if (x25->neighbour->extended) { /* Build an Extended X.25 header */ asmptr = skb_push(skb, X25_EXT_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; *asmptr++ = X25_DATA; } else { /* Build an Standard X.25 header */ asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; } if (qbit) skb->data[0] |= X25_Q_BIT; } net_dbg_ratelimited("x25_sendmsg: Built header.\n"); net_dbg_ratelimited("x25_sendmsg: Transmitting buffer\n"); rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out_kfree_skb; if (msg->msg_flags & MSG_OOB) skb_queue_tail(&x25->interrupt_out_queue, skb); else { rc = x25_output(sk, skb); len = rc; if (rc < 0) kfree_skb(skb); else if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) len++; } x25_kick(sk); rc = len; out: release_sock(sk); return rc; out_kfree_skb: kfree_skb(skb); goto out; } static int x25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); DECLARE_SOCKADDR(struct sockaddr_x25 *, sx25, msg->msg_name); size_t copied; int qbit, header_len; struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; lock_sock(sk); if (x25->neighbour == NULL) goto out; header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN : X25_STD_MIN_LEN; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) goto out; if (flags & MSG_OOB) { rc = -EINVAL; if (sock_flag(sk, SOCK_URGINLINE) || !skb_peek(&x25->interrupt_in_queue)) goto out; skb = skb_dequeue(&x25->interrupt_in_queue); if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) goto out_free_dgram; skb_pull(skb, X25_STD_MIN_LEN); /* * No Q bit information on Interrupt data. 
*/ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = 0x00; } msg->msg_flags |= MSG_OOB; } else { /* Now we can treat all alike */ release_sock(sk); skb = skb_recv_datagram(sk, flags, &rc); lock_sock(sk); if (!skb) goto out; if (!pskb_may_pull(skb, header_len)) goto out_free_dgram; qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; skb_pull(skb, header_len); if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = qbit; } } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } /* Currently, each datagram always contains a complete record */ msg->msg_flags |= MSG_EOR; rc = skb_copy_datagram_msg(skb, 0, msg, copied); if (rc) goto out_free_dgram; if (sx25) { sx25->sx25_family = AF_X25; sx25->sx25_addr = x25->dest_addr; msg->msg_namelen = sizeof(*sx25); } x25_check_rbuf(sk); rc = copied; out_free_dgram: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; } static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); void __user *argp = (void __user *)arg; int rc; switch (cmd) { case TIOCOUTQ: { int amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; rc = put_user(amount, (unsigned int __user *)argp); break; } case TIOCINQ: { struct sk_buff *skb; int amount = 0; /* * These two are safe on a single CPU system as * only user tasks fiddle here */ lock_sock(sk); if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; release_sock(sk); rc = put_user(amount, (unsigned int __user *)argp); break; } case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->facilities, sizeof(x25->facilities)) ? 
-EFAULT : 0; release_sock(sk); break; } case SIOCX25SFACILITIES: { struct x25_facilities facilities; rc = -EFAULT; if (copy_from_user(&facilities, argp, sizeof(facilities))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_fac_release; if (facilities.pacsize_in < X25_PS16 || facilities.pacsize_in > X25_PS4096) goto out_fac_release; if (facilities.pacsize_out < X25_PS16 || facilities.pacsize_out > X25_PS4096) goto out_fac_release; if (facilities.winsize_in < 1 || facilities.winsize_in > 127) goto out_fac_release; if (facilities.throughput) { int out = facilities.throughput & 0xf0; int in = facilities.throughput & 0x0f; if (!out) facilities.throughput |= X25_DEFAULT_THROUGHPUT << 4; else if (out < 0x30 || out > 0xD0) goto out_fac_release; if (!in) facilities.throughput |= X25_DEFAULT_THROUGHPUT; else if (in < 0x03 || in > 0x0D) goto out_fac_release; } if (facilities.reverse && (facilities.reverse & 0x81) != 0x81) goto out_fac_release; x25->facilities = facilities; rc = 0; out_fac_release: release_sock(sk); break; } case SIOCX25GDTEFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->dte_facilities, sizeof(x25->dte_facilities)); release_sock(sk); if (rc) rc = -EFAULT; break; } case SIOCX25SDTEFACILITIES: { struct x25_dte_facilities dtefacs; rc = -EFAULT; if (copy_from_user(&dtefacs, argp, sizeof(dtefacs))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_dtefac_release; if (dtefacs.calling_len > X25_MAX_AE_LEN) goto out_dtefac_release; if (dtefacs.called_len > X25_MAX_AE_LEN) goto out_dtefac_release; x25->dte_facilities = dtefacs; rc = 0; out_dtefac_release: release_sock(sk); break; } case SIOCX25GCALLUSERDATA: { lock_sock(sk); rc = copy_to_user(argp, &x25->calluserdata, sizeof(x25->calluserdata)) ? -EFAULT : 0; release_sock(sk); break; } case SIOCX25SCALLUSERDATA: { struct x25_calluserdata calluserdata; rc = -EFAULT; if (copy_from_user(&calluserdata, argp, sizeof(calluserdata))) break; rc = -EINVAL; if (calluserdata.cudlength > X25_MAX_CUD_LEN) break; lock_sock(sk); x25->calluserdata = calluserdata; release_sock(sk); rc = 0; break; } case SIOCX25GCAUSEDIAG: { lock_sock(sk); rc = copy_to_user(argp, &x25->causediag, sizeof(x25->causediag)) ? 
-EFAULT : 0; release_sock(sk); break; } case SIOCX25SCAUSEDIAG: { struct x25_causediag causediag; rc = -EFAULT; if (copy_from_user(&causediag, argp, sizeof(causediag))) break; lock_sock(sk); x25->causediag = causediag; release_sock(sk); rc = 0; break; } case SIOCX25SCUDMATCHLEN: { struct x25_subaddr sub_addr; rc = -EINVAL; lock_sock(sk); if(sk->sk_state != TCP_CLOSE) goto out_cud_release; rc = -EFAULT; if (copy_from_user(&sub_addr, argp, sizeof(sub_addr))) goto out_cud_release; rc = -EINVAL; if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN) goto out_cud_release; x25->cudmatchlength = sub_addr.cudmatchlength; rc = 0; out_cud_release: release_sock(sk); break; } case SIOCX25CALLACCPTAPPRV: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state == TCP_CLOSE) { clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); rc = 0; } release_sock(sk); break; } case SIOCX25SENDCALLACCPT: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) goto out_sendcallaccpt_release; /* must call accptapprv above */ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags)) goto out_sendcallaccpt_release; x25_write_internal(sk, X25_CALL_ACCEPTED); x25->state = X25_STATE_3; rc = 0; out_sendcallaccpt_release: release_sock(sk); break; } default: rc = -ENOIOCTLCMD; break; } return rc; } static const struct net_proto_family x25_family_ops = { .family = AF_X25, .create = x25_create, .owner = THIS_MODULE, }; #ifdef CONFIG_COMPAT static int compat_x25_subscr_ioctl(unsigned int cmd, struct compat_x25_subscrip_struct __user *x25_subscr32) { struct compat_x25_subscrip_struct x25_subscr; struct x25_neigh *nb; struct net_device *dev; int rc = -EINVAL; rc = -EFAULT; if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32))) goto out; rc = -EINVAL; dev = x25_dev_get(x25_subscr.device); if (dev == NULL) goto out; nb = x25_get_neigh(dev); if (nb == NULL) goto out_dev_put; dev_put(dev); if (cmd == SIOCX25GSUBSCRIP) { read_lock_bh(&x25_neigh_list_lock); x25_subscr.extended = nb->extended; x25_subscr.global_facil_mask = nb->global_facil_mask; read_unlock_bh(&x25_neigh_list_lock); rc = copy_to_user(x25_subscr32, &x25_subscr, sizeof(*x25_subscr32)) ? 
-EFAULT : 0; } else { rc = -EINVAL; if (x25_subscr.extended == 0 || x25_subscr.extended == 1) { rc = 0; write_lock_bh(&x25_neigh_list_lock); nb->extended = x25_subscr.extended; nb->global_facil_mask = x25_subscr.global_facil_mask; write_unlock_bh(&x25_neigh_list_lock); } } x25_neigh_put(nb); out: return rc; out_dev_put: dev_put(dev); goto out; } static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); int rc = -ENOIOCTLCMD; switch(cmd) { case TIOCOUTQ: case TIOCINQ: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: case SIOCX25SFACILITIES: case SIOCX25GDTEFACILITIES: case SIOCX25SDTEFACILITIES: case SIOCX25GCALLUSERDATA: case SIOCX25SCALLUSERDATA: case SIOCX25GCAUSEDIAG: case SIOCX25SCAUSEDIAG: case SIOCX25SCUDMATCHLEN: case SIOCX25CALLACCPTAPPRV: case SIOCX25SENDCALLACCPT: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; default: rc = -ENOIOCTLCMD; break; } return rc; } #endif static const struct proto_ops x25_proto_ops = { .family = AF_X25, .owner = THIS_MODULE, .release = x25_release, .bind = x25_bind, .connect = x25_connect, .socketpair = sock_no_socketpair, .accept = x25_accept, .getname = x25_getname, .poll = datagram_poll, .ioctl = x25_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_x25_ioctl, #endif .gettstamp = sock_gettstamp, .listen = x25_listen, .shutdown = sock_no_shutdown, .setsockopt = x25_setsockopt, .getsockopt = x25_getsockopt, .sendmsg = x25_sendmsg, .recvmsg = x25_recvmsg, .mmap = sock_no_mmap, }; static struct packet_type x25_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_X25), .func = x25_lapb_receive_frame, }; static struct notifier_block x25_dev_notifier = { .notifier_call = x25_device_event, }; void x25_kill_by_neigh(struct x25_neigh *nb) { struct sock *s; write_lock_bh(&x25_list_lock); sk_for_each(s, &x25_list) { if (x25_sk(s)->neighbour == nb) { write_unlock_bh(&x25_list_lock); lock_sock(s); x25_disconnect(s, ENETUNREACH, 0, 0); release_sock(s); write_lock_bh(&x25_list_lock); } } write_unlock_bh(&x25_list_lock); /* Remove any related forwards */ x25_clear_forward_by_dev(nb->dev); } static int __init x25_init(void) { int rc; rc = proto_register(&x25_proto, 0); if (rc) goto out; rc = sock_register(&x25_family_ops); if (rc) goto out_proto; dev_add_pack(&x25_packet_type); rc = register_netdevice_notifier(&x25_dev_notifier); if (rc) goto out_sock; rc = x25_register_sysctl(); if (rc) goto out_dev; rc = x25_proc_init(); if (rc) goto out_sysctl; pr_info("Linux Version 0.2\n"); out: return rc; out_sysctl: x25_unregister_sysctl(); out_dev: unregister_netdevice_notifier(&x25_dev_notifier); out_sock: dev_remove_pack(&x25_packet_type); sock_unregister(AF_X25); out_proto: proto_unregister(&x25_proto); goto out; } module_init(x25_init); static void __exit x25_exit(void) { x25_proc_exit(); x25_link_free(); x25_route_free(); x25_unregister_sysctl(); unregister_netdevice_notifier(&x25_dev_notifier); dev_remove_pack(&x25_packet_type); 
	sock_unregister(AF_X25);
	proto_unregister(&x25_proto);
}
module_exit(x25_exit);

MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_X25);
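/*
 * A minimal, stand-alone user-space sketch of the AF_X25 socket API
 * implemented above. The X.121 address strings are made-up examples and a
 * configured X.25 link/route is assumed; note that x25_sendmsg() rejects
 * writes without MSG_EOR or MSG_OOB, so complete records are sent with
 * MSG_EOR.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/x25.h>

int main(void)
{
	struct sockaddr_x25 local = { .sx25_family = AF_X25 };
	struct sockaddr_x25 peer  = { .sx25_family = AF_X25 };
	char buf[] = "hello over X.25";
	int s;

	s = socket(AF_X25, SOCK_SEQPACKET, 0);
	if (s < 0) {
		perror("socket(AF_X25)");
		return 1;
	}

	strcpy(local.sx25_addr.x25_addr, "2345678901");	/* example X.121 address */
	strcpy(peer.sx25_addr.x25_addr, "2345678902");	/* example X.121 address */

	if (bind(s, (struct sockaddr *)&local, sizeof(local)) < 0 ||
	    connect(s, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		perror("bind/connect");
		close(s);
		return 1;
	}

	/* One complete record per call, as required by the MSG_EOR check above. */
	if (send(s, buf, sizeof(buf), MSG_EOR) < 0)
		perror("send");

	close(s);
	return 0;
}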
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and sections when the list becomes
	 *    partitioned (e.g. end < start). The crypto APIs require the
	 *    chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element.
*/ struct sk_msg { struct sk_msg_sg sg; void *data; void *data_end; u32 apply_bytes; u32 cork_bytes; u32 flags; struct sk_buff *skb; struct sock *sk_redir; struct sock *sk; struct list_head list; }; struct sk_psock_progs { struct bpf_prog *msg_parser; struct bpf_prog *stream_parser; struct bpf_prog *stream_verdict; struct bpf_prog *skb_verdict; struct bpf_link *msg_parser_link; struct bpf_link *stream_parser_link; struct bpf_link *stream_verdict_link; struct bpf_link *skb_verdict_link; }; enum sk_psock_state_bits { SK_PSOCK_TX_ENABLED, SK_PSOCK_RX_STRP_ENABLED, }; struct sk_psock_link { struct list_head list; struct bpf_map *map; void *link_raw; }; struct sk_psock_work_state { u32 len; u32 off; }; struct sk_psock { struct sock *sk; struct sock *sk_redir; u32 apply_bytes; u32 cork_bytes; u32 eval; bool redir_ingress; /* undefined if sk_redir is null */ struct sk_msg *cork; struct sk_psock_progs progs; #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) struct strparser strp; u32 copied_seq; u32 ingress_bytes; #endif struct sk_buff_head ingress_skb; struct list_head ingress_msg; spinlock_t ingress_lock; unsigned long state; struct list_head link; spinlock_t link_lock; refcount_t refcnt; void (*saved_unhash)(struct sock *sk); void (*saved_destroy)(struct sock *sk); void (*saved_close)(struct sock *sk, long timeout); void (*saved_write_space)(struct sock *sk); void (*saved_data_ready)(struct sock *sk); /* psock_update_sk_prot may be called with restore=false many times * so the handler must be safe for this case. It will be called * exactly once with restore=true when the psock is being destroyed * and psock refcnt is zero, but before an RCU grace period. */ int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock, bool restore); struct proto *sk_proto; struct mutex work_mutex; struct sk_psock_work_state work_state; struct delayed_work work; struct sock *sk_pair; struct rcu_work rwork; }; int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, int elem_first_coalesce); int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src, u32 off, u32 len); void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len); int sk_msg_free(struct sock *sk, struct sk_msg *msg); int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg); void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes); void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg, u32 bytes); void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes); void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes); int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, int len, int flags); bool sk_msg_is_readable(struct sock *sk); static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes) { WARN_ON(i == msg->sg.end && bytes); } static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes) { if (psock->apply_bytes) { if (psock->apply_bytes < bytes) psock->apply_bytes = 0; else psock->apply_bytes -= bytes; } } static inline u32 sk_msg_iter_dist(u32 start, u32 end) { return end >= start ? 
end - start : end + (NR_MSG_FRAG_IDS - start); } #define sk_msg_iter_var_prev(var) \ do { \ if (var == 0) \ var = NR_MSG_FRAG_IDS - 1; \ else \ var--; \ } while (0) #define sk_msg_iter_var_next(var) \ do { \ var++; \ if (var == NR_MSG_FRAG_IDS) \ var = 0; \ } while (0) #define sk_msg_iter_prev(msg, which) \ sk_msg_iter_var_prev(msg->sg.which) #define sk_msg_iter_next(msg, which) \ sk_msg_iter_var_next(msg->sg.which) static inline void sk_msg_init(struct sk_msg *msg) { BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS); memset(msg, 0, sizeof(*msg)); sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS); } static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, int which, u32 size) { dst->sg.data[which] = src->sg.data[which]; dst->sg.data[which].length = size; dst->sg.size += size; src->sg.size -= size; src->sg.data[which].length -= size; src->sg.data[which].offset += size; } static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src) { memcpy(dst, src, sizeof(*src)); sk_msg_init(src); } static inline bool sk_msg_full(const struct sk_msg *msg) { return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS; } static inline u32 sk_msg_elem_used(const struct sk_msg *msg) { return sk_msg_iter_dist(msg->sg.start, msg->sg.end); } static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which) { return &msg->sg.data[which]; } static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which) { return msg->sg.data[which]; } static inline struct page *sk_msg_page(struct sk_msg *msg, int which) { return sg_page(sk_msg_elem(msg, which)); } static inline bool sk_msg_to_ingress(const struct sk_msg *msg) { return msg->flags & BPF_F_INGRESS; } static inline void sk_msg_compute_data_pointers(struct sk_msg *msg) { struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start); if (test_bit(msg->sg.start, msg->sg.copy)) { msg->data = NULL; msg->data_end = NULL; } else { msg->data = sg_virt(sge); msg->data_end = msg->data + sge->length; } } static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page, u32 len, u32 offset) { struct scatterlist *sge; get_page(page); sge = sk_msg_elem(msg, msg->sg.end); sg_set_page(sge, page, len, offset); sg_unmark_end(sge); __set_bit(msg->sg.end, msg->sg.copy); msg->sg.size += len; sk_msg_iter_next(msg, end); } static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state) { do { if (copy_state) __set_bit(i, msg->sg.copy); else __clear_bit(i, msg->sg.copy); sk_msg_iter_var_next(i); if (i == msg->sg.end) break; } while (1); } static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start) { sk_msg_sg_copy(msg, start, true); } static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start) { sk_msg_sg_copy(msg, start, false); } static inline struct sk_psock *sk_psock(const struct sock *sk) { return __rcu_dereference_sk_user_data_with_flags(sk, SK_USER_DATA_PSOCK); } static inline void sk_psock_set_state(struct sk_psock *psock, enum sk_psock_state_bits bit) { set_bit(bit, &psock->state); } static inline void sk_psock_clear_state(struct sk_psock *psock, enum sk_psock_state_bits bit) { clear_bit(bit, &psock->state); } static inline bool sk_psock_test_state(const struct sk_psock *psock, enum sk_psock_state_bits bit) { return test_bit(bit, &psock->state); } static inline void sock_drop(struct sock *sk, struct sk_buff *skb) { sk_drops_add(sk, skb); kfree_skb(skb); } static inline bool sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { bool ret; 
spin_lock_bh(&psock->ingress_lock); if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { list_add_tail(&msg->list, &psock->ingress_msg); ret = true; } else { sk_msg_free(psock->sk, msg); kfree(msg); ret = false; } spin_unlock_bh(&psock->ingress_lock); return ret; } static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock) { struct sk_msg *msg; spin_lock_bh(&psock->ingress_lock); msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list); if (msg) list_del(&msg->list); spin_unlock_bh(&psock->ingress_lock); return msg; } static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock) { struct sk_msg *msg; spin_lock_bh(&psock->ingress_lock); msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list); spin_unlock_bh(&psock->ingress_lock); return msg; } static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock, struct sk_msg *msg) { struct sk_msg *ret; spin_lock_bh(&psock->ingress_lock); if (list_is_last(&msg->list, &psock->ingress_msg)) ret = NULL; else ret = list_next_entry(msg, list); spin_unlock_bh(&psock->ingress_lock); return ret; } static inline bool sk_psock_queue_empty(const struct sk_psock *psock) { return psock ? list_empty(&psock->ingress_msg) : true; } static inline void kfree_sk_msg(struct sk_msg *msg) { if (msg->skb) consume_skb(msg->skb); kfree(msg); } static inline void sk_psock_report_error(struct sk_psock *psock, int err) { struct sock *sk = psock->sk; sk->sk_err = err; sk_error_report(sk); } struct sk_psock *sk_psock_init(struct sock *sk, int node); void sk_psock_stop(struct sk_psock *psock); #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock); void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock); void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock); #else static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) { return -EOPNOTSUPP; } static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) { } static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) { } #endif void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock); void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock); int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg); /* * This specialized allocator has to be a macro for its allocations to be * accounted separately (to have a separate alloc_tag). The typecast is * intentional to enforce typesafety. 
*/ #define sk_psock_init_link() \ ((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link), \ GFP_ATOMIC | __GFP_NOWARN)) static inline void sk_psock_free_link(struct sk_psock_link *link) { kfree(link); } struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock); static inline void sk_psock_cork_free(struct sk_psock *psock) { if (psock->cork) { sk_msg_free(psock->sk, psock->cork); kfree(psock->cork); psock->cork = NULL; } } static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { if (psock->psock_update_sk_prot) psock->psock_update_sk_prot(sk, psock, true); } static inline struct sk_psock *sk_psock_get(struct sock *sk) { struct sk_psock *psock; rcu_read_lock(); psock = sk_psock(sk); if (psock && !refcount_inc_not_zero(&psock->refcnt)) psock = NULL; rcu_read_unlock(); return psock; } void sk_psock_drop(struct sock *sk, struct sk_psock *psock); static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) { if (refcount_dec_and_test(&psock->refcnt)) sk_psock_drop(sk, psock); } static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock) { read_lock_bh(&sk->sk_callback_lock); if (psock->saved_data_ready) psock->saved_data_ready(sk); else sk->sk_data_ready(sk); read_unlock_bh(&sk->sk_callback_lock); } static inline void psock_set_prog(struct bpf_prog **pprog, struct bpf_prog *prog) { prog = xchg(pprog, prog); if (prog) bpf_prog_put(prog); } static inline int psock_replace_prog(struct bpf_prog **pprog, struct bpf_prog *prog, struct bpf_prog *old) { if (cmpxchg(pprog, old, prog) != old) return -ENOENT; if (old) bpf_prog_put(old); return 0; } static inline void psock_progs_drop(struct sk_psock_progs *progs) { psock_set_prog(&progs->msg_parser, NULL); psock_set_prog(&progs->stream_parser, NULL); psock_set_prog(&progs->stream_verdict, NULL); psock_set_prog(&progs->skb_verdict, NULL); } int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb); static inline bool sk_psock_strp_enabled(struct sk_psock *psock) { if (!psock) return false; return !!psock->saved_data_ready; } #if IS_ENABLED(CONFIG_NET_SOCK_MSG) #define BPF_F_STRPARSER (1UL << 1) /* We only have two bits so far. */ #define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER) static inline bool skb_bpf_strparser(const struct sk_buff *skb) { unsigned long sk_redir = skb->_sk_redir; return sk_redir & BPF_F_STRPARSER; } static inline void skb_bpf_set_strparser(struct sk_buff *skb) { skb->_sk_redir |= BPF_F_STRPARSER; } static inline bool skb_bpf_ingress(const struct sk_buff *skb) { unsigned long sk_redir = skb->_sk_redir; return sk_redir & BPF_F_INGRESS; } static inline void skb_bpf_set_ingress(struct sk_buff *skb) { skb->_sk_redir |= BPF_F_INGRESS; } static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir, bool ingress) { skb->_sk_redir = (unsigned long)sk_redir; if (ingress) skb->_sk_redir |= BPF_F_INGRESS; } static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb) { unsigned long sk_redir = skb->_sk_redir; return (struct sock *)(sk_redir & BPF_F_PTR_MASK); } static inline void skb_bpf_redirect_clear(struct sk_buff *skb) { skb->_sk_redir = 0; } #endif /* CONFIG_NET_SOCK_MSG */ #endif /* _LINUX_SKMSG_H */ |
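/*
 * The scatterlist in struct sk_msg is managed as a small ring of
 * NR_MSG_FRAG_IDS slots; sk_msg_iter_dist() and sk_msg_iter_var_next()
 * above implement the wrap-around arithmetic. A stand-alone sketch of that
 * arithmetic follows; RING_SLOTS stands in for NR_MSG_FRAG_IDS, and its
 * value of 18 merely assumes the common MAX_SKB_FRAGS of 17 rather than
 * being taken from this header.
 */
#include <stdio.h>

#define RING_SLOTS 18	/* NR_MSG_FRAG_IDS under the assumption above */

/* Same computation as sk_msg_iter_dist(): distance from start to end on the ring. */
static unsigned int iter_dist(unsigned int start, unsigned int end)
{
	return end >= start ? end - start : end + (RING_SLOTS - start);
}

/* Same computation as sk_msg_iter_var_next(): advance one slot with wrap-around. */
static unsigned int iter_next(unsigned int var)
{
	return ++var == RING_SLOTS ? 0 : var;
}

int main(void)
{
	unsigned int start = 15, end = start;
	int i;

	/* Queue four elements; end wraps past the last slot back to 0. */
	for (i = 0; i < 4; i++)
		end = iter_next(end);

	/* Prints "start=15 end=1 used=4": the wrap stays hidden from callers. */
	printf("start=%u end=%u used=%u\n", start, end, iter_dist(start, end));
	return 0;
}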
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { /** * @cache_sgt_mapping: * * If true the framework will cache the first mapping made for each * attachment. This avoids creating mappings for attachments multiple * times. */ bool cache_sgt_mapping; /** * @attach: * * This is called from dma_buf_attach() to make sure that a given * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters * which support buffer objects in special locations like VRAM or * device-specific carveout areas should check whether the buffer could * be move to system memory (or directly accessed by the provided * device), and otherwise need to fail the attach operation. * * The exporter should also in general check whether the current * allocation fulfills the DMA constraints of the new device. If this * is not the case, and the allocation cannot be moved, it should also * fail the attach operation. * * Any exporter-private housekeeping data can be stored in the * &dma_buf_attachment.priv pointer. * * This callback is optional. * * Returns: * * 0 on success, negative error code on failure. It might return -EBUSY * to signal that backing storage is already allocated and incompatible * with the requirements of requesting device. */ int (*attach)(struct dma_buf *, struct dma_buf_attachment *); /** * @detach: * * This is called by dma_buf_detach() to release a &dma_buf_attachment. * Provided so that exporters can clean up any housekeeping for an * &dma_buf_attachment. * * This callback is optional. */ void (*detach)(struct dma_buf *, struct dma_buf_attachment *); /** * @pin: * * This is called by dma_buf_pin() and lets the exporter know that the * DMA-buf can't be moved any more. Ideally, the exporter should * pin the buffer so that it is generally accessible by all * devices. * * This is called with the &dmabuf.resv object locked and is mutual * exclusive with @cache_sgt_mapping. * * This is called automatically for non-dynamic importers from * dma_buf_attach(). * * Note that similar to non-dynamic exporters in their @map_dma_buf * callback the driver must guarantee that the memory is available for * use and cleared of any old data by the time this function returns. * Drivers which pipeline their buffer moves internally must wait for * all moves and clears to complete. * * Returns: * * 0 on success, negative error code on failure. */ int (*pin)(struct dma_buf_attachment *attach); /** * @unpin: * * This is called by dma_buf_unpin() and lets the exporter know that the * DMA-buf can be moved again. * * This is called with the dmabuf->resv object locked and is mutual * exclusive with @cache_sgt_mapping. * * This callback is optional. */ void (*unpin)(struct dma_buf_attachment *attach); /** * @map_dma_buf: * * This is called by dma_buf_map_attachment() and is used to map a * shared &dma_buf into device address space, and it is mandatory. It * can only be called if @attach has been called successfully. * * This call may sleep, e.g. when the backing storage first needs to be * allocated, or moved to a location suitable for all currently attached * devices. * * Note that any specific buffer attributes required for this function * should get added to device_dma_parameters accessible via * &device.dma_params from the &dma_buf_attachment. The @attach callback * should also check these constraints. 
* * If this is being called for the first time, the exporter can now * choose to scan through the list of attachments for this buffer, * collate the requirements of the attached devices, and choose an * appropriate backing storage for the buffer. * * Based on enum dma_data_direction, it might be possible to have * multiple users accessing at the same time (for reading, maybe), or * any other kind of sharing that the exporter might wish to make * available to buffer-users. * * This is always called with the dmabuf->resv object locked when * the dynamic_mapping flag is true. * * Note that for non-dynamic exporters the driver must guarantee that * that the memory is available for use and cleared of any old data by * the time this function returns. Drivers which pipeline their buffer * moves internally must wait for all moves and clears to complete. * Dynamic exporters do not need to follow this rule: For non-dynamic * importers the buffer is already pinned through @pin, which has the * same requirements. Dynamic importers otoh are required to obey the * dma_resv fences. * * Returns: * * A &sg_table scatter list of the backing storage of the DMA buffer, * already mapped into the device address space of the &device attached * with the provided &dma_buf_attachment. The addresses and lengths in * the scatter list are PAGE_SIZE aligned. * * On failure, returns a negative error value wrapped into a pointer. * May also return -EINTR when a signal was received while being * blocked. * * Note that exporters should not try to cache the scatter list, or * return the same one for multiple calls. Caching is done either by the * DMA-BUF code (for non-dynamic importers) or the importer. Ownership * of the scatter list is transferred to the caller, and returned by * @unmap_dma_buf. */ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); /** * @unmap_dma_buf: * * This is called by dma_buf_unmap_attachment() and should unmap and * release the &sg_table allocated in @map_dma_buf, and it is mandatory. * For static dma_buf handling this might also unpin the backing * storage if this is the last mapping of the DMA buffer. */ void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY * if the call would block. */ /** * @release: * * Called after the last dma_buf_put to release the &dma_buf, and * mandatory. */ void (*release)(struct dma_buf *); /** * @begin_cpu_access: * * This is called from dma_buf_begin_cpu_access() and allows the * exporter to ensure that the memory is actually coherent for cpu * access. The exporter also needs to ensure that cpu access is coherent * for the access direction. The direction can be used by the exporter * to optimize the cache flushing, i.e. access with a different * direction (read instead of write) might return stale or even bogus * data (e.g. when the exporter needs to copy the data to temporary * storage). * * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL * command for userspace mappings established through @mmap, and also * for kernel mappings established with @vmap. * * This callback is optional. * * Returns: * * 0 on success or a negative error code on failure. This can for * example fail when the backing storage can't be allocated. Can also * return -ERESTARTSYS or -EINTR when the call has been interrupted and * needs to be restarted. 
*/ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); /** * @end_cpu_access: * * This is called from dma_buf_end_cpu_access() when the importer is * done accessing the CPU. The exporter can use this to flush caches and * undo anything else done in @begin_cpu_access. * * This callback is optional. * * Returns: * * 0 on success or a negative error code on failure. Can return * -ERESTARTSYS or -EINTR when the call has been interrupted and needs * to be restarted. */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); /** * @mmap: * * This callback is used by the dma_buf_mmap() function * * Note that the mapping needs to be incoherent, userspace is expected * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface. * * Because dma-buf buffers have invariant size over their lifetime, the * dma-buf core checks whether a vma is too large and rejects such * mappings. The exporter hence does not need to duplicate this check. * Drivers do not need to check this themselves. * * If an exporter needs to manually flush caches and hence needs to fake * coherency for mmap support, it needs to be able to zap all the ptes * pointing at the backing storage. Now linux mm needs a struct * address_space associated with the struct file stored in vma->vm_file * to do that with the function unmap_mapping_range. But the dma_buf * framework only backs every dma_buf fd with the anon_file struct file, * i.e. all dma_bufs share the same file. * * Hence exporters need to setup their own file (and address_space) * association by setting vma->vm_file and adjusting vma->vm_pgoff in * the dma_buf mmap callback. In the specific case of a gem driver the * exporter could use the shmem file already provided by gem (and set * vm_pgoff = 0). Exporters can then zap ptes by unmapping the * corresponding range of the struct address_space associated with their * own file. * * This callback is optional. * * Returns: * * 0 on success or a negative error code on failure. */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map); void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map); }; /** * struct dma_buf - shared buffer object * * This represents a shared buffer, created by calling dma_buf_export(). The * userspace representation is a normal file descriptor, which can be created by * calling dma_buf_fd(). * * Shared dma buffers are reference counted using dma_buf_put() and * get_dma_buf(). * * Device DMA access is handled by the separate &struct dma_buf_attachment. */ struct dma_buf { /** * @size: * * Size of the buffer; invariant over the lifetime of the buffer. */ size_t size; /** * @file: * * File pointer used for sharing buffers across, and for refcounting. * See dma_buf_get() and dma_buf_put(). */ struct file *file; /** * @attachments: * * List of dma_buf_attachment that denotes all devices attached, * protected by &dma_resv lock @resv. */ struct list_head attachments; /** @ops: dma_buf_ops associated with this buffer object. */ const struct dma_buf_ops *ops; /** * @vmapping_counter: * * Used internally to refcnt the vmaps returned by dma_buf_vmap(). * Protected by @lock. */ unsigned vmapping_counter; /** * @vmap_ptr: * The current vmap ptr if @vmapping_counter > 0. Protected by @lock. */ struct iosys_map vmap_ptr; /** * @exp_name: * * Name of the exporter; useful for debugging. Must not be NULL */ const char *exp_name; /** * @name: * * Userspace-provided name. Default value is NULL. 
If not NULL, * length cannot be longer than DMA_BUF_NAME_LEN, including NIL * char. Useful for accounting and debugging. Read/Write accesses * are protected by @name_lock * * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B */ const char *name; /** @name_lock: Spinlock to protect name access for read access. */ spinlock_t name_lock; /** * @owner: * * Pointer to exporter module; used for refcounting when exporter is a * kernel module. */ struct module *owner; #if IS_ENABLED(CONFIG_DEBUG_FS) /** @list_node: node for dma_buf accounting and debugging. */ struct list_head list_node; #endif /** @priv: exporter specific private data for this buffer object. */ void *priv; /** * @resv: * * Reservation object linked to this dma-buf. * * IMPLICIT SYNCHRONIZATION RULES: * * Drivers which support implicit synchronization of buffer access as * e.g. exposed in `Implicit Fence Poll Support`_ must follow the * below rules. * * - Drivers must add a read fence through dma_resv_add_fence() with the * DMA_RESV_USAGE_READ flag for anything the userspace API considers a * read access. This highly depends upon the API and window system. * * - Similarly drivers must add a write fence through * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for * anything the userspace API considers write access. * * - Drivers may just always add a write fence, since that only * causes unnecessary synchronization, but no correctness issues. * * - Some drivers only expose a synchronous userspace API with no * pipelining across drivers. These do not set any fences for their * access. An example here is v4l. * * - Driver should use dma_resv_usage_rw() when retrieving fences as * dependency for implicit synchronization. * * DYNAMIC IMPORTER RULES: * * Dynamic importers, see dma_buf_attachment_is_dynamic(), have * additional constraints on how they set up fences: * * - Dynamic importers must obey the write fences and wait for them to * signal before allowing access to the buffer's underlying storage * through the device. * * - Dynamic importers should set fences for any access that they can't * disable immediately from their &dma_buf_attach_ops.move_notify * callback. * * IMPORTANT: * * All drivers and memory management related functions must obey the * struct dma_resv rules, specifically the rules for updating and * obeying fences. See enum dma_resv_usage for further descriptions. */ struct dma_resv *resv; /** @poll: for userspace poll support */ wait_queue_head_t poll; /** @cb_in: for userspace poll support */ /** @cb_out: for userspace poll support */ struct dma_buf_poll_cb_t { struct dma_fence_cb cb; wait_queue_head_t *poll; __poll_t active; } cb_in, cb_out; #ifdef CONFIG_DMABUF_SYSFS_STATS /** * @sysfs_entry: * * For exposing information about this buffer in sysfs. See also * `DMA-BUF statistics`_ for the uapi this enables. */ struct dma_buf_sysfs_entry { struct kobject kobj; struct dma_buf *dmabuf; } *sysfs_entry; #endif }; /** * struct dma_buf_attach_ops - importer operations for an attachment * * Attachment operations implemented by the importer. */ struct dma_buf_attach_ops { /** * @allow_peer2peer: * * If this is set to true the importer must be able to handle peer * resources without struct pages. */ bool allow_peer2peer; /** * @move_notify: [optional] notification that the DMA-buf is moving * * If this callback is provided the framework can avoid pinning the * backing store while mappings exists. 
* * This callback is called with the lock of the reservation object * associated with the dma_buf held and the mapping function must be * called with this lock held as well. This makes sure that no mapping * is created concurrently with an ongoing move operation. * * Mappings stay valid and are not directly affected by this callback. * But the DMA-buf can now be in a different physical location, so all * mappings should be destroyed and re-created as soon as possible. * * New mappings can be created after this callback returns, and will * point to the new location of the DMA-buf. */ void (*move_notify)(struct dma_buf_attachment *attach); }; /** * struct dma_buf_attachment - holds device-buffer attachment data * @dmabuf: buffer for this attachment. * @dev: device attached to the buffer. * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf. * @sgt: cached mapping. * @dir: direction of cached mapping. * @peer2peer: true if the importer can handle peer resources without pages. * @priv: exporter specific attachment data. * @importer_ops: importer operations for this attachment, if provided * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held. * @importer_priv: importer specific attachment data. * * This structure holds the attachment information between the dma_buf buffer * and its user device(s). The list contains one attachment struct per device * attached to the buffer. * * An attachment is created by calling dma_buf_attach(), and released again by * calling dma_buf_detach(). The DMA mapping itself needed to initiate a * transfer is created by dma_buf_map_attachment() and freed again by calling * dma_buf_unmap_attachment(). */ struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; struct sg_table *sgt; enum dma_data_direction dir; bool peer2peer; const struct dma_buf_attach_ops *importer_ops; void *importer_priv; void *priv; }; /** * struct dma_buf_export_info - holds information needed to export a dma_buf * @exp_name: name of the exporter - useful for debugging. * @owner: pointer to exporter module - used for refcounting kernel module * @ops: Attach allocator-defined dma buf ops to the new buffer * @size: Size of the buffer - invariant over the lifetime of the buffer * @flags: mode flags for the file * @resv: reservation-object, NULL to allocate default one * @priv: Attach private data of allocator to this buffer * * This structure holds the information required to export the buffer. Used * with dma_buf_export() only. */ struct dma_buf_export_info { const char *exp_name; struct module *owner; const struct dma_buf_ops *ops; size_t size; int flags; struct dma_resv *resv; void *priv; }; /** * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters * @name: export-info name * * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info, * zeroes it out and pre-populates exp_name in it. */ #define DEFINE_DMA_BUF_EXPORT_INFO(name) \ struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ .owner = THIS_MODULE } /** * get_dma_buf - convenience wrapper for get_file. * @dmabuf: [in] pointer to dma_buf * * Increments the reference count on the dma-buf, needed in case of drivers * that either need to create additional references to the dmabuf on the * kernel side. For example, an exporter that needs to keep a dmabuf ptr * so that subsequent exports don't create a new dmabuf. 
*/ static inline void get_dma_buf(struct dma_buf *dmabuf) { get_file(dmabuf->file); } /** * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings. * @dmabuf: the DMA-buf to check * * Returns true if a DMA-buf exporter wants to be called with the dma_resv * locked for the map/unmap callbacks, false if it doesn't wants to be called * with the lock held. */ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf) { return !!dmabuf->ops->pin; } /** * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic * mappings * @attach: the DMA-buf attachment to check * * Returns true if a DMA-buf importer wants to call the map/unmap functions with * the dma_resv lock held. */ static inline bool dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) { return !!attach->importer_ops; } struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev); struct dma_buf_attachment * dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, const struct dma_buf_attach_ops *importer_ops, void *importer_priv); void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach); int dma_buf_pin(struct dma_buf_attachment *attach); void dma_buf_unpin(struct dma_buf_attachment *attach); struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info); int dma_buf_fd(struct dma_buf *dmabuf, int flags); struct dma_buf *dma_buf_get(int fd); void dma_buf_put(struct dma_buf *dmabuf); struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, enum dma_data_direction); void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); void dma_buf_move_notify(struct dma_buf *dma_buf); int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); int dma_buf_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); struct sg_table * dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach, enum dma_data_direction direction); void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, struct sg_table *sg_table, enum dma_data_direction direction); int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map); void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map); int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map); void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map); #endif /* __DMA_BUF_H__ */ |
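/*
 * A condensed, hypothetical exporter skeleton built from the declarations
 * above: fill a struct dma_buf_export_info (via DEFINE_DMA_BUF_EXPORT_INFO),
 * provide the mandatory @map_dma_buf, @unmap_dma_buf and @release ops, and
 * call dma_buf_export(). "struct my_buffer" and the my_buf_*() helpers are
 * placeholders for a real allocator and are not part of this header.
 */
struct my_buffer;	/* placeholder allocator object */
struct sg_table *my_buf_get_sgt(struct my_buffer *buf, struct device *dev,
				enum dma_data_direction dir);
void my_buf_free(struct my_buffer *buf);

static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct my_buffer *buf = attach->dmabuf->priv;

	/* Map the backing storage for attach->dev; ownership of the returned
	 * sg_table passes to the caller, as @map_dma_buf requires.
	 */
	return my_buf_get_sgt(buf, attach->dev, dir);
}

static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, sgt, dir, 0);
	sg_free_table(sgt);
	kfree(sgt);	/* assumes the sg_table was kmalloc'ed by the helper */
}

static void my_release(struct dma_buf *dmabuf)
{
	/* Last dma_buf_put() dropped the final reference. */
	my_buf_free(dmabuf->priv);
}

static const struct dma_buf_ops my_dmabuf_ops = {
	.map_dma_buf	= my_map_dma_buf,	/* mandatory */
	.unmap_dma_buf	= my_unmap_dma_buf,	/* mandatory */
	.release	= my_release,		/* mandatory */
};

static struct dma_buf *my_export(struct my_buffer *buf, size_t size)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &my_dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;

	/* Returns an ERR_PTR() on failure; callers usually follow up with
	 * dma_buf_fd() to hand the buffer to user space.
	 */
	return dma_buf_export(&exp_info);
}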
// SPDX-License-Identifier: GPL-2.0
/*
 * Block device concurrent positioning ranges.
 *
 * Copyright (C) 2021 Western Digital Corporation or its Affiliates.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>

#include "blk.h"

static ssize_t
blk_ia_range_sector_show(struct blk_independent_access_range *iar,
			 char *buf)
{
	return sprintf(buf, "%llu\n", iar->sector);
}

static ssize_t
blk_ia_range_nr_sectors_show(struct blk_independent_access_range *iar,
			     char *buf)
{
	return sprintf(buf, "%llu\n", iar->nr_sectors);
}

struct blk_ia_range_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_independent_access_range *iar, char *buf);
};

static struct blk_ia_range_sysfs_entry blk_ia_range_sector_entry = {
	.attr = { .name = "sector", .mode = 0444 },
	.show = blk_ia_range_sector_show,
};

static struct blk_ia_range_sysfs_entry blk_ia_range_nr_sectors_entry = {
	.attr = { .name = "nr_sectors", .mode = 0444 },
	.show = blk_ia_range_nr_sectors_show,
};

static struct attribute *blk_ia_range_attrs[] = {
	&blk_ia_range_sector_entry.attr,
	&blk_ia_range_nr_sectors_entry.attr,
	NULL,
};
ATTRIBUTE_GROUPS(blk_ia_range);

static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct blk_ia_range_sysfs_entry *entry =
		container_of(attr, struct blk_ia_range_sysfs_entry, attr);
	struct blk_independent_access_range *iar =
		container_of(kobj, struct blk_independent_access_range, kobj);

	return entry->show(iar, buf);
}

static const struct sysfs_ops blk_ia_range_sysfs_ops = {
	.show = blk_ia_range_sysfs_show,
};

/*
 * Independent access range entries are not freed individually, but altogether
 * with struct blk_independent_access_ranges and its array of ranges. Since
 * kobject_add() takes a reference on the parent kobject contained in
 * struct blk_independent_access_ranges, the array of independent access range
 * entries cannot be freed until kobject_del() is called for all entries.
 * So we do not need to do anything here, but still need this no-op release
 * operation to avoid complaints from the kobject code.
*/ static void blk_ia_range_sysfs_nop_release(struct kobject *kobj) { } static const struct kobj_type blk_ia_range_ktype = { .sysfs_ops = &blk_ia_range_sysfs_ops, .default_groups = blk_ia_range_groups, .release = blk_ia_range_sysfs_nop_release, }; /* * This will be executed only after all independent access range entries are * removed with kobject_del(), at which point, it is safe to free everything, * including the array of ranges. */ static void blk_ia_ranges_sysfs_release(struct kobject *kobj) { struct blk_independent_access_ranges *iars = container_of(kobj, struct blk_independent_access_ranges, kobj); kfree(iars); } static const struct kobj_type blk_ia_ranges_ktype = { .release = blk_ia_ranges_sysfs_release, }; /** * disk_register_independent_access_ranges - register with sysfs a set of * independent access ranges * @disk: Target disk * * Register with sysfs a set of independent access ranges for @disk. */ int disk_register_independent_access_ranges(struct gendisk *disk) { struct blk_independent_access_ranges *iars = disk->ia_ranges; struct request_queue *q = disk->queue; int i, ret; lockdep_assert_held(&q->sysfs_lock); if (!iars) return 0; /* * At this point, iars is the new set of sector access ranges that needs * to be registered with sysfs. */ WARN_ON(iars->sysfs_registered); ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype, &disk->queue_kobj, "%s", "independent_access_ranges"); if (ret) { disk->ia_ranges = NULL; kobject_put(&iars->kobj); return ret; } for (i = 0; i < iars->nr_ia_ranges; i++) { ret = kobject_init_and_add(&iars->ia_range[i].kobj, &blk_ia_range_ktype, &iars->kobj, "%d", i); if (ret) { while (--i >= 0) kobject_del(&iars->ia_range[i].kobj); kobject_del(&iars->kobj); kobject_put(&iars->kobj); return ret; } } iars->sysfs_registered = true; return 0; } void disk_unregister_independent_access_ranges(struct gendisk *disk) { struct request_queue *q = disk->queue; struct blk_independent_access_ranges *iars = disk->ia_ranges; int i; lockdep_assert_held(&q->sysfs_lock); if (!iars) return; if (iars->sysfs_registered) { for (i = 0; i < iars->nr_ia_ranges; i++) kobject_del(&iars->ia_range[i].kobj); kobject_del(&iars->kobj); kobject_put(&iars->kobj); } else { kfree(iars); } disk->ia_ranges = NULL; } static struct blk_independent_access_range * disk_find_ia_range(struct blk_independent_access_ranges *iars, sector_t sector) { struct blk_independent_access_range *iar; int i; for (i = 0; i < iars->nr_ia_ranges; i++) { iar = &iars->ia_range[i]; if (sector >= iar->sector && sector < iar->sector + iar->nr_sectors) return iar; } return NULL; } static bool disk_check_ia_ranges(struct gendisk *disk, struct blk_independent_access_ranges *iars) { struct blk_independent_access_range *iar, *tmp; sector_t capacity = get_capacity(disk); sector_t sector = 0; int i; if (WARN_ON_ONCE(!iars->nr_ia_ranges)) return false; /* * While sorting the ranges in increasing LBA order, check that the * ranges do not overlap, that there are no sector holes and that all * sectors belong to one range. 
*/ for (i = 0; i < iars->nr_ia_ranges; i++) { tmp = disk_find_ia_range(iars, sector); if (!tmp || tmp->sector != sector) { pr_warn("Invalid non-contiguous independent access ranges\n"); return false; } iar = &iars->ia_range[i]; if (tmp != iar) { swap(iar->sector, tmp->sector); swap(iar->nr_sectors, tmp->nr_sectors); } sector += iar->nr_sectors; } if (sector != capacity) { pr_warn("Independent access ranges do not match disk capacity\n"); return false; } return true; } static bool disk_ia_ranges_changed(struct gendisk *disk, struct blk_independent_access_ranges *new) { struct blk_independent_access_ranges *old = disk->ia_ranges; int i; if (!old) return true; if (old->nr_ia_ranges != new->nr_ia_ranges) return true; for (i = 0; i < old->nr_ia_ranges; i++) { if (new->ia_range[i].sector != old->ia_range[i].sector || new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors) return true; } return false; } /** * disk_alloc_independent_access_ranges - Allocate an independent access ranges * data structure * @disk: target disk * @nr_ia_ranges: Number of independent access ranges * * Allocate a struct blk_independent_access_ranges structure with @nr_ia_ranges * access range descriptors. */ struct blk_independent_access_ranges * disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges) { struct blk_independent_access_ranges *iars; iars = kzalloc_node(struct_size(iars, ia_range, nr_ia_ranges), GFP_KERNEL, disk->queue->node); if (iars) iars->nr_ia_ranges = nr_ia_ranges; return iars; } EXPORT_SYMBOL_GPL(disk_alloc_independent_access_ranges); /** * disk_set_independent_access_ranges - Set a disk independent access ranges * @disk: target disk * @iars: independent access ranges structure * * Set the independent access ranges information of the request queue * of @disk to @iars. If @iars is NULL and the independent access ranges * structure already set is cleared. If there are no differences between * @iars and the independent access ranges structure already set, @iars * is freed. */ void disk_set_independent_access_ranges(struct gendisk *disk, struct blk_independent_access_ranges *iars) { struct request_queue *q = disk->queue; mutex_lock(&q->sysfs_lock); if (iars && !disk_check_ia_ranges(disk, iars)) { kfree(iars); iars = NULL; } if (iars && !disk_ia_ranges_changed(disk, iars)) { kfree(iars); goto unlock; } /* * This may be called for a registered queue. E.g. during a device * revalidation. If that is the case, we need to unregister the old * set of independent access ranges and register the new set. If the * queue is not registered, registration of the device request queue * will register the independent access ranges. */ disk_unregister_independent_access_ranges(disk); disk->ia_ranges = iars; if (blk_queue_registered(q)) disk_register_independent_access_ranges(disk); unlock: mutex_unlock(&q->sysfs_lock); } EXPORT_SYMBOL_GPL(disk_set_independent_access_ranges); |
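/*
 * A hypothetical driver-side sketch using only the two exported helpers
 * above: allocate the descriptor array, fill the ranges so that they tile
 * the disk capacity exactly (disk_check_ia_ranges() enforces this), then
 * hand ownership to the block layer. The two-equal-halves layout is an
 * example; a real driver would use the boundaries reported by the device.
 */
static void my_disk_setup_access_ranges(struct gendisk *disk)
{
	struct blk_independent_access_ranges *iars;
	sector_t half = get_capacity(disk) / 2;

	iars = disk_alloc_independent_access_ranges(disk, 2);
	if (!iars)
		return;		/* the ranges are an optional optimization */

	iars->ia_range[0].sector = 0;
	iars->ia_range[0].nr_sectors = half;
	iars->ia_range[1].sector = half;
	iars->ia_range[1].nr_sectors = get_capacity(disk) - half;

	/*
	 * Ownership of iars passes to the block layer: it is freed there if
	 * the ranges are invalid or unchanged, and registered with sysfs
	 * otherwise.
	 */
	disk_set_independent_access_ranges(disk, iars);
}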
// SPDX-License-Identifier: GPL-2.0-or-later /* * Syntek STK1135 subdriver * * Copyright (c) 2013 Ondrej Zary * * Based on Syntekdriver (stk11xx) by Nicolas VIVIEN: * http://syntekdriver.sourceforge.net */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "stk1135" #include "gspca.h" #include "stk1135.h" MODULE_AUTHOR("Ondrej Zary"); MODULE_DESCRIPTION("Syntek STK1135 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ u8 pkt_seq; u8 sensor_page; bool flip_status; u8 flip_debounce; struct v4l2_ctrl *hflip; struct v4l2_ctrl *vflip; }; static const struct v4l2_pix_format stk1135_modes[] = { /* default mode (this driver supports variable resolution) */ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB}, }; /* -- read a register -- */ static u8 reg_r(struct gspca_dev *gspca_dev, u16 index) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return 0; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, index, gspca_dev->usb_buf, 1, 500); gspca_dbg(gspca_dev, D_USBI, "reg_r 0x%x=0x%02x\n", index, gspca_dev->usb_buf[0]); if (ret < 0) { pr_err("reg_r 0x%x err %d\n", index, ret); gspca_dev->usb_err = ret; return 0; } return gspca_dev->usb_buf[0]; } /* -- write a register -- */ static void reg_w(struct gspca_dev *gspca_dev, u16 index, u8 val) { int ret; struct usb_device *dev = gspca_dev->dev; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, index, NULL, 0, 500); gspca_dbg(gspca_dev, D_USBO, "reg_w 0x%x:=0x%02x\n", index, val); if (ret < 0) { pr_err("reg_w 0x%x err %d\n", index, ret); gspca_dev->usb_err = ret; } } static void reg_w_mask(struct gspca_dev *gspca_dev, u16 index, u8 val, u8 mask) { val = (reg_r(gspca_dev, index) & ~mask) | (val & mask); reg_w(gspca_dev, index, val); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { gspca_dev->cam.cam_mode = stk1135_modes; gspca_dev->cam.nmodes = ARRAY_SIZE(stk1135_modes); return 0; } static int stk1135_serial_wait_ready(struct gspca_dev *gspca_dev) { int i = 0; u8 val; do { val = reg_r(gspca_dev, STK1135_REG_SICTL + 1); if (i++ > 500) { /* maximum retry count */ pr_err("serial bus timeout: status=0x%02x\n", val); return -1; } /* repeat if BUSY or WRITE/READ not finished */ } while ((val & 0x10) || !(val & 0x05)); return 0; } static u8 sensor_read_8(struct gspca_dev *gspca_dev, u8 addr) { reg_w(gspca_dev, STK1135_REG_SBUSR, addr); /* begin read */ reg_w(gspca_dev, STK1135_REG_SICTL, 0x20); /* wait until finished */ if (stk1135_serial_wait_ready(gspca_dev)) { pr_err("Sensor read failed\n"); return 0; } return reg_r(gspca_dev, STK1135_REG_SBUSR + 1); } static u16 sensor_read_16(struct gspca_dev *gspca_dev, u8 addr) { return (sensor_read_8(gspca_dev, addr) << 8) | sensor_read_8(gspca_dev, 0xf1); } static void sensor_write_8(struct gspca_dev *gspca_dev, u8 addr, u8 data) { /* load address and data registers */ reg_w(gspca_dev, STK1135_REG_SBUSW, addr); reg_w(gspca_dev, STK1135_REG_SBUSW + 1, data); /* begin write */ reg_w(gspca_dev, STK1135_REG_SICTL, 0x01); /* wait until finished */ if (stk1135_serial_wait_ready(gspca_dev)) { pr_err("Sensor write failed\n"); return; } } static void sensor_write_16(struct gspca_dev *gspca_dev, u8 addr, u16 data) { sensor_write_8(gspca_dev, addr, data >> 8); sensor_write_8(gspca_dev, 0xf1, data & 0xff); } static void sensor_set_page(struct gspca_dev *gspca_dev, u8 page) { struct sd *sd = (struct sd *) gspca_dev; if (page != sd->sensor_page) { sensor_write_16(gspca_dev, 0xf0, page); sd->sensor_page = page; } } static u16 sensor_read(struct gspca_dev *gspca_dev, u16 reg) { sensor_set_page(gspca_dev, reg >> 8); return sensor_read_16(gspca_dev, reg & 0xff); } static void 
sensor_write(struct gspca_dev *gspca_dev, u16 reg, u16 val) { sensor_set_page(gspca_dev, reg >> 8); sensor_write_16(gspca_dev, reg & 0xff, val); } static void sensor_write_mask(struct gspca_dev *gspca_dev, u16 reg, u16 val, u16 mask) { val = (sensor_read(gspca_dev, reg) & ~mask) | (val & mask); sensor_write(gspca_dev, reg, val); } struct sensor_val { u16 reg; u16 val; }; /* configure MT9M112 sensor */ static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev) { static const struct sensor_val cfg[] = { /* restart&reset, chip enable, reserved */ { 0x00d, 0x000b }, { 0x00d, 0x0008 }, { 0x035, 0x0022 }, /* mode ctl: AWB on, AE both, clip aper corr, defect corr, AE */ { 0x106, 0x700e }, { 0x2dd, 0x18e0 }, /* B-R thresholds, */ /* AWB */ { 0x21f, 0x0180 }, /* Cb and Cr limits */ { 0x220, 0xc814 }, { 0x221, 0x8080 }, /* lum limits, RGB gain */ { 0x222, 0xa078 }, { 0x223, 0xa078 }, /* R, B limit */ { 0x224, 0x5f20 }, { 0x228, 0xea02 }, /* mtx adj lim, adv ctl */ { 0x229, 0x867a }, /* wide gates */ /* Color correction */ /* imager gains base, delta, delta signs */ { 0x25e, 0x594c }, { 0x25f, 0x4d51 }, { 0x260, 0x0002 }, /* AWB adv ctl 2, gain offs */ { 0x2ef, 0x0008 }, { 0x2f2, 0x0000 }, /* base matrix signs, scale K1-5, K6-9 */ { 0x202, 0x00ee }, { 0x203, 0x3923 }, { 0x204, 0x0724 }, /* base matrix coef */ { 0x209, 0x00cd }, { 0x20a, 0x0093 }, { 0x20b, 0x0004 },/*K1-3*/ { 0x20c, 0x005c }, { 0x20d, 0x00d9 }, { 0x20e, 0x0053 },/*K4-6*/ { 0x20f, 0x0008 }, { 0x210, 0x0091 }, { 0x211, 0x00cf },/*K7-9*/ { 0x215, 0x0000 }, /* delta mtx signs */ /* delta matrix coef */ { 0x216, 0x0000 }, { 0x217, 0x0000 }, { 0x218, 0x0000 },/*D1-3*/ { 0x219, 0x0000 }, { 0x21a, 0x0000 }, { 0x21b, 0x0000 },/*D4-6*/ { 0x21c, 0x0000 }, { 0x21d, 0x0000 }, { 0x21e, 0x0000 },/*D7-9*/ /* enable & disable manual WB to apply color corr. settings */ { 0x106, 0xf00e }, { 0x106, 0x700e }, /* Lens shading correction */ { 0x180, 0x0007 }, /* control */ /* vertical knee 0, 2+1, 4+3 */ { 0x181, 0xde13 }, { 0x182, 0xebe2 }, { 0x183, 0x00f6 }, /* R */ { 0x184, 0xe114 }, { 0x185, 0xeadd }, { 0x186, 0xfdf6 }, /* G */ { 0x187, 0xe511 }, { 0x188, 0xede6 }, { 0x189, 0xfbf7 }, /* B */ /* horizontal knee 0, 2+1, 4+3, 5 */ { 0x18a, 0xd613 }, { 0x18b, 0xedec }, /* R .. */ { 0x18c, 0xf9f2 }, { 0x18d, 0x0000 }, /* .. R */ { 0x18e, 0xd815 }, { 0x18f, 0xe9ea }, /* G .. */ { 0x190, 0xf9f1 }, { 0x191, 0x0002 }, /* .. G */ { 0x192, 0xde10 }, { 0x193, 0xefef }, /* B .. */ { 0x194, 0xfbf4 }, { 0x195, 0x0002 }, /* .. 
B */ /* vertical knee 6+5, 8+7 */ { 0x1b6, 0x0e06 }, { 0x1b7, 0x2713 }, /* R */ { 0x1b8, 0x1106 }, { 0x1b9, 0x2713 }, /* G */ { 0x1ba, 0x0c03 }, { 0x1bb, 0x2a0f }, /* B */ /* horizontal knee 7+6, 9+8, 10 */ { 0x1bc, 0x1208 }, { 0x1bd, 0x1a16 }, { 0x1be, 0x0022 }, /* R */ { 0x1bf, 0x150a }, { 0x1c0, 0x1c1a }, { 0x1c1, 0x002d }, /* G */ { 0x1c2, 0x1109 }, { 0x1c3, 0x1414 }, { 0x1c4, 0x002a }, /* B */ { 0x106, 0x740e }, /* enable lens shading correction */ /* Gamma correction - context A */ { 0x153, 0x0b03 }, { 0x154, 0x4722 }, { 0x155, 0xac82 }, { 0x156, 0xdac7 }, { 0x157, 0xf5e9 }, { 0x158, 0xff00 }, /* Gamma correction - context B */ { 0x1dc, 0x0b03 }, { 0x1dd, 0x4722 }, { 0x1de, 0xac82 }, { 0x1df, 0xdac7 }, { 0x1e0, 0xf5e9 }, { 0x1e1, 0xff00 }, /* output format: RGB, invert output pixclock, output bayer */ { 0x13a, 0x4300 }, { 0x19b, 0x4300 }, /* for context A, B */ { 0x108, 0x0180 }, /* format control - enable bayer row flip */ { 0x22f, 0xd100 }, { 0x29c, 0xd100 }, /* AE A, B */ /* default prg conf, prg ctl - by 0x2d2, prg advance - PA1 */ { 0x2d2, 0x0000 }, { 0x2cc, 0x0004 }, { 0x2cb, 0x0001 }, { 0x22e, 0x0c3c }, { 0x267, 0x1010 }, /* AE tgt ctl, gain lim */ /* PLL */ { 0x065, 0xa000 }, /* clk ctl - enable PLL (clear bit 14) */ { 0x066, 0x2003 }, { 0x067, 0x0501 }, /* PLL M=128, N=3, P=1 */ { 0x065, 0x2000 }, /* disable PLL bypass (clear bit 15) */ { 0x005, 0x01b8 }, { 0x007, 0x00d8 }, /* horiz blanking B, A */ /* AE line size, shutter delay limit */ { 0x239, 0x06c0 }, { 0x23b, 0x040e }, /* for context A */ { 0x23a, 0x06c0 }, { 0x23c, 0x0564 }, /* for context B */ /* shutter width basis 60Hz, 50Hz */ { 0x257, 0x0208 }, { 0x258, 0x0271 }, /* for context A */ { 0x259, 0x0209 }, { 0x25a, 0x0271 }, /* for context B */ { 0x25c, 0x120d }, { 0x25d, 0x1712 }, /* flicker 60Hz, 50Hz */ { 0x264, 0x5e1c }, /* reserved */ /* flicker, AE gain limits, gain zone limits */ { 0x25b, 0x0003 }, { 0x236, 0x7810 }, { 0x237, 0x8304 }, { 0x008, 0x0021 }, /* vert blanking A */ }; int i; u16 width, height; for (i = 0; i < ARRAY_SIZE(cfg); i++) sensor_write(gspca_dev, cfg[i].reg, cfg[i].val); /* set output size */ width = gspca_dev->pixfmt.width; height = gspca_dev->pixfmt.height; if (width <= 640 && height <= 512) { /* context A (half readout speed)*/ sensor_write(gspca_dev, 0x1a7, width); sensor_write(gspca_dev, 0x1aa, height); /* set read mode context A */ sensor_write(gspca_dev, 0x0c8, 0x0000); /* set resize, read mode, vblank, hblank context A */ sensor_write(gspca_dev, 0x2c8, 0x0000); } else { /* context B (full readout speed) */ sensor_write(gspca_dev, 0x1a1, width); sensor_write(gspca_dev, 0x1a4, height); /* set read mode context B */ sensor_write(gspca_dev, 0x0c8, 0x0008); /* set resize, read mode, vblank, hblank context B */ sensor_write(gspca_dev, 0x2c8, 0x040b); } } static void stk1135_configure_clock(struct gspca_dev *gspca_dev) { /* configure SCLKOUT */ reg_w(gspca_dev, STK1135_REG_TMGEN, 0x12); /* set 1 clock per pixel */ /* and positive edge clocked pulse high when pixel counter = 0 */ reg_w(gspca_dev, STK1135_REG_TCP1 + 0, 0x41); reg_w(gspca_dev, STK1135_REG_TCP1 + 1, 0x00); reg_w(gspca_dev, STK1135_REG_TCP1 + 2, 0x00); reg_w(gspca_dev, STK1135_REG_TCP1 + 3, 0x00); /* enable CLKOUT for sensor */ reg_w(gspca_dev, STK1135_REG_SENSO + 0, 0x10); /* disable STOP clock */ reg_w(gspca_dev, STK1135_REG_SENSO + 1, 0x00); /* set lower 8 bits of PLL feedback divider */ reg_w(gspca_dev, STK1135_REG_SENSO + 3, 0x07); /* set other PLL parameters */ reg_w(gspca_dev, STK1135_REG_PLLFD, 0x06); /* enable timing 
generator */ reg_w(gspca_dev, STK1135_REG_TMGEN, 0x80); /* enable PLL */ reg_w(gspca_dev, STK1135_REG_SENSO + 2, 0x04); /* set serial interface clock divider (30 MHz / (0x1f * 16 + 2) ≈ 60 kHz) */ reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f); /* wait a while for sensor to catch up */ udelay(1000); } static void stk1135_camera_disable(struct gspca_dev *gspca_dev) { /* set capture end Y position to 0 */ reg_w(gspca_dev, STK1135_REG_CIEPO + 2, 0x00); reg_w(gspca_dev, STK1135_REG_CIEPO + 3, 0x00); /* disable capture */ reg_w_mask(gspca_dev, STK1135_REG_SCTRL, 0x00, 0x80); /* enable sensor standby and disable chip enable */ sensor_write_mask(gspca_dev, 0x00d, 0x0004, 0x000c); /* disable PLL */ reg_w_mask(gspca_dev, STK1135_REG_SENSO + 2, 0x00, 0x01); /* disable timing generator */ reg_w(gspca_dev, STK1135_REG_TMGEN, 0x00); /* enable STOP clock */ reg_w(gspca_dev, STK1135_REG_SENSO + 1, 0x20); /* disable CLKOUT for sensor */ reg_w(gspca_dev, STK1135_REG_SENSO, 0x00); /* disable sensor (GPIO5) and enable GPIO0,3,6 (?) - sensor standby? */ reg_w(gspca_dev, STK1135_REG_GCTRL, 0x49); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { u16 sensor_id; char *sensor_name; struct sd *sd = (struct sd *) gspca_dev; /* set GPIO3,4,5,6 direction to output */ reg_w(gspca_dev, STK1135_REG_GCTRL + 2, 0x78); /* enable sensor (GPIO5) */ reg_w(gspca_dev, STK1135_REG_GCTRL, (1 << 5)); /* disable ROM interface */ reg_w(gspca_dev, STK1135_REG_GCTRL + 3, 0x80); /* enable interrupts from GPIO8 (flip sensor) and GPIO9 (???) */ reg_w(gspca_dev, STK1135_REG_ICTRL + 1, 0x00); reg_w(gspca_dev, STK1135_REG_ICTRL + 3, 0x03); /* enable remote wakeup from GPIO9 (???) */ reg_w(gspca_dev, STK1135_REG_RMCTL + 1, 0x00); reg_w(gspca_dev, STK1135_REG_RMCTL + 3, 0x02); /* reset serial interface */ reg_w(gspca_dev, STK1135_REG_SICTL, 0x80); reg_w(gspca_dev, STK1135_REG_SICTL, 0x00); /* set sensor address */ reg_w(gspca_dev, STK1135_REG_SICTL + 3, 0xba); /* disable alt 2-wire serial interface */ reg_w(gspca_dev, STK1135_REG_ASIC + 3, 0x00); stk1135_configure_clock(gspca_dev); /* read sensor ID */ sd->sensor_page = 0xff; sensor_id = sensor_read(gspca_dev, 0x000); switch (sensor_id) { case 0x148c: sensor_name = "MT9M112"; break; default: sensor_name = "unknown"; } pr_info("Detected sensor type %s (0x%x)\n", sensor_name, sensor_id); stk1135_camera_disable(gspca_dev); return gspca_dev->usb_err; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 width, height; /* enable sensor (GPIO5) */ reg_w(gspca_dev, STK1135_REG_GCTRL, (1 << 5)); stk1135_configure_clock(gspca_dev); /* set capture start position X = 0, Y = 0 */ reg_w(gspca_dev, STK1135_REG_CISPO + 0, 0x00); reg_w(gspca_dev, STK1135_REG_CISPO + 1, 0x00); reg_w(gspca_dev, STK1135_REG_CISPO + 2, 0x00); reg_w(gspca_dev, STK1135_REG_CISPO + 3, 0x00); /* set capture end position */ width = gspca_dev->pixfmt.width; height = gspca_dev->pixfmt.height; reg_w(gspca_dev, STK1135_REG_CIEPO + 0, width & 0xff); reg_w(gspca_dev, STK1135_REG_CIEPO + 1, width >> 8); reg_w(gspca_dev, STK1135_REG_CIEPO + 2, height & 0xff); reg_w(gspca_dev, STK1135_REG_CIEPO + 3, height >> 8); /* set 8-bit mode */ reg_w(gspca_dev, STK1135_REG_SCTRL, 0x20); stk1135_configure_mt9m112(gspca_dev); /* enable capture */ reg_w_mask(gspca_dev, STK1135_REG_SCTRL, 0x80, 0x80); if (gspca_dev->usb_err >= 0) gspca_dbg(gspca_dev, D_STREAM, "camera started alt: 0x%02x\n", gspca_dev->alt); sd->pkt_seq = 0; return
gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct usb_device *dev = gspca_dev->dev; usb_set_interface(dev, gspca_dev->iface, 0); stk1135_camera_disable(gspca_dev); gspca_dbg(gspca_dev, D_STREAM, "camera stopped\n"); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; int skip = sizeof(struct stk1135_pkt_header); bool flip; enum gspca_packet_type pkt_type = INTER_PACKET; struct stk1135_pkt_header *hdr = (void *)data; u8 seq; if (len < 4) { gspca_dbg(gspca_dev, D_PACK, "received short packet (less than 4 bytes)\n"); return; } /* GPIO 8 is flip sensor (1 = normal position, 0 = flipped to back) */ flip = !(le16_to_cpu(hdr->gpio) & (1 << 8)); /* it's a switch, needs software debounce */ if (sd->flip_status != flip) sd->flip_debounce++; else sd->flip_debounce = 0; /* check sequence number (not present in new frame packets) */ if (!(hdr->flags & STK1135_HDR_FRAME_START)) { seq = hdr->seq & STK1135_HDR_SEQ_MASK; if (seq != sd->pkt_seq) { gspca_dbg(gspca_dev, D_PACK, "received out-of-sequence packet\n"); /* resync sequence and discard packet */ sd->pkt_seq = seq; gspca_dev->last_packet_type = DISCARD_PACKET; return; } } sd->pkt_seq++; if (sd->pkt_seq > STK1135_HDR_SEQ_MASK) sd->pkt_seq = 0; if (len == sizeof(struct stk1135_pkt_header)) return; if (hdr->flags & STK1135_HDR_FRAME_START) { /* new frame */ skip = 8; /* the header is longer */ gspca_frame_add(gspca_dev, LAST_PACKET, data, 0); pkt_type = FIRST_PACKET; } gspca_frame_add(gspca_dev, pkt_type, data + skip, len - skip); } static void sethflip(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->flip_status) val = !val; sensor_write_mask(gspca_dev, 0x020, val ? 0x0002 : 0x0000 , 0x0002); } static void setvflip(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->flip_status) val = !val; sensor_write_mask(gspca_dev, 0x020, val ? 
0x0001 : 0x0000 , 0x0001); } static void stk1135_dq_callback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->flip_debounce > 100) { sd->flip_status = !sd->flip_status; sethflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip)); setvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->vflip)); } } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_HFLIP: sethflip(gspca_dev, ctrl->val); break; case V4L2_CID_VFLIP: setvflip(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 2); sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } static void stk1135_try_fmt(struct gspca_dev *gspca_dev, struct v4l2_format *fmt) { fmt->fmt.pix.width = clamp(fmt->fmt.pix.width, 32U, 1280U); fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 32U, 1024U); /* round up to even numbers */ fmt->fmt.pix.width += (fmt->fmt.pix.width & 1); fmt->fmt.pix.height += (fmt->fmt.pix.height & 1); fmt->fmt.pix.bytesperline = fmt->fmt.pix.width; fmt->fmt.pix.sizeimage = fmt->fmt.pix.width * fmt->fmt.pix.height; } static int stk1135_enum_framesizes(struct gspca_dev *gspca_dev, struct v4l2_frmsizeenum *fsize) { if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_SBGGR8) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = 32; fsize->stepwise.min_height = 32; fsize->stepwise.max_width = 1280; fsize->stepwise.max_height = 1024; fsize->stepwise.step_width = 2; fsize->stepwise.step_height = 2; return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = stk1135_dq_callback, .try_fmt = stk1135_try_fmt, .enum_framesizes = stk1135_enum_framesizes, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x174f, 0x6a31)}, /* ASUS laptop, MT9M112 sensor */ {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
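/*
 * Illustrative userspace sketch (not part of the driver): the stepwise
 * frame sizes advertised above (32x32 to 1280x1024, step 2, SBGGR8 only)
 * mean an application can request a non-default resolution with
 * VIDIOC_S_FMT; the driver clamps and rounds the request in
 * stk1135_try_fmt(). The device node path and the 800x600 request are
 * assumptions for illustration.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int example_request_custom_resolution(const char *devnode)
{
	struct v4l2_format fmt;
	int ret, fd = open(devnode, O_RDWR);	/* e.g. "/dev/video0" */

	if (fd < 0)
		return -1;
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 800;
	fmt.fmt.pix.height = 600;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;
	ret = ioctl(fd, VIDIOC_S_FMT, &fmt);	/* driver fills bytesperline/sizeimage */
	close(fd);
	return ret;
}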
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UTSNAME_H #define _LINUX_UTSNAME_H #include <linux/sched.h> #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/err.h> #include <uapi/linux/utsname.h> enum uts_proc { UTS_PROC_ARCH, UTS_PROC_OSTYPE, UTS_PROC_OSRELEASE, UTS_PROC_VERSION, UTS_PROC_HOSTNAME, UTS_PROC_DOMAINNAME, }; struct user_namespace; extern struct user_namespace init_user_ns; struct uts_namespace { struct new_utsname name; struct user_namespace *user_ns; struct ucounts *ucounts; struct ns_common ns; } __randomize_layout; extern struct uts_namespace init_uts_ns; #ifdef CONFIG_UTS_NS static inline void get_uts_ns(struct uts_namespace *ns) { refcount_inc(&ns->ns.count); } extern struct uts_namespace *copy_utsname(unsigned long flags, struct user_namespace *user_ns, struct uts_namespace *old_ns); extern void free_uts_ns(struct uts_namespace *ns); static inline void put_uts_ns(struct uts_namespace *ns) { if (refcount_dec_and_test(&ns->ns.count)) free_uts_ns(ns); } void uts_ns_init(void); #else static inline void get_uts_ns(struct uts_namespace *ns) { } static inline void put_uts_ns(struct uts_namespace *ns) { } static inline struct uts_namespace *copy_utsname(unsigned long flags, struct user_namespace *user_ns, struct uts_namespace *old_ns) { if (flags & CLONE_NEWUTS) return ERR_PTR(-EINVAL); return old_ns; } static inline void uts_ns_init(void) { } #endif #ifdef CONFIG_PROC_SYSCTL extern void uts_proc_notify(enum uts_proc proc); #else static inline void uts_proc_notify(enum uts_proc proc) { } #endif static inline struct new_utsname *utsname(void) { return &current->nsproxy->uts_ns->name; } static inline struct new_utsname *init_utsname(void) { return &init_uts_ns.name; } extern struct rw_semaphore uts_sem; #endif /* _LINUX_UTSNAME_H */ |
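/*
 * Illustrative sketch (not part of the header): code that wants a
 * consistent snapshot of a field of the current UTS namespace normally
 * copies it under uts_sem, roughly as below. The helper name is
 * hypothetical.
 */
static void example_copy_hostname(char *buf, size_t len)
{
	down_read(&uts_sem);
	strscpy(buf, utsname()->nodename, len);
	up_read(&uts_sem);
}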
// SPDX-License-Identifier: GPL-2.0-or-later /* SCTP kernel implementation * (C) Copyright Red Hat Inc. 2017 * * This file is part of the SCTP kernel implementation * * These functions manipulate sctp stream queue/scheduling. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> */ #include <linux/list.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/stream_sched.h> /* Priority handling * RFC DRAFT ndata section 3.2 */ static void sctp_sched_rr_unsched_all(struct sctp_stream *stream); static void sctp_sched_rr_next_stream(struct sctp_stream *stream) { struct list_head *pos; pos = stream->rr_next->rr_list.next; if (pos == &stream->rr_list) pos = pos->next; stream->rr_next = list_entry(pos, struct sctp_stream_out_ext, rr_list); } static void sctp_sched_rr_unsched(struct sctp_stream *stream, struct sctp_stream_out_ext *soute) { if (stream->rr_next == soute) /* Try to move to the next stream */ sctp_sched_rr_next_stream(stream); list_del_init(&soute->rr_list); /* If we have no other stream queued, clear next */ if (list_empty(&stream->rr_list)) stream->rr_next = NULL; } static void sctp_sched_rr_sched(struct sctp_stream *stream, struct sctp_stream_out_ext *soute) { if (!list_empty(&soute->rr_list)) /* Already scheduled.
*/ return; /* Schedule the stream */ list_add_tail(&soute->rr_list, &stream->rr_list); if (!stream->rr_next) stream->rr_next = soute; } static int sctp_sched_rr_set(struct sctp_stream *stream, __u16 sid, __u16 prio, gfp_t gfp) { return 0; } static int sctp_sched_rr_get(struct sctp_stream *stream, __u16 sid, __u16 *value) { return 0; } static int sctp_sched_rr_init(struct sctp_stream *stream) { INIT_LIST_HEAD(&stream->rr_list); stream->rr_next = NULL; return 0; } static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp) { INIT_LIST_HEAD(&SCTP_SO(stream, sid)->ext->rr_list); return 0; } static void sctp_sched_rr_free_sid(struct sctp_stream *stream, __u16 sid) { } static void sctp_sched_rr_enqueue(struct sctp_outq *q, struct sctp_datamsg *msg) { struct sctp_stream *stream; struct sctp_chunk *ch; __u16 sid; ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list); sid = sctp_chunk_stream_no(ch); stream = &q->asoc->stream; sctp_sched_rr_sched(stream, SCTP_SO(stream, sid)->ext); } static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q) { struct sctp_stream *stream = &q->asoc->stream; struct sctp_stream_out_ext *soute; struct sctp_chunk *ch = NULL; /* Bail out quickly if queue is empty */ if (list_empty(&q->out_chunk_list)) goto out; /* Find which chunk is next */ if (stream->out_curr) soute = stream->out_curr->ext; else soute = stream->rr_next; ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list); sctp_sched_dequeue_common(q, ch); out: return ch; } static void sctp_sched_rr_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch) { struct sctp_stream_out_ext *soute; __u16 sid; /* Last chunk on that msg, move to the next stream */ sid = sctp_chunk_stream_no(ch); soute = SCTP_SO(&q->asoc->stream, sid)->ext; sctp_sched_rr_next_stream(&q->asoc->stream); if (list_empty(&soute->outq)) sctp_sched_rr_unsched(&q->asoc->stream, soute); } static void sctp_sched_rr_sched_all(struct sctp_stream *stream) { struct sctp_association *asoc; struct sctp_stream_out_ext *soute; struct sctp_chunk *ch; asoc = container_of(stream, struct sctp_association, stream); list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) { __u16 sid; sid = sctp_chunk_stream_no(ch); soute = SCTP_SO(stream, sid)->ext; if (soute) sctp_sched_rr_sched(stream, soute); } } static void sctp_sched_rr_unsched_all(struct sctp_stream *stream) { struct sctp_stream_out_ext *soute, *tmp; list_for_each_entry_safe(soute, tmp, &stream->rr_list, rr_list) sctp_sched_rr_unsched(stream, soute); } static struct sctp_sched_ops sctp_sched_rr = { .set = sctp_sched_rr_set, .get = sctp_sched_rr_get, .init = sctp_sched_rr_init, .init_sid = sctp_sched_rr_init_sid, .free_sid = sctp_sched_rr_free_sid, .enqueue = sctp_sched_rr_enqueue, .dequeue = sctp_sched_rr_dequeue, .dequeue_done = sctp_sched_rr_dequeue_done, .sched_all = sctp_sched_rr_sched_all, .unsched_all = sctp_sched_rr_unsched_all, }; void sctp_sched_ops_rr_init(void) { sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr); } |
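/*
 * Illustrative userspace sketch (not part of the file): once this
 * scheduler is registered under SCTP_SS_RR, an application selects it
 * per socket/association with the SCTP_STREAM_SCHEDULER socket option.
 * The snippet below is an assumption about typical usage; error
 * handling and association-id selection are simplified.
 */
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/sctp.h>

int example_select_rr_scheduler(int sctp_fd)
{
	struct sctp_assoc_value av = {
		.assoc_id = 0,			/* current/future associations */
		.assoc_value = SCTP_SS_RR,
	};

	return setsockopt(sctp_fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
			  &av, sizeof(av));
}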
4883 4880 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 | // SPDX-License-Identifier: GPL-2.0 /* * Device physical location support * * Author: Won Chung <wonchung@google.com> */ #include <linux/acpi.h> #include <linux/sysfs.h> #include <linux/string_choices.h> #include "physical_location.h" bool dev_add_physical_location(struct device *dev) { struct acpi_pld_info *pld; if (!has_acpi_companion(dev)) return false; if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld)) return false; dev->physical_location = kzalloc(sizeof(*dev->physical_location), GFP_KERNEL); if (!dev->physical_location) { ACPI_FREE(pld); return false; } dev->physical_location->panel = pld->panel; dev->physical_location->vertical_position = pld->vertical_position; dev->physical_location->horizontal_position = pld->horizontal_position; dev->physical_location->dock = pld->dock; dev->physical_location->lid = pld->lid; ACPI_FREE(pld); return true; } static ssize_t panel_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *panel; switch (dev->physical_location->panel) { case DEVICE_PANEL_TOP: panel = "top"; break; case DEVICE_PANEL_BOTTOM: panel = "bottom"; break; case DEVICE_PANEL_LEFT: panel = "left"; break; case DEVICE_PANEL_RIGHT: panel = "right"; break; case DEVICE_PANEL_FRONT: panel = "front"; break; case DEVICE_PANEL_BACK: panel = "back"; break; default: panel = "unknown"; } return sysfs_emit(buf, "%s\n", panel); } static DEVICE_ATTR_RO(panel); static ssize_t vertical_position_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *vertical_position; switch (dev->physical_location->vertical_position) { case DEVICE_VERT_POS_UPPER: vertical_position = "upper"; break; case DEVICE_VERT_POS_CENTER: vertical_position = "center"; break; case DEVICE_VERT_POS_LOWER: vertical_position = "lower"; break; default: vertical_position = "unknown"; } return sysfs_emit(buf, "%s\n", vertical_position); } static DEVICE_ATTR_RO(vertical_position); static ssize_t horizontal_position_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *horizontal_position; switch (dev->physical_location->horizontal_position) { case DEVICE_HORI_POS_LEFT: horizontal_position = "left"; break; case DEVICE_HORI_POS_CENTER: horizontal_position = "center"; break; case DEVICE_HORI_POS_RIGHT: horizontal_position = "right"; break; default: horizontal_position = "unknown"; } return sysfs_emit(buf, "%s\n", horizontal_position); } static DEVICE_ATTR_RO(horizontal_position); static ssize_t dock_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", str_yes_no(dev->physical_location->dock)); } static DEVICE_ATTR_RO(dock); static ssize_t lid_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", str_yes_no(dev->physical_location->lid)); } static DEVICE_ATTR_RO(lid); static struct attribute *dev_attr_physical_location[] = { &dev_attr_panel.attr, &dev_attr_vertical_position.attr, &dev_attr_horizontal_position.attr, &dev_attr_dock.attr, &dev_attr_lid.attr, NULL, }; const struct 
attribute_group dev_attr_physical_location_group = { .name = "physical_location", .attrs = dev_attr_physical_location, }; |
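/*
 * Illustrative userspace sketch (not part of the file): the group above
 * appears as a "physical_location" directory with panel,
 * vertical_position, horizontal_position, dock and lid attributes under
 * the device's sysfs directory. A caller might read one of them like
 * this; the exact sysfs path depends on the device and is passed in.
 */
#include <stdio.h>

static int example_print_location_attr(const char *path)
{
	char value[32] = "";
	FILE *f = fopen(path, "r");	/* e.g. ".../physical_location/panel" */

	if (!f)
		return -1;
	if (fscanf(f, "%31s", value) == 1)
		printf("%s: %s\n", path, value);
	fclose(f);
	return 0;
}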
62 664 475 153 176 2 37 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 | /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Cryptographic API. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> */ #ifndef _CRYPTO_INTERNAL_H #define _CRYPTO_INTERNAL_H #include <crypto/algapi.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/jump_label.h> #include <linux/list.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/numa.h> #include <linux/refcount.h> #include <linux/rwsem.h> #include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/types.h> struct crypto_instance; struct crypto_template; struct crypto_larval { struct crypto_alg alg; struct crypto_alg *adult; struct completion completion; u32 mask; bool test_started; }; struct crypto_type { unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); unsigned int (*extsize)(struct crypto_alg *alg); int (*init_tfm)(struct crypto_tfm *tfm); void (*show)(struct seq_file *m, struct crypto_alg *alg); int (*report)(struct sk_buff *skb, struct crypto_alg *alg); void (*free)(struct crypto_instance *inst); void (*destroy)(struct crypto_alg *alg); unsigned int type; unsigned int maskclear; unsigned int maskset; unsigned int tfmsize; }; enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, CRYPTOA_TYPE, __CRYPTOA_MAX, }; #define CRYPTOA_MAX (__CRYPTOA_MAX - 1) /* Maximum number of (rtattr) parameters for each template. 
*/ #define CRYPTO_MAX_ATTRS 32 extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; extern struct blocking_notifier_head crypto_chain; int alg_test(const char *driver, const char *alg, u32 type, u32 mask); #if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || \ IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) static inline bool crypto_boot_test_finished(void) { return true; } static inline void set_crypto_boot_test_finished(void) { } #else DECLARE_STATIC_KEY_FALSE(__crypto_boot_test_finished); static inline bool crypto_boot_test_finished(void) { return static_branch_likely(&__crypto_boot_test_finished); } static inline void set_crypto_boot_test_finished(void) { static_branch_enable(&__crypto_boot_test_finished); } #endif /* !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || * IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) */ #ifdef CONFIG_PROC_FS void __init crypto_init_proc(void); void __exit crypto_exit_proc(void); #else static inline void crypto_init_proc(void) { } static inline void crypto_exit_proc(void) { } #endif static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) { return alg->cra_ctxsize; } static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg) { return alg->cra_ctxsize; } struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); void crypto_schedule_test(struct crypto_larval *larval); void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg); void crypto_remove_final(struct list_head *list); void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type, u32 mask, gfp_t gfp); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); void *crypto_create_tfm_node(struct crypto_alg *alg, const struct crypto_type *frontend, int node); void *crypto_clone_tfm(const struct crypto_type *frontend, struct crypto_tfm *otfm); void crypto_destroy_alg(struct crypto_alg *alg); static inline void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend) { return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE); } struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask); void *crypto_alloc_tfm_node(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask, int node); static inline void *crypto_alloc_tfm(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask) { return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE); } int crypto_probing_notify(unsigned long val, void *v); unsigned int crypto_alg_extsize(struct crypto_alg *alg); int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, u32 type, u32 mask); static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) { refcount_inc(&alg->cra_refcnt); return alg; } static inline void crypto_alg_put(struct crypto_alg *alg) { if (refcount_dec_and_test(&alg->cra_refcnt)) crypto_destroy_alg(alg); } static inline int crypto_tmpl_get(struct crypto_template *tmpl) { return try_module_get(tmpl->module); } static inline void crypto_tmpl_put(struct crypto_template *tmpl) { module_put(tmpl->module); } static inline int crypto_is_larval(struct crypto_alg *alg) { return alg->cra_flags & CRYPTO_ALG_LARVAL; } static 
inline int crypto_is_dead(struct crypto_alg *alg) { return alg->cra_flags & CRYPTO_ALG_DEAD; } static inline int crypto_is_moribund(struct crypto_alg *alg) { return alg->cra_flags & (CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING); } static inline void crypto_notify(unsigned long val, void *v) { blocking_notifier_call_chain(&crypto_chain, val, v); } static inline void crypto_yield(u32 flags) { if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) cond_resched(); } static inline int crypto_is_test_larval(struct crypto_larval *larval) { return larval->alg.cra_driver_name[0]; } static inline struct crypto_tfm *crypto_tfm_get(struct crypto_tfm *tfm) { return refcount_inc_not_zero(&tfm->refcnt) ? tfm : ERR_PTR(-EOVERFLOW); } #endif /* _CRYPTO_INTERNAL_H */ |
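/*
 * Illustrative sketch (not part of the header): crypto_yield() above is
 * meant to be called from long-running loops so the CPU is only
 * voluntarily rescheduled when the request flags allow sleeping. A
 * hypothetical chunked walk could use it as follows.
 */
static void example_process_in_chunks(const u8 *data, unsigned int len,
				      u32 req_flags)
{
	while (len) {
		unsigned int chunk = min(len, 4096u);

		/* ... transform one chunk of @data here ... */
		data += chunk;
		len -= chunk;

		/* cond_resched() only if CRYPTO_TFM_REQ_MAY_SLEEP is set */
		crypto_yield(req_flags);
	}
}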
2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 | /* SPDX-License-Identifier: GPL-2.0 */ /* * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2021 Intel Corporation */ #include <linux/unaligned.h> void eir_create(struct hci_dev *hdev, u8 *data); u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len); u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len); u8 eir_append_service_data(u8 *eir, u16 eir_len, u16 uuid, u8 *data, u8 data_len); static inline u16 eir_precalc_len(u8 data_len) { return sizeof(u8) * 2 + data_len; } static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len) { eir[eir_len++] = sizeof(type) + data_len; eir[eir_len++] = type; memcpy(&eir[eir_len], data, data_len); eir_len += data_len; return eir_len; } static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data) { eir[eir_len++] = sizeof(type) + sizeof(data); eir[eir_len++] = type; put_unaligned_le16(data, &eir[eir_len]); eir_len += sizeof(data); return eir_len; } static inline u16 eir_skb_put_data(struct sk_buff *skb, u8 type, u8 *data, u8 data_len) { u8 *eir; u16 eir_len; eir_len = eir_precalc_len(data_len); eir = skb_put(skb, eir_len); WARN_ON(sizeof(type) + data_len > U8_MAX); eir[0] = sizeof(type) + data_len; eir[1] = type; memcpy(&eir[2], data, data_len); return eir_len; } static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, size_t *data_len) { size_t parsed = 0; if (eir_len < 2) return NULL; while (parsed < eir_len - 1) { u8 field_len = eir[0]; if (field_len == 0) break; parsed += field_len + 1; if (parsed > eir_len) break; if (eir[1] != type) { eir += field_len + 1; continue; } /* Zero length data */ if (field_len == 1) return NULL; if (data_len) *data_len = field_len - 1; return &eir[2]; } return NULL; } void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len); |
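/*
 * Illustrative sketch (not part of the header): building a small
 * advertising-data buffer with the append helpers above and finding the
 * field again with eir_get_data(). The AD type 0x19 (Appearance) and
 * the value 0x0341 are arbitrary choices for the example.
 */
static void example_eir_roundtrip(void)
{
	u8 buf[31];		/* typical legacy advertising payload size */
	size_t data_len;
	u16 len = 0;
	u8 *data;

	len = eir_append_le16(buf, len, 0x19, 0x0341);

	data = eir_get_data(buf, len, 0x19, &data_len);
	if (data && data_len == sizeof(u16))
		pr_debug("appearance: 0x%4.4x\n", get_unaligned_le16(data));
}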
2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 | // SPDX-License-Identifier: GPL-2.0-or-later /* * MPLS GSO Support * * Authors: Simon Horman (horms@verge.net.au) * * Based on: GSO portions of net/ipv4/gre.c */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/err.h> #include <linux/module.h> #include <linux/netdev_features.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/gso.h> #include <net/mpls.h> static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; netdev_features_t mpls_features; u16 mac_len = skb->mac_len; __be16 mpls_protocol; unsigned int mpls_hlen; if (!skb_inner_network_header_was_set(skb)) goto out; skb_reset_network_header(skb); mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb); if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN)) goto out; if (unlikely(!pskb_may_pull(skb, mpls_hlen))) goto out; /* Setup inner SKB. */ mpls_protocol = skb->protocol; skb->protocol = skb->inner_protocol; __skb_pull(skb, mpls_hlen); skb->mac_len = 0; skb_reset_mac_header(skb); /* Segment inner packet. */ mpls_features = skb->dev->mpls_features & features; segs = skb_mac_gso_segment(skb, mpls_features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, mpls_protocol, mpls_hlen, mac_offset, mac_len); goto out; } skb = segs; mpls_hlen += mac_len; do { skb->mac_len = mac_len; skb->protocol = mpls_protocol; skb_reset_inner_network_header(skb); __skb_push(skb, mpls_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); } while ((skb = skb->next)); out: return segs; } static struct packet_offload mpls_mc_offload __read_mostly = { .type = cpu_to_be16(ETH_P_MPLS_MC), .priority = 15, .callbacks = { .gso_segment = mpls_gso_segment, }, }; static struct packet_offload mpls_uc_offload __read_mostly = { .type = cpu_to_be16(ETH_P_MPLS_UC), .priority = 15, .callbacks = { .gso_segment = mpls_gso_segment, }, }; static int __init mpls_gso_init(void) { pr_info("MPLS GSO support\n"); dev_add_offload(&mpls_uc_offload); dev_add_offload(&mpls_mc_offload); return 0; } static void __exit mpls_gso_exit(void) { dev_remove_offload(&mpls_uc_offload); dev_remove_offload(&mpls_mc_offload); } module_init(mpls_gso_init); module_exit(mpls_gso_exit); MODULE_DESCRIPTION("MPLS GSO support"); MODULE_AUTHOR("Simon Horman <horms@verge.net.au>"); MODULE_LICENSE("GPL"); |
901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 | // SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for some logitech "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2010 Hendrik Iben */ /* */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/usb.h> #include <linux/wait.h> #include "usbhid/usbhid.h" #include "hid-ids.h" #include "hid-lg.h" #include "hid-lg4ff.h" #define LG_RDESC 0x001 #define LG_BAD_RELATIVE_KEYS 0x002 #define LG_DUPLICATE_USAGES 0x004 #define LG_EXPANDED_KEYMAP 0x010 #define LG_IGNORE_DOUBLED_WHEEL 0x020 #define LG_WIRELESS 0x040 #define LG_INVERT_HWHEEL 0x080 #define LG_NOGET 0x100 #define LG_FF 0x200 #define LG_FF2 0x400 #define LG_RDESC_REL_ABS 0x800 #define LG_FF3 0x1000 #define LG_FF4 0x2000 /* Size of the original descriptors of the Driving Force (and Pro) wheels */ #define DF_RDESC_ORIG_SIZE 130 #define DFP_RDESC_ORIG_SIZE 97 #define FV_RDESC_ORIG_SIZE 130 #define MOMO_RDESC_ORIG_SIZE 87 #define MOMO2_RDESC_ORIG_SIZE 87 #define FFG_RDESC_ORIG_SIZE 85 #define FG_RDESC_ORIG_SIZE 82 /* Fixed report descriptors for Logitech Driving Force (and Pro) * wheel controllers * * The original descriptors hide the separate throttle and brake axes in * a custom vendor usage page, providing only a combined value as * GenericDesktop.Y. * These descriptors remove the combined Y axis and instead report * separate throttle (Y) and brake (RZ). 
*/ static const __u8 df_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x14, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x34, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0C, /* Report Count (12), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage (Buttons), */ 0x19, 0x01, /* Usage Minimum (1), */ 0x29, 0x0c, /* Usage Maximum (12), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */ 0x09, 0x01, /* Usage (?: 1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x02, /* Input (Variable), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x75, 0x04, /* Report Size (4), */ 0x65, 0x14, /* Unit (Degrees), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x81, 0x42, /* Input (Variable, Null State), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x04, /* Report Count (4), */ 0x65, 0x00, /* Unit (none), */ 0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */ 0x09, 0x01, /* Usage (?: 1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x35, /* Usage (Rz), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x07, /* Report Count (7), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x03, /* Usage (?: 3), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 dfp_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0E, /* Report Size (14), */ 0x14, /* Logical Minimum (0), */ 0x26, 0xFF, 0x3F, /* Logical Maximum (16383), */ 0x34, /* Physical Minimum (0), */ 0x46, 0xFF, 0x3F, /* Physical Maximum (16383), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0E, /* Report Count (14), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0E, /* Usage Maximum (0Eh), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x04, /* Report Size (4), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x65, 0x14, /* Unit (Degrees), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x81, 0x42, /* Input (Variable, 
Nullstate), */ 0x65, 0x00, /* Unit, */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x35, /* Usage (Rz), */ 0x81, 0x02, /* Input (Variable), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 fv_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0C, /* Report Count (12), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0C, /* Usage Maximum (0Ch), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x02, /* Usage (02h), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x75, 0x04, /* Report Size (4), */ 0x65, 0x14, /* Unit (Degrees), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x81, 0x42, /* Input (Variable, Null State), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x04, /* Report Count (4), */ 0x65, 0x00, /* Unit, */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x07, /* Report Count (7), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x03, /* Usage (03h), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 momo_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* 
Input (Variable), */ 0x95, 0x08, /* Report Count (8), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x08, /* Usage Maximum (08h), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x75, 0x0E, /* Report Size (14), */ 0x95, 0x01, /* Report Count (1), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x00, /* Usage (00h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 momo2_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0A, /* Report Count (10), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0A, /* Usage Maximum (0Ah), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x00, /* Usage (00h), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x00, /* Usage (00h), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 ffg_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystik), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x06, /* Report Count (6), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), 
*/ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x06, /* Usage Maximum (06h), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 fg_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystik), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0xA4, /* Push, */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x95, 0x06, /* Report Count (6), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x06, /* Usage Maximum (06h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0xB4, /* Pop, */ 0x81, 0x02, /* Input (Constant), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x04, /* Report Count (4), */ 0x09, 0x02, /* Usage (02h), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection, */ }; /* * Certain Logitech keyboards send in report #3 keys which are far * above the logical maximum described in descriptor. 
This extends * the original value of 0x28c of logical maximum to 0x104d */ static const __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) { hid_info(hdev, "fixing up Logitech keyboard report descriptor\n"); rdesc[84] = rdesc[89] = 0x4d; rdesc[85] = rdesc[90] = 0x10; } if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 && rdesc[32] == 0x81 && rdesc[33] == 0x06 && rdesc[49] == 0x81 && rdesc[50] == 0x06) { hid_info(hdev, "fixing up rel/abs in Logitech report descriptor\n"); rdesc[33] = rdesc[50] = 0x02; } switch (hdev->product) { case USB_DEVICE_ID_LOGITECH_WINGMAN_FG: if (*rsize == FG_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Wingman Formula GP report descriptor\n"); *rsize = sizeof(fg_rdesc_fixed); return fg_rdesc_fixed; } else { hid_info(hdev, "rdesc size test failed for formula gp\n"); } break; case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG: if (*rsize == FFG_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Wingman Formula Force GP report descriptor\n"); *rsize = sizeof(ffg_rdesc_fixed); return ffg_rdesc_fixed; } break; /* Several wheels report as this id when operating in emulation mode. */ case USB_DEVICE_ID_LOGITECH_WHEEL: if (*rsize == DF_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force report descriptor\n"); *rsize = sizeof(df_rdesc_fixed); return df_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: if (*rsize == MOMO_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Force (Red) report descriptor\n"); *rsize = sizeof(momo_rdesc_fixed); return momo_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: if (*rsize == MOMO2_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Racing Force (Black) report descriptor\n"); *rsize = sizeof(momo2_rdesc_fixed); return momo2_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: if (*rsize == FV_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Formula Vibration report descriptor\n"); *rsize = sizeof(fv_rdesc_fixed); return fv_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: if (*rsize == DFP_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force Pro report descriptor\n"); *rsize = sizeof(dfp_rdesc_fixed); return dfp_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_WII_WHEEL: if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B && rdesc[47] == 0x05 && rdesc[48] == 0x09) { hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n"); rdesc[41] = 0x05; rdesc[42] = 0x09; rdesc[47] = 0x95; rdesc[48] = 0x0B; } break; } return rdesc; } #define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int lg_ultrax_remote_mapping(struct hid_input *hi, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) return 0; set_bit(EV_REP, hi->input->evbit); switch (usage->hid & HID_USAGE) { /* Reported on Logitech Ultra X Media Remote */ case 0x004: lg_map_key_clear(KEY_AGAIN); break; case 0x00d: lg_map_key_clear(KEY_HOME); break; case 0x024: lg_map_key_clear(KEY_SHUFFLE); break; case 0x025: lg_map_key_clear(KEY_TV); break; case 0x026: lg_map_key_clear(KEY_MENU); break; case 0x031: lg_map_key_clear(KEY_AUDIO); break; case 0x032: lg_map_key_clear(KEY_TEXT); break; case 0x033: lg_map_key_clear(KEY_LAST); break; case 0x047: 
lg_map_key_clear(KEY_MP3); break; case 0x048: lg_map_key_clear(KEY_DVD); break; case 0x049: lg_map_key_clear(KEY_MEDIA); break; case 0x04a: lg_map_key_clear(KEY_VIDEO); break; case 0x04b: lg_map_key_clear(KEY_ANGLE); break; case 0x04c: lg_map_key_clear(KEY_LANGUAGE); break; case 0x04d: lg_map_key_clear(KEY_SUBTITLE); break; case 0x051: lg_map_key_clear(KEY_RED); break; case 0x052: lg_map_key_clear(KEY_CLOSE); break; default: return 0; } return 1; } static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case 0x1001: lg_map_key_clear(KEY_MESSENGER); break; case 0x1003: lg_map_key_clear(KEY_SOUND); break; case 0x1004: lg_map_key_clear(KEY_VIDEO); break; case 0x1005: lg_map_key_clear(KEY_AUDIO); break; case 0x100a: lg_map_key_clear(KEY_DOCUMENTS); break; /* The following two entries are Playlist 1 and 2 on the MX3200 */ case 0x100f: lg_map_key_clear(KEY_FN_1); break; case 0x1010: lg_map_key_clear(KEY_FN_2); break; case 0x1011: lg_map_key_clear(KEY_PREVIOUSSONG); break; case 0x1012: lg_map_key_clear(KEY_NEXTSONG); break; case 0x1013: lg_map_key_clear(KEY_CAMERA); break; case 0x1014: lg_map_key_clear(KEY_MESSENGER); break; case 0x1015: lg_map_key_clear(KEY_RECORD); break; case 0x1016: lg_map_key_clear(KEY_PLAYER); break; case 0x1017: lg_map_key_clear(KEY_EJECTCD); break; case 0x1018: lg_map_key_clear(KEY_MEDIA); break; case 0x1019: lg_map_key_clear(KEY_PROG1); break; case 0x101a: lg_map_key_clear(KEY_PROG2); break; case 0x101b: lg_map_key_clear(KEY_PROG3); break; case 0x101c: lg_map_key_clear(KEY_CYCLEWINDOWS); break; case 0x101f: lg_map_key_clear(KEY_ZOOMIN); break; case 0x1020: lg_map_key_clear(KEY_ZOOMOUT); break; case 0x1021: lg_map_key_clear(KEY_ZOOMRESET); break; case 0x1023: lg_map_key_clear(KEY_CLOSE); break; case 0x1027: lg_map_key_clear(KEY_MENU); break; /* this one is marked as 'Rotate' */ case 0x1028: lg_map_key_clear(KEY_ANGLE); break; case 0x1029: lg_map_key_clear(KEY_SHUFFLE); break; case 0x102a: lg_map_key_clear(KEY_BACK); break; case 0x102b: lg_map_key_clear(KEY_CYCLEWINDOWS); break; case 0x102d: lg_map_key_clear(KEY_WWW); break; /* The following two are 'Start/answer call' and 'End/reject call' on the MX3200 */ case 0x1031: lg_map_key_clear(KEY_OK); break; case 0x1032: lg_map_key_clear(KEY_CANCEL); break; case 0x1041: lg_map_key_clear(KEY_BATTERY); break; case 0x1042: lg_map_key_clear(KEY_WORDPROCESSOR); break; case 0x1043: lg_map_key_clear(KEY_SPREADSHEET); break; case 0x1044: lg_map_key_clear(KEY_PRESENTATION); break; case 0x1045: lg_map_key_clear(KEY_UNDO); break; case 0x1046: lg_map_key_clear(KEY_REDO); break; case 0x1047: lg_map_key_clear(KEY_PRINT); break; case 0x1048: lg_map_key_clear(KEY_SAVE); break; case 0x1049: lg_map_key_clear(KEY_PROG1); break; case 0x104a: lg_map_key_clear(KEY_PROG2); break; case 0x104b: lg_map_key_clear(KEY_PROG3); break; case 0x104c: lg_map_key_clear(KEY_PROG4); break; default: return 0; } return 1; } static int lg_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { /* extended mapping for certain Logitech hardware (Logitech cordless desktop LX500) */ static const u8 e_keymap[] = { 0,216, 0,213,175,156, 0, 0, 0, 0, 144, 0, 0, 0, 0, 0, 0, 0, 0,212, 174,167,152,161,112, 0, 0, 0,154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0,183,184,185,186,187, 188,189,190,191,192,193,194, 0, 0, 0 }; struct lg_drv_data *drv_data = hid_get_drvdata(hdev); unsigned int hid = usage->hid; if (hdev->product == USB_DEVICE_ID_LOGITECH_RECEIVER && lg_ultrax_remote_mapping(hi, usage, bit, max)) return 1; if ((drv_data->quirks & LG_WIRELESS) && lg_wireless_mapping(hi, usage, bit, max)) return 1; if ((hid & HID_USAGE_PAGE) != HID_UP_BUTTON) return 0; hid &= HID_USAGE; /* Special handling for Logitech Cordless Desktop */ if (field->application == HID_GD_MOUSE) { if ((drv_data->quirks & LG_IGNORE_DOUBLED_WHEEL) && (hid == 7 || hid == 8)) return -1; } else { if ((drv_data->quirks & LG_EXPANDED_KEYMAP) && hid < ARRAY_SIZE(e_keymap) && e_keymap[hid] != 0) { hid_map_usage(hi, usage, bit, max, EV_KEY, e_keymap[hid]); return 1; } } return 0; } static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_BAD_RELATIVE_KEYS) && usage->type == EV_KEY && (field->flags & HID_MAIN_ITEM_RELATIVE)) field->flags &= ~HID_MAIN_ITEM_RELATIVE; if ((drv_data->quirks & LG_DUPLICATE_USAGES) && (usage->type == EV_KEY || usage->type == EV_REL || usage->type == EV_ABS)) clear_bit(usage->code, *bit); /* Ensure that Logitech wheels are not given a default fuzz/flat value */ if (usage->type == EV_ABS && (usage->code == ABS_X || usage->code == ABS_Y || usage->code == ABS_Z || usage->code == ABS_RZ)) { switch (hdev->product) { case USB_DEVICE_ID_LOGITECH_G29_WHEEL: case USB_DEVICE_ID_LOGITECH_WINGMAN_FG: case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG: case USB_DEVICE_ID_LOGITECH_WHEEL: case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: case USB_DEVICE_ID_LOGITECH_G25_WHEEL: case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL: case USB_DEVICE_ID_LOGITECH_G27_WHEEL: case USB_DEVICE_ID_LOGITECH_WII_WHEEL: case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: field->application = HID_GD_MULTIAXIS; break; default: break; } } return 0; } static int lg_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_INVERT_HWHEEL) && usage->code == REL_HWHEEL) { input_event(field->hidinput->input, usage->type, usage->code, -value); return 1; } if (drv_data->quirks & LG_FF4) { return lg4ff_adjust_input_event(hdev, field, usage, value, drv_data); } return 0; } static int lg_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *rd, int size) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if (drv_data->quirks & LG_FF4) return lg4ff_raw_event(hdev, report, rd, size, drv_data); return 0; } static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct usb_interface *iface; __u8 iface_num; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct lg_drv_data *drv_data; int ret; if (!hid_is_usb(hdev)) return -EINVAL; iface = to_usb_interface(hdev->dev.parent); iface_num = iface->cur_altsetting->desc.bInterfaceNumber; /* G29 only work with the 1st interface */ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && (iface_num != 0)) { dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); return -ENODEV; } drv_data = kzalloc(sizeof(struct lg_drv_data), GFP_KERNEL); if (!drv_data) { hid_err(hdev, "Insufficient memory, cannot allocate driver data\n"); return -ENOMEM; } drv_data->quirks = id->driver_data; 
hid_set_drvdata(hdev, (void *)drv_data); if (drv_data->quirks & LG_NOGET) hdev->quirks |= HID_QUIRK_NOGET; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } if (drv_data->quirks & (LG_FF | LG_FF2 | LG_FF3 | LG_FF4)) connect_mask &= ~HID_CONNECT_FF; ret = hid_hw_start(hdev, connect_mask); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } /* Setup wireless link with Logitech Wii wheel */ if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { static const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_stop; } ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret >= 0) { /* insert a little delay of 10 jiffies ~ 40ms */ wait_queue_head_t wait; init_waitqueue_head (&wait); wait_event_interruptible_timeout(wait, 0, msecs_to_jiffies(40)); /* Select random Address */ buf[1] = 0xB2; get_random_bytes(&buf[2], 2); ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); } kfree(buf); } if (drv_data->quirks & LG_FF) ret = lgff_init(hdev); else if (drv_data->quirks & LG_FF2) ret = lg2ff_init(hdev); else if (drv_data->quirks & LG_FF3) ret = lg3ff_init(hdev); else if (drv_data->quirks & LG_FF4) ret = lg4ff_init(hdev); if (ret) goto err_stop; return 0; err_stop: hid_hw_stop(hdev); err_free: kfree(drv_data); return ret; } static void lg_remove(struct hid_device *hdev) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if (drv_data->quirks & LG_FF4) lg4ff_deinit(hdev); hid_hw_stop(hdev); kfree(drv_data); } static const struct hid_device_id lg_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER), .driver_data = LG_RDESC | LG_WIRELESS }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER), .driver_data = LG_BAD_RELATIVE_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP), .driver_data = LG_DUPLICATE_USAGES }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD), .driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500), .driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D), .driver_data = LG_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION), .driver_data = LG_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD), .driver_data = LG_FF2 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 
		USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL),
		.driver_data = LG_FF2 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
		.driver_data = LG_FF4 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
		.driver_data = LG_FF4 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL),
		.driver_data = LG_FF4 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL),
		.driver_data = LG_NOGET | LG_FF4 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
		.driver_data = LG_FF4 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FG),
		.driver_data = LG_NOGET },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
		.driver_data = LG_NOGET | LG_FF4 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
		.driver_data = LG_NOGET | LG_FF2 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
		.driver_data = LG_FF3 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
		.driver_data = LG_RDESC_REL_ABS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER),
		.driver_data = LG_RDESC_REL_ABS },
	{ }
};

MODULE_DEVICE_TABLE(hid, lg_devices);

static struct hid_driver lg_driver = {
	.name = "logitech",
	.id_table = lg_devices,
	.report_fixup = lg_report_fixup,
	.input_mapping = lg_input_mapping,
	.input_mapped = lg_input_mapped,
	.event = lg_event,
	.raw_event = lg_raw_event,
	.probe = lg_probe,
	.remove = lg_remove,
};
module_hid_driver(lg_driver);

#ifdef CONFIG_LOGIWHEELS_FF
int lg4ff_no_autoswitch = 0;
module_param_named(lg4ff_no_autoswitch, lg4ff_no_autoswitch, int, S_IRUGO);
MODULE_PARM_DESC(lg4ff_no_autoswitch, "Do not switch multimode wheels to their native mode automatically");
#endif

MODULE_DESCRIPTION("HID driver for some logitech \"special\" devices");
MODULE_LICENSE("GPL");
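The byte-level patching done by lg_report_fixup() for the LG_RDESC quirk is easier to follow with the HID item encoding spelled out: 0x26, 0x8c, 0x02 is a two-byte Logical Maximum item whose operand is stored little-endian (0x028c), and the driver overwrites only the operand bytes with 0x4d, 0x10 so the item now reads 0x104d. The short standalone sketch below (plain userspace C, not driver code; the three-byte array is illustrative) reproduces that in-place patch and the decoding.

#include <stdio.h>
#include <stdint.h>

/* Illustrative fragment of a HID report descriptor: a two-byte
 * Logical Maximum item (tag 0x26) with a little-endian operand. */
static uint8_t rdesc[] = { 0x26, 0x8c, 0x02 };

static unsigned int logical_max(const uint8_t *item)
{
	/* Operand bytes follow the 0x26 tag, least significant byte first. */
	return item[1] | (item[2] << 8);
}

int main(void)
{
	printf("before: 0x%04x\n", logical_max(rdesc));	/* 0x028c */

	/* Same style of in-place patch as the LG_RDESC quirk: only the
	 * operand bytes change, the item tag stays 0x26. */
	rdesc[1] = 0x4d;
	rdesc[2] = 0x10;

	printf("after:  0x%04x\n", logical_max(rdesc));	/* 0x104d */
	return 0;
}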
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>
#include <net/tc_wrapper.h>

static struct tc_action_ops act_gate_ops;

static ktime_t gate_get_time(struct tcf_gate *gact)
{
	ktime_t mono = ktime_get();

	switch (gact->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, gact->tk_offset);
	}

	return KTIME_MAX;
}

static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
	struct tcf_gate_params *param = &gact->param;
	ktime_t now, base, cycle;
	u64 n;

	base = ns_to_ktime(param->tcfg_basetime);
	now = gate_get_time(gact);

	if (ktime_after(base, now)) {
*start = base; return; } cycle = param->tcfg_cycletime; n = div64_u64(ktime_sub_ns(now, base), cycle); *start = ktime_add_ns(base, (n + 1) * cycle); } static void gate_start_timer(struct tcf_gate *gact, ktime_t start) { ktime_t expires; expires = hrtimer_get_expires(&gact->hitimer); if (expires == 0) expires = KTIME_MAX; start = min_t(ktime_t, start, expires); hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT); } static enum hrtimer_restart gate_timer_func(struct hrtimer *timer) { struct tcf_gate *gact = container_of(timer, struct tcf_gate, hitimer); struct tcf_gate_params *p = &gact->param; struct tcfg_gate_entry *next; ktime_t close_time, now; spin_lock(&gact->tcf_lock); next = gact->next_entry; /* cycle start, clear pending bit, clear total octets */ gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0; gact->current_entry_octets = 0; gact->current_max_octets = next->maxoctets; gact->current_close_time = ktime_add_ns(gact->current_close_time, next->interval); close_time = gact->current_close_time; if (list_is_last(&next->list, &p->entries)) next = list_first_entry(&p->entries, struct tcfg_gate_entry, list); else next = list_next_entry(next, list); now = gate_get_time(gact); if (ktime_after(now, close_time)) { ktime_t cycle, base; u64 n; cycle = p->tcfg_cycletime; base = ns_to_ktime(p->tcfg_basetime); n = div64_u64(ktime_sub_ns(now, base), cycle); close_time = ktime_add_ns(base, (n + 1) * cycle); } gact->next_entry = next; hrtimer_set_expires(&gact->hitimer, close_time); spin_unlock(&gact->tcf_lock); return HRTIMER_RESTART; } TC_INDIRECT_SCOPE int tcf_gate_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_gate *gact = to_gate(a); int action = READ_ONCE(gact->tcf_action); tcf_lastuse_update(&gact->tcf_tm); tcf_action_update_bstats(&gact->common, skb); spin_lock(&gact->tcf_lock); if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) { spin_unlock(&gact->tcf_lock); return action; } if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN)) { spin_unlock(&gact->tcf_lock); goto drop; } if (gact->current_max_octets >= 0) { gact->current_entry_octets += qdisc_pkt_len(skb); if (gact->current_entry_octets > gact->current_max_octets) { spin_unlock(&gact->tcf_lock); goto overlimit; } } spin_unlock(&gact->tcf_lock); return action; overlimit: tcf_action_inc_overlimit_qstats(&gact->common); drop: tcf_action_inc_drop_qstats(&gact->common); return TC_ACT_SHOT; } static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = { [TCA_GATE_ENTRY_INDEX] = { .type = NLA_U32 }, [TCA_GATE_ENTRY_GATE] = { .type = NLA_FLAG }, [TCA_GATE_ENTRY_INTERVAL] = { .type = NLA_U32 }, [TCA_GATE_ENTRY_IPV] = { .type = NLA_S32 }, [TCA_GATE_ENTRY_MAX_OCTETS] = { .type = NLA_S32 }, }; static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = { [TCA_GATE_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)), [TCA_GATE_PRIORITY] = { .type = NLA_S32 }, [TCA_GATE_ENTRY_LIST] = { .type = NLA_NESTED }, [TCA_GATE_BASE_TIME] = { .type = NLA_U64 }, [TCA_GATE_CYCLE_TIME] = { .type = NLA_U64 }, [TCA_GATE_CYCLE_TIME_EXT] = { .type = NLA_U64 }, [TCA_GATE_FLAGS] = { .type = NLA_U32 }, [TCA_GATE_CLOCKID] = { .type = NLA_S32 }, }; static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry, struct netlink_ext_ack *extack) { u32 interval = 0; entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]); if (tb[TCA_GATE_ENTRY_INTERVAL]) interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]); if (interval == 0) { NL_SET_ERR_MSG(extack, "Invalid interval for 
schedule entry"); return -EINVAL; } entry->interval = interval; entry->ipv = nla_get_s32_default(tb[TCA_GATE_ENTRY_IPV], -1); entry->maxoctets = nla_get_s32_default(tb[TCA_GATE_ENTRY_MAX_OCTETS], -1); return 0; } static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry, int index, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { }; int err; err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack); if (err < 0) { NL_SET_ERR_MSG(extack, "Could not parse nested entry"); return -EINVAL; } entry->index = index; return fill_gate_entry(tb, entry, extack); } static void release_entry_list(struct list_head *entries) { struct tcfg_gate_entry *entry, *e; list_for_each_entry_safe(entry, e, entries, list) { list_del(&entry->list); kfree(entry); } } static int parse_gate_list(struct nlattr *list_attr, struct tcf_gate_params *sched, struct netlink_ext_ack *extack) { struct tcfg_gate_entry *entry; struct nlattr *n; int err, rem; int i = 0; if (!list_attr) return -EINVAL; nla_for_each_nested(n, list_attr, rem) { if (nla_type(n) != TCA_GATE_ONE_ENTRY) { NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'"); continue; } entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { NL_SET_ERR_MSG(extack, "Not enough memory for entry"); err = -ENOMEM; goto release_list; } err = parse_gate_entry(n, entry, i, extack); if (err < 0) { kfree(entry); goto release_list; } list_add_tail(&entry->list, &sched->entries); i++; } sched->num_entries = i; return i; release_list: release_entry_list(&sched->entries); return err; } static void gate_setup_timer(struct tcf_gate *gact, u64 basetime, enum tk_offsets tko, s32 clockid, bool do_init) { if (!do_init) { if (basetime == gact->param.tcfg_basetime && tko == gact->tk_offset && clockid == gact->param.tcfg_clockid) return; spin_unlock_bh(&gact->tcf_lock); hrtimer_cancel(&gact->hitimer); spin_lock_bh(&gact->tcf_lock); } gact->param.tcfg_basetime = basetime; gact->param.tcfg_clockid = clockid; gact->tk_offset = tko; hrtimer_setup(&gact->hitimer, gate_timer_func, clockid, HRTIMER_MODE_ABS_SOFT); } static int tcf_gate_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id); enum tk_offsets tk_offset = TK_OFFS_TAI; bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_GATE_MAX + 1]; struct tcf_chain *goto_ch = NULL; u64 cycletime = 0, basetime = 0; struct tcf_gate_params *p; s32 clockid = CLOCK_TAI; struct tcf_gate *gact; struct tc_gate *parm; int ret = 0, err; u32 gflags = 0; s32 prio = -1; ktime_t start; u32 index; if (!nla) return -EINVAL; err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack); if (err < 0) return err; if (!tb[TCA_GATE_PARMS]) return -EINVAL; if (tb[TCA_GATE_CLOCKID]) { clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]); switch (clockid) { case CLOCK_REALTIME: tk_offset = TK_OFFS_REAL; break; case CLOCK_MONOTONIC: tk_offset = TK_OFFS_MAX; break; case CLOCK_BOOTTIME: tk_offset = TK_OFFS_BOOT; break; case CLOCK_TAI: tk_offset = TK_OFFS_TAI; break; default: NL_SET_ERR_MSG(extack, "Invalid 'clockid'"); return -EINVAL; } } parm = nla_data(tb[TCA_GATE_PARMS]); index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) return err; if (err && bind) return ACT_P_BOUND; if (!err) { ret = tcf_idr_create_from_flags(tn, index, est, a, &act_gate_ops, bind, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret 
= ACT_P_CREATED; } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) { tcf_idr_release(*a, bind); return -EEXIST; } if (tb[TCA_GATE_PRIORITY]) prio = nla_get_s32(tb[TCA_GATE_PRIORITY]); if (tb[TCA_GATE_BASE_TIME]) basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]); if (tb[TCA_GATE_FLAGS]) gflags = nla_get_u32(tb[TCA_GATE_FLAGS]); gact = to_gate(*a); if (ret == ACT_P_CREATED) INIT_LIST_HEAD(&gact->param.entries); err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; spin_lock_bh(&gact->tcf_lock); p = &gact->param; if (tb[TCA_GATE_CYCLE_TIME]) cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]); if (tb[TCA_GATE_ENTRY_LIST]) { err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack); if (err < 0) goto chain_put; } if (!cycletime) { struct tcfg_gate_entry *entry; ktime_t cycle = 0; list_for_each_entry(entry, &p->entries, list) cycle = ktime_add_ns(cycle, entry->interval); cycletime = cycle; if (!cycletime) { err = -EINVAL; goto chain_put; } } p->tcfg_cycletime = cycletime; if (tb[TCA_GATE_CYCLE_TIME_EXT]) p->tcfg_cycletime_ext = nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]); gate_setup_timer(gact, basetime, tk_offset, clockid, ret == ACT_P_CREATED); p->tcfg_priority = prio; p->tcfg_flags = gflags; gate_get_start_time(gact, &start); gact->current_close_time = start; gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING; gact->next_entry = list_first_entry(&p->entries, struct tcfg_gate_entry, list); goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); gate_start_timer(gact, start); spin_unlock_bh(&gact->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); return ret; chain_put: spin_unlock_bh(&gact->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: /* action is not inserted in any list: it's safe to init hitimer * without taking tcf_lock. 
*/ if (ret == ACT_P_CREATED) gate_setup_timer(gact, gact->param.tcfg_basetime, gact->tk_offset, gact->param.tcfg_clockid, true); tcf_idr_release(*a, bind); return err; } static void tcf_gate_cleanup(struct tc_action *a) { struct tcf_gate *gact = to_gate(a); struct tcf_gate_params *p; p = &gact->param; hrtimer_cancel(&gact->hitimer); release_entry_list(&p->entries); } static int dumping_entry(struct sk_buff *skb, struct tcfg_gate_entry *entry) { struct nlattr *item; item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY); if (!item) return -ENOSPC; if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index)) goto nla_put_failure; if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval)) goto nla_put_failure; if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets)) goto nla_put_failure; if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv)) goto nla_put_failure; return nla_nest_end(skb, item); nla_put_failure: nla_nest_cancel(skb, item); return -1; } static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_gate *gact = to_gate(a); struct tc_gate opt = { .index = gact->tcf_index, .refcnt = refcount_read(&gact->tcf_refcnt) - ref, .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind, }; struct tcfg_gate_entry *entry; struct tcf_gate_params *p; struct nlattr *entry_list; struct tcf_t t; spin_lock_bh(&gact->tcf_lock); opt.action = gact->tcf_action; p = &gact->param; if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME, p->tcfg_basetime, TCA_GATE_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME, p->tcfg_cycletime, TCA_GATE_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT, p->tcfg_cycletime_ext, TCA_GATE_PAD)) goto nla_put_failure; if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags)) goto nla_put_failure; if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority)) goto nla_put_failure; entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST); if (!entry_list) goto nla_put_failure; list_for_each_entry(entry, &p->entries, list) { if (dumping_entry(skb, entry) < 0) goto nla_put_failure; } nla_nest_end(skb, entry_list); tcf_tm_dump(&t, &gact->tcf_tm); if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD)) goto nla_put_failure; spin_unlock_bh(&gact->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&gact->tcf_lock); nlmsg_trim(skb, b); return -1; } static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { struct tcf_gate *gact = to_gate(a); struct tcf_t *tm = &gact->tcf_tm; tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } static size_t tcf_gate_get_fill_size(const struct tc_action *act) { return nla_total_size(sizeof(struct tc_gate)); } static void tcf_gate_entry_destructor(void *priv) { struct action_gate_entry *oe = priv; kfree(oe); } static int tcf_gate_get_entries(struct flow_action_entry *entry, const struct tc_action *act) { entry->gate.entries = tcf_gate_get_list(act); if (!entry->gate.entries) return -EINVAL; entry->destructor = tcf_gate_entry_destructor; entry->destructor_priv = entry->gate.entries; return 0; } static int tcf_gate_offload_act_setup(struct tc_action *act, void 
*entry_data, u32 *index_inc, bool bind, struct netlink_ext_ack *extack) { int err; if (bind) { struct flow_action_entry *entry = entry_data; entry->id = FLOW_ACTION_GATE; entry->gate.prio = tcf_gate_prio(act); entry->gate.basetime = tcf_gate_basetime(act); entry->gate.cycletime = tcf_gate_cycletime(act); entry->gate.cycletimeext = tcf_gate_cycletimeext(act); entry->gate.num_entries = tcf_gate_num_entries(act); err = tcf_gate_get_entries(entry, act); if (err) return err; *index_inc = 1; } else { struct flow_offload_action *fl_action = entry_data; fl_action->id = FLOW_ACTION_GATE; } return 0; } static struct tc_action_ops act_gate_ops = { .kind = "gate", .id = TCA_ID_GATE, .owner = THIS_MODULE, .act = tcf_gate_act, .dump = tcf_gate_dump, .init = tcf_gate_init, .cleanup = tcf_gate_cleanup, .stats_update = tcf_gate_stats_update, .get_fill_size = tcf_gate_get_fill_size, .offload_act_setup = tcf_gate_offload_act_setup, .size = sizeof(struct tcf_gate), }; MODULE_ALIAS_NET_ACT("gate"); static __net_init int gate_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id); return tc_action_net_init(net, tn, &act_gate_ops); } static void __net_exit gate_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_gate_ops.net_id); } static struct pernet_operations gate_net_ops = { .init = gate_init_net, .exit_batch = gate_exit_net, .id = &act_gate_ops.net_id, .size = sizeof(struct tc_action_net), }; static int __init gate_init_module(void) { return tcf_register_action(&act_gate_ops, &gate_net_ops); } static void __exit gate_cleanup_module(void) { tcf_unregister_action(&act_gate_ops, &gate_net_ops); } module_init(gate_init_module); module_exit(gate_cleanup_module); MODULE_DESCRIPTION("TC gate action"); MODULE_LICENSE("GPL v2"); |
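The scheduling math in gate_get_start_time() and gate_timer_func() above reduces to one recurring computation: given a base time and a cycle length, the next cycle boundary after now is base + (n + 1) * cycle, where n = (now - base) / cycle full cycles have already elapsed. A standalone sketch of that arithmetic follows (plain C with made-up nanosecond values, assuming now is not before base; the earlier-base case is handled separately in the driver).

#include <stdio.h>
#include <stdint.h>

/* Next cycle boundary strictly after 'now': n full cycles have elapsed
 * since 'base', so the next boundary is base + (n + 1) * cycle. */
static uint64_t next_cycle_start(uint64_t base, uint64_t cycle, uint64_t now)
{
	uint64_t n = (now - base) / cycle;

	return base + (n + 1) * cycle;
}

int main(void)
{
	/* Hypothetical values: base at t=0, 1 ms cycle, now at t=2.3 ms. */
	uint64_t base = 0, cycle = 1000000, now = 2300000;

	printf("next start: %llu ns\n",
	       (unsigned long long)next_cycle_start(base, cycle, now));	/* 3000000 */
	return 0;
}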
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Resilient Queued Spin Lock
 *
 * (C) Copyright 2024-2025 Meta Platforms, Inc. and affiliates.
 *
 * Authors: Kumar Kartikeya Dwivedi <memxor@gmail.com>
 */
#ifndef __ASM_GENERIC_RQSPINLOCK_H
#define __ASM_GENERIC_RQSPINLOCK_H

#include <linux/types.h>
#include <vdso/time64.h>
#include <linux/percpu.h>
#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm/qspinlock.h>
#endif

struct rqspinlock {
	union {
		atomic_t val;
		u32 locked;
	};
};

/* Even though this is the same as struct rqspinlock, we need to emit a distinct
 * type in BTF for BPF programs.
 */
struct bpf_res_spin_lock {
	u32 val;
};

struct qspinlock;
#ifdef CONFIG_QUEUED_SPINLOCKS
typedef struct qspinlock rqspinlock_t;
#else
typedef struct rqspinlock rqspinlock_t;
#endif

extern int resilient_tas_spin_lock(rqspinlock_t *lock);
#ifdef CONFIG_QUEUED_SPINLOCKS
extern int resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val);
#endif

#ifndef resilient_virt_spin_lock_enabled
static __always_inline bool resilient_virt_spin_lock_enabled(void)
{
	return false;
}
#endif

#ifndef resilient_virt_spin_lock
static __always_inline int resilient_virt_spin_lock(rqspinlock_t *lock)
{
	return 0;
}
#endif

/*
 * Default timeout for waiting loops is 0.25 seconds
 */
#define RES_DEF_TIMEOUT (NSEC_PER_SEC / 4)

/*
 * Choose 31 as it makes rqspinlock_held cacheline-aligned.
 */
#define RES_NR_HELD 31

struct rqspinlock_held {
	int cnt;
	void *locks[RES_NR_HELD];
};

DECLARE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);

static __always_inline void grab_held_lock_entry(void *lock)
{
	int cnt = this_cpu_inc_return(rqspinlock_held_locks.cnt);

	if (unlikely(cnt > RES_NR_HELD)) {
		/* Still keep the inc so we decrement later. */
		return;
	}

	/*
	 * Implied compiler barrier in per-CPU operations; otherwise we can have
	 * the compiler reorder inc with write to table, allowing interrupts to
	 * overwrite and erase our write to the table (as on interrupt exit it
	 * will be reset to NULL).
	 *
	 * It is fine for cnt inc to be reordered wrt remote readers though,
	 * they won't observe our entry until the cnt update is visible, that's
	 * all.
	 */
	this_cpu_write(rqspinlock_held_locks.locks[cnt - 1], lock);
}

/*
 * We simply don't support out-of-order unlocks, and keep the logic simple here.
 * The verifier prevents BPF programs from unlocking out-of-order, and the same
 * holds for in-kernel users.
 *
 * It is possible to run into misdetection scenarios of AA deadlocks on the same
 * CPU, and missed ABBA deadlocks on remote CPUs if this function pops entries
 * out of order (due to lock A, lock B, unlock A, unlock B) pattern.
The correct * logic to preserve right entries in the table would be to walk the array of * held locks and swap and clear out-of-order entries, but that's too * complicated and we don't have a compelling use case for out of order unlocking. */ static __always_inline void release_held_lock_entry(void) { struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks); if (unlikely(rqh->cnt > RES_NR_HELD)) goto dec; WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL); dec: /* * Reordering of clearing above with inc and its write in * grab_held_lock_entry that came before us (in same acquisition * attempt) is ok, we either see a valid entry or NULL when it's * visible. * * But this helper is invoked when we unwind upon failing to acquire the * lock. Unlike the unlock path which constitutes a release store after * we clear the entry, we need to emit a write barrier here. Otherwise, * we may have a situation as follows: * * <error> for lock B * release_held_lock_entry * * try_cmpxchg_acquire for lock A * grab_held_lock_entry * * Lack of any ordering means reordering may occur such that dec, inc * are done before entry is overwritten. This permits a remote lock * holder of lock B (which this CPU failed to acquire) to now observe it * as being attempted on this CPU, and may lead to misdetection (if this * CPU holds a lock it is attempting to acquire, leading to false ABBA * diagnosis). * * In case of unlock, we will always do a release on the lock word after * releasing the entry, ensuring that other CPUs cannot hold the lock * (and make conclusions about deadlocks) until the entry has been * cleared on the local CPU, preventing any anomalies. Reordering is * still possible there, but a remote CPU cannot observe a lock in our * table which it is already holding, since visibility entails our * release store for the said lock has not retired. * * In theory we don't have a problem if the dec and WRITE_ONCE above get * reordered with each other, we either notice an empty NULL entry on * top (if dec succeeds WRITE_ONCE), or a potentially stale entry which * cannot be observed (if dec precedes WRITE_ONCE). * * Emit the write barrier _before_ the dec, this permits dec-inc * reordering but that is harmless as we'd have new entry set to NULL * already, i.e. they cannot precede the NULL store above. */ smp_wmb(); this_cpu_dec(rqspinlock_held_locks.cnt); } #ifdef CONFIG_QUEUED_SPINLOCKS /** * res_spin_lock - acquire a queued spinlock * @lock: Pointer to queued spinlock structure * * Return: * * 0 - Lock was acquired successfully. * * -EDEADLK - Lock acquisition failed because of AA/ABBA deadlock. * * -ETIMEDOUT - Lock acquisition failed because of timeout. */ static __always_inline int res_spin_lock(rqspinlock_t *lock) { int val = 0; if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) { grab_held_lock_entry(lock); return 0; } return resilient_queued_spin_lock_slowpath(lock, val); } #else #define res_spin_lock(lock) resilient_tas_spin_lock(lock) #endif /* CONFIG_QUEUED_SPINLOCKS */ static __always_inline void res_spin_unlock(rqspinlock_t *lock) { struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks); if (unlikely(rqh->cnt > RES_NR_HELD)) goto unlock; WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL); unlock: /* * Release barrier, ensures correct ordering. See release_held_lock_entry * for details. Perform release store instead of queued_spin_unlock, * since we use this function for test-and-set fallback as well. When we * have CONFIG_QUEUED_SPINLOCKS=n, we clear the full 4-byte lockword. 
	 *
	 * Like release_held_lock_entry, we can do the release before the dec.
	 * We simply care about not seeing the 'lock' in our table from a remote
	 * CPU once the lock has been released, which doesn't rely on the dec.
	 *
	 * Unlike smp_wmb(), release is not a two way fence, hence it is
	 * possible for an inc to move up and reorder with our clearing of the
	 * entry. This isn't a problem however, as for a misdiagnosis of ABBA,
	 * the remote CPU needs to hold this lock, which won't be released until
	 * the store below is done, which would ensure the entry is overwritten
	 * to NULL, etc.
	 */
	smp_store_release(&lock->locked, 0);
	this_cpu_dec(rqspinlock_held_locks.cnt);
}

#ifdef CONFIG_QUEUED_SPINLOCKS
#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; })
#else
#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
#endif

#define raw_res_spin_lock(lock)                    \
	({                                         \
		int __ret;                         \
		preempt_disable();                 \
		__ret = res_spin_lock(lock);       \
		if (__ret)                         \
			preempt_enable();          \
		__ret;                             \
	})

#define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })

#define raw_res_spin_lock_irqsave(lock, flags)     \
	({                                         \
		int __ret;                         \
		local_irq_save(flags);             \
		__ret = raw_res_spin_lock(lock);   \
		if (__ret)                         \
			local_irq_restore(flags);  \
		__ret;                             \
	})

#define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })

#endif /* __ASM_GENERIC_RQSPINLOCK_H */
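Because every acquisition through this API can fail with -EDEADLK or -ETIMEDOUT, callers have to check the return value and skip both the critical section and the matching unlock on error; on failure the lock/irq/preempt state has already been unwound by the macros. A minimal kernel-style usage sketch follows; the lock, counter, and function names are hypothetical, not part of the header.

#include <asm-generic/rqspinlock.h>

/* Hypothetical data protected by a resilient spinlock. */
static rqspinlock_t demo_lock;
static int demo_counter;

static void demo_init(void)
{
	raw_res_spin_lock_init(&demo_lock);
}

static int demo_increment(void)
{
	unsigned long flags;
	int ret;

	ret = raw_res_spin_lock_irqsave(&demo_lock, flags);
	if (ret)	/* -EDEADLK or -ETIMEDOUT: the lock was NOT taken */
		return ret;

	demo_counter++;

	raw_res_spin_unlock_irqrestore(&demo_lock, flags);
	return 0;
}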
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NET_SCM_H
#define __LINUX_NET_SCM_H

#include <linux/limits.h>
#include <linux/net.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/sched/signal.h>
#include <net/compat.h>

/* Well, we should have at least one descriptor open
 * to accept passed FDs 8)
 */
#define SCM_MAX_FD	253

struct scm_creds {
	u32	pid;
	kuid_t	uid;
	kgid_t	gid;
};

#ifdef CONFIG_UNIX
struct unix_edge;
#endif

struct scm_fp_list {
	short			count;
	short			count_unix;
	short			max;
#ifdef CONFIG_UNIX
	bool			inflight;
	bool			dead;
	struct list_head	vertices;
	struct unix_edge	*edges;
#endif
	struct user_struct	*user;
	struct file		*fp[SCM_MAX_FD];
};

struct scm_cookie {
	struct pid		*pid;		/* Skb credentials */
	struct scm_fp_list	*fp;		/* Passed files */
	struct scm_creds	creds;		/* Skb credentials */
#ifdef CONFIG_SECURITY_NETWORK
	u32			secid;		/* Passed security ID */
#endif
};

void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
void __scm_destroy(struct scm_cookie *scm);
struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl);

#ifdef CONFIG_SECURITY_NETWORK
static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
{
	security_socket_getpeersec_dgram(sock, NULL, &scm->secid);
}
#else
static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

static __inline__ void scm_set_cred(struct scm_cookie *scm,
				    struct pid *pid, kuid_t uid, kgid_t gid)
{
	scm->pid = get_pid(pid);
	scm->creds.pid = pid_vnr(pid);
	scm->creds.uid = uid;
	scm->creds.gid = gid;
}

static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
{
	put_pid(scm->pid);
	scm->pid = NULL;
}

static __inline__ void scm_destroy(struct scm_cookie *scm)
{
	scm_destroy_cred(scm);
	if (scm->fp)
		__scm_destroy(scm);
}

static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
			       struct scm_cookie *scm, bool forcecreds)
{
	memset(scm, 0, sizeof(*scm));
	scm->creds.uid = INVALID_UID;
	scm->creds.gid = INVALID_GID;
	if (forcecreds)
		scm_set_cred(scm, task_tgid(current), current_uid(), current_gid());
	unix_get_peersec_dgram(sock, scm);
	if (msg->msg_controllen <= 0)
		return 0;
	return __scm_send(sock, msg, scm);
}

#ifdef CONFIG_SECURITY_NETWORK
static inline void scm_passec(struct socket *sock, struct msghdr *msg,
			      struct scm_cookie *scm)
{
	struct lsm_context ctx;
	int err;

	if
(test_bit(SOCK_PASSSEC, &sock->flags)) { err = security_secid_to_secctx(scm->secid, &ctx); if (err >= 0) { put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, ctx.len, ctx.context); security_release_secctx(&ctx); } } } static inline bool scm_has_secdata(struct socket *sock) { return test_bit(SOCK_PASSSEC, &sock->flags); } #else static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) { } static inline bool scm_has_secdata(struct socket *sock) { return false; } #endif /* CONFIG_SECURITY_NETWORK */ static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm) { struct file *pidfd_file = NULL; int len, pidfd; /* put_cmsg() doesn't return an error if CMSG is truncated, * that's why we need to opencode these checks here. */ if (msg->msg_flags & MSG_CMSG_COMPAT) len = sizeof(struct compat_cmsghdr) + sizeof(int); else len = sizeof(struct cmsghdr) + sizeof(int); if (msg->msg_controllen < len) { msg->msg_flags |= MSG_CTRUNC; return; } if (!scm->pid) return; pidfd = pidfd_prepare(scm->pid, 0, &pidfd_file); if (put_cmsg(msg, SOL_SOCKET, SCM_PIDFD, sizeof(int), &pidfd)) { if (pidfd_file) { put_unused_fd(pidfd); fput(pidfd_file); } return; } if (pidfd_file) fd_install(pidfd, pidfd_file); } static inline bool __scm_recv_common(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm, int flags) { if (!msg->msg_control) { if (test_bit(SOCK_PASSCRED, &sock->flags) || test_bit(SOCK_PASSPIDFD, &sock->flags) || scm->fp || scm_has_secdata(sock)) msg->msg_flags |= MSG_CTRUNC; scm_destroy(scm); return false; } if (test_bit(SOCK_PASSCRED, &sock->flags)) { struct user_namespace *current_ns = current_user_ns(); struct ucred ucreds = { .pid = scm->creds.pid, .uid = from_kuid_munged(current_ns, scm->creds.uid), .gid = from_kgid_munged(current_ns, scm->creds.gid), }; put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds); } scm_passec(sock, msg, scm); if (scm->fp) scm_detach_fds(msg, scm); return true; } static inline void scm_recv(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm, int flags) { if (!__scm_recv_common(sock, msg, scm, flags)) return; scm_destroy_cred(scm); } static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm, int flags) { if (!__scm_recv_common(sock, msg, scm, flags)) return; if (test_bit(SOCK_PASSPIDFD, &sock->flags)) scm_pidfd_recv(msg, scm); scm_destroy_cred(scm); } static inline int scm_recv_one_fd(struct file *f, int __user *ufd, unsigned int flags) { if (!ufd) return -EFAULT; return receive_fd(f, ufd, flags); } #endif /* __LINUX_NET_SCM_H */ |
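The intended call pattern for this header brackets each message: scm_send() fills the cookie from the SCM_* control messages, the protocol then attaches or consumes the credentials and passed files, and scm_destroy() (send side) or scm_recv() (receive side) releases them. A condensed, hypothetical protocol-handler sketch is shown below; the demo_* names and the trimmed error handling are illustrative assumptions, not part of the header.

#include <net/scm.h>

/* Hypothetical sendmsg path: parse SCM_* control messages into a cookie,
 * use it while building the outgoing skb, then drop the references. */
static int demo_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct scm_cookie scm;
	int err;

	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	/* ... attach scm.creds / scm.fp to the outgoing skb here ... */

	scm_destroy(&scm);
	return len;
}

/* Hypothetical recvmsg tail: hand credentials, security labels and passed
 * file descriptors back to userspace as control messages. */
static void demo_recvmsg_finish(struct socket *sock, struct msghdr *msg,
				struct scm_cookie *scm, int flags)
{
	scm_recv(sock, msg, scm, flags);
}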
2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * uvc_driver.c -- USB Video Class driver
 *
 * Copyright (C) 2005-2010
 *     Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/unaligned.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>

#include "uvcvideo.h"

#define DRIVER_AUTHOR		"Laurent Pinchart " \
				"<laurent.pinchart@ideasonboard.com>"
#define DRIVER_DESC		"USB Video Class driver"

unsigned int uvc_clock_param = CLOCK_MONOTONIC;
unsigned int uvc_hw_timestamps_param;
unsigned int uvc_no_drop_param = 1;
static unsigned int uvc_quirks_param = -1;
unsigned int uvc_dbg_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;

static struct usb_driver uvc_driver;

/* ------------------------------------------------------------------------
 * Utility functions
 */

struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
					    u8 epaddr)
{
	struct usb_host_endpoint *ep;
	unsigned int i;

	for (i = 0; i < alts->desc.bNumEndpoints; ++i) {
		ep = &alts->endpoint[i];
		if (ep->desc.bEndpointAddress == epaddr)
			return ep;
	}

	return NULL;
}

static enum v4l2_colorspace uvc_colorspace(const u8 primaries)
{
	static const enum v4l2_colorspace colorprimaries[] = {
		V4L2_COLORSPACE_SRGB,  /* Unspecified */
		V4L2_COLORSPACE_SRGB,
		V4L2_COLORSPACE_470_SYSTEM_M,
		V4L2_COLORSPACE_470_SYSTEM_BG,
		V4L2_COLORSPACE_SMPTE170M,
		V4L2_COLORSPACE_SMPTE240M,
	};

	if (primaries < ARRAY_SIZE(colorprimaries))
		return colorprimaries[primaries];

	return V4L2_COLORSPACE_SRGB;  /* Reserved */
}

static enum v4l2_xfer_func uvc_xfer_func(const u8 transfer_characteristics)
{
	/*
	 * V4L2 does not currently have definitions for all possible values of
	 * UVC transfer characteristics.
If v4l2_xfer_func is extended with new * values, the mapping below should be updated. * * Substitutions are taken from the mapping given for * V4L2_XFER_FUNC_DEFAULT documented in videodev2.h. */ static const enum v4l2_xfer_func xfer_funcs[] = { V4L2_XFER_FUNC_DEFAULT, /* Unspecified */ V4L2_XFER_FUNC_709, V4L2_XFER_FUNC_709, /* Substitution for BT.470-2 M */ V4L2_XFER_FUNC_709, /* Substitution for BT.470-2 B, G */ V4L2_XFER_FUNC_709, /* Substitution for SMPTE 170M */ V4L2_XFER_FUNC_SMPTE240M, V4L2_XFER_FUNC_NONE, V4L2_XFER_FUNC_SRGB, }; if (transfer_characteristics < ARRAY_SIZE(xfer_funcs)) return xfer_funcs[transfer_characteristics]; return V4L2_XFER_FUNC_DEFAULT; /* Reserved */ } static enum v4l2_ycbcr_encoding uvc_ycbcr_enc(const u8 matrix_coefficients) { /* * V4L2 does not currently have definitions for all possible values of * UVC matrix coefficients. If v4l2_ycbcr_encoding is extended with new * values, the mapping below should be updated. * * Substitutions are taken from the mapping given for * V4L2_YCBCR_ENC_DEFAULT documented in videodev2.h. * * FCC is assumed to be close enough to 601. */ static const enum v4l2_ycbcr_encoding ycbcr_encs[] = { V4L2_YCBCR_ENC_DEFAULT, /* Unspecified */ V4L2_YCBCR_ENC_709, V4L2_YCBCR_ENC_601, /* Substitution for FCC */ V4L2_YCBCR_ENC_601, /* Substitution for BT.470-2 B, G */ V4L2_YCBCR_ENC_601, V4L2_YCBCR_ENC_SMPTE240M, }; if (matrix_coefficients < ARRAY_SIZE(ycbcr_encs)) return ycbcr_encs[matrix_coefficients]; return V4L2_YCBCR_ENC_DEFAULT; /* Reserved */ } /* ------------------------------------------------------------------------ * Terminal and unit management */ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id) { struct uvc_entity *entity; list_for_each_entry(entity, &dev->entities, list) { if (entity->id == id) return entity; } return NULL; } static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev, int id, struct uvc_entity *entity) { unsigned int i; if (entity == NULL) entity = list_entry(&dev->entities, struct uvc_entity, list); list_for_each_entry_continue(entity, &dev->entities, list) { for (i = 0; i < entity->bNrInPins; ++i) if (entity->baSourceID[i] == id) return entity; } return NULL; } static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id) { struct uvc_streaming *stream; list_for_each_entry(stream, &dev->streams, list) { if (stream->header.bTerminalLink == id) return stream; } return NULL; } /* ------------------------------------------------------------------------ * Streaming Object Management */ static void uvc_stream_delete(struct uvc_streaming *stream) { if (stream->async_wq) destroy_workqueue(stream->async_wq); mutex_destroy(&stream->mutex); usb_put_intf(stream->intf); kfree(stream->formats); kfree(stream->header.bmaControls); kfree(stream); } static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev, struct usb_interface *intf) { struct uvc_streaming *stream; stream = kzalloc(sizeof(*stream), GFP_KERNEL); if (stream == NULL) return NULL; mutex_init(&stream->mutex); stream->dev = dev; stream->intf = usb_get_intf(intf); stream->intfnum = intf->cur_altsetting->desc.bInterfaceNumber; /* Allocate a stream specific work queue for asynchronous tasks. 
*/ stream->async_wq = alloc_workqueue("uvcvideo", WQ_UNBOUND | WQ_HIGHPRI, 0); if (!stream->async_wq) { uvc_stream_delete(stream); return NULL; } return stream; } /* ------------------------------------------------------------------------ * Descriptors parsing */ static int uvc_parse_frame(struct uvc_device *dev, struct uvc_streaming *streaming, struct uvc_format *format, struct uvc_frame *frame, u32 **intervals, u8 ftype, int width_multiplier, const unsigned char *buffer, int buflen) { struct usb_host_interface *alts = streaming->intf->cur_altsetting; unsigned int maxIntervalIndex; unsigned int interval; unsigned int i, n; if (ftype != UVC_VS_FRAME_FRAME_BASED) n = buflen > 25 ? buffer[25] : 0; else n = buflen > 21 ? buffer[21] : 0; n = n ? n : 3; if (buflen < 26 + 4 * n) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FRAME error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } frame->bFrameIndex = buffer[3]; frame->bmCapabilities = buffer[4]; frame->wWidth = get_unaligned_le16(&buffer[5]) * width_multiplier; frame->wHeight = get_unaligned_le16(&buffer[7]); frame->dwMinBitRate = get_unaligned_le32(&buffer[9]); frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]); if (ftype != UVC_VS_FRAME_FRAME_BASED) { frame->dwMaxVideoFrameBufferSize = get_unaligned_le32(&buffer[17]); frame->dwDefaultFrameInterval = get_unaligned_le32(&buffer[21]); frame->bFrameIntervalType = buffer[25]; } else { frame->dwMaxVideoFrameBufferSize = 0; frame->dwDefaultFrameInterval = get_unaligned_le32(&buffer[17]); frame->bFrameIntervalType = buffer[21]; } /* * Copy the frame intervals. * * Some bogus devices report dwMinFrameInterval equal to * dwMaxFrameInterval and have dwFrameIntervalStep set to zero. Setting * all null intervals to 1 fixes the problem and some other divisions * by zero that could happen. */ frame->dwFrameInterval = *intervals; for (i = 0; i < n; ++i) { interval = get_unaligned_le32(&buffer[26 + 4 * i]); (*intervals)[i] = interval ? interval : 1; } /* * Apply more fixes, quirks and workarounds to handle incorrect or * broken descriptors. */ /* * Several UVC chipsets screw up dwMaxVideoFrameBufferSize completely. * Observed behaviours range from setting the value to 1.1x the actual * frame size to hardwiring the 16 low bits to 0. This results in a * higher than necessary memory usage as well as a wrong image size * information. For uncompressed formats this can be fixed by computing * the value from the frame size. */ if (!(format->flags & UVC_FMT_FLAG_COMPRESSED)) frame->dwMaxVideoFrameBufferSize = format->bpp * frame->wWidth * frame->wHeight / 8; /* * Clamp the default frame interval to the boundaries. A zero * bFrameIntervalType value indicates a continuous frame interval * range, with dwFrameInterval[0] storing the minimum value and * dwFrameInterval[1] storing the maximum value. */ maxIntervalIndex = frame->bFrameIntervalType ? n - 1 : 1; frame->dwDefaultFrameInterval = clamp(frame->dwDefaultFrameInterval, frame->dwFrameInterval[0], frame->dwFrameInterval[maxIntervalIndex]); /* * Some devices report frame intervals that are not functional. If the * corresponding quirk is set, restrict operation to the first interval * only. 
*/ if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) { frame->bFrameIntervalType = 1; (*intervals)[0] = frame->dwDefaultFrameInterval; } uvc_dbg(dev, DESCR, "- %ux%u (%u.%u fps)\n", frame->wWidth, frame->wHeight, 10000000 / frame->dwDefaultFrameInterval, (100000000 / frame->dwDefaultFrameInterval) % 10); *intervals += n; return buffer[0]; } static int uvc_parse_format(struct uvc_device *dev, struct uvc_streaming *streaming, struct uvc_format *format, struct uvc_frame *frames, u32 **intervals, const unsigned char *buffer, int buflen) { struct usb_host_interface *alts = streaming->intf->cur_altsetting; const struct uvc_format_desc *fmtdesc; struct uvc_frame *frame; const unsigned char *start = buffer; unsigned int width_multiplier = 1; unsigned int i, n; u8 ftype; int ret; format->type = buffer[2]; format->index = buffer[3]; format->frames = frames; switch (buffer[2]) { case UVC_VS_FORMAT_UNCOMPRESSED: case UVC_VS_FORMAT_FRAME_BASED: n = buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED ? 27 : 28; if (buflen < n) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* Find the format descriptor from its GUID. */ fmtdesc = uvc_format_by_guid(&buffer[5]); if (!fmtdesc) { /* * Unknown video formats are not fatal errors, the * caller will skip this descriptor. */ dev_info(&streaming->intf->dev, "Unknown video format %pUl\n", &buffer[5]); return 0; } format->fcc = fmtdesc->fcc; format->bpp = buffer[21]; /* * Some devices report a format that doesn't match what they * really send. */ if (dev->quirks & UVC_QUIRK_FORCE_Y8) { if (format->fcc == V4L2_PIX_FMT_YUYV) { format->fcc = V4L2_PIX_FMT_GREY; format->bpp = 8; width_multiplier = 2; } } /* Some devices report bpp that doesn't match the format. */ if (dev->quirks & UVC_QUIRK_FORCE_BPP) { const struct v4l2_format_info *info = v4l2_format_info(format->fcc); if (info) { unsigned int div = info->hdiv * info->vdiv; n = info->bpp[0] * div; for (i = 1; i < info->comp_planes; i++) n += info->bpp[i]; format->bpp = DIV_ROUND_UP(8 * n, div); } } if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) { ftype = UVC_VS_FRAME_UNCOMPRESSED; } else { ftype = UVC_VS_FRAME_FRAME_BASED; if (buffer[27]) format->flags = UVC_FMT_FLAG_COMPRESSED; } break; case UVC_VS_FORMAT_MJPEG: if (buflen < 11) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } format->fcc = V4L2_PIX_FMT_MJPEG; format->flags = UVC_FMT_FLAG_COMPRESSED; format->bpp = 0; ftype = UVC_VS_FRAME_MJPEG; break; case UVC_VS_FORMAT_DV: if (buflen < 9) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } if ((buffer[8] & 0x7f) > 2) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d: unknown DV format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[8]); return -EINVAL; } format->fcc = V4L2_PIX_FMT_DV; format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM; format->bpp = 0; ftype = 0; /* Create a dummy frame descriptor. */ frame = &frames[0]; memset(frame, 0, sizeof(*frame)); frame->bFrameIntervalType = 1; frame->dwDefaultFrameInterval = 1; frame->dwFrameInterval = *intervals; *(*intervals)++ = 1; format->nframes = 1; break; case UVC_VS_FORMAT_MPEG2TS: case UVC_VS_FORMAT_STREAM_BASED: /* Not supported yet. 
*/ default: uvc_dbg(dev, DESCR, "device %d videostreaming interface %d unsupported format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[2]); return -EINVAL; } uvc_dbg(dev, DESCR, "Found format %p4cc", &format->fcc); buflen -= buffer[0]; buffer += buffer[0]; /* * Parse the frame descriptors. Only uncompressed, MJPEG and frame * based formats have frame descriptors. */ if (ftype) { while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == ftype) { frame = &frames[format->nframes]; ret = uvc_parse_frame(dev, streaming, format, frame, intervals, ftype, width_multiplier, buffer, buflen); if (ret < 0) return ret; format->nframes++; buflen -= ret; buffer += ret; } } if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == UVC_VS_STILL_IMAGE_FRAME) { buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == UVC_VS_COLORFORMAT) { if (buflen < 6) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d COLORFORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } format->colorspace = uvc_colorspace(buffer[3]); format->xfer_func = uvc_xfer_func(buffer[4]); format->ycbcr_enc = uvc_ycbcr_enc(buffer[5]); buflen -= buffer[0]; buffer += buffer[0]; } else { format->colorspace = V4L2_COLORSPACE_SRGB; } return buffer - start; } static int uvc_parse_streaming(struct uvc_device *dev, struct usb_interface *intf) { struct uvc_streaming *streaming = NULL; struct uvc_format *format; struct uvc_frame *frame; struct usb_host_interface *alts = &intf->altsetting[0]; const unsigned char *_buffer, *buffer = alts->extra; int _buflen, buflen = alts->extralen; unsigned int nformats = 0, nframes = 0, nintervals = 0; unsigned int size, i, n, p; u32 *interval; u16 psize; int ret = -EINVAL; if (intf->cur_altsetting->desc.bInterfaceSubClass != UVC_SC_VIDEOSTREAMING) { uvc_dbg(dev, DESCR, "device %d interface %d isn't a video streaming interface\n", dev->udev->devnum, intf->altsetting[0].desc.bInterfaceNumber); return -EINVAL; } if (usb_driver_claim_interface(&uvc_driver, intf, dev)) { uvc_dbg(dev, DESCR, "device %d interface %d is already claimed\n", dev->udev->devnum, intf->altsetting[0].desc.bInterfaceNumber); return -EINVAL; } streaming = uvc_stream_new(dev, intf); if (streaming == NULL) { usb_driver_release_interface(&uvc_driver, intf); return -ENOMEM; } /* * The Pico iMage webcam has its class-specific interface descriptors * after the endpoint descriptors. */ if (buflen == 0) { for (i = 0; i < alts->desc.bNumEndpoints; ++i) { struct usb_host_endpoint *ep = &alts->endpoint[i]; if (ep->extralen == 0) continue; if (ep->extralen > 2 && ep->extra[1] == USB_DT_CS_INTERFACE) { uvc_dbg(dev, DESCR, "trying extra data from endpoint %u\n", i); buffer = alts->endpoint[i].extra; buflen = alts->endpoint[i].extralen; break; } } } /* Skip the standard interface descriptors. */ while (buflen > 2 && buffer[1] != USB_DT_CS_INTERFACE) { buflen -= buffer[0]; buffer += buffer[0]; } if (buflen <= 2) { uvc_dbg(dev, DESCR, "no class-specific streaming interface descriptors found\n"); goto error; } /* Parse the header descriptor. */ switch (buffer[2]) { case UVC_VS_OUTPUT_HEADER: streaming->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; size = 9; break; case UVC_VS_INPUT_HEADER: streaming->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; size = 13; break; default: uvc_dbg(dev, DESCR, "device %d videostreaming interface %d HEADER descriptor not found\n", dev->udev->devnum, alts->desc.bInterfaceNumber); goto error; } p = buflen >= 4 ? 
buffer[3] : 0; n = buflen >= size ? buffer[size-1] : 0; if (buflen < size + p*n) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d HEADER descriptor is invalid\n", dev->udev->devnum, alts->desc.bInterfaceNumber); goto error; } streaming->header.bNumFormats = p; streaming->header.bEndpointAddress = buffer[6]; if (buffer[2] == UVC_VS_INPUT_HEADER) { streaming->header.bmInfo = buffer[7]; streaming->header.bTerminalLink = buffer[8]; streaming->header.bStillCaptureMethod = buffer[9]; streaming->header.bTriggerSupport = buffer[10]; streaming->header.bTriggerUsage = buffer[11]; } else { streaming->header.bTerminalLink = buffer[7]; } streaming->header.bControlSize = n; streaming->header.bmaControls = kmemdup(&buffer[size], p * n, GFP_KERNEL); if (streaming->header.bmaControls == NULL) { ret = -ENOMEM; goto error; } buflen -= buffer[0]; buffer += buffer[0]; _buffer = buffer; _buflen = buflen; /* Count the format and frame descriptors. */ while (_buflen > 2 && _buffer[1] == USB_DT_CS_INTERFACE) { switch (_buffer[2]) { case UVC_VS_FORMAT_UNCOMPRESSED: case UVC_VS_FORMAT_MJPEG: case UVC_VS_FORMAT_FRAME_BASED: nformats++; break; case UVC_VS_FORMAT_DV: /* * DV format has no frame descriptor. We will create a * dummy frame descriptor with a dummy frame interval. */ nformats++; nframes++; nintervals++; break; case UVC_VS_FORMAT_MPEG2TS: case UVC_VS_FORMAT_STREAM_BASED: uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT %u is not supported\n", dev->udev->devnum, alts->desc.bInterfaceNumber, _buffer[2]); break; case UVC_VS_FRAME_UNCOMPRESSED: case UVC_VS_FRAME_MJPEG: nframes++; if (_buflen > 25) nintervals += _buffer[25] ? _buffer[25] : 3; break; case UVC_VS_FRAME_FRAME_BASED: nframes++; if (_buflen > 21) nintervals += _buffer[21] ? _buffer[21] : 3; break; } _buflen -= _buffer[0]; _buffer += _buffer[0]; } if (nformats == 0) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d has no supported formats defined\n", dev->udev->devnum, alts->desc.bInterfaceNumber); goto error; } /* * Allocate memory for the formats, the frames and the intervals, * plus any required padding to guarantee that everything has the * correct alignment. */ size = nformats * sizeof(*format); size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame); size = ALIGN(size, __alignof__(*interval)) + nintervals * sizeof(*interval); format = kzalloc(size, GFP_KERNEL); if (!format) { ret = -ENOMEM; goto error; } frame = (void *)format + nformats * sizeof(*format); frame = PTR_ALIGN(frame, __alignof__(*frame)); interval = (void *)frame + nframes * sizeof(*frame); interval = PTR_ALIGN(interval, __alignof__(*interval)); streaming->formats = format; streaming->nformats = 0; /* Parse the format descriptors. */ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) { switch (buffer[2]) { case UVC_VS_FORMAT_UNCOMPRESSED: case UVC_VS_FORMAT_MJPEG: case UVC_VS_FORMAT_DV: case UVC_VS_FORMAT_FRAME_BASED: ret = uvc_parse_format(dev, streaming, format, frame, &interval, buffer, buflen); if (ret < 0) goto error; if (!ret) break; streaming->nformats++; frame += format->nframes; format++; buflen -= ret; buffer += ret; continue; default: break; } buflen -= buffer[0]; buffer += buffer[0]; } if (buflen) uvc_dbg(dev, DESCR, "device %d videostreaming interface %d has %u bytes of trailing descriptor garbage\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buflen); /* Parse the alternate settings to find the maximum bandwidth. 
*/ for (i = 0; i < intf->num_altsetting; ++i) { struct usb_host_endpoint *ep; alts = &intf->altsetting[i]; ep = uvc_find_endpoint(alts, streaming->header.bEndpointAddress); if (ep == NULL) continue; psize = uvc_endpoint_max_bpi(dev->udev, ep); if (psize > streaming->maxpsize) streaming->maxpsize = psize; } list_add_tail(&streaming->list, &dev->streams); return 0; error: usb_driver_release_interface(&uvc_driver, intf); uvc_stream_delete(streaming); return ret; } static const u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA; static const u8 uvc_gpio_guid[16] = UVC_GUID_EXT_GPIO_CONTROLLER; static const u8 uvc_media_transport_input_guid[16] = UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT; static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING; static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id, unsigned int num_pads, unsigned int extra_size) { struct uvc_entity *entity; unsigned int num_inputs; unsigned int size; unsigned int i; extra_size = roundup(extra_size, sizeof(*entity->pads)); if (num_pads) num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1; else num_inputs = 0; size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads + num_inputs; entity = kzalloc(size, GFP_KERNEL); if (entity == NULL) return NULL; entity->id = id; entity->type = type; /* * Set the GUID for standard entity types. For extension units, the GUID * is initialized by the caller. */ switch (type) { case UVC_EXT_GPIO_UNIT: memcpy(entity->guid, uvc_gpio_guid, 16); break; case UVC_ITT_CAMERA: memcpy(entity->guid, uvc_camera_guid, 16); break; case UVC_ITT_MEDIA_TRANSPORT_INPUT: memcpy(entity->guid, uvc_media_transport_input_guid, 16); break; case UVC_VC_PROCESSING_UNIT: memcpy(entity->guid, uvc_processing_guid, 16); break; } entity->num_links = 0; entity->num_pads = num_pads; entity->pads = ((void *)(entity + 1)) + extra_size; for (i = 0; i < num_inputs; ++i) entity->pads[i].flags = MEDIA_PAD_FL_SINK; if (!UVC_ENTITY_IS_OTERM(entity) && num_pads) entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE; entity->bNrInPins = num_inputs; entity->baSourceID = (u8 *)(&entity->pads[num_pads]); return entity; } static void uvc_entity_set_name(struct uvc_device *dev, struct uvc_entity *entity, const char *type_name, u8 string_id) { int ret; /* * First attempt to read the entity name from the device. If the entity * has no associated string, or if reading the string fails (most * likely due to a buggy firmware), fall back to default names based on * the entity type. */ if (string_id) { ret = usb_string(dev->udev, string_id, entity->name, sizeof(entity->name)); if (!ret) return; } sprintf(entity->name, "%s %u", type_name, entity->id); } /* Parse vendor-specific extensions. */ static int uvc_parse_vendor_control(struct uvc_device *dev, const unsigned char *buffer, int buflen) { struct usb_device *udev = dev->udev; struct usb_host_interface *alts = dev->intf->cur_altsetting; struct uvc_entity *unit; unsigned int n, p; int handled = 0; switch (le16_to_cpu(dev->udev->descriptor.idVendor)) { case 0x046d: /* Logitech */ if (buffer[1] != 0x41 || buffer[2] != 0x01) break; /* * Logitech implements several vendor specific functions * through vendor specific extension units (LXU). 
* * The LXU descriptors are similar to XU descriptors * (see "USB Device Video Class for Video Devices", section * 3.7.2.6 "Extension Unit Descriptor") with the following * differences: * * ---------------------------------------------------------- * 0 bLength 1 Number * Size of this descriptor, in bytes: 24+p+n*2 * ---------------------------------------------------------- * 23+p+n bmControlsType N Bitmap * Individual bits in the set are defined: * 0: Absolute * 1: Relative * * This bitset is mapped exactly the same as bmControls. * ---------------------------------------------------------- * 23+p+n*2 bReserved 1 Boolean * ---------------------------------------------------------- * 24+p+n*2 iExtension 1 Index * Index of a string descriptor that describes this * extension unit. * ---------------------------------------------------------- */ p = buflen >= 22 ? buffer[21] : 0; n = buflen >= 25 + p ? buffer[22+p] : 0; if (buflen < 25 + p + 2*n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d EXTENSION_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); break; } unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3], p + 1, 2*n); if (unit == NULL) return -ENOMEM; memcpy(unit->guid, &buffer[4], 16); unit->extension.bNumControls = buffer[20]; memcpy(unit->baSourceID, &buffer[22], p); unit->extension.bControlSize = buffer[22+p]; unit->extension.bmControls = (u8 *)unit + sizeof(*unit); unit->extension.bmControlsType = (u8 *)unit + sizeof(*unit) + n; memcpy(unit->extension.bmControls, &buffer[23+p], 2*n); uvc_entity_set_name(dev, unit, "Extension", buffer[24+p+2*n]); list_add_tail(&unit->list, &dev->entities); handled = 1; break; } return handled; } static int uvc_parse_standard_control(struct uvc_device *dev, const unsigned char *buffer, int buflen) { struct usb_device *udev = dev->udev; struct uvc_entity *unit, *term; struct usb_interface *intf; struct usb_host_interface *alts = dev->intf->cur_altsetting; unsigned int i, n, p, len; const char *type_name; u16 type; switch (buffer[2]) { case UVC_VC_HEADER: n = buflen >= 12 ? buffer[11] : 0; if (buflen < 12 + n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d HEADER error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } dev->uvc_version = get_unaligned_le16(&buffer[3]); dev->clock_frequency = get_unaligned_le32(&buffer[7]); /* Parse all USB Video Streaming interfaces. */ for (i = 0; i < n; ++i) { intf = usb_ifnum_to_if(udev, buffer[12+i]); if (intf == NULL) { uvc_dbg(dev, DESCR, "device %d interface %d doesn't exists\n", udev->devnum, i); continue; } uvc_parse_streaming(dev, intf); } break; case UVC_VC_INPUT_TERMINAL: if (buflen < 8) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d INPUT_TERMINAL error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* * Reject invalid terminal types that would cause issues: * * - The high byte must be non-zero, otherwise it would be * confused with a unit. * * - Bit 15 must be 0, as we use it internally as a terminal * direction flag. * * Other unknown types are accepted. */ type = get_unaligned_le16(&buffer[4]); if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d INPUT_TERMINAL %d has invalid type 0x%04x, skipping\n", udev->devnum, alts->desc.bInterfaceNumber, buffer[3], type); return 0; } n = 0; p = 0; len = 8; if (type == UVC_ITT_CAMERA) { n = buflen >= 15 ? buffer[14] : 0; len = 15; } else if (type == UVC_ITT_MEDIA_TRANSPORT_INPUT) { n = buflen >= 9 ? 
buffer[8] : 0; p = buflen >= 10 + n ? buffer[9+n] : 0; len = 10; } if (buflen < len + n + p) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d INPUT_TERMINAL error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3], 1, n + p); if (term == NULL) return -ENOMEM; if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) { term->camera.bControlSize = n; term->camera.bmControls = (u8 *)term + sizeof(*term); term->camera.wObjectiveFocalLengthMin = get_unaligned_le16(&buffer[8]); term->camera.wObjectiveFocalLengthMax = get_unaligned_le16(&buffer[10]); term->camera.wOcularFocalLength = get_unaligned_le16(&buffer[12]); memcpy(term->camera.bmControls, &buffer[15], n); } else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) { term->media.bControlSize = n; term->media.bmControls = (u8 *)term + sizeof(*term); term->media.bTransportModeSize = p; term->media.bmTransportModes = (u8 *)term + sizeof(*term) + n; memcpy(term->media.bmControls, &buffer[9], n); memcpy(term->media.bmTransportModes, &buffer[10+n], p); } if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) type_name = "Camera"; else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) type_name = "Media"; else type_name = "Input"; uvc_entity_set_name(dev, term, type_name, buffer[7]); list_add_tail(&term->list, &dev->entities); break; case UVC_VC_OUTPUT_TERMINAL: if (buflen < 9) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d OUTPUT_TERMINAL error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* * Make sure the terminal type MSB is not null, otherwise it * could be confused with a unit. */ type = get_unaligned_le16(&buffer[4]); if ((type & 0xff00) == 0) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d OUTPUT_TERMINAL %d has invalid type 0x%04x, skipping\n", udev->devnum, alts->desc.bInterfaceNumber, buffer[3], type); return 0; } term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3], 1, 0); if (term == NULL) return -ENOMEM; memcpy(term->baSourceID, &buffer[7], 1); uvc_entity_set_name(dev, term, "Output", buffer[8]); list_add_tail(&term->list, &dev->entities); break; case UVC_VC_SELECTOR_UNIT: p = buflen >= 5 ? buffer[4] : 0; if (buflen < 5 || buflen < 6 + p) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d SELECTOR_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0); if (unit == NULL) return -ENOMEM; memcpy(unit->baSourceID, &buffer[5], p); uvc_entity_set_name(dev, unit, "Selector", buffer[5+p]); list_add_tail(&unit->list, &dev->entities); break; case UVC_VC_PROCESSING_UNIT: n = buflen >= 8 ? buffer[7] : 0; p = dev->uvc_version >= 0x0110 ? 10 : 9; if (buflen < p + n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d PROCESSING_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n); if (unit == NULL) return -ENOMEM; memcpy(unit->baSourceID, &buffer[4], 1); unit->processing.wMaxMultiplier = get_unaligned_le16(&buffer[5]); unit->processing.bControlSize = buffer[7]; unit->processing.bmControls = (u8 *)unit + sizeof(*unit); memcpy(unit->processing.bmControls, &buffer[8], n); if (dev->uvc_version >= 0x0110) unit->processing.bmVideoStandards = buffer[9+n]; uvc_entity_set_name(dev, unit, "Processing", buffer[8+n]); list_add_tail(&unit->list, &dev->entities); break; case UVC_VC_EXTENSION_UNIT: p = buflen >= 22 ? buffer[21] : 0; n = buflen >= 24 + p ? 
buffer[22+p] : 0; if (buflen < 24 + p + n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d EXTENSION_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n); if (unit == NULL) return -ENOMEM; memcpy(unit->guid, &buffer[4], 16); unit->extension.bNumControls = buffer[20]; memcpy(unit->baSourceID, &buffer[22], p); unit->extension.bControlSize = buffer[22+p]; unit->extension.bmControls = (u8 *)unit + sizeof(*unit); memcpy(unit->extension.bmControls, &buffer[23+p], n); uvc_entity_set_name(dev, unit, "Extension", buffer[23+p+n]); list_add_tail(&unit->list, &dev->entities); break; default: uvc_dbg(dev, DESCR, "Found an unknown CS_INTERFACE descriptor (%u)\n", buffer[2]); break; } return 0; } static int uvc_parse_control(struct uvc_device *dev) { struct usb_host_interface *alts = dev->intf->cur_altsetting; const unsigned char *buffer = alts->extra; int buflen = alts->extralen; int ret; /* * Parse the default alternate setting only, as the UVC specification * defines a single alternate setting, the default alternate setting * zero. */ while (buflen > 2) { if (uvc_parse_vendor_control(dev, buffer, buflen) || buffer[1] != USB_DT_CS_INTERFACE) goto next_descriptor; ret = uvc_parse_standard_control(dev, buffer, buflen); if (ret < 0) return ret; next_descriptor: buflen -= buffer[0]; buffer += buffer[0]; } /* * Check if the optional status endpoint is present. Built-in iSight * webcams have an interrupt endpoint but spit proprietary data that * don't conform to the UVC status endpoint messages. Don't try to * handle the interrupt endpoint for those cameras. */ if (alts->desc.bNumEndpoints == 1 && !(dev->quirks & UVC_QUIRK_BUILTIN_ISIGHT)) { struct usb_host_endpoint *ep = &alts->endpoint[0]; struct usb_endpoint_descriptor *desc = &ep->desc; if (usb_endpoint_is_int_in(desc) && le16_to_cpu(desc->wMaxPacketSize) >= 8 && desc->bInterval != 0) { uvc_dbg(dev, DESCR, "Found a Status endpoint (addr %02x)\n", desc->bEndpointAddress); dev->int_ep = ep; } } return 0; } /* ----------------------------------------------------------------------------- * Privacy GPIO */ static void uvc_gpio_event(struct uvc_device *dev) { struct uvc_entity *unit = dev->gpio_unit; struct uvc_video_chain *chain; u8 new_val; if (!unit) return; new_val = gpiod_get_value_cansleep(unit->gpio.gpio_privacy); /* GPIO entities are always on the first chain. 
*/ chain = list_first_entry(&dev->chains, struct uvc_video_chain, list); uvc_ctrl_status_event(chain, unit->controls, &new_val); } static int uvc_gpio_get_cur(struct uvc_device *dev, struct uvc_entity *entity, u8 cs, void *data, u16 size) { if (cs != UVC_CT_PRIVACY_CONTROL || size < 1) return -EINVAL; *(u8 *)data = gpiod_get_value_cansleep(entity->gpio.gpio_privacy); return 0; } static int uvc_gpio_get_info(struct uvc_device *dev, struct uvc_entity *entity, u8 cs, u8 *caps) { if (cs != UVC_CT_PRIVACY_CONTROL) return -EINVAL; *caps = UVC_CONTROL_CAP_GET | UVC_CONTROL_CAP_AUTOUPDATE; return 0; } static irqreturn_t uvc_gpio_irq(int irq, void *data) { struct uvc_device *dev = data; uvc_gpio_event(dev); return IRQ_HANDLED; } static int uvc_gpio_parse(struct uvc_device *dev) { struct uvc_entity *unit; struct gpio_desc *gpio_privacy; int irq; gpio_privacy = devm_gpiod_get_optional(&dev->intf->dev, "privacy", GPIOD_IN); if (IS_ERR_OR_NULL(gpio_privacy)) return PTR_ERR_OR_ZERO(gpio_privacy); irq = gpiod_to_irq(gpio_privacy); if (irq < 0) return dev_err_probe(&dev->intf->dev, irq, "No IRQ for privacy GPIO\n"); unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1); if (!unit) return -ENOMEM; unit->gpio.gpio_privacy = gpio_privacy; unit->gpio.irq = irq; unit->gpio.bControlSize = 1; unit->gpio.bmControls = (u8 *)unit + sizeof(*unit); unit->gpio.bmControls[0] = 1; unit->get_cur = uvc_gpio_get_cur; unit->get_info = uvc_gpio_get_info; strscpy(unit->name, "GPIO", sizeof(unit->name)); list_add_tail(&unit->list, &dev->entities); dev->gpio_unit = unit; return 0; } static int uvc_gpio_init_irq(struct uvc_device *dev) { struct uvc_entity *unit = dev->gpio_unit; int ret; if (!unit || unit->gpio.irq < 0) return 0; ret = request_threaded_irq(unit->gpio.irq, NULL, uvc_gpio_irq, IRQF_ONESHOT | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, "uvc_privacy_gpio", dev); unit->gpio.initialized = !ret; return ret; } static void uvc_gpio_deinit(struct uvc_device *dev) { if (!dev->gpio_unit || !dev->gpio_unit->gpio.initialized) return; free_irq(dev->gpio_unit->gpio.irq, dev); } /* ------------------------------------------------------------------------ * UVC device scan */ /* * Scan the UVC descriptors to locate a chain starting at an Output Terminal * and containing the following units: * * - one or more Output Terminals (USB Streaming or Display) * - zero or one Processing Unit * - zero, one or more single-input Selector Units * - zero or one multiple-input Selector Units, provided all inputs are * connected to input terminals * - zero, one or mode single-input Extension Units * - one or more Input Terminals (Camera, External or USB Streaming) * * The terminal and units must match on of the following structures: * * ITT_*(0) -> +---------+ +---------+ +---------+ -> TT_STREAMING(0) * ... | SU{0,1} | -> | PU{0,1} | -> | XU{0,n} | ... * ITT_*(n) -> +---------+ +---------+ +---------+ -> TT_STREAMING(n) * * +---------+ +---------+ -> OTT_*(0) * TT_STREAMING -> | PU{0,1} | -> | XU{0,n} | ... * +---------+ +---------+ -> OTT_*(n) * * The Processing Unit and Extension Units can be in any order. Additional * Extension Units connected to the main chain as single-unit branches are * also supported. Single-input Selector Units are ignored. 
*/ static int uvc_scan_chain_entity(struct uvc_video_chain *chain, struct uvc_entity *entity) { switch (UVC_ENTITY_TYPE(entity)) { case UVC_VC_EXTENSION_UNIT: uvc_dbg_cont(PROBE, " <- XU %d", entity->id); if (entity->bNrInPins != 1) { uvc_dbg(chain->dev, DESCR, "Extension unit %d has more than 1 input pin\n", entity->id); return -1; } break; case UVC_VC_PROCESSING_UNIT: uvc_dbg_cont(PROBE, " <- PU %d", entity->id); if (chain->processing != NULL) { uvc_dbg(chain->dev, DESCR, "Found multiple Processing Units in chain\n"); return -1; } chain->processing = entity; break; case UVC_VC_SELECTOR_UNIT: uvc_dbg_cont(PROBE, " <- SU %d", entity->id); /* Single-input selector units are ignored. */ if (entity->bNrInPins == 1) break; if (chain->selector != NULL) { uvc_dbg(chain->dev, DESCR, "Found multiple Selector Units in chain\n"); return -1; } chain->selector = entity; break; case UVC_ITT_VENDOR_SPECIFIC: case UVC_ITT_CAMERA: case UVC_ITT_MEDIA_TRANSPORT_INPUT: uvc_dbg_cont(PROBE, " <- IT %d\n", entity->id); break; case UVC_OTT_VENDOR_SPECIFIC: case UVC_OTT_DISPLAY: case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: uvc_dbg_cont(PROBE, " OT %d", entity->id); break; case UVC_TT_STREAMING: if (UVC_ENTITY_IS_ITERM(entity)) uvc_dbg_cont(PROBE, " <- IT %d\n", entity->id); else uvc_dbg_cont(PROBE, " OT %d", entity->id); break; default: uvc_dbg(chain->dev, DESCR, "Unsupported entity type 0x%04x found in chain\n", UVC_ENTITY_TYPE(entity)); return -1; } list_add_tail(&entity->chain, &chain->entities); return 0; } static int uvc_scan_chain_forward(struct uvc_video_chain *chain, struct uvc_entity *entity, struct uvc_entity *prev) { struct uvc_entity *forward; int found; /* Forward scan */ forward = NULL; found = 0; while (1) { forward = uvc_entity_by_reference(chain->dev, entity->id, forward); if (forward == NULL) break; if (forward == prev) continue; if (forward->chain.next || forward->chain.prev) { uvc_dbg(chain->dev, DESCR, "Found reference to entity %d already in chain\n", forward->id); return -EINVAL; } switch (UVC_ENTITY_TYPE(forward)) { case UVC_VC_EXTENSION_UNIT: if (forward->bNrInPins != 1) { uvc_dbg(chain->dev, DESCR, "Extension unit %d has more than 1 input pin\n", forward->id); return -EINVAL; } /* * Some devices reference an output terminal as the * source of extension units. This is incorrect, as * output terminals only have an input pin, and thus * can't be connected to any entity in the forward * direction. The resulting topology would cause issues * when registering the media controller graph. To * avoid this problem, connect the extension unit to * the source of the output terminal instead. 
*/ if (UVC_ENTITY_IS_OTERM(entity)) { struct uvc_entity *source; source = uvc_entity_by_id(chain->dev, entity->baSourceID[0]); if (!source) { uvc_dbg(chain->dev, DESCR, "Can't connect extension unit %u in chain\n", forward->id); break; } forward->baSourceID[0] = source->id; } list_add_tail(&forward->chain, &chain->entities); if (!found) uvc_dbg_cont(PROBE, " (->"); uvc_dbg_cont(PROBE, " XU %d", forward->id); found = 1; break; case UVC_OTT_VENDOR_SPECIFIC: case UVC_OTT_DISPLAY: case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: case UVC_TT_STREAMING: if (UVC_ENTITY_IS_ITERM(forward)) { uvc_dbg(chain->dev, DESCR, "Unsupported input terminal %u\n", forward->id); return -EINVAL; } if (UVC_ENTITY_IS_OTERM(entity)) { uvc_dbg(chain->dev, DESCR, "Unsupported connection between output terminals %u and %u\n", entity->id, forward->id); break; } list_add_tail(&forward->chain, &chain->entities); if (!found) uvc_dbg_cont(PROBE, " (->"); uvc_dbg_cont(PROBE, " OT %d", forward->id); found = 1; break; } } if (found) uvc_dbg_cont(PROBE, ")"); return 0; } static int uvc_scan_chain_backward(struct uvc_video_chain *chain, struct uvc_entity **_entity) { struct uvc_entity *entity = *_entity; struct uvc_entity *term; int id = -EINVAL, i; switch (UVC_ENTITY_TYPE(entity)) { case UVC_VC_EXTENSION_UNIT: case UVC_VC_PROCESSING_UNIT: id = entity->baSourceID[0]; break; case UVC_VC_SELECTOR_UNIT: /* Single-input selector units are ignored. */ if (entity->bNrInPins == 1) { id = entity->baSourceID[0]; break; } uvc_dbg_cont(PROBE, " <- IT"); chain->selector = entity; for (i = 0; i < entity->bNrInPins; ++i) { id = entity->baSourceID[i]; term = uvc_entity_by_id(chain->dev, id); if (term == NULL || !UVC_ENTITY_IS_ITERM(term)) { uvc_dbg(chain->dev, DESCR, "Selector unit %d input %d isn't connected to an input terminal\n", entity->id, i); return -1; } if (term->chain.next || term->chain.prev) { uvc_dbg(chain->dev, DESCR, "Found reference to entity %d already in chain\n", term->id); return -EINVAL; } uvc_dbg_cont(PROBE, " %d", term->id); list_add_tail(&term->chain, &chain->entities); uvc_scan_chain_forward(chain, term, entity); } uvc_dbg_cont(PROBE, "\n"); id = 0; break; case UVC_ITT_VENDOR_SPECIFIC: case UVC_ITT_CAMERA: case UVC_ITT_MEDIA_TRANSPORT_INPUT: case UVC_OTT_VENDOR_SPECIFIC: case UVC_OTT_DISPLAY: case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: case UVC_TT_STREAMING: id = UVC_ENTITY_IS_OTERM(entity) ? 
entity->baSourceID[0] : 0; break; } if (id <= 0) { *_entity = NULL; return id; } entity = uvc_entity_by_id(chain->dev, id); if (entity == NULL) { uvc_dbg(chain->dev, DESCR, "Found reference to unknown entity %d\n", id); return -EINVAL; } *_entity = entity; return 0; } static int uvc_scan_chain(struct uvc_video_chain *chain, struct uvc_entity *term) { struct uvc_entity *entity, *prev; uvc_dbg(chain->dev, PROBE, "Scanning UVC chain:"); entity = term; prev = NULL; while (entity != NULL) { /* Entity must not be part of an existing chain */ if (entity->chain.next || entity->chain.prev) { uvc_dbg(chain->dev, DESCR, "Found reference to entity %d already in chain\n", entity->id); return -EINVAL; } /* Process entity */ if (uvc_scan_chain_entity(chain, entity) < 0) return -EINVAL; /* Forward scan */ if (uvc_scan_chain_forward(chain, entity, prev) < 0) return -EINVAL; /* Backward scan */ prev = entity; if (uvc_scan_chain_backward(chain, &entity) < 0) return -EINVAL; } return 0; } static unsigned int uvc_print_terms(struct list_head *terms, u16 dir, char *buffer) { struct uvc_entity *term; unsigned int nterms = 0; char *p = buffer; list_for_each_entry(term, terms, chain) { if (!UVC_ENTITY_IS_TERM(term) || UVC_TERM_DIRECTION(term) != dir) continue; if (nterms) p += sprintf(p, ","); if (++nterms >= 4) { p += sprintf(p, "..."); break; } p += sprintf(p, "%u", term->id); } return p - buffer; } static const char *uvc_print_chain(struct uvc_video_chain *chain) { static char buffer[43]; char *p = buffer; p += uvc_print_terms(&chain->entities, UVC_TERM_INPUT, p); p += sprintf(p, " -> "); uvc_print_terms(&chain->entities, UVC_TERM_OUTPUT, p); return buffer; } static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev) { struct uvc_video_chain *chain; chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (chain == NULL) return NULL; INIT_LIST_HEAD(&chain->entities); mutex_init(&chain->ctrl_mutex); chain->dev = dev; v4l2_prio_init(&chain->prio); return chain; } /* * Fallback heuristic for devices that don't connect units and terminals in a * valid chain. * * Some devices have invalid baSourceID references, causing uvc_scan_chain() * to fail, but if we just take the entities we can find and put them together * in the most sensible chain we can think of, turns out they do work anyway. * Note: This heuristic assumes there is a single chain. * * At the time of writing, devices known to have such a broken chain are * - Acer Integrated Camera (5986:055a) * - Realtek rtl157a7 (0bda:57a7) */ static int uvc_scan_fallback(struct uvc_device *dev) { struct uvc_video_chain *chain; struct uvc_entity *iterm = NULL; struct uvc_entity *oterm = NULL; struct uvc_entity *entity; struct uvc_entity *prev; /* * Start by locating the input and output terminals. We only support * devices with exactly one of each for now. */ list_for_each_entry(entity, &dev->entities, list) { if (UVC_ENTITY_IS_ITERM(entity)) { if (iterm) return -EINVAL; iterm = entity; } if (UVC_ENTITY_IS_OTERM(entity)) { if (oterm) return -EINVAL; oterm = entity; } } if (iterm == NULL || oterm == NULL) return -EINVAL; /* Allocate the chain and fill it. */ chain = uvc_alloc_chain(dev); if (chain == NULL) return -ENOMEM; if (uvc_scan_chain_entity(chain, oterm) < 0) goto error; prev = oterm; /* * Add all Processing and Extension Units with two pads. The order * doesn't matter much, use reverse list traversal to connect units in * UVC descriptor order as we build the chain from output to input. 
This * leads to units appearing in the order meant by the manufacturer for * the cameras known to require this heuristic. */ list_for_each_entry_reverse(entity, &dev->entities, list) { if (entity->type != UVC_VC_PROCESSING_UNIT && entity->type != UVC_VC_EXTENSION_UNIT) continue; if (entity->num_pads != 2) continue; if (uvc_scan_chain_entity(chain, entity) < 0) goto error; prev->baSourceID[0] = entity->id; prev = entity; } if (uvc_scan_chain_entity(chain, iterm) < 0) goto error; prev->baSourceID[0] = iterm->id; list_add_tail(&chain->list, &dev->chains); uvc_dbg(dev, PROBE, "Found a video chain by fallback heuristic (%s)\n", uvc_print_chain(chain)); return 0; error: kfree(chain); return -EINVAL; } /* * Scan the device for video chains and register video devices. * * Chains are scanned starting at their output terminals and walked backwards. */ static int uvc_scan_device(struct uvc_device *dev) { struct uvc_video_chain *chain; struct uvc_entity *term; list_for_each_entry(term, &dev->entities, list) { if (!UVC_ENTITY_IS_OTERM(term)) continue; /* * If the terminal is already included in a chain, skip it. * This can happen for chains that have multiple output * terminals, where all output terminals beside the first one * will be inserted in the chain in forward scans. */ if (term->chain.next || term->chain.prev) continue; chain = uvc_alloc_chain(dev); if (chain == NULL) return -ENOMEM; term->flags |= UVC_ENTITY_FLAG_DEFAULT; if (uvc_scan_chain(chain, term) < 0) { kfree(chain); continue; } uvc_dbg(dev, PROBE, "Found a valid video chain (%s)\n", uvc_print_chain(chain)); list_add_tail(&chain->list, &dev->chains); } if (list_empty(&dev->chains)) uvc_scan_fallback(dev); if (list_empty(&dev->chains)) { dev_info(&dev->udev->dev, "No valid video chain found.\n"); return -1; } /* Add GPIO entity to the first chain. */ if (dev->gpio_unit) { chain = list_first_entry(&dev->chains, struct uvc_video_chain, list); list_add_tail(&dev->gpio_unit->chain, &chain->entities); } return 0; } /* ------------------------------------------------------------------------ * Video device registration and unregistration */ /* * Delete the UVC device. * * Called by the kernel when the last reference to the uvc_device structure * is released. * * As this function is called after or during disconnect(), all URBs have * already been cancelled by the USB core. There is no need to kill the * interrupt URB manually. */ static void uvc_delete(struct kref *kref) { struct uvc_device *dev = container_of(kref, struct uvc_device, ref); struct list_head *p, *n; uvc_status_cleanup(dev); uvc_ctrl_cleanup_device(dev); usb_put_intf(dev->intf); usb_put_dev(dev->udev); #ifdef CONFIG_MEDIA_CONTROLLER media_device_cleanup(&dev->mdev); #endif list_for_each_safe(p, n, &dev->chains) { struct uvc_video_chain *chain; chain = list_entry(p, struct uvc_video_chain, list); kfree(chain); } list_for_each_safe(p, n, &dev->entities) { struct uvc_entity *entity; entity = list_entry(p, struct uvc_entity, list); #ifdef CONFIG_MEDIA_CONTROLLER uvc_mc_cleanup_entity(entity); #endif kfree(entity); } list_for_each_safe(p, n, &dev->streams) { struct uvc_streaming *streaming; streaming = list_entry(p, struct uvc_streaming, list); usb_driver_release_interface(&uvc_driver, streaming->intf); uvc_stream_delete(streaming); } kfree(dev); } static void uvc_release(struct video_device *vdev) { struct uvc_streaming *stream = video_get_drvdata(vdev); struct uvc_device *dev = stream->dev; kref_put(&dev->ref, uvc_delete); } /* * Unregister the video devices. 
*/ static void uvc_unregister_video(struct uvc_device *dev) { struct uvc_streaming *stream; uvc_gpio_deinit(dev); list_for_each_entry(stream, &dev->streams, list) { /* Nothing to do here, continue. */ if (!video_is_registered(&stream->vdev)) continue; /* * For stream->vdev we follow the same logic as: * vb2_video_unregister_device(). */ /* 1. Take a reference to vdev */ get_device(&stream->vdev.dev); /* 2. Ensure that no new ioctls can be called. */ video_unregister_device(&stream->vdev); /* 3. Wait for old ioctls to finish. */ mutex_lock(&stream->mutex); /* 4. Stop streaming. */ uvc_queue_release(&stream->queue); mutex_unlock(&stream->mutex); put_device(&stream->vdev.dev); /* * For stream->meta.vdev we can directly call: * vb2_video_unregister_device(). */ vb2_video_unregister_device(&stream->meta.vdev); /* * Now both vdevs are not streaming and all the ioctls will * return -ENODEV. */ uvc_debugfs_cleanup_stream(stream); } uvc_status_unregister(dev); if (dev->vdev.dev) v4l2_device_unregister(&dev->vdev); #ifdef CONFIG_MEDIA_CONTROLLER if (media_devnode_is_registered(dev->mdev.devnode)) media_device_unregister(&dev->mdev); #endif } int uvc_register_video_device(struct uvc_device *dev, struct uvc_streaming *stream, struct video_device *vdev, struct uvc_video_queue *queue, enum v4l2_buf_type type, const struct v4l2_file_operations *fops, const struct v4l2_ioctl_ops *ioctl_ops) { int ret; /* Initialize the video buffers queue. */ ret = uvc_queue_init(queue, type); if (ret) return ret; /* Register the device with V4L. */ /* * We already hold a reference to dev->udev. The video device will be * unregistered before the reference is released, so we don't need to * get another one. */ vdev->v4l2_dev = &dev->vdev; vdev->fops = fops; vdev->ioctl_ops = ioctl_ops; vdev->release = uvc_release; vdev->prio = &stream->chain->prio; if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT) vdev->vfl_dir = VFL_DIR_TX; else vdev->vfl_dir = VFL_DIR_RX; switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: default: vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; break; case V4L2_BUF_TYPE_META_CAPTURE: vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING; break; } strscpy(vdev->name, dev->name, sizeof(vdev->name)); /* * Set the driver data before calling video_register_device, otherwise * the file open() handler might race us. */ video_set_drvdata(vdev, stream); ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { dev_err(&stream->intf->dev, "Failed to register %s device (%d).\n", v4l2_type_names[type], ret); return ret; } kref_get(&dev->ref); return 0; } static int uvc_register_video(struct uvc_device *dev, struct uvc_streaming *stream) { int ret; /* Initialize the streaming interface with default parameters. */ ret = uvc_video_init(stream); if (ret < 0) { dev_err(&stream->intf->dev, "Failed to initialize the device (%d).\n", ret); return ret; } if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) stream->chain->caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE; else stream->chain->caps |= V4L2_CAP_VIDEO_OUTPUT; uvc_debugfs_init_stream(stream); /* Register the device with V4L. */ return uvc_register_video_device(dev, stream, &stream->vdev, &stream->queue, stream->type, &uvc_fops, &uvc_ioctl_ops); } /* * Register all video devices in all chains. 
*/ static int uvc_register_terms(struct uvc_device *dev, struct uvc_video_chain *chain) { struct uvc_streaming *stream; struct uvc_entity *term; int ret; list_for_each_entry(term, &chain->entities, chain) { if (UVC_ENTITY_TYPE(term) != UVC_TT_STREAMING) continue; stream = uvc_stream_by_id(dev, term->id); if (stream == NULL) { dev_info(&dev->udev->dev, "No streaming interface found for terminal %u.", term->id); continue; } stream->chain = chain; ret = uvc_register_video(dev, stream); if (ret < 0) return ret; /* * Register a metadata node, but ignore a possible failure, * complete registration of video nodes anyway. */ uvc_meta_register(stream); term->vdev = &stream->vdev; } return 0; } static int uvc_register_chains(struct uvc_device *dev) { struct uvc_video_chain *chain; int ret; list_for_each_entry(chain, &dev->chains, list) { ret = uvc_register_terms(dev, chain); if (ret < 0) return ret; #ifdef CONFIG_MEDIA_CONTROLLER ret = uvc_mc_register_entities(chain); if (ret < 0) dev_info(&dev->udev->dev, "Failed to register entities (%d).\n", ret); #endif } return 0; } /* ------------------------------------------------------------------------ * USB probe, disconnect, suspend and resume */ static const struct uvc_device_info uvc_quirk_none = { 0 }; static int uvc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct uvc_device *dev; const struct uvc_device_info *info = (const struct uvc_device_info *)id->driver_info; int function; int ret; /* Allocate memory for the device and initialize it. */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) return -ENOMEM; INIT_LIST_HEAD(&dev->entities); INIT_LIST_HEAD(&dev->chains); INIT_LIST_HEAD(&dev->streams); kref_init(&dev->ref); atomic_set(&dev->nmappings, 0); dev->udev = usb_get_dev(udev); dev->intf = usb_get_intf(intf); dev->intfnum = intf->cur_altsetting->desc.bInterfaceNumber; dev->info = info ? info : &uvc_quirk_none; dev->quirks = uvc_quirks_param == -1 ? dev->info->quirks : uvc_quirks_param; if (id->idVendor && id->idProduct) uvc_dbg(dev, PROBE, "Probing known UVC device %s (%04x:%04x)\n", udev->devpath, id->idVendor, id->idProduct); else uvc_dbg(dev, PROBE, "Probing generic UVC device %s\n", udev->devpath); if (udev->product != NULL) strscpy(dev->name, udev->product, sizeof(dev->name)); else snprintf(dev->name, sizeof(dev->name), "UVC Camera (%04x:%04x)", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); /* * Add iFunction or iInterface to names when available as additional * distinguishers between interfaces. iFunction is prioritized over * iInterface which matches Windows behavior at the point of writing. */ if (intf->intf_assoc && intf->intf_assoc->iFunction != 0) function = intf->intf_assoc->iFunction; else function = intf->cur_altsetting->desc.iInterface; if (function != 0) { size_t len; strlcat(dev->name, ": ", sizeof(dev->name)); len = strlen(dev->name); usb_string(udev, function, dev->name + len, sizeof(dev->name) - len); } /* Initialize the media device. */ #ifdef CONFIG_MEDIA_CONTROLLER dev->mdev.dev = &intf->dev; strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); if (udev->serial) strscpy(dev->mdev.serial, udev->serial, sizeof(dev->mdev.serial)); usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); media_device_init(&dev->mdev); dev->vdev.mdev = &dev->mdev; #endif /* Parse the Video Class control descriptor. 
*/ if (uvc_parse_control(dev) < 0) { uvc_dbg(dev, PROBE, "Unable to parse UVC descriptors\n"); goto error; } /* Parse the associated GPIOs. */ if (uvc_gpio_parse(dev) < 0) { uvc_dbg(dev, PROBE, "Unable to parse UVC GPIOs\n"); goto error; } dev_info(&dev->udev->dev, "Found UVC %u.%02x device %s (%04x:%04x)\n", dev->uvc_version >> 8, dev->uvc_version & 0xff, udev->product ? udev->product : "<unnamed>", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); if (dev->quirks != dev->info->quirks) { dev_info(&dev->udev->dev, "Forcing device quirks to 0x%x by module parameter for testing purpose.\n", dev->quirks); dev_info(&dev->udev->dev, "Please report required quirks to the linux-media mailing list.\n"); } if (dev->info->uvc_version) { dev->uvc_version = dev->info->uvc_version; dev_info(&dev->udev->dev, "Forcing UVC version to %u.%02x\n", dev->uvc_version >> 8, dev->uvc_version & 0xff); } /* Register the V4L2 device. */ if (v4l2_device_register(&intf->dev, &dev->vdev) < 0) goto error; /* Scan the device for video chains. */ if (uvc_scan_device(dev) < 0) goto error; /* Initialize controls. */ if (uvc_ctrl_init_device(dev) < 0) goto error; /* Register video device nodes. */ if (uvc_register_chains(dev) < 0) goto error; #ifdef CONFIG_MEDIA_CONTROLLER /* Register the media device node */ if (media_device_register(&dev->mdev) < 0) goto error; #endif /* Save our data pointer in the interface data. */ usb_set_intfdata(intf, dev); /* Initialize the interrupt URB. */ ret = uvc_status_init(dev); if (ret < 0) { dev_info(&dev->udev->dev, "Unable to initialize the status endpoint (%d), status interrupt will not be supported.\n", ret); } ret = uvc_gpio_init_irq(dev); if (ret < 0) { dev_err(&dev->udev->dev, "Unable to request privacy GPIO IRQ (%d)\n", ret); goto error; } if (dev->quirks & UVC_QUIRK_NO_RESET_RESUME) udev->quirks &= ~USB_QUIRK_RESET_RESUME; if (!(dev->quirks & UVC_QUIRK_DISABLE_AUTOSUSPEND)) usb_enable_autosuspend(udev); uvc_dbg(dev, PROBE, "UVC device initialized\n"); return 0; error: uvc_unregister_video(dev); kref_put(&dev->ref, uvc_delete); return -ENODEV; } static void uvc_disconnect(struct usb_interface *intf) { struct uvc_device *dev = usb_get_intfdata(intf); /* * Set the USB interface data to NULL. This can be done outside the * lock, as there's no other reader. */ usb_set_intfdata(intf, NULL); if (intf->cur_altsetting->desc.bInterfaceSubClass == UVC_SC_VIDEOSTREAMING) return; uvc_unregister_video(dev); kref_put(&dev->ref, uvc_delete); } static int uvc_suspend(struct usb_interface *intf, pm_message_t message) { struct uvc_device *dev = usb_get_intfdata(intf); struct uvc_streaming *stream; uvc_dbg(dev, SUSPEND, "Suspending interface %u\n", intf->cur_altsetting->desc.bInterfaceNumber); /* Controls are cached on the fly so they don't need to be saved. 
*/ if (intf->cur_altsetting->desc.bInterfaceSubClass == UVC_SC_VIDEOCONTROL) { uvc_status_suspend(dev); return 0; } list_for_each_entry(stream, &dev->streams, list) { if (stream->intf == intf) return uvc_video_suspend(stream); } uvc_dbg(dev, SUSPEND, "Suspend: video streaming USB interface mismatch\n"); return -EINVAL; } static int __uvc_resume(struct usb_interface *intf, int reset) { struct uvc_device *dev = usb_get_intfdata(intf); struct uvc_streaming *stream; int ret = 0; uvc_dbg(dev, SUSPEND, "Resuming interface %u\n", intf->cur_altsetting->desc.bInterfaceNumber); if (intf->cur_altsetting->desc.bInterfaceSubClass == UVC_SC_VIDEOCONTROL) { if (reset) { ret = uvc_ctrl_restore_values(dev); if (ret < 0) return ret; } return uvc_status_resume(dev); } list_for_each_entry(stream, &dev->streams, list) { if (stream->intf == intf) { ret = uvc_video_resume(stream, reset); if (ret < 0) uvc_queue_streamoff(&stream->queue, stream->queue.queue.type); return ret; } } uvc_dbg(dev, SUSPEND, "Resume: video streaming USB interface mismatch\n"); return -EINVAL; } static int uvc_resume(struct usb_interface *intf) { return __uvc_resume(intf, 0); } static int uvc_reset_resume(struct usb_interface *intf) { return __uvc_resume(intf, 1); } /* ------------------------------------------------------------------------ * Module parameters */ static int uvc_clock_param_get(char *buffer, const struct kernel_param *kp) { if (uvc_clock_param == CLOCK_MONOTONIC) return sprintf(buffer, "CLOCK_MONOTONIC"); else return sprintf(buffer, "CLOCK_REALTIME"); } static int uvc_clock_param_set(const char *val, const struct kernel_param *kp) { if (strncasecmp(val, "clock_", strlen("clock_")) == 0) val += strlen("clock_"); if (strcasecmp(val, "monotonic") == 0) uvc_clock_param = CLOCK_MONOTONIC; else if (strcasecmp(val, "realtime") == 0) uvc_clock_param = CLOCK_REALTIME; else return -EINVAL; return 0; } module_param_call(clock, uvc_clock_param_set, uvc_clock_param_get, &uvc_clock_param, 0644); MODULE_PARM_DESC(clock, "Video buffers timestamp clock"); module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, 0644); MODULE_PARM_DESC(hwtimestamps, "Use hardware timestamps"); static int param_set_nodrop(const char *val, const struct kernel_param *kp) { pr_warn_once("uvcvideo: " DEPRECATED "nodrop parameter will be eventually removed.\n"); return param_set_bool(val, kp); } static const struct kernel_param_ops param_ops_nodrop = { .set = param_set_nodrop, .get = param_get_uint, }; param_check_uint(nodrop, &uvc_no_drop_param); module_param_cb(nodrop, ¶m_ops_nodrop, &uvc_no_drop_param, 0644); __MODULE_PARM_TYPE(nodrop, "uint"); MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames"); module_param_named(quirks, uvc_quirks_param, uint, 0644); MODULE_PARM_DESC(quirks, "Forced device quirks"); module_param_named(trace, uvc_dbg_param, uint, 0644); MODULE_PARM_DESC(trace, "Trace level bitmask"); module_param_named(timeout, uvc_timeout_param, uint, 0644); MODULE_PARM_DESC(timeout, "Streaming control requests timeout"); /* ------------------------------------------------------------------------ * Driver initialization and cleanup */ static const struct uvc_device_info uvc_quirk_probe_minmax = { .quirks = UVC_QUIRK_PROBE_MINMAX, }; static const struct uvc_device_info uvc_quirk_fix_bandwidth = { .quirks = UVC_QUIRK_FIX_BANDWIDTH, }; static const struct uvc_device_info uvc_quirk_probe_def = { .quirks = UVC_QUIRK_PROBE_DEF, }; static const struct uvc_device_info uvc_quirk_stream_no_fid = { .quirks = UVC_QUIRK_STREAM_NO_FID, }; static const struct 
uvc_device_info uvc_quirk_force_y8 = { .quirks = UVC_QUIRK_FORCE_Y8, }; #define UVC_INFO_QUIRK(q) (kernel_ulong_t)&(struct uvc_device_info){.quirks = q} #define UVC_INFO_META(m) (kernel_ulong_t)&(struct uvc_device_info) \ {.meta_format = m} /* * The Logitech cameras listed below have their interface class set to * VENDOR_SPEC because they don't announce themselves as UVC devices, even * though they are compliant. * * Sort these by vendor/product ID. */ static const struct usb_device_id uvc_ids[] = { /* Quanta ACER HD User Facing */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0408, .idProduct = 0x4033, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = UVC_PC_PROTOCOL_15, .driver_info = (kernel_ulong_t)&(const struct uvc_device_info){ .uvc_version = 0x010a, } }, /* Quanta ACER HD User Facing */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0408, .idProduct = 0x4035, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = UVC_PC_PROTOCOL_15, .driver_info = (kernel_ulong_t)&(const struct uvc_device_info){ .uvc_version = 0x010a, } }, /* LogiLink Wireless Webcam */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0416, .idProduct = 0xa91a, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Genius eFace 2025 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0458, .idProduct = 0x706e, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Microsoft Lifecam NX-6000 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x045e, .idProduct = 0x00f8, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Microsoft Lifecam NX-3000 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x045e, .idProduct = 0x0721, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Microsoft Lifecam VX-7000 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x045e, .idProduct = 0x0723, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Logitech, Webcam C910 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x0821, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)}, /* Logitech, Webcam B910 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x0823, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)}, /* Logitech Quickcam Fusion */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c1, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech Quickcam Orbit MP */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | 
USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c2, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech Quickcam Pro for Notebook */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c3, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech Quickcam Pro 5000 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c5, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech Quickcam OEM Dell Notebook */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c6, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech Quickcam OEM Cisco VT Camera II */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c7, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech HD Pro Webcam C920 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x082d, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT | UVC_QUIRK_INVALID_DEVICE_SOF) }, /* Logitech HD Pro Webcam C922 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x085c, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_INVALID_DEVICE_SOF) }, /* Logitech Rally Bar Huddle */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x087c, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) }, /* Logitech Rally Bar */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x089b, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) }, /* Logitech Rally Bar Mini */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08d3, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) }, /* Chicony CNF7129 (Asus EEE 100HE) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x04f2, .idProduct = 0xb071, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTRICT_FRAME_RATE) }, /* Alcor Micro AU3820 (Future Boy PC USB Webcam) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x058f, .idProduct = 0x3820, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Dell XPS m1530 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x2640, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = 
(kernel_ulong_t)&uvc_quirk_probe_def }, /* Dell SP2008WFP Monitor */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x2641, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Dell Alienware X51 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x2643, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Dell Studio Hybrid 140g (OmniVision webcam) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x264a, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Dell XPS M1330 (OmniVision OV7670 webcam) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x7670, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Apple Built-In iSight */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05ac, .idProduct = 0x8501, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_BUILTIN_ISIGHT) }, /* Apple FaceTime HD Camera (Built-In) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05ac, .idProduct = 0x8514, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Apple Built-In iSight via iBridge */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05ac, .idProduct = 0x8600, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Foxlink ("HP Webcam" on HP Mini 5103) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05c8, .idProduct = 0x0403, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* Genesys Logic USB 2.0 PC Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05e3, .idProduct = 0x0505, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Hercules Classic Silver */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x06f8, .idProduct = 0x300c, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* ViMicro Vega */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0ac8, .idProduct = 0x332d, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* ViMicro - Minoru3D */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0ac8, .idProduct = 0x3410, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, 
.driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* ViMicro Venus - Minoru3D */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0ac8, .idProduct = 0x3420, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* Ophir Optronics - SPCAM 620U */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0bd3, .idProduct = 0x0555, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Sonix Technology Co. Ltd. - 292A IPC AR0330 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0c45, .idProduct = 0x6366, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) }, /* MT6227 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0e8d, .idProduct = 0x0004, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_PROBE_DEF) }, /* IMC Networks (Medion Akoya) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x13d3, .idProduct = 0x5103, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* JMicron USB2.0 XGA WebCam */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x152d, .idProduct = 0x0310, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Kurokesu C1 PRO */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x16d0, .idProduct = 0x0ed1, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) }, /* Syntek (HP Spartan) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x5212, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (Samsung Q310) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x5931, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (Packard Bell EasyNote MX52 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x8a12, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (Asus F9SG) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x8a31, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (Asus U3S) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x8a33, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = 
(kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (JAOtech Smart Terminal) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x8a34, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Miricle 307K */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x17dc, .idProduct = 0x0202, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Lenovo Thinkpad SL400/SL500 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x17ef, .idProduct = 0x480b, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Aveo Technology USB 2.0 Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1871, .idProduct = 0x0306, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_PROBE_EXTRAFIELDS) }, /* Aveo Technology USB 2.0 Camera (Tasco USB Microscope) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1871, .idProduct = 0x0516, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Ecamm Pico iMage */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x18cd, .idProduct = 0xcafe, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_EXTRAFIELDS) }, /* Manta MM-353 Plako */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x18ec, .idProduct = 0x3188, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* FSC WebCam V30S */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x18ec, .idProduct = 0x3288, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Arkmicro unbranded */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x18ec, .idProduct = 0x3290, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* The Imaging Source USB CCD cameras */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x199e, .idProduct = 0x8102, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Bodelin ProScopeHR */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_HI | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x19ab, .idProduct = 0x1000, .bcdDevice_hi = 0x0126, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_STATUS_INTERVAL) }, /* MSI StarCam 370i */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1b3b, .idProduct = 0x2951, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Generalplus 
Technology Inc. 808 Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1b3f, .idProduct = 0x2002, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Shenzhen Aoni Electronic Co.,Ltd 2K FHD camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1bcf, .idProduct = 0x0b40, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&(const struct uvc_device_info){ .uvc_version = 0x010a, } }, /* SiGma Micro USB Web Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1c4f, .idProduct = 0x3000, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_IGNORE_SELECTOR_UNIT) }, /* Actions Microelectronics Co. Display capture-UVC05 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1de1, .idProduct = 0xf105, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) }, /* NXP Semiconductors IR VIDEO */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1fc9, .idProduct = 0x009b, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Oculus VR Positional Tracker DK2 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x2833, .idProduct = 0x0201, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_force_y8 }, /* Oculus VR Rift Sensor */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x2833, .idProduct = 0x0211, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_force_y8 }, /* GEO Semiconductor GC6500 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x29fe, .idProduct = 0x4d53, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) }, /* Insta360 Link */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x2e1a, .idProduct = 0x4c01, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) }, /* Intel D410/ASR depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0ad2, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D415/ASRC depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0ad3, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D430/AWG depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0ad4, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, 
.bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel RealSense D4M */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b03, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D435/AWGC depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b07, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D435i depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b3a, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D405 Depth Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b5b, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D455 Depth Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b5c, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D421 Depth Module */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x1155, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Generic USB Video Class */ { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) }, { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) }, {} }; MODULE_DEVICE_TABLE(usb, uvc_ids); static struct usb_driver uvc_driver = { .name = "uvcvideo", .probe = uvc_probe, .disconnect = uvc_disconnect, .suspend = uvc_suspend, .resume = uvc_resume, .reset_resume = uvc_reset_resume, .id_table = uvc_ids, .supports_autosuspend = 1, }; static int __init uvc_init(void) { int ret; uvc_debugfs_init(); ret = usb_register(&uvc_driver); if (ret < 0) { uvc_debugfs_cleanup(); return ret; } return 0; } static void __exit uvc_cleanup(void) { usb_deregister(&uvc_driver); uvc_debugfs_cleanup(); } module_init(uvc_init); module_exit(uvc_cleanup); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION); |
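/*
 * A minimal user-space sketch (not part of the driver) showing how the
 * capabilities chosen in uvc_register_video_device() surface through the
 * V4L2 API. The device path /dev/video0 is an assumption; any UVC-backed
 * video node will do. Build with: cc -o querycap querycap.c
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap = {0};
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
		perror("VIDIOC_QUERYCAP");
		return 1;
	}

	/*
	 * For a UVC capture node, device_caps should contain
	 * V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING, as set above.
	 */
	printf("driver=%s card=%s device_caps=0x%08x\n",
	       (const char *)cap.driver, (const char *)cap.card,
	       cap.device_caps);
	close(fd);
	return 0;
}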
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/syscalls.h>
#include <linux/time_namespace.h>

#include "futex.h"

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
*/ /** * sys_set_robust_list() - Set the robust-futex list head of a task * @head: pointer to the list-head * @len: length of the list-head, as userspace expects */ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, size_t, len) { /* * The kernel knows only one size for now: */ if (unlikely(len != sizeof(*head))) return -EINVAL; current->robust_list = head; return 0; } /** * sys_get_robust_list() - Get the robust-futex list head of a task * @pid: pid of the process [zero for current task] * @head_ptr: pointer to a list-head pointer, the kernel fills it in * @len_ptr: pointer to a length field, the kernel fills in the header size */ SYSCALL_DEFINE3(get_robust_list, int, pid, struct robust_list_head __user * __user *, head_ptr, size_t __user *, len_ptr) { struct robust_list_head __user *head; unsigned long ret; struct task_struct *p; rcu_read_lock(); ret = -ESRCH; if (!pid) p = current; else { p = find_task_by_vpid(pid); if (!p) goto err_unlock; } ret = -EPERM; if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) goto err_unlock; head = p->robust_list; rcu_read_unlock(); if (put_user(sizeof(*head), len_ptr)) return -EFAULT; return put_user(head, head_ptr); err_unlock: rcu_read_unlock(); return ret; } long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { unsigned int flags = futex_to_flags(op); int cmd = op & FUTEX_CMD_MASK; if (flags & FLAGS_CLOCKRT) { if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI && cmd != FUTEX_LOCK_PI2) return -ENOSYS; } switch (cmd) { case FUTEX_WAIT: val3 = FUTEX_BITSET_MATCH_ANY; fallthrough; case FUTEX_WAIT_BITSET: return futex_wait(uaddr, flags, val, timeout, val3); case FUTEX_WAKE: val3 = FUTEX_BITSET_MATCH_ANY; fallthrough; case FUTEX_WAKE_BITSET: return futex_wake(uaddr, flags, val, val3); case FUTEX_REQUEUE: return futex_requeue(uaddr, flags, uaddr2, flags, val, val2, NULL, 0); case FUTEX_CMP_REQUEUE: return futex_requeue(uaddr, flags, uaddr2, flags, val, val2, &val3, 0); case FUTEX_WAKE_OP: return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); case FUTEX_LOCK_PI: flags |= FLAGS_CLOCKRT; fallthrough; case FUTEX_LOCK_PI2: return futex_lock_pi(uaddr, flags, timeout, 0); case FUTEX_UNLOCK_PI: return futex_unlock_pi(uaddr, flags); case FUTEX_TRYLOCK_PI: return futex_lock_pi(uaddr, flags, NULL, 1); case FUTEX_WAIT_REQUEUE_PI: val3 = FUTEX_BITSET_MATCH_ANY; return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, uaddr2); case FUTEX_CMP_REQUEUE_PI: return futex_requeue(uaddr, flags, uaddr2, flags, val, val2, &val3, 1); } return -ENOSYS; } static __always_inline bool futex_cmd_has_timeout(u32 cmd) { switch (cmd) { case FUTEX_WAIT: case FUTEX_LOCK_PI: case FUTEX_LOCK_PI2: case FUTEX_WAIT_BITSET: case FUTEX_WAIT_REQUEUE_PI: return true; } return false; } static __always_inline int futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t) { if (!timespec64_valid(ts)) return -EINVAL; *t = timespec64_to_ktime(*ts); if (cmd == FUTEX_WAIT) *t = ktime_add_safe(ktime_get(), *t); else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME)) *t = timens_ktime_to_host(CLOCK_MONOTONIC, *t); return 0; } SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, const struct __kernel_timespec __user *, utime, u32 __user *, uaddr2, u32, val3) { int ret, cmd = op & FUTEX_CMD_MASK; ktime_t t, *tp = NULL; struct timespec64 ts; if (utime && futex_cmd_has_timeout(cmd)) { if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG)))) return -EFAULT; if 
(get_timespec64(&ts, utime)) return -EFAULT; ret = futex_init_timeout(cmd, op, &ts, &t); if (ret) return ret; tp = &t; } return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); } /** * futex_parse_waitv - Parse a waitv array from userspace * @futexv: Kernel side list of waiters to be filled * @uwaitv: Userspace list to be parsed * @nr_futexes: Length of futexv * @wake: Wake to call when futex is woken * @wake_data: Data for the wake handler * * Return: Error code on failure, 0 on success */ int futex_parse_waitv(struct futex_vector *futexv, struct futex_waitv __user *uwaitv, unsigned int nr_futexes, futex_wake_fn *wake, void *wake_data) { struct futex_waitv aux; unsigned int i; for (i = 0; i < nr_futexes; i++) { unsigned int flags; if (copy_from_user(&aux, &uwaitv[i], sizeof(aux))) return -EFAULT; if ((aux.flags & ~FUTEX2_VALID_MASK) || aux.__reserved) return -EINVAL; flags = futex2_to_flags(aux.flags); if (!futex_flags_valid(flags)) return -EINVAL; if (!futex_validate_input(flags, aux.val)) return -EINVAL; futexv[i].w.flags = flags; futexv[i].w.val = aux.val; futexv[i].w.uaddr = aux.uaddr; futexv[i].q = futex_q_init; futexv[i].q.wake = wake; futexv[i].q.wake_data = wake_data; } return 0; } static int futex2_setup_timeout(struct __kernel_timespec __user *timeout, clockid_t clockid, struct hrtimer_sleeper *to) { int flag_clkid = 0, flag_init = 0; struct timespec64 ts; ktime_t time; int ret; if (!timeout) return 0; if (clockid == CLOCK_REALTIME) { flag_clkid = FLAGS_CLOCKRT; flag_init = FUTEX_CLOCK_REALTIME; } if (clockid != CLOCK_REALTIME && clockid != CLOCK_MONOTONIC) return -EINVAL; if (get_timespec64(&ts, timeout)) return -EFAULT; /* * Since there's no opcode for futex_waitv, use * FUTEX_WAIT_BITSET that uses absolute timeout as well */ ret = futex_init_timeout(FUTEX_WAIT_BITSET, flag_init, &ts, &time); if (ret) return ret; futex_setup_timer(&time, to, flag_clkid, 0); return 0; } static inline void futex2_destroy_timeout(struct hrtimer_sleeper *to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } /** * sys_futex_waitv - Wait on a list of futexes * @waiters: List of futexes to wait on * @nr_futexes: Length of futexv * @flags: Flag for timeout (monotonic/realtime) * @timeout: Optional absolute timeout. * @clockid: Clock to be used for the timeout, realtime or monotonic. * * Given an array of `struct futex_waitv`, wait on each uaddr. The thread wakes * if a futex_wake() is performed at any uaddr. The syscall returns immediately * if any waiter has *uaddr != val. *timeout is an optional timeout value for * the operation. Each waiter has individual flags. The `flags` argument for * the syscall should be used solely for specifying the timeout as realtime, if * needed. Flags for private futexes, sizes, etc. should be used on the * individual flags of each waiter. * * Returns the array index of one of the woken futexes. No further information * is provided: any number of other futexes may also have been woken by the * same event, and if more than one futex was woken, the retrned index may * refer to any one of them. (It is not necessaryily the futex with the * smallest index, nor the one most recently woken, nor...) 
*/ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters, unsigned int, nr_futexes, unsigned int, flags, struct __kernel_timespec __user *, timeout, clockid_t, clockid) { struct hrtimer_sleeper to; struct futex_vector *futexv; int ret; /* This syscall supports no flags for now */ if (flags) return -EINVAL; if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters) return -EINVAL; if (timeout && (ret = futex2_setup_timeout(timeout, clockid, &to))) return ret; futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL); if (!futexv) { ret = -ENOMEM; goto destroy_timer; } ret = futex_parse_waitv(futexv, waiters, nr_futexes, futex_wake_mark, NULL); if (!ret) ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL); kfree(futexv); destroy_timer: if (timeout) futex2_destroy_timeout(&to); return ret; } /* * sys_futex_wake - Wake a number of futexes * @uaddr: Address of the futex(es) to wake * @mask: bitmask * @nr: Number of the futexes to wake * @flags: FUTEX2 flags * * Identical to the traditional FUTEX_WAKE_BITSET op, except it is part of the * futex2 family of calls. */ SYSCALL_DEFINE4(futex_wake, void __user *, uaddr, unsigned long, mask, int, nr, unsigned int, flags) { if (flags & ~FUTEX2_VALID_MASK) return -EINVAL; flags = futex2_to_flags(flags); if (!futex_flags_valid(flags)) return -EINVAL; if (!futex_validate_input(flags, mask)) return -EINVAL; return futex_wake(uaddr, FLAGS_STRICT | flags, nr, mask); } /* * sys_futex_wait - Wait on a futex * @uaddr: Address of the futex to wait on * @val: Value of @uaddr * @mask: bitmask * @flags: FUTEX2 flags * @timeout: Optional absolute timeout * @clockid: Clock to be used for the timeout, realtime or monotonic * * Identical to the traditional FUTEX_WAIT_BITSET op, except it is part of the * futex2 familiy of calls. */ SYSCALL_DEFINE6(futex_wait, void __user *, uaddr, unsigned long, val, unsigned long, mask, unsigned int, flags, struct __kernel_timespec __user *, timeout, clockid_t, clockid) { struct hrtimer_sleeper to; int ret; if (flags & ~FUTEX2_VALID_MASK) return -EINVAL; flags = futex2_to_flags(flags); if (!futex_flags_valid(flags)) return -EINVAL; if (!futex_validate_input(flags, val) || !futex_validate_input(flags, mask)) return -EINVAL; if (timeout && (ret = futex2_setup_timeout(timeout, clockid, &to))) return ret; ret = __futex_wait(uaddr, flags, val, timeout ? &to : NULL, mask); if (timeout) futex2_destroy_timeout(&to); return ret; } /* * sys_futex_requeue - Requeue a waiter from one futex to another * @waiters: array describing the source and destination futex * @flags: unused * @nr_wake: number of futexes to wake * @nr_requeue: number of futexes to requeue * * Identical to the traditional FUTEX_CMP_REQUEUE op, except it is part of the * futex2 family of calls. 
*/ SYSCALL_DEFINE4(futex_requeue, struct futex_waitv __user *, waiters, unsigned int, flags, int, nr_wake, int, nr_requeue) { struct futex_vector futexes[2]; u32 cmpval; int ret; if (flags) return -EINVAL; if (!waiters) return -EINVAL; ret = futex_parse_waitv(futexes, waiters, 2, futex_wake_mark, NULL); if (ret) return ret; cmpval = futexes[0].w.val; return futex_requeue(u64_to_user_ptr(futexes[0].w.uaddr), futexes[0].w.flags, u64_to_user_ptr(futexes[1].w.uaddr), futexes[1].w.flags, nr_wake, nr_requeue, &cmpval, 0); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(set_robust_list, struct compat_robust_list_head __user *, head, compat_size_t, len) { if (unlikely(len != sizeof(*head))) return -EINVAL; current->compat_robust_list = head; return 0; } COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, compat_uptr_t __user *, head_ptr, compat_size_t __user *, len_ptr) { struct compat_robust_list_head __user *head; unsigned long ret; struct task_struct *p; rcu_read_lock(); ret = -ESRCH; if (!pid) p = current; else { p = find_task_by_vpid(pid); if (!p) goto err_unlock; } ret = -EPERM; if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) goto err_unlock; head = p->compat_robust_list; rcu_read_unlock(); if (put_user(sizeof(*head), len_ptr)) return -EFAULT; return put_user(ptr_to_compat(head), head_ptr); err_unlock: rcu_read_unlock(); return ret; } #endif /* CONFIG_COMPAT */ #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, const struct old_timespec32 __user *, utime, u32 __user *, uaddr2, u32, val3) { int ret, cmd = op & FUTEX_CMD_MASK; ktime_t t, *tp = NULL; struct timespec64 ts; if (utime && futex_cmd_has_timeout(cmd)) { if (get_old_timespec32(&ts, utime)) return -EFAULT; ret = futex_init_timeout(cmd, op, &ts, &t); if (ret) return ret; tp = &t; } return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); } #endif /* CONFIG_COMPAT_32BIT_TIME */ |
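/*
 * A user-space sketch (not kernel code) of the FUTEX_WAIT/FUTEX_WAKE
 * pairing dispatched by do_futex() above. There is no glibc wrapper for
 * futex(2), so syscall() is used directly. Purely illustrative: real code
 * would also handle EINTR and spurious wakeups more carefully.
 * Build with: cc -pthread futex_demo.c
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <pthread.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static atomic_uint futex_word;	/* 0 = not yet released, 1 = released */

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *waiter(void *arg)
{
	(void)arg;
	/* FUTEX_WAIT blocks only while *uaddr is still 0, so no wakeup is lost. */
	while (atomic_load(&futex_word) == 0)
		futex((uint32_t *)&futex_word, FUTEX_WAIT, 0);
	puts("waiter: woken");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	sleep(1);					/* let the waiter block */
	atomic_store(&futex_word, 1);			/* publish the new value */
	futex((uint32_t *)&futex_word, FUTEX_WAKE, 1);	/* wake one waiter */
	pthread_join(t, NULL);
	return 0;
}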
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2017 Mellanox Technologies Ltd. All rights reserved.
 */

#include "rxe.h"
#include "rxe_hw_counters.h"

static const struct rdma_stat_desc rxe_counter_descs[] = {
	[RXE_CNT_SENT_PKTS].name		= "sent_pkts",
	[RXE_CNT_RCVD_PKTS].name		= "rcvd_pkts",
	[RXE_CNT_DUP_REQ].name			= "duplicate_request",
	[RXE_CNT_OUT_OF_SEQ_REQ].name		= "out_of_seq_request",
	[RXE_CNT_RCV_RNR].name			= "rcvd_rnr_err",
	[RXE_CNT_SND_RNR].name			= "send_rnr_err",
	[RXE_CNT_RCV_SEQ_ERR].name		= "rcvd_seq_err",
	[RXE_CNT_SENDER_SCHED].name		= "ack_deferred",
	[RXE_CNT_RETRY_EXCEEDED].name		= "retry_exceeded_err",
	[RXE_CNT_RNR_RETRY_EXCEEDED].name	= "retry_rnr_exceeded_err",
	[RXE_CNT_COMP_RETRY].name		= "completer_retry_err",
	[RXE_CNT_SEND_ERR].name			= "send_err",
	[RXE_CNT_LINK_DOWNED].name		= "link_downed",
	[RXE_CNT_RDMA_SEND].name		= "rdma_sends",
	[RXE_CNT_RDMA_RECV].name		= "rdma_recvs",
};

int rxe_ib_get_hw_stats(struct ib_device *ibdev,
			struct rdma_hw_stats *stats,
			u32 port, int index)
{
	struct rxe_dev *dev = to_rdev(ibdev);
	unsigned int cnt;

	if (!port || !stats)
		return -EINVAL;

	for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_descs); cnt++)
		stats->value[cnt] = atomic64_read(&dev->stats_counters[cnt]);

	return ARRAY_SIZE(rxe_counter_descs);
}

struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev,
						 u32 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_descs) != RXE_NUM_OF_COUNTERS);

	return rdma_alloc_hw_stats_struct(rxe_counter_descs,
					  ARRAY_SIZE(rxe_counter_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
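/*
 * The counters described above are exported through the standard RDMA
 * sysfs layout, one read-only file per rdma_stat_desc name. A hedged
 * user-space sketch reading one of them; the device name "rxe0" and port
 * number 1 are assumptions and must match the soft-RoCE device actually
 * configured on the system.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long value;
	FILE *f = fopen("/sys/class/infiniband/rxe0/ports/1/hw_counters/sent_pkts", "r");

	if (!f || fscanf(f, "%llu", &value) != 1) {
		perror("sent_pkts");
		return 1;
	}
	printf("sent_pkts = %llu\n", value);
	fclose(f);
	return 0;
}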
10 10 10 4 10 7 7 7 7 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 | // SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0) /* * Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b * reference implementation, but it has been heavily modified for use in the * kernel. The reference implementation was: * * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under * the terms of the CC0, the OpenSSL Licence, or the Apache Public License * 2.0, at your option. The terms of these licenses can be found at: * * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 * - OpenSSL license : https://www.openssl.org/source/license.html * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 * * More information about BLAKE2 can be found at https://blake2.net. */ #include <linux/unaligned.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <crypto/internal/blake2b.h> #include <crypto/internal/hash.h> static const u8 blake2b_sigma[12][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } }; static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc) { S->t[0] += inc; S->t[1] += (S->t[0] < inc); } #define G(r,i,a,b,c,d) \ do { \ a = a + b + m[blake2b_sigma[r][2*i+0]]; \ d = ror64(d ^ a, 32); \ c = c + d; \ b = ror64(b ^ c, 24); \ a = a + b + m[blake2b_sigma[r][2*i+1]]; \ d = ror64(d ^ a, 16); \ c = c + d; \ b = ror64(b ^ c, 63); \ } while (0) #define ROUND(r) \ do { \ G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \ G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \ G(r,2,v[ 2],v[ 6],v[10],v[14]); \ G(r,3,v[ 3],v[ 7],v[11],v[15]); \ G(r,4,v[ 0],v[ 5],v[10],v[15]); \ G(r,5,v[ 1],v[ 6],v[11],v[12]); \ G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \ G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \ } while (0) static void blake2b_compress_one_generic(struct blake2b_state *S, const u8 block[BLAKE2B_BLOCK_SIZE]) { u64 m[16]; u64 v[16]; size_t i; for (i = 0; i < 16; ++i) m[i] = get_unaligned_le64(block + i * sizeof(m[i])); for (i = 0; i < 8; ++i) v[i] = S->h[i]; v[ 8] = BLAKE2B_IV0; v[ 9] = BLAKE2B_IV1; v[10] = BLAKE2B_IV2; v[11] = BLAKE2B_IV3; v[12] = BLAKE2B_IV4 ^ S->t[0]; v[13] = BLAKE2B_IV5 ^ S->t[1]; v[14] = BLAKE2B_IV6 ^ S->f[0]; v[15] = BLAKE2B_IV7 ^ S->f[1]; ROUND(0); ROUND(1); ROUND(2); ROUND(3); ROUND(4); ROUND(5); ROUND(6); 
ROUND(7); ROUND(8); ROUND(9); ROUND(10); ROUND(11); #ifdef CONFIG_CC_IS_CLANG #pragma nounroll /* https://llvm.org/pr45803 */ #endif for (i = 0; i < 8; ++i) S->h[i] = S->h[i] ^ v[i] ^ v[i + 8]; } #undef G #undef ROUND void blake2b_compress_generic(struct blake2b_state *state, const u8 *block, size_t nblocks, u32 inc) { do { blake2b_increment_counter(state, inc); blake2b_compress_one_generic(state, block); block += BLAKE2B_BLOCK_SIZE; } while (--nblocks); } EXPORT_SYMBOL(blake2b_compress_generic); static int crypto_blake2b_update_generic(struct shash_desc *desc, const u8 *in, unsigned int inlen) { return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic); } static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) { return crypto_blake2b_final(desc, out, blake2b_compress_generic); } #define BLAKE2B_ALG(name, driver_name, digest_size) \ { \ .base.cra_name = name, \ .base.cra_driver_name = driver_name, \ .base.cra_priority = 100, \ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ .base.cra_module = THIS_MODULE, \ .digestsize = digest_size, \ .setkey = crypto_blake2b_setkey, \ .init = crypto_blake2b_init, \ .update = crypto_blake2b_update_generic, \ .final = crypto_blake2b_final_generic, \ .descsize = sizeof(struct blake2b_state), \ } static struct shash_alg blake2b_algs[] = { BLAKE2B_ALG("blake2b-160", "blake2b-160-generic", BLAKE2B_160_HASH_SIZE), BLAKE2B_ALG("blake2b-256", "blake2b-256-generic", BLAKE2B_256_HASH_SIZE), BLAKE2B_ALG("blake2b-384", "blake2b-384-generic", BLAKE2B_384_HASH_SIZE), BLAKE2B_ALG("blake2b-512", "blake2b-512-generic", BLAKE2B_512_HASH_SIZE), }; static int __init blake2b_mod_init(void) { return crypto_register_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); } static void __exit blake2b_mod_fini(void) { crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); } subsys_initcall(blake2b_mod_init); module_exit(blake2b_mod_fini); MODULE_AUTHOR("David Sterba <kdave@kernel.org>"); MODULE_DESCRIPTION("BLAKE2b generic implementation"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("blake2b-160"); MODULE_ALIAS_CRYPTO("blake2b-160-generic"); MODULE_ALIAS_CRYPTO("blake2b-256"); MODULE_ALIAS_CRYPTO("blake2b-256-generic"); MODULE_ALIAS_CRYPTO("blake2b-384"); MODULE_ALIAS_CRYPTO("blake2b-384-generic"); MODULE_ALIAS_CRYPTO("blake2b-512"); MODULE_ALIAS_CRYPTO("blake2b-512-generic"); |
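/*
 * Once blake2b_mod_init() has registered the shash algorithms, user space
 * can reach them through the AF_ALG socket interface. A sketch for
 * "blake2b-512" (the cra_name used above); error handling is trimmed for
 * brevity and AF_ALG support in the running kernel is assumed.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "blake2b-512",
	};
	unsigned char digest[64];	/* BLAKE2B_512_HASH_SIZE */
	const char msg[] = "hello";
	int tfm, op;
	size_t i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, 0);

	send(op, msg, strlen(msg), 0);		/* feed the whole message */
	read(op, digest, sizeof(digest));	/* read back the digest */

	for (i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	putchar('\n');

	close(op);
	close(tfm);
	return 0;
}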
11 65 65 7 143 143 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * 9P Client Definitions * * Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net> */ #ifndef NET_9P_CLIENT_H #define NET_9P_CLIENT_H #include <linux/utsname.h> #include <linux/idr.h> #include <linux/tracepoint-defs.h> /* Number of requests per row */ #define P9_ROW_MAXTAG 255 /** enum p9_proto_versions - 9P protocol versions * @p9_proto_legacy: 9P Legacy mode, pre-9P2000.u * @p9_proto_2000u: 9P2000.u extension * @p9_proto_2000L: 9P2000.L extension */ enum p9_proto_versions { p9_proto_legacy, p9_proto_2000u, p9_proto_2000L, }; /** * enum p9_trans_status - different states of underlying transports * @Connected: transport is connected and healthy * @Disconnected: transport has been disconnected * @Hung: transport is connected by wedged * * This enumeration details the various states a transport * instatiation can be in. */ enum p9_trans_status { Connected, BeginDisconnect, Disconnected, Hung, }; /** * enum p9_req_status_t - status of a request * @REQ_STATUS_ALLOC: request has been allocated but not sent * @REQ_STATUS_UNSENT: request waiting to be sent * @REQ_STATUS_SENT: request sent to server * @REQ_STATUS_RCVD: response received from server * @REQ_STATUS_FLSHD: request has been flushed * @REQ_STATUS_ERROR: request encountered an error on the client side */ enum p9_req_status_t { REQ_STATUS_ALLOC, REQ_STATUS_UNSENT, REQ_STATUS_SENT, REQ_STATUS_RCVD, REQ_STATUS_FLSHD, REQ_STATUS_ERROR, }; /** * struct p9_req_t - request slots * @status: status of this request slot * @t_err: transport error * @wq: wait_queue for the client to block on for this request * @tc: the request fcall structure * @rc: the response fcall structure * @req_list: link for higher level objects to chain requests */ struct p9_req_t { int status; int t_err; refcount_t refcount; wait_queue_head_t wq; struct p9_fcall tc; struct p9_fcall rc; struct list_head req_list; }; /** * struct p9_client - per client instance state * @lock: protect @fids and @reqs * @msize: maximum data size negotiated by protocol * @proto_version: 9P protocol version to use * @trans_mod: module API instantiated with this client * @status: connection state * @trans: tranport instance state and API * @fids: All active FID handles * @reqs: All active requests. 
* @name: node name used as client id * * The client structure is used to keep track of various per-client * state that has been instantiated. */ struct p9_client { spinlock_t lock; unsigned int msize; unsigned char proto_version; struct p9_trans_module *trans_mod; enum p9_trans_status status; void *trans; struct kmem_cache *fcall_cache; union { struct { int rfd; int wfd; } fd; struct { u16 port; bool privport; } tcp; } trans_opts; struct idr fids; struct idr reqs; char name[__NEW_UTS_LEN + 1]; }; /** * struct p9_fid - file system entity handle * @clnt: back pointer to instantiating &p9_client * @fid: numeric identifier for this handle * @mode: current mode of this fid (enum?) * @qid: the &p9_qid server identifier this handle points to * @iounit: the server reported maximum transaction size for this file * @uid: the numeric uid of the local user who owns this handle * @rdir: readdir accounting structure (allocated on demand) * @dlist: per-dentry fid tracking * * TODO: This needs lots of explanation. */ enum fid_source { FID_FROM_OTHER, FID_FROM_INODE, FID_FROM_DENTRY, }; struct p9_fid { struct p9_client *clnt; u32 fid; refcount_t count; int mode; struct p9_qid qid; u32 iounit; kuid_t uid; void *rdir; struct hlist_node dlist; /* list of all fids attached to a dentry */ struct hlist_node ilist; }; /** * struct p9_dirent - directory entry structure * @qid: The p9 server qid for this dirent * @d_off: offset to the next dirent * @d_type: type of file * @d_name: file name */ struct p9_dirent { struct p9_qid qid; u64 d_off; unsigned char d_type; char d_name[256]; }; struct iov_iter; int p9_show_client_options(struct seq_file *m, struct p9_client *clnt); int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb); int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, const char *name); int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name, struct p9_fid *newdirfid, const char *new_name); struct p9_client *p9_client_create(const char *dev_name, char *options); void p9_client_destroy(struct p9_client *clnt); void p9_client_disconnect(struct p9_client *clnt); void p9_client_begin_disconnect(struct p9_client *clnt); struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, const char *uname, kuid_t n_uname, const char *aname); struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, const unsigned char * const *wnames, int clone); int p9_client_open(struct p9_fid *fid, int mode); int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode, char *extension); int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, const char *newname); int p9_client_symlink(struct p9_fid *fid, const char *name, const char *symname, kgid_t gid, struct p9_qid *qid); int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags, u32 mode, kgid_t gid, struct p9_qid *qid); int p9_client_clunk(struct p9_fid *fid); int p9_client_fsync(struct p9_fid *fid, int datasync); int p9_client_remove(struct p9_fid *fid); int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags); int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err); int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err); int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err); struct netfs_io_subrequest; void p9_client_write_subreq(struct netfs_io_subrequest *subreq); int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset); int p9dirent_read(struct 
p9_client *clnt, char *buf, int len, struct p9_dirent *dirent); struct p9_wstat *p9_client_stat(struct p9_fid *fid); int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst); int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr); struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, u64 request_mask); int p9_client_mknod_dotl(struct p9_fid *oldfid, const char *name, int mode, dev_t rdev, kgid_t gid, struct p9_qid *qid); int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode, kgid_t gid, struct p9_qid *qid); int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status); int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl); void p9_fcall_fini(struct p9_fcall *fc); struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag); static inline void p9_req_get(struct p9_req_t *r) { refcount_inc(&r->refcount); } static inline int p9_req_try_get(struct p9_req_t *r) { return refcount_inc_not_zero(&r->refcount); } int p9_req_put(struct p9_client *c, struct p9_req_t *r); /* We cannot have the real tracepoints in header files, * use a wrapper function */ DECLARE_TRACEPOINT(9p_fid_ref); void do_trace_9p_fid_get(struct p9_fid *fid); void do_trace_9p_fid_put(struct p9_fid *fid); /* fid reference counting helpers: * - fids used for any length of time should always be referenced through * p9_fid_get(), and released with p9_fid_put() * - v9fs_fid_lookup() or similar will automatically call get for you * and also require a put * - the *_fid_add() helpers will stash the fid in the inode, * at which point it is the responsibility of evict_inode() * to call the put * - the last put will automatically send a clunk to the server */ static inline struct p9_fid *p9_fid_get(struct p9_fid *fid) { if (tracepoint_enabled(9p_fid_ref)) do_trace_9p_fid_get(fid); refcount_inc(&fid->count); return fid; } static inline int p9_fid_put(struct p9_fid *fid) { if (!fid || IS_ERR(fid)) return 0; if (tracepoint_enabled(9p_fid_ref)) do_trace_9p_fid_put(fid); if (!refcount_dec_and_test(&fid->count)) return 0; return p9_client_clunk(fid); } void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status); int p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type, int16_t *tag, int rewind); int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st); void p9stat_free(struct p9_wstat *stbuf); int p9_is_proto_dotu(struct p9_client *clnt); int p9_is_proto_dotl(struct p9_client *clnt); struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid, const char *attr_name, u64 *attr_size); int p9_client_xattrcreate(struct p9_fid *fid, const char *name, u64 attr_size, int flags); int p9_client_readlink(struct p9_fid *fid, char **target); int p9_client_init(void); void p9_client_exit(void); #endif /* NET_9P_CLIENT_H */ |
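/*
 * Usage sketch, not part of client.h: the reference-counting discipline
 * described in the comment block above, applied around a single client
 * operation. The helper name demo_fsync_fid() is hypothetical;
 * p9_fid_get(), p9_fid_put() and p9_client_fsync() are the declarations
 * from this header.
 */
static int demo_fsync_fid(struct p9_fid *fid)
{
	int err;

	if (!fid)
		return -EINVAL;

	/* take a counted reference for the duration of the operation */
	fid = p9_fid_get(fid);

	err = p9_client_fsync(fid, 0);

	/* the final put automatically clunks the fid on the server */
	p9_fid_put(fid);

	return err;
}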
/* SPDX-License-Identifier: GPL-2.0 */
/* taskstats_kern.h - kernel header for per-task statistics interface
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 * (C) Balbir Singh, IBM Corp. 2006
 */

#ifndef _LINUX_TASKSTATS_KERN_H
#define _LINUX_TASKSTATS_KERN_H

#include <linux/taskstats.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>

#ifdef CONFIG_TASKSTATS
extern struct kmem_cache *taskstats_cache;
extern struct mutex taskstats_exit_mutex;

static inline void taskstats_tgid_free(struct signal_struct *sig)
{
	if (sig->stats)
		kmem_cache_free(taskstats_cache, sig->stats);
}

extern void taskstats_exit(struct task_struct *, int group_dead);
extern void taskstats_init_early(void);
#else
static inline void taskstats_exit(struct task_struct *tsk, int group_dead)
{}
static inline void taskstats_tgid_free(struct signal_struct *sig)
{}
static inline void taskstats_init_early(void)
{}
#endif /* CONFIG_TASKSTATS */

#endif
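/*
 * Usage sketch, not part of taskstats_kern.h: because the !CONFIG_TASKSTATS
 * branch provides empty inline stubs, exit-path code can call these helpers
 * unconditionally, with no #ifdef at the call site. The function name
 * demo_task_teardown() is hypothetical.
 */
static inline void demo_task_teardown(struct task_struct *tsk,
				      struct signal_struct *sig,
				      int group_dead)
{
	taskstats_exit(tsk, group_dead);	/* no-op when taskstats is off */
	taskstats_tgid_free(sig);		/* frees sig->stats if it was allocated */
}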
7 2 5 25 20 5 3 7 38 2 3 8 27 30 2 29 4 32 3 30 34 23 3 1 1 18 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 | // SPDX-License-Identifier: GPL-2.0 /* * This contains the io-permission bitmap code - written by obz, with changes * by Linus. 32/64 bits code unification by Miguel Botón. */ #include <linux/capability.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/bitmap.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/io_bitmap.h> #include <asm/desc.h> #include <asm/syscalls.h> #ifdef CONFIG_X86_IOPL_IOPERM static atomic64_t io_bitmap_sequence; void io_bitmap_share(struct task_struct *tsk) { /* Can be NULL when current->thread.iopl_emul == 3 */ if (current->thread.io_bitmap) { /* * Take a refcount on current's bitmap. It can be used by * both tasks as long as none of them changes the bitmap. */ refcount_inc(¤t->thread.io_bitmap->refcnt); tsk->thread.io_bitmap = current->thread.io_bitmap; } set_tsk_thread_flag(tsk, TIF_IO_BITMAP); } static void task_update_io_bitmap(struct task_struct *tsk) { struct thread_struct *t = &tsk->thread; if (t->iopl_emul == 3 || t->io_bitmap) { /* TSS update is handled on exit to user space */ set_tsk_thread_flag(tsk, TIF_IO_BITMAP); } else { clear_tsk_thread_flag(tsk, TIF_IO_BITMAP); /* Invalidate TSS */ preempt_disable(); tss_update_io_bitmap(); preempt_enable(); } } void io_bitmap_exit(struct task_struct *tsk) { struct io_bitmap *iobm = tsk->thread.io_bitmap; tsk->thread.io_bitmap = NULL; task_update_io_bitmap(tsk); if (iobm && refcount_dec_and_test(&iobm->refcnt)) kfree(iobm); } /* * This changes the io permissions bitmap in the current task. */ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on) { struct thread_struct *t = ¤t->thread; unsigned int i, max_long; struct io_bitmap *iobm; if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) return -EINVAL; if (turn_on && (!capable(CAP_SYS_RAWIO) || security_locked_down(LOCKDOWN_IOPORT))) return -EPERM; /* * If it's the first ioperm() call in this thread's lifetime, set the * IO bitmap up. ioperm() is much less timing critical than clone(), * this is why we delay this operation until now: */ iobm = t->io_bitmap; if (!iobm) { /* No point to allocate a bitmap just to clear permissions */ if (!turn_on) return 0; iobm = kmalloc(sizeof(*iobm), GFP_KERNEL); if (!iobm) return -ENOMEM; memset(iobm->bitmap, 0xff, sizeof(iobm->bitmap)); refcount_set(&iobm->refcnt, 1); } /* * If the bitmap is not shared, then nothing can take a refcount as * current can obviously not fork at the same time. If it's shared * duplicate it and drop the refcount on the original one. 
*/ if (refcount_read(&iobm->refcnt) > 1) { iobm = kmemdup(iobm, sizeof(*iobm), GFP_KERNEL); if (!iobm) return -ENOMEM; refcount_set(&iobm->refcnt, 1); io_bitmap_exit(current); } /* * Store the bitmap pointer (might be the same if the task already * head one). Must be done here so freeing the bitmap when all * permissions are dropped has the pointer set up. */ t->io_bitmap = iobm; /* Mark it active for context switching and exit to user mode */ set_thread_flag(TIF_IO_BITMAP); /* * Update the tasks bitmap. The update of the TSS bitmap happens on * exit to user mode. So this needs no protection. */ if (turn_on) bitmap_clear(iobm->bitmap, from, num); else bitmap_set(iobm->bitmap, from, num); /* * Search for a (possibly new) maximum. This is simple and stupid, * to keep it obviously correct: */ max_long = UINT_MAX; for (i = 0; i < IO_BITMAP_LONGS; i++) { if (iobm->bitmap[i] != ~0UL) max_long = i; } /* All permissions dropped? */ if (max_long == UINT_MAX) { io_bitmap_exit(current); return 0; } iobm->max = (max_long + 1) * sizeof(unsigned long); /* * Update the sequence number to force a TSS update on return to * user mode. */ iobm->sequence = atomic64_inc_return(&io_bitmap_sequence); return 0; } SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on) { return ksys_ioperm(from, num, turn_on); } /* * The sys_iopl functionality depends on the level argument, which if * granted for the task is used to enable access to all 65536 I/O ports. * * This does not use the IOPL mechanism provided by the CPU as that would * also allow the user space task to use the CLI/STI instructions. * * Disabling interrupts in a user space task is dangerous as it might lock * up the machine and the semantics vs. syscalls and exceptions is * undefined. * * Setting IOPL to level 0-2 is disabling I/O permissions. Level 3 * 3 enables them. * * IOPL is strictly per thread and inherited on fork. */ SYSCALL_DEFINE1(iopl, unsigned int, level) { struct thread_struct *t = ¤t->thread; unsigned int old; if (level > 3) return -EINVAL; old = t->iopl_emul; /* No point in going further if nothing changes */ if (level == old) return 0; /* Trying to gain more privileges? */ if (level > old) { if (!capable(CAP_SYS_RAWIO) || security_locked_down(LOCKDOWN_IOPORT)) return -EPERM; } t->iopl_emul = level; task_update_io_bitmap(current); return 0; } #else /* CONFIG_X86_IOPL_IOPERM */ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on) { return -ENOSYS; } SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on) { return -ENOSYS; } SYSCALL_DEFINE1(iopl, unsigned int, level) { return -ENOSYS; } #endif |
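/*
 * Userspace sketch, not part of ioport.c: exercising the ksys_ioperm() path
 * above through the glibc ioperm(2) wrapper. Requires CAP_SYS_RAWIO and a
 * kernel that is not locked down; 0x378 (the legacy parallel port) is only an
 * example port range.
 */
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	/* ask for 8 ports starting at 0x378; the last argument turns access on */
	if (ioperm(0x378, 8, 1) < 0) {
		perror("ioperm");
		return 1;
	}

	outb(0xff, 0x378);	/* now permitted: drive the data lines high */

	ioperm(0x378, 8, 0);	/* drop the permissions again */
	return 0;
}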
6 6 6 6 6 6 6 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 | // SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2005 Mike Isely <isely@pobox.com> */ #include "pvrusb2-std.h" #include "pvrusb2-debug.h" #include <asm/string.h> #include <linux/slab.h> struct std_name { const char *name; v4l2_std_id id; }; #define CSTD_PAL \ (V4L2_STD_PAL_B| \ V4L2_STD_PAL_B1| \ V4L2_STD_PAL_G| \ V4L2_STD_PAL_H| \ V4L2_STD_PAL_I| \ V4L2_STD_PAL_D| \ V4L2_STD_PAL_D1| \ V4L2_STD_PAL_K| \ V4L2_STD_PAL_M| \ V4L2_STD_PAL_N| \ V4L2_STD_PAL_Nc| \ V4L2_STD_PAL_60) #define CSTD_NTSC \ (V4L2_STD_NTSC_M| \ V4L2_STD_NTSC_M_JP| \ V4L2_STD_NTSC_M_KR| \ V4L2_STD_NTSC_443) #define CSTD_ATSC \ (V4L2_STD_ATSC_8_VSB| \ V4L2_STD_ATSC_16_VSB) #define CSTD_SECAM \ (V4L2_STD_SECAM_B| \ V4L2_STD_SECAM_D| \ V4L2_STD_SECAM_G| \ V4L2_STD_SECAM_H| \ V4L2_STD_SECAM_K| \ V4L2_STD_SECAM_K1| \ V4L2_STD_SECAM_L| \ V4L2_STD_SECAM_LC) #define TSTD_B (V4L2_STD_PAL_B|V4L2_STD_SECAM_B) #define TSTD_B1 (V4L2_STD_PAL_B1) #define TSTD_D (V4L2_STD_PAL_D|V4L2_STD_SECAM_D) #define TSTD_D1 (V4L2_STD_PAL_D1) #define TSTD_G (V4L2_STD_PAL_G|V4L2_STD_SECAM_G) #define TSTD_H (V4L2_STD_PAL_H|V4L2_STD_SECAM_H) #define TSTD_I (V4L2_STD_PAL_I) #define TSTD_K (V4L2_STD_PAL_K|V4L2_STD_SECAM_K) #define TSTD_K1 (V4L2_STD_SECAM_K1) #define TSTD_L (V4L2_STD_SECAM_L) #define TSTD_M (V4L2_STD_PAL_M|V4L2_STD_NTSC_M) #define TSTD_N (V4L2_STD_PAL_N) #define TSTD_Nc (V4L2_STD_PAL_Nc) #define TSTD_60 (V4L2_STD_PAL_60) #define CSTD_ALL (CSTD_PAL|CSTD_NTSC|CSTD_ATSC|CSTD_SECAM) /* Mapping of standard bits to color system */ static const struct std_name std_groups[] = { {"PAL",CSTD_PAL}, {"NTSC",CSTD_NTSC}, {"SECAM",CSTD_SECAM}, {"ATSC",CSTD_ATSC}, }; /* Mapping of standard bits to modulation system */ static const struct std_name std_items[] = { {"B",TSTD_B}, {"B1",TSTD_B1}, {"D",TSTD_D}, {"D1",TSTD_D1}, {"G",TSTD_G}, {"H",TSTD_H}, {"I",TSTD_I}, {"K",TSTD_K}, {"K1",TSTD_K1}, {"L",TSTD_L}, {"LC",V4L2_STD_SECAM_LC}, {"M",TSTD_M}, {"Mj",V4L2_STD_NTSC_M_JP}, {"443",V4L2_STD_NTSC_443}, {"Mk",V4L2_STD_NTSC_M_KR}, {"N",TSTD_N}, {"Nc",TSTD_Nc}, {"60",TSTD_60}, {"8VSB",V4L2_STD_ATSC_8_VSB}, 
{"16VSB",V4L2_STD_ATSC_16_VSB}, }; // Search an array of std_name structures and return a pointer to the // element with the matching name. static const struct std_name *find_std_name(const struct std_name *arrPtr, unsigned int arrSize, const char *bufPtr, unsigned int bufSize) { unsigned int idx; const struct std_name *p; for (idx = 0; idx < arrSize; idx++) { p = arrPtr + idx; if (strlen(p->name) != bufSize) continue; if (!memcmp(bufPtr,p->name,bufSize)) return p; } return NULL; } int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr, unsigned int bufSize) { v4l2_std_id id = 0; v4l2_std_id cmsk = 0; v4l2_std_id t; int mMode = 0; unsigned int cnt; char ch; const struct std_name *sp; while (bufSize) { if (!mMode) { cnt = 0; while ((cnt < bufSize) && (bufPtr[cnt] != '-')) cnt++; if (cnt >= bufSize) return 0; // No more characters sp = find_std_name(std_groups, ARRAY_SIZE(std_groups), bufPtr,cnt); if (!sp) return 0; // Illegal color system name cnt++; bufPtr += cnt; bufSize -= cnt; mMode = !0; cmsk = sp->id; continue; } cnt = 0; while (cnt < bufSize) { ch = bufPtr[cnt]; if (ch == ';') { mMode = 0; break; } if (ch == '/') break; cnt++; } sp = find_std_name(std_items, ARRAY_SIZE(std_items), bufPtr,cnt); if (!sp) return 0; // Illegal modulation system ID t = sp->id & cmsk; if (!t) return 0; // Specific color + modulation system illegal id |= t; if (cnt < bufSize) cnt++; bufPtr += cnt; bufSize -= cnt; } if (idPtr) *idPtr = id; return !0; } unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize, v4l2_std_id id) { unsigned int idx1,idx2; const struct std_name *ip,*gp; int gfl,cfl; unsigned int c1,c2; cfl = 0; c1 = 0; for (idx1 = 0; idx1 < ARRAY_SIZE(std_groups); idx1++) { gp = std_groups + idx1; gfl = 0; for (idx2 = 0; idx2 < ARRAY_SIZE(std_items); idx2++) { ip = std_items + idx2; if (!(gp->id & ip->id & id)) continue; if (!gfl) { if (cfl) { c2 = scnprintf(bufPtr,bufSize,";"); c1 += c2; bufSize -= c2; bufPtr += c2; } cfl = !0; c2 = scnprintf(bufPtr,bufSize, "%s-",gp->name); gfl = !0; } else { c2 = scnprintf(bufPtr,bufSize,"/"); } c1 += c2; bufSize -= c2; bufPtr += c2; c2 = scnprintf(bufPtr,bufSize, ip->name); c1 += c2; bufSize -= c2; bufPtr += c2; } } return c1; } // Template data for possible enumerated video standards. Here we group // standards which share common frame rates and resolution. 
static struct v4l2_standard generic_standards[] = { { .id = (TSTD_B|TSTD_B1| TSTD_D|TSTD_D1| TSTD_G| TSTD_H| TSTD_I| TSTD_K|TSTD_K1| TSTD_L| V4L2_STD_SECAM_LC | TSTD_N|TSTD_Nc), .frameperiod = { .numerator = 1, .denominator= 25 }, .framelines = 625, .reserved = {0,0,0,0} }, { .id = (TSTD_M| V4L2_STD_NTSC_M_JP| V4L2_STD_NTSC_M_KR), .frameperiod = { .numerator = 1001, .denominator= 30000 }, .framelines = 525, .reserved = {0,0,0,0} }, { // This is a total wild guess .id = (TSTD_60), .frameperiod = { .numerator = 1001, .denominator= 30000 }, .framelines = 525, .reserved = {0,0,0,0} }, { // This is total wild guess .id = V4L2_STD_NTSC_443, .frameperiod = { .numerator = 1001, .denominator= 30000 }, .framelines = 525, .reserved = {0,0,0,0} } }; static struct v4l2_standard *match_std(v4l2_std_id id) { unsigned int idx; for (idx = 0; idx < ARRAY_SIZE(generic_standards); idx++) { if (generic_standards[idx].id & id) { return generic_standards + idx; } } return NULL; } static int pvr2_std_fill(struct v4l2_standard *std,v4l2_std_id id) { struct v4l2_standard *template; int idx; unsigned int bcnt; template = match_std(id); if (!template) return 0; idx = std->index; memcpy(std,template,sizeof(*template)); std->index = idx; std->id = id; bcnt = pvr2_std_id_to_str(std->name,sizeof(std->name)-1,id); std->name[bcnt] = 0; pvr2_trace(PVR2_TRACE_STD,"Set up standard idx=%u name=%s", std->index,std->name); return !0; } /* These are special cases of combined standards that we should enumerate separately if the component pieces are present. */ static v4l2_std_id std_mixes[] = { V4L2_STD_PAL_B | V4L2_STD_PAL_G, V4L2_STD_PAL_D | V4L2_STD_PAL_K, V4L2_STD_SECAM_B | V4L2_STD_SECAM_G, V4L2_STD_SECAM_D | V4L2_STD_SECAM_K, }; struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr, v4l2_std_id id) { unsigned int std_cnt = 0; unsigned int idx,bcnt,idx2; v4l2_std_id idmsk,cmsk,fmsk; struct v4l2_standard *stddefs; if (pvrusb2_debug & PVR2_TRACE_STD) { char buf[100]; bcnt = pvr2_std_id_to_str(buf,sizeof(buf),id); pvr2_trace( PVR2_TRACE_STD,"Mapping standards mask=0x%x (%.*s)", (int)id,bcnt,buf); } *countptr = 0; std_cnt = 0; fmsk = 0; for (idmsk = 1, cmsk = id; cmsk; idmsk <<= 1) { if (!(idmsk & cmsk)) continue; cmsk &= ~idmsk; if (match_std(idmsk)) { std_cnt++; continue; } fmsk |= idmsk; } for (idx2 = 0; idx2 < ARRAY_SIZE(std_mixes); idx2++) { if ((id & std_mixes[idx2]) == std_mixes[idx2]) std_cnt++; } /* Don't complain about ATSC standard values */ fmsk &= ~CSTD_ATSC; if (fmsk) { char buf[100]; bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk); pvr2_trace( PVR2_TRACE_ERROR_LEGS, "***WARNING*** Failed to classify the following standard(s): %.*s", bcnt,buf); } pvr2_trace(PVR2_TRACE_STD,"Setting up %u unique standard(s)", std_cnt); if (!std_cnt) return NULL; // paranoia stddefs = kcalloc(std_cnt, sizeof(struct v4l2_standard), GFP_KERNEL); if (!stddefs) return NULL; for (idx = 0; idx < std_cnt; idx++) stddefs[idx].index = idx; idx = 0; /* Enumerate potential special cases */ for (idx2 = 0; (idx2 < ARRAY_SIZE(std_mixes)) && (idx < std_cnt); idx2++) { if (!(id & std_mixes[idx2])) continue; if (pvr2_std_fill(stddefs+idx,std_mixes[idx2])) idx++; } /* Now enumerate individual pieces */ for (idmsk = 1, cmsk = id; cmsk && (idx < std_cnt); idmsk <<= 1) { if (!(idmsk & cmsk)) continue; cmsk &= ~idmsk; if (!pvr2_std_fill(stddefs+idx,idmsk)) continue; idx++; } *countptr = std_cnt; return stddefs; } v4l2_std_id pvr2_std_get_usable(void) { return CSTD_ALL; } |
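/*
 * Usage sketch, not part of pvrusb2-std.c: round-tripping a standard
 * specification string of the form "<colorsystem>-<mod>[/<mod>...][;...]"
 * through the two parsers above, using the headers already included by this
 * file. The function name pvr2_std_demo() is hypothetical.
 */
static void pvr2_std_demo(void)
{
	static const char spec[] = "PAL-B/G;NTSC-M";
	v4l2_std_id id = 0;
	char buf[80];
	unsigned int cnt;

	/* returns non-zero on success, 0 if the string cannot be parsed */
	if (!pvr2_std_str_to_id(&id, spec, sizeof(spec) - 1))
		return;

	cnt = pvr2_std_id_to_str(buf, sizeof(buf) - 1, id);
	buf[cnt] = 0;
	pvr2_trace(PVR2_TRACE_STD, "parsed \"%s\" -> mask 0x%llx (%s)",
		   spec, (unsigned long long)id, buf);
}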
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/ #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <net/udp_tunnel.h> #include <net/sch_generic.h> #include <linux/netfilter.h> #include <rdma/ib_addr.h> #include "rxe.h" #include "rxe_net.h" #include "rxe_loc.h" static struct rxe_recv_sockets recv_sockets; static struct dst_entry *rxe_find_route4(struct rxe_qp *qp, struct net_device *ndev, struct in_addr *saddr, struct in_addr *daddr) { struct rtable *rt; struct flowi4 fl = { { 0 } }; memset(&fl, 0, sizeof(fl)); fl.flowi4_oif = ndev->ifindex; memcpy(&fl.saddr, saddr, sizeof(*saddr)); memcpy(&fl.daddr, daddr, sizeof(*daddr)); fl.flowi4_proto = IPPROTO_UDP; rt = ip_route_output_key(&init_net, &fl); if (IS_ERR(rt)) { rxe_dbg_qp(qp, "no route to %pI4\n", &daddr->s_addr); return NULL; } return &rt->dst; } #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *rxe_find_route6(struct rxe_qp *qp, struct net_device *ndev, struct in6_addr *saddr, struct in6_addr *daddr) { struct dst_entry *ndst; struct flowi6 fl6 = { { 0 } }; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = ndev->ifindex; memcpy(&fl6.saddr, saddr, sizeof(*saddr)); memcpy(&fl6.daddr, daddr, sizeof(*daddr)); fl6.flowi6_proto = IPPROTO_UDP; ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk), recv_sockets.sk6->sk, &fl6, NULL); if (IS_ERR(ndst)) { rxe_dbg_qp(qp, "no route to %pI6\n", daddr); return NULL; } if (unlikely(ndst->error)) { rxe_dbg_qp(qp, "no route to %pI6\n", daddr); goto put; } return ndst; put: dst_release(ndst); return NULL; } #else static struct dst_entry *rxe_find_route6(struct rxe_qp *qp, struct net_device *ndev, struct in6_addr *saddr, struct in6_addr *daddr) { return NULL; } #endif static struct dst_entry *rxe_find_route(struct net_device *ndev, struct rxe_qp *qp, struct rxe_av *av) { struct dst_entry *dst = NULL; if (qp_type(qp) == IB_QPT_RC) dst = sk_dst_get(qp->sk->sk); if (!dst || !dst_check(dst, qp->dst_cookie)) { if (dst) dst_release(dst); if (av->network_type == RXE_NETWORK_TYPE_IPV4) { struct in_addr *saddr; struct in_addr *daddr; saddr = &av->sgid_addr._sockaddr_in.sin_addr; daddr = &av->dgid_addr._sockaddr_in.sin_addr; dst = rxe_find_route4(qp, ndev, saddr, daddr); } else if (av->network_type == RXE_NETWORK_TYPE_IPV6) { struct in6_addr *saddr6; struct in6_addr *daddr6; saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr; daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr; dst = rxe_find_route6(qp, ndev, saddr6, daddr6); #if IS_ENABLED(CONFIG_IPV6) if (dst) qp->dst_cookie = rt6_get_cookie((struct rt6_info *)dst); #endif } if (dst && (qp_type(qp) == IB_QPT_RC)) { dst_hold(dst); sk_dst_set(qp->sk->sk, dst); } } return dst; } static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct udphdr *udph; struct rxe_dev *rxe; struct net_device *ndev = skb->dev; struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); /* takes a reference on rxe->ib_dev * drop when skb is freed */ rxe = rxe_get_dev_from_net(ndev); if (!rxe && is_vlan_dev(ndev)) rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev)); if (!rxe) goto drop; if (skb_linearize(skb)) { ib_device_put(&rxe->ib_dev); goto drop; } udph = udp_hdr(skb); pkt->rxe = rxe; pkt->port_num = 1; pkt->hdr = (u8 *)(udph + 1); pkt->mask = RXE_GRH_MASK; pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph); /* remove udp header */ skb_pull(skb, sizeof(struct udphdr)); rxe_rcv(skb); return 0; drop: kfree_skb(skb); return 0; } static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, bool ipv6) { int err; struct 
socket *sock; struct udp_port_cfg udp_cfg = { }; struct udp_tunnel_sock_cfg tnl_cfg = { }; if (ipv6) { udp_cfg.family = AF_INET6; udp_cfg.ipv6_v6only = 1; } else { udp_cfg.family = AF_INET; } udp_cfg.local_udp_port = port; /* Create UDP socket */ err = udp_sock_create(net, &udp_cfg, &sock); if (err < 0) return ERR_PTR(err); tnl_cfg.encap_type = 1; tnl_cfg.encap_rcv = rxe_udp_encap_recv; /* Setup UDP tunnel */ setup_udp_tunnel_sock(net, sock, &tnl_cfg); return sock; } static void rxe_release_udp_tunnel(struct socket *sk) { if (sk) udp_tunnel_sock_release(sk); } static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port, __be16 dst_port) { struct udphdr *udph; __skb_push(skb, sizeof(*udph)); skb_reset_transport_header(skb); udph = udp_hdr(skb); udph->dest = dst_port; udph->source = src_port; udph->len = htons(skb->len); udph->check = 0; } static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb, __be32 saddr, __be32 daddr, __u8 proto, __u8 tos, __u8 ttl, __be16 df, bool xnet) { struct iphdr *iph; skb_scrub_packet(skb, xnet); skb_clear_hash(skb); skb_dst_set(skb, dst_clone(dst)); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->version = IPVERSION; iph->ihl = sizeof(struct iphdr) >> 2; iph->tot_len = htons(skb->len); iph->frag_off = df; iph->protocol = proto; iph->tos = tos; iph->daddr = daddr; iph->saddr = saddr; iph->ttl = ttl; __ip_select_ident(dev_net(dst->dev), iph, skb_shinfo(skb)->gso_segs ?: 1); } static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb, struct in6_addr *saddr, struct in6_addr *daddr, __u8 proto, __u8 prio, __u8 ttl) { struct ipv6hdr *ip6h; memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_set(skb, dst_clone(dst)); __skb_push(skb, sizeof(*ip6h)); skb_reset_network_header(skb); ip6h = ipv6_hdr(skb); ip6_flow_hdr(ip6h, prio, htonl(0)); ip6h->payload_len = htons(skb->len); ip6h->nexthdr = proto; ip6h->hop_limit = ttl; ip6h->daddr = *daddr; ip6h->saddr = *saddr; ip6h->payload_len = htons(skb->len - sizeof(*ip6h)); } static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt, struct sk_buff *skb) { struct rxe_qp *qp = pkt->qp; struct dst_entry *dst; bool xnet = false; __be16 df = htons(IP_DF); struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr; struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr; dst = rxe_find_route(skb->dev, qp, av); if (!dst) { rxe_dbg_qp(qp, "Host not reachable\n"); return -EHOSTUNREACH; } prepare_udp_hdr(skb, cpu_to_be16(qp->src_port), cpu_to_be16(ROCE_V2_UDP_DPORT)); prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP, av->grh.traffic_class, av->grh.hop_limit, df, xnet); dst_release(dst); return 0; } static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt, struct sk_buff *skb) { struct rxe_qp *qp = pkt->qp; struct dst_entry *dst; struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr; struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr; dst = rxe_find_route(skb->dev, qp, av); if (!dst) { rxe_dbg_qp(qp, "Host not reachable\n"); return -EHOSTUNREACH; } prepare_udp_hdr(skb, cpu_to_be16(qp->src_port), cpu_to_be16(ROCE_V2_UDP_DPORT)); prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP, av->grh.traffic_class, av->grh.hop_limit); dst_release(dst); return 0; } int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt, struct sk_buff *skb) { int err = 0; if 
(skb->protocol == htons(ETH_P_IP)) err = prepare4(av, pkt, skb); else if (skb->protocol == htons(ETH_P_IPV6)) err = prepare6(av, pkt, skb); if (ether_addr_equal(skb->dev->dev_addr, av->dmac)) pkt->mask |= RXE_LOOPBACK_MASK; return err; } static void rxe_skb_tx_dtor(struct sk_buff *skb) { struct net_device *ndev = skb->dev; struct rxe_dev *rxe; unsigned int qp_index; struct rxe_qp *qp; int skb_out; rxe = rxe_get_dev_from_net(ndev); if (!rxe && is_vlan_dev(ndev)) rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev)); if (WARN_ON(!rxe)) return; qp_index = (int)(uintptr_t)skb->sk->sk_user_data; if (!qp_index) return; qp = rxe_pool_get_index(&rxe->qp_pool, qp_index); if (!qp) goto put_dev; skb_out = atomic_dec_return(&qp->skb_out); if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW) rxe_sched_task(&qp->send_task); rxe_put(qp); put_dev: ib_device_put(&rxe->ib_dev); sock_put(skb->sk); } static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) { int err; struct sock *sk = pkt->qp->sk->sk; sock_hold(sk); skb->sk = sk; skb->destructor = rxe_skb_tx_dtor; atomic_inc(&pkt->qp->skb_out); if (skb->protocol == htons(ETH_P_IP)) err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); else err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); return err; } /* fix up a send packet to match the packets * received from UDP before looping them back */ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt) { struct sock *sk = pkt->qp->sk->sk; memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt)); sock_hold(sk); skb->sk = sk; skb->destructor = rxe_skb_tx_dtor; atomic_inc(&pkt->qp->skb_out); if (skb->protocol == htons(ETH_P_IP)) skb_pull(skb, sizeof(struct iphdr)); else skb_pull(skb, sizeof(struct ipv6hdr)); if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) { kfree_skb(skb); return -EIO; } /* remove udp header */ skb_pull(skb, sizeof(struct udphdr)); rxe_rcv(skb); return 0; } int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct sk_buff *skb) { int err; int is_request = pkt->mask & RXE_REQ_MASK; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); unsigned long flags; spin_lock_irqsave(&qp->state_lock, flags); if ((is_request && (qp_state(qp) < IB_QPS_RTS)) || (!is_request && (qp_state(qp) < IB_QPS_RTR))) { spin_unlock_irqrestore(&qp->state_lock, flags); rxe_dbg_qp(qp, "Packet dropped. 
QP is not in ready state\n"); goto drop; } spin_unlock_irqrestore(&qp->state_lock, flags); rxe_icrc_generate(skb, pkt); if (pkt->mask & RXE_LOOPBACK_MASK) err = rxe_loopback(skb, pkt); else err = rxe_send(skb, pkt); if (err) { rxe_counter_inc(rxe, RXE_CNT_SEND_ERR); return err; } rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS); goto done; drop: kfree_skb(skb); err = 0; done: return err; } struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, int paylen, struct rxe_pkt_info *pkt) { unsigned int hdr_len; struct sk_buff *skb = NULL; struct net_device *ndev; const struct ib_gid_attr *attr; const int port_num = 1; attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index); if (IS_ERR(attr)) return NULL; if (av->network_type == RXE_NETWORK_TYPE_IPV4) hdr_len = ETH_HLEN + sizeof(struct udphdr) + sizeof(struct iphdr); else hdr_len = ETH_HLEN + sizeof(struct udphdr) + sizeof(struct ipv6hdr); rcu_read_lock(); ndev = rdma_read_gid_attr_ndev_rcu(attr); if (IS_ERR(ndev)) { rcu_read_unlock(); goto out; } skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev), GFP_ATOMIC); if (unlikely(!skb)) { rcu_read_unlock(); goto out; } skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev)); /* FIXME: hold reference to this netdev until life of this skb. */ skb->dev = ndev; rcu_read_unlock(); if (av->network_type == RXE_NETWORK_TYPE_IPV4) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); pkt->rxe = rxe; pkt->port_num = port_num; pkt->hdr = skb_put(skb, paylen); pkt->mask |= RXE_GRH_MASK; out: rdma_put_gid_attr(attr); return skb; } /* * this is required by rxe_cfg to match rxe devices in * /sys/class/infiniband up with their underlying ethernet devices */ const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num) { struct net_device *ndev; char *ndev_name; ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); if (!ndev) return NULL; ndev_name = ndev->name; dev_put(ndev); return ndev_name; } int rxe_net_add(const char *ibdev_name, struct net_device *ndev) { int err; struct rxe_dev *rxe = NULL; rxe = ib_alloc_device(rxe_dev, ib_dev); if (!rxe) return -ENOMEM; ib_mark_name_assigned_by_user(&rxe->ib_dev); err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev); if (err) { ib_dealloc_device(&rxe->ib_dev); return err; } return 0; } static void rxe_port_event(struct rxe_dev *rxe, enum ib_event_type event) { struct ib_event ev; ev.device = &rxe->ib_dev; ev.element.port_num = 1; ev.event = event; ib_dispatch_event(&ev); } /* Caller must hold net_info_lock */ void rxe_port_up(struct rxe_dev *rxe) { rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE); dev_info(&rxe->ib_dev.dev, "set active\n"); } /* Caller must hold net_info_lock */ void rxe_port_down(struct rxe_dev *rxe) { rxe_port_event(rxe, IB_EVENT_PORT_ERR); rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED); dev_info(&rxe->ib_dev.dev, "set down\n"); } void rxe_set_port_state(struct rxe_dev *rxe) { struct net_device *ndev; ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); if (!ndev) return; if (ib_get_curr_port_state(ndev) == IB_PORT_ACTIVE) rxe_port_up(rxe); else rxe_port_down(rxe); dev_put(ndev); } static int rxe_notify(struct notifier_block *not_blk, unsigned long event, void *arg) { struct net_device *ndev = netdev_notifier_info_to_dev(arg); struct rxe_dev *rxe = rxe_get_dev_from_net(ndev); if (!rxe) return NOTIFY_OK; switch (event) { case NETDEV_UNREGISTER: ib_unregister_device_queued(&rxe->ib_dev); break; case NETDEV_CHANGEMTU: rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu); rxe_set_mtu(rxe, ndev->mtu); break; case NETDEV_DOWN: 
case NETDEV_CHANGE: if (ib_get_curr_port_state(ndev) == IB_PORT_DOWN) rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED); break; case NETDEV_REBOOT: case NETDEV_GOING_DOWN: case NETDEV_CHANGEADDR: case NETDEV_CHANGENAME: case NETDEV_FEAT_CHANGE: default: rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n", event, ndev->name); break; } ib_device_put(&rxe->ib_dev); return NOTIFY_OK; } static struct notifier_block rxe_net_notifier = { .notifier_call = rxe_notify, }; static int rxe_net_ipv4_init(void) { recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net, htons(ROCE_V2_UDP_DPORT), false); if (IS_ERR(recv_sockets.sk4)) { recv_sockets.sk4 = NULL; pr_err("Failed to create IPv4 UDP tunnel\n"); return -1; } return 0; } static int rxe_net_ipv6_init(void) { #if IS_ENABLED(CONFIG_IPV6) recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net, htons(ROCE_V2_UDP_DPORT), true); if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) { recv_sockets.sk6 = NULL; pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n"); return 0; } if (IS_ERR(recv_sockets.sk6)) { recv_sockets.sk6 = NULL; pr_err("Failed to create IPv6 UDP tunnel\n"); return -1; } #endif return 0; } void rxe_net_exit(void) { rxe_release_udp_tunnel(recv_sockets.sk6); rxe_release_udp_tunnel(recv_sockets.sk4); unregister_netdevice_notifier(&rxe_net_notifier); } int rxe_net_init(void) { int err; recv_sockets.sk6 = NULL; err = rxe_net_ipv4_init(); if (err) return err; err = rxe_net_ipv6_init(); if (err) goto err_out; err = register_netdevice_notifier(&rxe_net_notifier); if (err) { pr_err("Failed to register netdev notifier\n"); goto err_out; } return 0; err_out: rxe_net_exit(); return err; } |
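/*
 * Usage sketch, not part of rxe_net.c: roughly how a caller could attach a
 * new rxe device to an existing ethernet interface with rxe_net_add(), using
 * declarations already pulled in above. The function name demo_rxe_link_add()
 * is hypothetical; in the real driver this happens from the rdma link
 * (netlink) path.
 */
static int demo_rxe_link_add(struct net *net, const char *ibdev_name,
			     const char *ndev_name)
{
	struct net_device *ndev;
	int err;

	ndev = dev_get_by_name(net, ndev_name);
	if (!ndev)
		return -ENODEV;

	/* allocates the ib_device, sets the MTU from the netdev and registers */
	err = rxe_net_add(ibdev_name, ndev);

	dev_put(ndev);
	return err;
}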
5 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 | // SPDX-License-Identifier: GPL-2.0-only /* IP tables module for matching the value of the IPv4/IPv6 DSCP field * * (C) 2002 by Harald Welte <laforge@netfilter.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/dsfield.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_dscp.h> MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("Xtables: DSCP/TOS field match"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_dscp"); MODULE_ALIAS("ip6t_dscp"); MODULE_ALIAS("ipt_tos"); MODULE_ALIAS("ip6t_tos"); static bool dscp_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_dscp_info *info = par->matchinfo; u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; return (dscp == info->dscp) ^ !!info->invert; } static bool dscp_mt6(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_dscp_info *info = par->matchinfo; u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT; return (dscp == info->dscp) ^ !!info->invert; } static int dscp_mt_check(const struct xt_mtchk_param *par) { const struct xt_dscp_info *info = par->matchinfo; if (info->dscp > XT_DSCP_MAX) return -EDOM; return 0; } static bool tos_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_tos_match_info *info = par->matchinfo; if (xt_family(par) == NFPROTO_IPV4) return ((ip_hdr(skb)->tos & info->tos_mask) == info->tos_value) ^ !!info->invert; else return ((ipv6_get_dsfield(ipv6_hdr(skb)) & info->tos_mask) == info->tos_value) ^ !!info->invert; } static struct xt_match dscp_mt_reg[] __read_mostly = { { .name = "dscp", .family = NFPROTO_IPV4, .checkentry = dscp_mt_check, .match = dscp_mt, .matchsize = sizeof(struct xt_dscp_info), .me = THIS_MODULE, }, { .name = "dscp", .family = NFPROTO_IPV6, .checkentry = dscp_mt_check, .match = dscp_mt6, .matchsize = sizeof(struct xt_dscp_info), .me = THIS_MODULE, }, { .name = "tos", .revision = 1, .family = NFPROTO_IPV4, .match = tos_mt, .matchsize = sizeof(struct xt_tos_match_info), .me = THIS_MODULE, }, { .name = "tos", .revision = 1, .family = NFPROTO_IPV6, .match = tos_mt, .matchsize = sizeof(struct xt_tos_match_info), .me = THIS_MODULE, }, }; static int __init dscp_mt_init(void) { return xt_register_matches(dscp_mt_reg, ARRAY_SIZE(dscp_mt_reg)); } static void __exit dscp_mt_exit(void) { xt_unregister_matches(dscp_mt_reg, ARRAY_SIZE(dscp_mt_reg)); } module_init(dscp_mt_init); module_exit(dscp_mt_exit); |
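/*
 * Usage sketch, not part of xt_dscp.c: the matchinfo payload that a rule such
 * as "-m dscp --dscp 26" would hand to dscp_mt()/dscp_mt_check() above. The
 * initializer name demo_dscp_af31 is hypothetical.
 */
static const struct xt_dscp_info demo_dscp_af31 = {
	.dscp	= 26,	/* AF31; must be <= XT_DSCP_MAX or checkentry returns -EDOM */
	.invert	= 0,	/* match packets whose DSCP field equals 26 */
};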
7 10 1 7 7 10 10 1 10 5 10 10 10 10 10 1 10 8 1 5 11 5 2 1 5 10 9 9 13 1 1 11 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 | // SPDX-License-Identifier: GPL-2.0-only #include <linux/module.h> #include <net/sock.h> #include <linux/netlink.h> #include <linux/sock_diag.h> #include <linux/netlink_diag.h> #include <linux/rhashtable.h> #include "af_netlink.h" static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb) { struct netlink_sock *nlk = nlk_sk(sk); if (nlk->groups == NULL) return 0; return nla_put(nlskb, NETLINK_DIAG_GROUPS, NLGRPSZ(nlk->ngroups), nlk->groups); } static int sk_diag_put_flags(struct sock *sk, struct sk_buff *skb) { struct netlink_sock *nlk = nlk_sk(sk); u32 flags = 0; if (nlk->cb_running) flags |= NDIAG_FLAG_CB_RUNNING; if (nlk_test_bit(RECV_PKTINFO, sk)) flags |= NDIAG_FLAG_PKTINFO; if (nlk_test_bit(BROADCAST_SEND_ERROR, sk)) flags |= NDIAG_FLAG_BROADCAST_ERROR; if (nlk_test_bit(RECV_NO_ENOBUFS, sk)) flags |= NDIAG_FLAG_NO_ENOBUFS; if (nlk_test_bit(LISTEN_ALL_NSID, sk)) flags |= NDIAG_FLAG_LISTEN_ALL_NSID; if (nlk_test_bit(CAP_ACK, sk)) flags |= NDIAG_FLAG_CAP_ACK; return nla_put_u32(skb, NETLINK_DIAG_FLAGS, flags); } static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct netlink_diag_req *req, u32 portid, u32 seq, u32 flags, int sk_ino) { struct nlmsghdr *nlh; struct netlink_diag_msg *rep; struct netlink_sock *nlk = nlk_sk(sk); nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep), flags); if (!nlh) return -EMSGSIZE; rep = nlmsg_data(nlh); rep->ndiag_family = AF_NETLINK; rep->ndiag_type = sk->sk_type; rep->ndiag_protocol = sk->sk_protocol; rep->ndiag_state = sk->sk_state; rep->ndiag_ino = sk_ino; rep->ndiag_portid = nlk->portid; rep->ndiag_dst_portid = nlk->dst_portid; rep->ndiag_dst_group = nlk->dst_group; sock_diag_save_cookie(sk, rep->ndiag_cookie); if ((req->ndiag_show & NDIAG_SHOW_GROUPS) && sk_diag_dump_groups(sk, skb)) goto out_nlmsg_trim; if ((req->ndiag_show & NDIAG_SHOW_MEMINFO) && sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO)) goto out_nlmsg_trim; if ((req->ndiag_show & NDIAG_SHOW_FLAGS) && sk_diag_put_flags(sk, skb)) goto out_nlmsg_trim; nlmsg_end(skb, nlh); return 0; out_nlmsg_trim: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, int protocol, int s_num) { struct rhashtable_iter *hti = (void *)cb->args[2]; struct netlink_table *tbl = &nl_table[protocol]; struct net *net = sock_net(skb->sk); struct netlink_diag_req *req; struct netlink_sock *nlsk; unsigned long flags; struct sock *sk; int num = 2; int ret = 0; req = nlmsg_data(cb->nlh); if (s_num > 1) 
goto mc_list; num--; if (!hti) { hti = kmalloc(sizeof(*hti), GFP_KERNEL); if (!hti) return -ENOMEM; cb->args[2] = (long)hti; } if (!s_num) rhashtable_walk_enter(&tbl->hash, hti); rhashtable_walk_start(hti); while ((nlsk = rhashtable_walk_next(hti))) { if (IS_ERR(nlsk)) { ret = PTR_ERR(nlsk); if (ret == -EAGAIN) { ret = 0; continue; } break; } sk = (struct sock *)nlsk; if (!net_eq(sock_net(sk), net)) continue; if (sk_diag_fill(sk, skb, req, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, sock_i_ino(sk)) < 0) { ret = 1; break; } } rhashtable_walk_stop(hti); if (ret) goto done; rhashtable_walk_exit(hti); num++; mc_list: read_lock_irqsave(&nl_table_lock, flags); sk_for_each_bound(sk, &tbl->mc_list) { if (sk_hashed(sk)) continue; if (!net_eq(sock_net(sk), net)) continue; if (num < s_num) { num++; continue; } if (sk_diag_fill(sk, skb, req, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, __sock_i_ino(sk)) < 0) { ret = 1; break; } num++; } read_unlock_irqrestore(&nl_table_lock, flags); done: cb->args[0] = num; return ret; } static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct netlink_diag_req *req; int s_num = cb->args[0]; int err = 0; req = nlmsg_data(cb->nlh); if (req->sdiag_protocol == NDIAG_PROTO_ALL) { int i; for (i = cb->args[1]; i < MAX_LINKS; i++) { err = __netlink_diag_dump(skb, cb, i, s_num); if (err) break; s_num = 0; } cb->args[1] = i; } else { if (req->sdiag_protocol >= MAX_LINKS) return -ENOENT; err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); } return err <= 0 ? err : skb->len; } static int netlink_diag_dump_done(struct netlink_callback *cb) { struct rhashtable_iter *hti = (void *)cb->args[2]; if (cb->args[0] == 1) rhashtable_walk_exit(hti); kfree(hti); return 0; } static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) { int hdrlen = sizeof(struct netlink_diag_req); struct net *net = sock_net(skb->sk); if (nlmsg_len(h) < hdrlen) return -EINVAL; if (h->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = netlink_diag_dump, .done = netlink_diag_dump_done, }; return netlink_dump_start(net->diag_nlsk, skb, h, &c); } else return -EOPNOTSUPP; } static const struct sock_diag_handler netlink_diag_handler = { .owner = THIS_MODULE, .family = AF_NETLINK, .dump = netlink_diag_handler_dump, }; static int __init netlink_diag_init(void) { return sock_diag_register(&netlink_diag_handler); } static void __exit netlink_diag_exit(void) { sock_diag_unregister(&netlink_diag_handler); } module_init(netlink_diag_init); module_exit(netlink_diag_exit); MODULE_DESCRIPTION("Netlink-based socket monitoring/diagnostic interface (sock_diag)"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 16 /* AF_NETLINK */); |
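/*
 * Userspace sketch, not part of the module above: issuing the dump request
 * that netlink_diag_handler_dump() serves. Constants come from
 * <linux/netlink_diag.h> and <linux/sock_diag.h>; the function name
 * netlink_diag_open_dump() is hypothetical, and reading/parsing the
 * netlink_diag_msg replies is left out.
 */
#include <linux/netlink.h>
#include <linux/netlink_diag.h>
#include <linux/sock_diag.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int netlink_diag_open_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct netlink_diag_req req;
	} msg;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_NETLINK;
	msg.req.sdiag_protocol = NDIAG_PROTO_ALL;	/* walk every netlink protocol */
	msg.req.ndiag_show = NDIAG_SHOW_GROUPS | NDIAG_SHOW_MEMINFO |
			     NDIAG_SHOW_FLAGS;

	if (send(fd, &msg, sizeof(msg), 0) < 0) {
		close(fd);
		return -1;
	}

	return fd;	/* caller recv()s the NLM_F_MULTI reply stream */
}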
// SPDX-License-Identifier: GPL-2.0-or-later
/* linux/net/ipv4/arp.c
 *
 * Copyright (C) 1994 by Florian La Roche
 *
 * This module implements the Address
Resolution Protocol ARP (RFC 826), * which is used to convert IP addresses (or in the future maybe other * high-level addresses) into a low-level hardware address (like an Ethernet * address). * * Fixes: * Alan Cox : Removed the Ethernet assumptions in * Florian's code * Alan Cox : Fixed some small errors in the ARP * logic * Alan Cox : Allow >4K in /proc * Alan Cox : Make ARP add its own protocol entry * Ross Martin : Rewrote arp_rcv() and arp_get_info() * Stephen Henson : Add AX25 support to arp_get_info() * Alan Cox : Drop data when a device is downed. * Alan Cox : Use init_timer(). * Alan Cox : Double lock fixes. * Martin Seine : Move the arphdr structure * to if_arp.h for compatibility. * with BSD based programs. * Andrew Tridgell : Added ARP netmask code and * re-arranged proxy handling. * Alan Cox : Changed to use notifiers. * Niibe Yutaka : Reply for this device or proxies only. * Alan Cox : Don't proxy across hardware types! * Jonathan Naylor : Added support for NET/ROM. * Mike Shaver : RFC1122 checks. * Jonathan Naylor : Only lookup the hardware address for * the correct hardware type. * Germano Caronni : Assorted subtle races. * Craig Schlenter : Don't modify permanent entry * during arp_rcv. * Russ Nelson : Tidied up a few bits. * Alexey Kuznetsov: Major changes to caching and behaviour, * eg intelligent arp probing and * generation * of host down events. * Alan Cox : Missing unlock in device events. * Eckes : ARP ioctl control errors. * Alexey Kuznetsov: Arp free fix. * Manuel Rodriguez: Gratuitous ARP. * Jonathan Layes : Added arpd support through kerneld * message queue (960314) * Mike Shaver : /proc/sys/net/ipv4/arp_* support * Mike McLagan : Routing by source * Stuart Cheshire : Metricom and grat arp fixes * *** FOR 2.1 clean this up *** * Lawrence V. Stefani: (08/12/96) Added FDDI support. * Alan Cox : Took the AP1000 nasty FDDI hack and * folded into the mainstream FDDI code. * Ack spit, Linus how did you allow that * one in... * Jes Sorensen : Make FDDI work again in 2.1.x and * clean up the APFDDI & gen. FDDI bits. * Alexey Kuznetsov: new arp state machine; * now it is in net/core/neighbour.c. * Krzysztof Halasa: Added Frame Relay ARP support. * Arnaldo C. Melo : convert /proc/net/arp to seq_file * Shmulik Hen: Split arp_send to arp_create and * arp_xmit so intermediate drivers like * bonding can change the skb before * sending (e.g. insert 8021q tag). * Harald Welte : convert to make use of jenkins hash * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support. 
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ax25.h>
#include <net/netrom.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>

#include <linux/uaccess.h>

#include <linux/netfilter_arp.h>

/*
 *	Interface to generic neighbour cache.
 */
static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd);
static bool arp_key_eq(const struct neighbour *n, const void *pkey);
static int arp_constructor(struct neighbour *neigh);
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
static void parp_redo(struct sk_buff *skb);
static int arp_is_multicast(const void *pkey);

static const struct neigh_ops arp_generic_ops = {
	.family =		AF_INET,
	.solicit =		arp_solicit,
	.error_report =		arp_error_report,
	.output =		neigh_resolve_output,
	.connected_output =	neigh_connected_output,
};

static const struct neigh_ops arp_hh_ops = {
	.family =		AF_INET,
	.solicit =		arp_solicit,
	.error_report =		arp_error_report,
	.output =		neigh_resolve_output,
	.connected_output =	neigh_resolve_output,
};

static const struct neigh_ops arp_direct_ops = {
	.family =		AF_INET,
	.output =		neigh_direct_output,
	.connected_output =	neigh_direct_output,
};

struct neigh_table arp_tbl = {
	.family		= AF_INET,
	.key_len	= 4,
	.protocol	= cpu_to_be16(ETH_P_IP),
	.hash		= arp_hash,
	.key_eq		= arp_key_eq,
	.constructor	= arp_constructor,
	.proxy_redo	= parp_redo,
	.is_multicast	= arp_is_multicast,
	.id		= "arp_cache",
	.parms		= {
		.tbl			= &arp_tbl,
		.reachable_time		= 30 * HZ,
		.data	= {
			[NEIGH_VAR_MCAST_PROBES] = 3,
			[NEIGH_VAR_UCAST_PROBES] = 3,
			[NEIGH_VAR_RETRANS_TIME] = 1 * HZ,
			[NEIGH_VAR_BASE_REACHABLE_TIME] = 30 * HZ,
			[NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
			[NEIGH_VAR_INTERVAL_PROBE_TIME_MS] = 5 * HZ,
			[NEIGH_VAR_GC_STALETIME] = 60 * HZ,
			[NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_MAX,
			[NEIGH_VAR_PROXY_QLEN] = 64,
			[NEIGH_VAR_ANYCAST_DELAY] = 1 * HZ,
			[NEIGH_VAR_PROXY_DELAY] = (8 * HZ) / 10,
			[NEIGH_VAR_LOCKTIME] = 1 * HZ,
		},
	},
	.gc_interval	= 30 * HZ,
	.gc_thresh1	= 128,
	.gc_thresh2	= 512,
	.gc_thresh3	= 1024,
};
EXPORT_SYMBOL(arp_tbl);

int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
{
	switch (dev->type) {
	case ARPHRD_ETHER:
	case ARPHRD_FDDI:
	case ARPHRD_IEEE802:
		ip_eth_mc_map(addr, haddr);
		return 0;
	case ARPHRD_INFINIBAND:
		ip_ib_mc_map(addr, dev->broadcast, haddr);
		return 0;
	case ARPHRD_IPGRE:
		ip_ipgre_mc_map(addr, dev->broadcast, haddr);
		return 0;
	default:
		if (dir) {
			memcpy(haddr, dev->broadcast, dev->addr_len);
			return 0;
		}
	}
	return -EINVAL;
}

static u32 arp_hash(const void *pkey,
		    const struct net_device *dev,
		    __u32 *hash_rnd)
{
	return arp_hashfn(pkey, dev, hash_rnd);
}

static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
{
	return neigh_key_eq32(neigh, pkey);
}

static int arp_constructor(struct neighbour *neigh)
{
	__be32 addr;
	struct net_device *dev = neigh->dev;
	struct in_device *in_dev;
	struct neigh_parms *parms;
	u32 inaddr_any = INADDR_ANY;

	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
		memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);

	addr = *(__be32 *)neigh->primary_key;
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return -EINVAL;
	}

	neigh->type = inet_addr_type_dev_table(dev_net(dev), dev, addr);

	parms = in_dev->arp_parms;
	__neigh_parms_put(neigh->parms);
	neigh->parms = neigh_parms_clone(parms);
	rcu_read_unlock();

	if (!dev->header_ops) {
		neigh->nud_state = NUD_NOARP;
		neigh->ops = &arp_direct_ops;
		neigh->output = neigh_direct_output;
	} else {
		/* Good devices (checked by reading texts, but only Ethernet is
		   tested)

		   ARPHRD_ETHER: (ethernet, apfddi)
		   ARPHRD_FDDI: (fddi)
		   ARPHRD_IEEE802: (tr)
		   ARPHRD_METRICOM: (strip)
		   ARPHRD_ARCNET: etc. etc. etc.
		   ARPHRD_IPDDP will also work, if author repairs it.
		   I did not it, because this driver does not work even
		   in old paradigm.
		 */

		if (neigh->type == RTN_MULTICAST) {
			neigh->nud_state = NUD_NOARP;
			arp_mc_map(addr, neigh->ha, dev, 1);
		} else if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) {
			neigh->nud_state = NUD_NOARP;
			memcpy(neigh->ha, dev->dev_addr, dev->addr_len);
		} else if (neigh->type == RTN_BROADCAST ||
			   (dev->flags & IFF_POINTOPOINT)) {
			neigh->nud_state = NUD_NOARP;
			memcpy(neigh->ha, dev->broadcast, dev->addr_len);
		}

		if (dev->header_ops->cache)
			neigh->ops = &arp_hh_ops;
		else
			neigh->ops = &arp_generic_ops;

		if (neigh->nud_state & NUD_VALID)
			neigh->output = neigh->ops->connected_output;
		else
			neigh->output = neigh->ops->output;
	}
	return 0;
}

static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
{
	dst_link_failure(skb);
	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
}

/* Create and send an arp packet. */
static void arp_send_dst(int type, int ptype, __be32 dest_ip,
			 struct net_device *dev, __be32 src_ip,
			 const unsigned char *dest_hw,
			 const unsigned char *src_hw,
			 const unsigned char *target_hw,
			 struct dst_entry *dst)
{
	struct sk_buff *skb;

	/* arp on this interface.
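	 *
	 * (Editor's note, not part of the original source: a device that
	 * sets IFF_NOARP - typically tunnels and other point-to-point
	 * links - neither sends nor answers ARP, so we bail out here
	 * before building a packet.)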
	 */
	if (dev->flags & IFF_NOARP)
		return;

	skb = arp_create(type, ptype, dest_ip, dev, src_ip,
			 dest_hw, src_hw, target_hw);
	if (!skb)
		return;

	skb_dst_set(skb, dst_clone(dst));
	arp_xmit(skb);
}

void arp_send(int type, int ptype, __be32 dest_ip,
	      struct net_device *dev, __be32 src_ip,
	      const unsigned char *dest_hw, const unsigned char *src_hw,
	      const unsigned char *target_hw)
{
	arp_send_dst(type, ptype, dest_ip, dev, src_ip, dest_hw, src_hw,
		     target_hw, NULL);
}
EXPORT_SYMBOL(arp_send);

static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
	__be32 saddr = 0;
	u8 dst_ha[MAX_ADDR_LEN], *dst_hw = NULL;
	struct net_device *dev = neigh->dev;
	__be32 target = *(__be32 *)neigh->primary_key;
	int probes = atomic_read(&neigh->probes);
	struct in_device *in_dev;
	struct dst_entry *dst = NULL;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}
	switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
	default:
	case 0:		/* By default announce any local IP */
		if (skb && inet_addr_type_dev_table(dev_net(dev), dev,
						    ip_hdr(skb)->saddr) == RTN_LOCAL)
			saddr = ip_hdr(skb)->saddr;
		break;
	case 1:		/* Restrict announcements of saddr in same subnet */
		if (!skb)
			break;
		saddr = ip_hdr(skb)->saddr;
		if (inet_addr_type_dev_table(dev_net(dev), dev,
					     saddr) == RTN_LOCAL) {
			/* saddr should be known to target */
			if (inet_addr_onlink(in_dev, target, saddr))
				break;
		}
		saddr = 0;
		break;
	case 2:		/* Avoid secondary IPs, get a primary/preferred one */
		break;
	}
	rcu_read_unlock();

	if (!saddr)
		saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);

	probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES);
	if (probes < 0) {
		if (!(READ_ONCE(neigh->nud_state) & NUD_VALID))
			pr_debug("trying to ucast probe in NUD_INVALID\n");
		neigh_ha_snapshot(dst_ha, neigh, dev);
		dst_hw = dst_ha;
	} else {
		probes -= NEIGH_VAR(neigh->parms, APP_PROBES);
		if (probes < 0) {
			neigh_app_ns(neigh);
			return;
		}
	}

	if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
		dst = skb_dst(skb);
	arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
		     dst_hw, dev->dev_addr, NULL, dst);
}

static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
{
	struct net *net = dev_net(in_dev->dev);
	int scope;

	switch (IN_DEV_ARP_IGNORE(in_dev)) {
	case 0:	/* Reply, the tip is already validated */
		return 0;
	case 1:	/* Reply only if tip is configured on the incoming interface */
		sip = 0;
		scope = RT_SCOPE_HOST;
		break;
	case 2:	/*
		 * Reply only if tip is configured on the incoming interface
		 * and is in same subnet as sip
		 */
		scope = RT_SCOPE_HOST;
		break;
	case 3:	/* Do not reply for scope host addresses */
		sip = 0;
		scope = RT_SCOPE_LINK;
		in_dev = NULL;
		break;
	case 4:	/* Reserved */
	case 5:
	case 6:
	case 7:
		return 0;
	case 8:	/* Do not reply */
		return 1;
	default:
		return 0;
	}
	return !inet_confirm_addr(net, in_dev, sip, tip, scope);
}

static int arp_accept(struct in_device *in_dev, __be32 sip)
{
	struct net *net = dev_net(in_dev->dev);
	int scope = RT_SCOPE_LINK;

	switch (IN_DEV_ARP_ACCEPT(in_dev)) {
	case 0: /* Don't create new entries from garp */
		return 0;
	case 1: /* Create new entries from garp */
		return 1;
	case 2: /* Create a neighbor in the arp table only if sip
		 * is in the same subnet as an address configured
		 * on the interface that received the garp message
		 */
		return !!inet_confirm_addr(net, in_dev, sip, 0, scope);
	default:
		return 0;
	}
}

static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
{
	struct rtable *rt;
	int flag = 0;
	/*unsigned long now; */
	struct net *net = dev_net(dev);

	rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev),
			     RT_SCOPE_UNIVERSE);
	if (IS_ERR(rt))
		return 1;
	if (rt->dst.dev != dev) {
		__NET_INC_STATS(net, LINUX_MIB_ARPFILTER);
		flag = 1;
	}
	ip_rt_put(rt);
	return flag;
}

/*
 * Check if we can use proxy ARP for this path
 */
static inline int arp_fwd_proxy(struct in_device *in_dev,
				struct net_device *dev, struct rtable *rt)
{
	struct in_device *out_dev;
	int imi, omi = -1;

	if (rt->dst.dev == dev)
		return 0;

	if (!IN_DEV_PROXY_ARP(in_dev))
		return 0;
	imi = IN_DEV_MEDIUM_ID(in_dev);
	if (imi == 0)
		return 1;
	if (imi == -1)
		return 0;

	/* place to check for proxy_arp for routes */

	out_dev = __in_dev_get_rcu(rt->dst.dev);
	if (out_dev)
		omi = IN_DEV_MEDIUM_ID(out_dev);

	return omi != imi && omi != -1;
}

/*
 * Check for RFC3069 proxy arp private VLAN (allow to send back to same dev)
 *
 * RFC3069 supports proxy arp replies back to the same interface.  This
 * is done to support (ethernet) switch features, like RFC 3069, where
 * the individual ports are not allowed to communicate with each
 * other, BUT they are allowed to talk to the upstream router.  As
 * described in RFC 3069, it is possible to allow these hosts to
 * communicate through the upstream router, by proxy_arp'ing.
 *
 * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
 *
 * This technology is known by different names:
 *   In RFC 3069 it is called VLAN Aggregation.
 *   Cisco and Allied Telesyn call it Private VLAN.
 *   Hewlett-Packard call it Source-Port filtering or port-isolation.
 *   Ericsson call it MAC-Forced Forwarding (RFC Draft).
 *
 */
static inline int arp_fwd_pvlan(struct in_device *in_dev,
				struct net_device *dev, struct rtable *rt,
				__be32 sip, __be32 tip)
{
	/* Private VLAN is only concerned about the same ethernet segment */
	if (rt->dst.dev != dev)
		return 0;

	/* Don't reply on self probes (often done by windowz boxes)*/
	if (sip == tip)
		return 0;

	if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
		return 1;
	else
		return 0;
}

/*
 *	Interface to link layer: send routine and receive handler.
 */

/*
 *	Create an arp packet. If dest_hw is not set, we create a broadcast
 *	message.
 */
struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
			   struct net_device *dev, __be32 src_ip,
			   const unsigned char *dest_hw,
			   const unsigned char *src_hw,
			   const unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;

	/*
	 *	Allocate a buffer
	 */

	skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);
	arp = skb_put(skb, arp_hdr_len(dev));
	skb->dev = dev;
	skb->protocol = htons(ETH_P_ARP);
	if (!src_hw)
		src_hw = dev->dev_addr;
	if (!dest_hw)
		dest_hw = dev->broadcast;

	/*
	 *	Fill the device header for the ARP frame
	 */
	if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0)
		goto out;

	/*
	 * Fill out the arp protocol part.
	 *
	 * The arp hardware type should match the device type, except for FDDI,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */
	/*
	 *	Exceptions everywhere. AX.25 uses the AX.25 PID value not the
	 *	DIX code for the protocol. Make these device structure fields.
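	 *
	 *	(Editor's note, not part of the original source: the payload
	 *	written below follows the RFC 826 order - sender hardware
	 *	address, sender IP, target hardware address, target IP -
	 *	with the target hardware address omitted for IEEE 1394,
	 *	whose ARP payload has no such field.)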
	 */
	switch (dev->type) {
	default:
		arp->ar_hrd = htons(dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		break;

#if IS_ENABLED(CONFIG_AX25)
	case ARPHRD_AX25:
		arp->ar_hrd = htons(ARPHRD_AX25);
		arp->ar_pro = htons(AX25_P_IP);
		break;

#if IS_ENABLED(CONFIG_NETROM)
	case ARPHRD_NETROM:
		arp->ar_hrd = htons(ARPHRD_NETROM);
		arp->ar_pro = htons(AX25_P_IP);
		break;
#endif
#endif

#if IS_ENABLED(CONFIG_FDDI)
	case ARPHRD_FDDI:
		arp->ar_hrd = htons(ARPHRD_ETHER);
		arp->ar_pro = htons(ETH_P_IP);
		break;
#endif
	}

	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr += dev->addr_len;
	memcpy(arp_ptr, &src_ip, 4);
	arp_ptr += 4;

	switch (dev->type) {
#if IS_ENABLED(CONFIG_FIREWIRE_NET)
	case ARPHRD_IEEE1394:
		break;
#endif
	default:
		if (target_hw)
			memcpy(arp_ptr, target_hw, dev->addr_len);
		else
			memset(arp_ptr, 0, dev->addr_len);
		arp_ptr += dev->addr_len;
	}
	memcpy(arp_ptr, &dest_ip, 4);

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(arp_create);

static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}

/*
 *	Send an arp packet.
 */
void arp_xmit(struct sk_buff *skb)
{
	rcu_read_lock();
	/* Send it off, maybe filter it using firewalling first.  */
	NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
		dev_net_rcu(skb->dev), NULL, skb, NULL, skb->dev,
		arp_xmit_finish);
	rcu_read_unlock();
}
EXPORT_SYMBOL(arp_xmit);

static bool arp_is_garp(struct net *net, struct net_device *dev,
			int *addr_type, __be16 ar_op,
			__be32 sip, __be32 tip,
			unsigned char *sha, unsigned char *tha)
{
	bool is_garp = tip == sip;

	/* Gratuitous ARP _replies_ also require target hwaddr to be
	 * the same as source.
	 */
	if (is_garp && ar_op == htons(ARPOP_REPLY))
		is_garp =
			/* IPv4 over IEEE 1394 doesn't provide target
			 * hardware address field in its ARP payload.
			 */
			tha &&
			!memcmp(tha, sha, dev->addr_len);

	if (is_garp) {
		*addr_type = inet_addr_type_dev_table(net, dev, sip);
		if (*addr_type != RTN_UNICAST)
			is_garp = false;
	}
	return is_garp;
}

/*
 *	Process an arp request.
 */

static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct arphdr *arp;
	unsigned char *arp_ptr;
	struct rtable *rt;
	unsigned char *sha;
	unsigned char *tha = NULL;
	__be32 sip, tip;
	u16 dev_type = dev->type;
	int addr_type;
	struct neighbour *n;
	struct dst_entry *reply_dst = NULL;
	bool is_garp = false;

	/* arp_rcv below verifies the ARP header and verifies the device
	 * is ARP'able.
	 */

	if (!in_dev)
		goto out_free_skb;

	arp = arp_hdr(skb);

	switch (dev_type) {
	default:
		if (arp->ar_pro != htons(ETH_P_IP) ||
		    htons(dev_type) != arp->ar_hrd)
			goto out_free_skb;
		break;
	case ARPHRD_ETHER:
	case ARPHRD_FDDI:
	case ARPHRD_IEEE802:
		/*
		 * ETHERNET, and Fibre Channel (which are IEEE 802
		 * devices, according to RFC 2625) devices will accept ARP
		 * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2).
		 * This is the case also of FDDI, where the RFC 1390 says that
		 * FDDI devices should accept ARP hardware of (1) Ethernet,
		 * however, to be more robust, we'll accept both 1 (Ethernet)
		 * or 6 (IEEE 802.2)
		 */
		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP))
			goto out_free_skb;
		break;
	case ARPHRD_AX25:
		if (arp->ar_pro != htons(AX25_P_IP) ||
		    arp->ar_hrd != htons(ARPHRD_AX25))
			goto out_free_skb;
		break;
	case ARPHRD_NETROM:
		if (arp->ar_pro != htons(AX25_P_IP) ||
		    arp->ar_hrd != htons(ARPHRD_NETROM))
			goto out_free_skb;
		break;
	}

	/* Understand only these message types */

	if (arp->ar_op != htons(ARPOP_REPLY) &&
	    arp->ar_op != htons(ARPOP_REQUEST))
		goto out_free_skb;

/*
 *	Extract fields
 */
	arp_ptr = (unsigned char *)(arp + 1);
	sha	= arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	switch (dev_type) {
#if IS_ENABLED(CONFIG_FIREWIRE_NET)
	case ARPHRD_IEEE1394:
		break;
#endif
	default:
		tha = arp_ptr;
		arp_ptr += dev->addr_len;
	}
	memcpy(&tip, arp_ptr, 4);
/*
 *	Check for bad requests for 127.x.x.x and requests for multicast
 *	addresses.  If this is one such, delete it.
 */
	if (ipv4_is_multicast(tip) ||
	    (!IN_DEV_ROUTE_LOCALNET(in_dev) && ipv4_is_loopback(tip)))
		goto out_free_skb;

	/*
	 * For some 802.11 wireless deployments (and possibly other networks),
	 * there will be an ARP proxy and gratuitous ARP frames are attacks
	 * and thus should not be accepted.
	 */
	if (sip == tip && IN_DEV_ORCONF(in_dev, DROP_GRATUITOUS_ARP))
		goto out_free_skb;

/*
 *	Special case: We must set Frame Relay source Q.922 address
 */
	if (dev_type == ARPHRD_DLCI)
		sha = dev->broadcast;

/*
 *  Process entry.  The idea here is we want to send a reply if it is a
 *  request for us or if it is a request for someone else that we hold
 *  a proxy for.  We want to add an entry to our cache if it is a reply
 *  to us or if it is a request for our address.
 *  (The assumption for this last is that if someone is requesting our
 *  address, they are probably intending to talk to us, so it saves time
 *  if we cache their address.  Their address is also probably not in
 *  our cache, since ours is not in their cache.)
 *
 *  Putting this another way, we only care about replies if they are to
 *  us, in which case we add them to the cache.  For requests, we care
 *  about those for us and those for our proxies.  We reply to both,
 *  and in the case of requests for us we add the requester to the arp
 *  cache.
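 *
 *  (Editor's illustration, example addresses only: when 192.0.2.2
 *  broadcasts "who-has 192.0.2.1" and 192.0.2.1 is local, we reply and
 *  cache 192.0.2.2's hardware address; when 192.0.2.1 is only an address
 *  we proxy for, we reply as well, possibly after the proxy delay; a
 *  reply refreshes reachability only when it was unicast to this host.)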
 */
	if (arp->ar_op == htons(ARPOP_REQUEST) && skb_metadata_dst(skb))
		reply_dst = (struct dst_entry *)
			    iptunnel_metadata_reply(skb_metadata_dst(skb),
						    GFP_ATOMIC);

	/* Special case: IPv4 duplicate address detection packet (RFC2131) */
	if (sip == 0) {
		if (arp->ar_op == htons(ARPOP_REQUEST) &&
		    inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL &&
		    !arp_ignore(in_dev, sip, tip))
			arp_send_dst(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip,
				     sha, dev->dev_addr, sha, reply_dst);
		goto out_consume_skb;
	}

	if (arp->ar_op == htons(ARPOP_REQUEST) &&
	    ip_route_input_noref(skb, tip, sip, 0, dev) == 0) {

		rt = skb_rtable(skb);
		addr_type = rt->rt_type;

		if (addr_type == RTN_LOCAL) {
			int dont_send;

			dont_send = arp_ignore(in_dev, sip, tip);
			if (!dont_send && IN_DEV_ARPFILTER(in_dev))
				dont_send = arp_filter(sip, tip, dev);
			if (!dont_send) {
				n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
				if (n) {
					arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
						     sip, dev, tip, sha,
						     dev->dev_addr, sha,
						     reply_dst);
					neigh_release(n);
				}
			}
			goto out_consume_skb;
		} else if (IN_DEV_FORWARD(in_dev)) {
			if (addr_type == RTN_UNICAST &&
			    (arp_fwd_proxy(in_dev, dev, rt) ||
			     arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
			     (rt->dst.dev != dev &&
			      pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
				n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
				if (n)
					neigh_release(n);

				if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
				    skb->pkt_type == PACKET_HOST ||
				    NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) {
					arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
						     sip, dev, tip, sha,
						     dev->dev_addr, sha,
						     reply_dst);
				} else {
					pneigh_enqueue(&arp_tbl,
						       in_dev->arp_parms, skb);
					goto out_free_dst;
				}
				goto out_consume_skb;
			}
		}
	}

	/* Update our ARP tables */

	n = __neigh_lookup(&arp_tbl, &sip, dev, 0);

	addr_type = -1;
	if (n || arp_accept(in_dev, sip)) {
		is_garp = arp_is_garp(net, dev, &addr_type, arp->ar_op,
				      sip, tip, sha, tha);
	}

	if (arp_accept(in_dev, sip)) {
		/* Unsolicited ARP is not accepted by default.
		   It is possible, that this option should be enabled for some
		   devices (strip is candidate)
		 */
		if (!n &&
		    (is_garp ||
		     (arp->ar_op == htons(ARPOP_REPLY) &&
		      (addr_type == RTN_UNICAST ||
		       (addr_type < 0 &&
			/* postpone calculation to as late as possible */
			inet_addr_type_dev_table(net, dev, sip) ==
				RTN_UNICAST)))))
			n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
	}

	if (n) {
		int state = NUD_REACHABLE;
		int override;

		/* If several different ARP replies follows back-to-back,
		   use the FIRST one. It is possible, if several proxy
		   agents are active. Taking the first reply prevents
		   arp trashing and chooses the fastest router.
		 */
		override = time_after(jiffies,
				      n->updated +
				      NEIGH_VAR(n->parms, LOCKTIME)) ||
			   is_garp;

		/* Broadcast replies and request packets
		   do not assert neighbour reachability.
		 */
		if (arp->ar_op != htons(ARPOP_REPLY) ||
		    skb->pkt_type != PACKET_HOST)
			state = NUD_STALE;
		neigh_update(n, sha, state,
			     override ? NEIGH_UPDATE_F_OVERRIDE : 0, 0);
		neigh_release(n);
	}

out_consume_skb:
	consume_skb(skb);

out_free_dst:
	dst_release(reply_dst);
	return NET_RX_SUCCESS;

out_free_skb:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static void parp_redo(struct sk_buff *skb)
{
	arp_process(dev_net(skb->dev), NULL, skb);
}

static int arp_is_multicast(const void *pkey)
{
	return ipv4_is_multicast(*((__be32 *)pkey));
}

/*
 *	Receive an arp request from the device layer.
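 *
 *	(Editor's note, not part of the original source: only cheap sanity
 *	checks are done here - the device must be ARP-capable, shared skbs
 *	are cloned, the full header must be pullable and the address
 *	lengths must match the device - before the packet is handed to
 *	arp_process() through the NF_ARP_IN netfilter hook.)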
 */
static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	const struct arphdr *arp;

	/* do not tweak dropwatch on an ARP we will ignore */
	if (dev->flags & IFF_NOARP ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    skb->pkt_type == PACKET_LOOPBACK)
		goto consumeskb;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out_of_mem;

	/* ARP header, plus 2 device addresses, plus 2 IP addresses.  */
	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
		goto freeskb;

	arp = arp_hdr(skb);
	if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
		goto freeskb;

	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));

	return NF_HOOK(NFPROTO_ARP, NF_ARP_IN,
		       dev_net(dev), NULL, skb, dev, NULL,
		       arp_process);

consumeskb:
	consume_skb(skb);
	return NET_RX_SUCCESS;
freeskb:
	kfree_skb(skb);
out_of_mem:
	return NET_RX_DROP;
}

/*
 *	User level interface (ioctl)
 */

static struct net_device *arp_req_dev_by_name(struct net *net, struct arpreq *r,
					      bool getarp)
{
	struct net_device *dev;

	if (getarp)
		dev = dev_get_by_name_rcu(net, r->arp_dev);
	else
		dev = __dev_get_by_name(net, r->arp_dev);
	if (!dev)
		return ERR_PTR(-ENODEV);

	/* Mmmm... It is wrong... ARPHRD_NETROM == 0 */
	if (!r->arp_ha.sa_family)
		r->arp_ha.sa_family = dev->type;

	if ((r->arp_flags & ATF_COM) && r->arp_ha.sa_family != dev->type)
		return ERR_PTR(-EINVAL);

	return dev;
}

static struct net_device *arp_req_dev(struct net *net, struct arpreq *r)
{
	struct net_device *dev;
	struct rtable *rt;
	__be32 ip;

	if (r->arp_dev[0])
		return arp_req_dev_by_name(net, r, false);

	if (r->arp_flags & ATF_PUBL)
		return NULL;

	ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;

	rt = ip_route_output(net, ip, 0, 0, 0, RT_SCOPE_LINK);
	if (IS_ERR(rt))
		return ERR_CAST(rt);

	dev = rt->dst.dev;
	ip_rt_put(rt);
	if (!dev)
		return ERR_PTR(-EINVAL);

	return dev;
}

/*
 *	Set (create) an ARP cache entry.
 */

static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
{
	if (!dev) {
		IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
		return 0;
	}
	if (__in_dev_get_rtnl_net(dev)) {
		IN_DEV_CONF_SET(__in_dev_get_rtnl_net(dev), PROXY_ARP, on);
		return 0;
	}
	return -ENXIO;
}

static int arp_req_set_public(struct net *net, struct arpreq *r,
			      struct net_device *dev)
{
	__be32 mask = ((struct sockaddr_in *)&r->arp_netmask)->sin_addr.s_addr;

	if (!de