// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>
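/* tcp_in_window() returns true if any part of [seq, end_seq] overlaps the
 * receive window [s_win, e_win], including the degenerate case of a
 * zero-length segment sitting exactly on either window edge.  For example,
 * with s_win = 100 and e_win = 200, a segment covering 90..110 is in
 * window, and a bare ACK with seq == end_seq == 200 is accepted only
 * because it lands exactly on the right edge.
 */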
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                  const struct sk_buff *skb,
                                  int mib_idx)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

        if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                  &tcptw->tw_last_oow_ack_time)) {
                /* Send ACK. Note, we do not put the bucket;
                 * it will be released by the caller.
                 */
                return TCP_TW_ACK;
        }

        /* We are rate-limiting, so just release the tw sock and drop skb. */
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}

static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq,
                                u32 rcv_nxt)
{
#ifdef CONFIG_TCP_AO
        struct tcp_ao_info *ao;

        ao = rcu_dereference(tcptw->ao_info);
        if (unlikely(ao && seq < rcv_nxt))
                WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
        WRITE_ONCE(tcptw->tw_rcv_nxt, seq);
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) while one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that it
 *   is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it
 *   exceeds the maximal retransmission timeout by enough to tolerate the
 *   loss of one (or more) segments sent by the peer and of our ACKs. This
 *   time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed, and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
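/* Return values: TCP_TW_SUCCESS - segment consumed, the tw reference has
 * already been dropped here; TCP_TW_ACK - the caller should answer with an
 * ACK (the tw reference is kept for it, see
 * tcp_timewait_check_oow_rate_limit()); TCP_TW_RST - the caller should send
 * a reset; TCP_TW_SYN - acceptable new SYN, *tw_isn is set to the initial
 * sequence number to use for the reopened connection.
 */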
enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
                                              struct sk_buff *skb,
                                              const struct tcphdr *th,
                                              u32 *tw_isn)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt);
        struct tcp_options_received tmp_opt;
        bool paws_reject = false;
        int ts_recent_stamp;

        tmp_opt.saw_tstamp = 0;
        ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
        if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
                tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
                        tmp_opt.ts_recent       = READ_ONCE(tcptw->tw_ts_recent);
                        tmp_opt.ts_recent_stamp = ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   rcv_nxt, rcv_nxt + tcptw->tw_rcv_wnd))
                        return tcp_timewait_check_oow_rate_limit(
                                tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt))
                        return TCP_TW_RST;

                /* Dup ACK? */
                if (!th->ack ||
                    !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrives after a half-duplex
                 * close, reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1)
                        return TCP_TW_RST;

                /* FIN arrived, enter true time-wait state. */
                WRITE_ONCE(tw->tw_substate, TCP_TIME_WAIT);
                twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq, rcv_nxt);

                if (tmp_opt.saw_tstamp) {
                        WRITE_ONCE(tcptw->tw_ts_recent_stamp,
                                   ktime_get_seconds());
                        WRITE_ONCE(tcptw->tw_ts_recent,
                                   tmp_opt.rcv_tsval);
                }

                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         *      Now the real TIME-WAIT state.
         *
         *      RFC 1122:
         *      "When a connection is [...] on TIME-WAIT state [...]
         *      [a TCP] MAY accept a new SYN from the remote TCP to
         *      reopen the connection directly, if it:
         *
         *      (1)  assigns its initial sequence number for the new
         *      connection to be larger than the largest sequence
         *      number it used on the previous connection incarnation,
         *      and
         *
         *      (2)  returns to TIME-WAIT state if the SYN turns out
         *      to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* An in-window segment here may only be a reset or a bare ack. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                } else {
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }

                if (tmp_opt.saw_tstamp) {
                        WRITE_ONCE(tcptw->tw_ts_recent,
                                   tmp_opt.rcv_tsval);
                        WRITE_ONCE(tcptw->tw_ts_recent_stamp,
                                   ktime_get_seconds());
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.
         *
         * All such segments are ACKed immediately.
         *
         * The only exception is a new SYN. We accept it if it is not an old
         * duplicate and we are in no danger of being killed by delayed old
         * duplicates. The RFC check, that the SYN carries a newer sequence
         * number, only works at rates below ~40Mbit/sec (above that,
         * sequence numbers can wrap within a segment lifetime). However,
         * when PAWS works, it is reliable; even more, we may then relax the
         * silly sequence-space cutoff.
         *
         * RED-PEN: we violate the main RFC requirement here: if this SYN
         * turns out to be an old duplicate (i.e. we receive an RST in reply
         * to our SYN-ACK), we must return the socket to the time-wait state.
         * This is not good, but not fatal yet.
         */
        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

                if (isn == 0)
                        isn++;
                *tw_isn = isn;
                return TCP_TW_SYN;
        }

        if (paws_reject)
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * An ACKless SYN may be both an old duplicate and a
                 * new good SYN with a random sequence number < rcv_nxt.
                 * Do not reschedule in the latter case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

                return tcp_timewait_check_oow_rate_limit(
                        tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;

        /*
         * The timewait bucket does not have the key DB from the
         * sock structure. We just make a quick copy of the
         * md5 key being used (if indeed we are using one)
         * so the timewait ack generating code has the key.
         */
        tcptw->tw_md5_key = NULL;
        if (!static_branch_unlikely(&tcp_md5_needed.key))
                return;

        key = tp->af_specific->md5_lookup(sk, sk);
        if (key) {
                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                if (!tcptw->tw_md5_key)
                        return;
                if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
                        goto out_free;
                tcp_md5_add_sigpool();
        }
        return;
out_free:
        WARN_ON_ONCE(1);
        kfree(tcptw->tw_md5_key);
        tcptw->tw_md5_key = NULL;
#endif
}
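/* The rto value computed below works out to 3.5 times the retransmission
 * timeout: (icsk_rto << 2) - (icsk_rto >> 1) == 4*RTO - RTO/2 == 3.5*RTO.
 * Per the discussion above tcp_timewait_state_process(), the TIME-WAIT
 * timeout must exceed the maximal retransmission timeout by enough to
 * tolerate lost segments from the peer and lost ACKs of ours.
 */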
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_timewait_sock *tw;

        tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

        if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

                tw->tw_transparent      = inet_test_bit(TRANSPARENT, sk);
                tw->tw_mark             = sk->sk_mark;
                tw->tw_priority         = READ_ONCE(sk->sk_priority);
                tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt       = tp->rcv_nxt;
                tcptw->tw_snd_nxt       = tp->snd_nxt;
                tcptw->tw_rcv_wnd       = tcp_receive_window(tp);
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset     = tp->tsoffset;
                tw->tw_usec_ts          = tp->tcp_usec_ts;
                tcptw->tw_last_oow_ack_time = 0;
                tcptw->tw_tx_delay      = tp->tcp_tx_delay;
                tw->tw_txhash           = sk->sk_txhash;
                tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
                tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
#endif
#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
#endif

                tcp_time_wait_init(sk, tcptw);
                tcp_ao_time_wait(tcptw, tp);

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;

                /* Linkage updates.
                 * Note that access to tw after this point is illegal.
                 */
                inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
        struct tcp_md5sig_key *key;

        key = container_of(head, struct tcp_md5sig_key, rcu);
        kfree(key);
        static_branch_slow_dec_deferred(&tcp_md5_needed);
        tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        if (static_branch_unlikely(&tcp_md5_needed.key)) {
                struct tcp_timewait_sock *twsk = tcp_twsk(sk);

                if (twsk->tw_md5_key)
                        call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
        }
#endif
        tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list)
{
        bool purged_once = false;
        struct net *net;

        list_for_each_entry(net, net_exit_list, exit_list) {
                if (net->ipv4.tcp_death_row.hashinfo->pernet) {
                        /* Even if tw_refcount == 1, we must clean up kernel reqsk */
                        inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
                } else if (!purged_once) {
                        inet_twsk_purge(&tcp_hashinfo);
                        purged_once = true;
                }
        }
}
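/* Sketch of the flow in tcp_openreq_init_rwin() below: the advertised MSS
 * comes from the route (dst_metric_advmss()) clamped by the listener, the
 * window clamp from the listener or the route (RTAX_WINDOW), and the
 * initial receive window from an attached BPF program (tcp_rwnd_init_bpf())
 * if it provides one, else from the route (RTAX_INITRWND).
 * tcp_select_initial_window() then folds these into rsk_rcv_wnd,
 * rsk_window_clamp and rcv_wscale.
 */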
/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
                           const struct sock *sk_listener,
                           const struct dst_entry *dst)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk_listener);
        int full_space = tcp_full_space(sk_listener);
        u32 window_clamp;
        __u8 rcv_wscale;
        u32 rcv_wnd;
        int mss;

        mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
        window_clamp = READ_ONCE(tp->window_clamp);
        /* Set this up on the first call only */
        req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

        /* limit the window selection if the user enforces a smaller rx buffer */
        if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
            (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
                req->rsk_window_clamp = full_space;

        rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
        if (rcv_wnd == 0)
                rcv_wnd = dst_metric(dst, RTAX_INITRWND);
        else if (full_space < rcv_wnd * mss)
                full_space = rcv_wnd * mss;

        /* tcp_full_space because it is guaranteed to be the first packet */
        tcp_select_initial_window(sk_listener, full_space,
                                  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                                  &req->rsk_rcv_wnd,
                                  &req->rsk_window_clamp,
                                  ireq->wscale_ok,
                                  &rcv_wscale,
                                  rcv_wnd);
        ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
                                  const struct request_sock *req)
{
        tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
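/* Pick the congestion control for the child socket.  The precedence
 * implemented below: an algorithm pinned by the route (RTAX_CC_ALGO key)
 * wins; otherwise one the user selected via setsockopt(TCP_CONGESTION) is
 * kept, provided a module reference can still be taken; otherwise the
 * system default is assigned via tcp_assign_congestion_control().
 */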
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
        bool ca_got_dst = false;

        if (ca_key != TCP_CA_UNSPEC) {
                const struct tcp_congestion_ops *ca;

                rcu_read_lock();
                ca = tcp_ca_find_key(ca_key);
                if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
                        icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
                        icsk->icsk_ca_ops = ca;
                        ca_got_dst = true;
                }
                rcu_read_unlock();
        }

        /* If no valid choice was made yet, assign the current system
         * default congestion control.
         */
        if (!ca_got_dst &&
            (!icsk->icsk_ca_setsockopt ||
             !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);

        tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
                                    struct request_sock *req,
                                    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
        struct inet_request_sock *ireq;

        if (static_branch_unlikely(&tcp_have_smc)) {
                ireq = inet_rsk(req);
                if (oldtp->syn_smc && !ireq->smc_ok)
                        newtp->syn_smc = 0;
        }
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid a lot of the memory writes here: tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_request_sock *treq = tcp_rsk(req);
        struct inet_connection_sock *newicsk;
        const struct tcp_sock *oldtp;
        struct tcp_sock *newtp;
        u32 seq;

        if (!newsk)
                return NULL;

        newicsk = inet_csk(newsk);
        newtp = tcp_sk(newsk);
        oldtp = tcp_sk(sk);

        smc_check_reset_syn_req(oldtp, req, newtp);

        /* Now setup tcp_sock */
        newtp->pred_flags = 0;

        seq = treq->rcv_isn + 1;
        newtp->rcv_wup = seq;
        WRITE_ONCE(newtp->copied_seq, seq);
        WRITE_ONCE(newtp->rcv_nxt, seq);
        newtp->segs_in = 1;

        seq = treq->snt_isn + 1;
        newtp->snd_sml = newtp->snd_una = seq;
        WRITE_ONCE(newtp->snd_nxt, seq);
        newtp->snd_up = seq;

        INIT_LIST_HEAD(&newtp->tsq_node);
        INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

        tcp_init_wl(newtp, treq->rcv_isn);

        minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
        newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

        newtp->lsndtime = tcp_jiffies32;
        newsk->sk_txhash = READ_ONCE(treq->txhash);
        newtp->total_retrans = req->num_retrans;

        tcp_init_xmit_timers(newsk);
        WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

        if (sock_flag(newsk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(newsk,
                                               keepalive_time_when(newtp));

        newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
        newtp->rx_opt.sack_ok = ireq->sack_ok;
        newtp->window_clamp = req->rsk_window_clamp;
        newtp->rcv_ssthresh = req->rsk_rcv_wnd;
        newtp->rcv_wnd = req->rsk_rcv_wnd;
        newtp->rx_opt.wscale_ok = ireq->wscale_ok;
        if (newtp->rx_opt.wscale_ok) {
                newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
        } else {
                newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                newtp->window_clamp = min(newtp->window_clamp, 65535U);
        }
        newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
        newtp->max_window = newtp->snd_wnd;

        if (newtp->rx_opt.tstamp_ok) {
                newtp->tcp_usec_ts = treq->req_usec_ts;
                newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
                newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
                newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
        } else {
                newtp->tcp_usec_ts = 0;
                newtp->rx_opt.ts_recent_stamp = 0;
                newtp->tcp_header_len = sizeof(struct tcphdr);
        }
        if (req->num_timeout) {
                newtp->total_rto = req->num_timeout;
                newtp->undo_marker = treq->snt_isn;
                if (newtp->tcp_usec_ts) {
                        newtp->retrans_stamp = treq->snt_synack;
                        newtp->total_rto_time = (u32)(tcp_clock_us() -
                                                      newtp->retrans_stamp) /
                                                USEC_PER_MSEC;
                } else {
                        newtp->retrans_stamp = div_u64(treq->snt_synack,
                                                       USEC_PER_SEC / TCP_TS_HZ);
                        newtp->total_rto_time = tcp_clock_ms() -
                                                newtp->retrans_stamp;
                }
                newtp->total_rto_recoveries = 1;
        }
        newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
        newtp->md5sig_info = NULL;      /*XXX*/
#endif
#ifdef CONFIG_TCP_AO
        newtp->ao_info = NULL;

        if (tcp_rsk_used_ao(req)) {
                struct tcp_ao_key *ao_key;

                ao_key = treq->af_specific->ao_lookup(sk, req,
                                                      tcp_rsk(req)->ao_keyid, -1);
                if (ao_key)
                        newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
        }
#endif
        if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
        newtp->rx_opt.mss_clamp = req->mss;
        tcp_ecn_openreq_child(newtp, req);
        newtp->fastopen_req = NULL;
        RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

        newtp->bpf_chg_cc_inprogress = 0;
        tcp_bpf_clone(sk, newsk);

        __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

        xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1);

        return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
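/* Worked example for the snd_wnd initialisation in
 * tcp_create_openreq_child(): if the incoming segment carries a raw window
 * of 4096 and the peer negotiated snd_wscale == 7, the child starts with
 * snd_wnd = 4096 << 7 = 524288 bytes.  Without window scaling, window_clamp
 * is capped at 65535, the largest window representable in the 16-bit
 * header field.
 */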
/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation, and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           bool fastopen, bool *req_stolen)
{
        struct tcp_options_received tmp_opt;
        struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;
        bool own_req;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
                        /* We do not store the true stamp, but it is not
                         * required: it can be estimated (approximately)
                         * from other data.
                         */
                        tmp_opt.ts_recent_stamp = ktime_get_seconds() -
                                        reqsk_timeout(req, TCP_RTO_MAX) / HZ;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC793 draws (incorrectly! It was fixed in RFC1122)
                 * this case on figure 6 and figure 8, but the formal
                 * protocol description says NOTHING.
                 * To be more exact, it says that we should send an ACK,
                 * because this segment (at least, if it has no data)
                 * is out of window.
                 *
                 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
                 *  describe the SYN-RECV state. All the description
                 *  is wrong; we cannot trust it and should
                 *  rely only on common sense and implementation
                 *  experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 *
                 * Note that even if there is new data in the SYN packet
                 * it will be thrown away too.
                 *
                 * Reset the timer after retransmitting the SYNACK, similar
                 * to the idea of fast retransmit in recovery.
                 */
                if (!tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time) &&
                    !inet_rtx_syn_ack(sk, req)) {
                        unsigned long expires = jiffies;

                        expires += reqsk_timeout(req, TCP_RTO_MAX);
                        if (!fastopen)
                                mod_timer_pending(&req->rsk_timer, expires);
                        else
                                req->rsk_timer.expires = expires;
                }
                return NULL;
        }

        /* Further reproduces section "SEGMENT ARRIVES"
         * for state SYN-RECEIVED of RFC793.
         * It is broken, however: the only case it mishandles is when
         * SYNs are crossed.
         *
         * You would think that SYN crossing is impossible here, since
         * we should have a SYN_SENT socket (from connect()) on our end,
         * but this is not true if the crossed SYNs were sent to both
         * ends by a malicious third party. We must defend against this,
         * and to do that we first verify the ACK (as per RFC793, page
         * 36) and reset if it is invalid. Is this a true full defense?
         * To convince ourselves, let us consider a way in which the ACK
         * test can still pass in this 'malicious crossed SYNs' case.
         * The malicious sender sends identical SYNs (and thus identical
         * sequence numbers) to both A and B:
         *
         *   A: gets SYN, seq=7
         *   B: gets SYN, seq=7
         *
         * By our good fortune, both A and B select the same initial
         * send sequence number of seven :-)
         *
         *   A: sends SYN|ACK, seq=7, ack_seq=8
         *   B: sends SYN|ACK, seq=7, ack_seq=8
         *
         * So we are now A eating this SYN|ACK, the ACK test passes. So
         * does the sequence test, the SYN is truncated, and thus we
         * consider it a bare ACK.
         *
         * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
         * this bare ACK. Otherwise, we create an established connection.
         * Both ends (listening sockets) accept the new incoming connection
         * and try to talk to each other. 8-)
         *
         * Note: this case is both harmless and rare. The possibility is
         * about the same as us discovering intelligent life on another
         * planet tomorrow.
         *
         * But generally, we should (the RFC lies!) accept an ACK
         * from a SYNACK both here and in tcp_rcv_state_process().
         * tcp_rcv_state_process() does not, hence we do not either.
         *
         * Note that the case is absolutely generic:
         * we cannot optimize anything here without
         * violating the protocol. All the checks must be made
         * before an attempt to create a socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         * and the incoming segment acknowledges something not yet
         * sent (the segment carries an unacceptable ACK) ...
         * a reset is sent."
         *
         * Invalid ACK: the reset will be sent by the listening socket.
         * Note that the ACK validity check for a Fast Open socket is done
         * elsewhere and is checked directly against the child socket rather
         * than req because user data may have been sent out.
         */
        if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* Also, it would not be a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too-early or too-late values
         * should cause a reset in unsynchronized states.
         */

        /* RFC793: "first check sequence number". */

        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq,
                                          TCP_SKB_CB(skb)->end_seq,
                                          tcp_rsk(req)->rcv_nxt,
                                          tcp_rsk(req)->rcv_nxt +
                                          tcp_synack_window(req))) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST) &&
                    !tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time))
                        req->rsk_ops->send_ack(sk, skb, req);
                if (paws_reject)
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        /* TODO: We probably should defer ts_recent changes until
         * we take ownership of @req.
         */
        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                 * at tcp_rsk(req)->rcv_isn + 1.
                 */
                flg &= ~TCP_FLAG_SYN;
        }

        /* RFC793: "second check the RST bit" and
         *         "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
                TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
                goto embryonic_reset;
        }

        /* The ACK sequence was verified above; just make sure the ACK flag
         * is set. If it is not, silently drop the packet.
         *
         * XXX (TFO) - if we ever allow "data after SYN", the
         * following check needs to be removed.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;

        /* For Fast Open no more processing is needed (sk is the
         * child socket).
         */
        if (fastopen)
                return sk;
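        /* A bare ACK here is one whose end_seq is exactly rcv_isn + 1: the
         * peer's SYN consumed one sequence number, so an ACK carrying no
         * payload ends right there.  TCP_DEFER_ACCEPT (see tcp(7)) drops
         * such ACKs until data arrives or the SYNACK has been retransmitted
         * rskq_defer_accept times.
         */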
        /* While TCP_DEFER_ACCEPT is active, drop the bare ACK. */
        if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }

        /* OK, the ACK is valid: create the big socket and
         * feed this segment to it. It will repeat all
         * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
         * THE ESTABLISHED STATE. If it is dropped after the
         * socket is created, expect trouble.
         */
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         req, &own_req);
        if (!child)
                goto listen_overflow;

        if (own_req && rsk_drop_req(req)) {
                reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
                inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
                return child;
        }

        sock_rps_save_rxhash(child, skb);
        tcp_synack_rtt_meas(child, req);
        *req_stolen = !own_req;
        return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
        if (sk != req->rsk_listener)
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

        if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

embryonic_reset:
        if (!(flg & TCP_FLAG_RST)) {
                /* Received a bad SYN pkt - for TFO we try not to reset
                 * the local connection unless it's really necessary, to
                 * avoid becoming vulnerable to outside attacks aimed at
                 * resetting legit local connections.
                 */
                req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN);
        } else if (fastopen) { /* received a valid RST pkt */
                reqsk_fastopen_remove(sk, req, true);
                tcp_reset(sk, skb);
        }
        if (!fastopen) {
                bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

                if (unlinked)
                        __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
                *req_stolen = !unlinked;
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
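/* Typical caller flow, e.g. on the IPv4 input path: tcp_v4_rcv() finds the
 * request socket, calls tcp_check_req() above, and when a child socket
 * comes back hands the segment to tcp_child_process() below.
 */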
/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
                                       struct sk_buff *skb)
        __releases(&((child)->sk_lock.slock))
{
        enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
        int state = child->sk_state;

        /* record sk_napi_id and sk_rx_queue_mapping of child. */
        sk_mark_napi_id_set(child, skb);

        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
                reason = tcp_rcv_state_process(child, skb);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent);
        } else {
                /* Alas, it is possible again, because we do the lookup
                 * in the main socket hash table and the lock on the
                 * listening socket no longer protects us.
                 */
                __sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return reason;
}
EXPORT_SYMBOL(tcp_child_process);
6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 
7322 7323 7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 8025 8026 8027 8028 8029 8030 8031 8032 
8033 8034 8035 8036 8037 8038 8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052 8053 8054 8055 8056 8057 8058 8059 8060 8061 8062 8063 8064 8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092 8093 8094 8095 8096 8097 8098 8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111 8112 8113 8114 8115 8116 8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162 8163 8164 8165 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 8729 8730 8731 8732 8733 8734 8735 8736 8737 8738 8739 8740 8741 8742 8743 
8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 9447 9448 9449 9450 9451 9452 9453 9454 
9455 9456 9457 9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613 9614 9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625 9626 9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648 9649 9650 9651 9652 9653 9654 9655 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711 9712 9713 9714 9715 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816 9817 9818 9819 9820 9821 9822 9823 9824 9825 9826 9827 9828 9829 9830 9831 9832 9833 9834 9835 9836 9837 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 9868 9869 9870 9871 9872 9873 9874 9875 9876 9877 9878 9879 9880 9881 9882 9883 9884 9885 9886 9887 9888 9889 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2014 Realtek Semiconductor Corp. All rights reserved. */ #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/if_vlan.h> #include <linux/uaccess.h> #include <linux/list.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> #include <uapi/linux/mdio.h> #include <linux/mdio.h> #include <linux/usb/cdc.h> #include <linux/suspend.h> #include <linux/atomic.h> #include <linux/acpi.h> #include <linux/firmware.h> #include <crypto/hash.h> #include <linux/usb/r8152.h> #include <net/gso.h> /* Information for net-next */ #define NETNEXT_VERSION "12" /* Information for net */ #define NET_VERSION "13" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" #define MODULENAME "r8152" #define R8152_PHY_ID 32 #define PLA_IDR 0xc000 #define PLA_RCR 0xc010 #define PLA_RCR1 0xc012 #define PLA_RMS 0xc016 #define PLA_RXFIFO_CTRL0 0xc0a0 #define PLA_RXFIFO_FULL 0xc0a2 #define PLA_RXFIFO_CTRL1 0xc0a4 #define PLA_RX_FIFO_FULL 0xc0a6 #define PLA_RXFIFO_CTRL2 0xc0a8 #define PLA_RX_FIFO_EMPTY 0xc0aa #define PLA_DMY_REG0 0xc0b0 #define PLA_FMC 0xc0b4 #define PLA_CFG_WOL 0xc0b6 #define PLA_TEREDO_CFG 0xc0bc #define PLA_TEREDO_WAKE_BASE 0xc0c4 #define PLA_MAR 0xcd00 #define PLA_BACKUP 0xd000 #define PLA_BDC_CR 0xd1a0 #define PLA_TEREDO_TIMER 0xd2cc #define PLA_REALWOW_TIMER 0xd2e8 #define PLA_UPHY_TIMER 0xd388 #define PLA_SUSPEND_FLAG 0xd38a #define PLA_INDICATE_FALG 0xd38c #define PLA_MACDBG_PRE 0xd38c /* RTL_VER_04 only */ #define PLA_MACDBG_POST 0xd38e /* RTL_VER_04 only */ #define PLA_EXTRA_STATUS 0xd398 #define PLA_GPHY_CTRL 0xd3ae #define PLA_POL_GPIO_CTRL 0xdc6a #define PLA_EFUSE_DATA 0xdd00 #define PLA_EFUSE_CMD 0xdd02 #define PLA_LEDSEL 0xdd90 #define PLA_LED_FEATURE 0xdd92 #define PLA_PHYAR 0xde00 #define PLA_BOOT_CTRL 0xe004 #define PLA_LWAKE_CTRL_REG 0xe007 #define PLA_GPHY_INTR_IMR 0xe022 #define PLA_EEE_CR 0xe040 #define PLA_EEE_TXTWSYS 0xe04c #define PLA_EEE_TXTWSYS_2P5G 0xe058 #define PLA_EEEP_CR 0xe080 #define PLA_MAC_PWR_CTRL 0xe0c0 #define PLA_MAC_PWR_CTRL2 0xe0ca #define PLA_MAC_PWR_CTRL3 0xe0cc #define PLA_MAC_PWR_CTRL4 0xe0ce #define PLA_WDT6_CTRL 0xe428 #define PLA_TCR0 0xe610 #define PLA_TCR1 0xe612 #define PLA_MTPS 0xe615 #define PLA_TXFIFO_CTRL 0xe618 #define PLA_TXFIFO_FULL 0xe61a #define PLA_RSTTALLY 0xe800 #define PLA_CR 0xe813 #define PLA_CRWECR 0xe81c #define PLA_CONFIG12 0xe81e /* CONFIG1, CONFIG2 */ #define PLA_CONFIG34 0xe820 /* CONFIG3, CONFIG4 */ #define PLA_CONFIG5 0xe822 #define PLA_PHY_PWR 0xe84c #define PLA_OOB_CTRL 0xe84f #define PLA_CPCR 0xe854 #define PLA_MISC_0 0xe858 #define PLA_MISC_1 0xe85a #define PLA_OCP_GPHY_BASE 0xe86c #define PLA_TALLYCNT 0xe890 #define PLA_SFF_STS_7 0xe8de #define PLA_PHYSTATUS 0xe908 #define PLA_CONFIG6 0xe90a /* CONFIG6 */ #define PLA_USB_CFG 0xe952 #define PLA_BP_BA 0xfc26 #define PLA_BP_0 0xfc28 #define PLA_BP_1 0xfc2a #define PLA_BP_2 0xfc2c #define PLA_BP_3 0xfc2e #define PLA_BP_4 0xfc30 #define PLA_BP_5 0xfc32 #define PLA_BP_6 0xfc34 #define
PLA_BP_7 0xfc36 #define PLA_BP_EN 0xfc38 #define USB_USB2PHY 0xb41e #define USB_SSPHYLINK1 0xb426 #define USB_SSPHYLINK2 0xb428 #define USB_L1_CTRL 0xb45e #define USB_U2P3_CTRL 0xb460 #define USB_CSR_DUMMY1 0xb464 #define USB_CSR_DUMMY2 0xb466 #define USB_DEV_STAT 0xb808 #define USB_CONNECT_TIMER 0xcbf8 #define USB_MSC_TIMER 0xcbfc #define USB_BURST_SIZE 0xcfc0 #define USB_FW_FIX_EN0 0xcfca #define USB_FW_FIX_EN1 0xcfcc #define USB_LPM_CONFIG 0xcfd8 #define USB_ECM_OPTION 0xcfee #define USB_CSTMR 0xcfef /* RTL8153A */ #define USB_MISC_2 0xcfff #define USB_ECM_OP 0xd26b #define USB_GPHY_CTRL 0xd284 #define USB_SPEED_OPTION 0xd32a #define USB_FW_CTRL 0xd334 /* RTL8153B */ #define USB_FC_TIMER 0xd340 #define USB_USB_CTRL 0xd406 #define USB_PHY_CTRL 0xd408 #define USB_TX_AGG 0xd40a #define USB_RX_BUF_TH 0xd40c #define USB_USB_TIMER 0xd428 #define USB_RX_EARLY_TIMEOUT 0xd42c #define USB_RX_EARLY_SIZE 0xd42e #define USB_PM_CTRL_STATUS 0xd432 /* RTL8153A */ #define USB_RX_EXTRA_AGGR_TMR 0xd432 /* RTL8153B */ #define USB_TX_DMA 0xd434 #define USB_UPT_RXDMA_OWN 0xd437 #define USB_UPHY3_MDCMDIO 0xd480 #define USB_TOLERANCE 0xd490 #define USB_LPM_CTRL 0xd41a #define USB_BMU_RESET 0xd4b0 #define USB_BMU_CONFIG 0xd4b4 #define USB_U1U2_TIMER 0xd4da #define USB_FW_TASK 0xd4e8 /* RTL8153B */ #define USB_RX_AGGR_NUM 0xd4ee #define USB_UPS_CTRL 0xd800 #define USB_POWER_CUT 0xd80a #define USB_MISC_0 0xd81a #define USB_MISC_1 0xd81f #define USB_AFE_CTRL2 0xd824 #define USB_UPHY_XTAL 0xd826 #define USB_UPS_CFG 0xd842 #define USB_UPS_FLAGS 0xd848 #define USB_WDT1_CTRL 0xe404 #define USB_WDT11_CTRL 0xe43c #define USB_BP_BA PLA_BP_BA #define USB_BP_0 PLA_BP_0 #define USB_BP_1 PLA_BP_1 #define USB_BP_2 PLA_BP_2 #define USB_BP_3 PLA_BP_3 #define USB_BP_4 PLA_BP_4 #define USB_BP_5 PLA_BP_5 #define USB_BP_6 PLA_BP_6 #define USB_BP_7 PLA_BP_7 #define USB_BP_EN PLA_BP_EN /* RTL8153A */ #define USB_BP_8 0xfc38 /* RTL8153B */ #define USB_BP_9 0xfc3a #define USB_BP_10 0xfc3c #define USB_BP_11 0xfc3e #define USB_BP_12 0xfc40 #define USB_BP_13 0xfc42 #define USB_BP_14 0xfc44 #define USB_BP_15 0xfc46 #define USB_BP2_EN 0xfc48 /* OCP Registers */ #define OCP_ALDPS_CONFIG 0x2010 #define OCP_EEE_CONFIG1 0x2080 #define OCP_EEE_CONFIG2 0x2092 #define OCP_EEE_CONFIG3 0x2094 #define OCP_BASE_MII 0xa400 #define OCP_EEE_AR 0xa41a #define OCP_EEE_DATA 0xa41c #define OCP_PHY_STATUS 0xa420 #define OCP_INTR_EN 0xa424 #define OCP_NCTL_CFG 0xa42c #define OCP_POWER_CFG 0xa430 #define OCP_EEE_CFG 0xa432 #define OCP_SRAM_ADDR 0xa436 #define OCP_SRAM_DATA 0xa438 #define OCP_DOWN_SPEED 0xa442 #define OCP_EEE_ABLE 0xa5c4 #define OCP_EEE_ADV 0xa5d0 #define OCP_EEE_LPABLE 0xa5d2 #define OCP_10GBT_CTRL 0xa5d4 #define OCP_10GBT_STAT 0xa5d6 #define OCP_EEE_ADV2 0xa6d4 #define OCP_PHY_STATE 0xa708 /* nway state for 8153 */ #define OCP_PHY_PATCH_STAT 0xb800 #define OCP_PHY_PATCH_CMD 0xb820 #define OCP_PHY_LOCK 0xb82e #define OCP_ADC_IOFFSET 0xbcfc #define OCP_ADC_CFG 0xbc06 #define OCP_SYSCLK_CFG 0xc416 /* SRAM Register */ #define SRAM_GREEN_CFG 0x8011 #define SRAM_LPF_CFG 0x8012 #define SRAM_GPHY_FW_VER 0x801e #define SRAM_10M_AMP1 0x8080 #define SRAM_10M_AMP2 0x8082 #define SRAM_IMPEDANCE 0x8084 #define SRAM_PHY_LOCK 0xb82e /* PLA_RCR */ #define RCR_AAP 0x00000001 #define RCR_APM 0x00000002 #define RCR_AM 0x00000004 #define RCR_AB 0x00000008 #define RCR_ACPT_ALL (RCR_AAP | RCR_APM | RCR_AM | RCR_AB) #define SLOT_EN BIT(11) /* PLA_RCR1 */ #define OUTER_VLAN BIT(7) #define INNER_VLAN BIT(6) /* PLA_RXFIFO_CTRL0 */ #define RXFIFO_THR1_NORMAL 0x00080002 
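/* Illustrative sketch (not from the original source): the OCP_* PHY
 * registers listed above are not mapped directly. ocp_reg_read() and
 * ocp_reg_write(), defined further down in this file, reach them through
 * the PLA_OCP_GPHY_BASE base-page register. For OCP_EEE_CFG (0xa432) the
 * address split works out as:
 *
 *	base  = 0xa432 & 0xf000;		base  == 0xa000
 *	index = (0xa432 & 0x0fff) | 0xb000;	index == 0xb432
 *
 * The base page is written to PLA_OCP_GPHY_BASE (and cached in
 * tp->ocp_base so it is only rewritten on a page change), and the word is
 * then accessed at @index in the PLA register space via
 * ocp_read_word()/ocp_write_word().
 */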
#define RXFIFO_THR1_OOB 0x01800003 /* PLA_RXFIFO_FULL */ #define RXFIFO_FULL_MASK 0xfff /* PLA_RXFIFO_CTRL1 */ #define RXFIFO_THR2_FULL 0x00000060 #define RXFIFO_THR2_HIGH 0x00000038 #define RXFIFO_THR2_OOB 0x0000004a #define RXFIFO_THR2_NORMAL 0x00a0 /* PLA_RXFIFO_CTRL2 */ #define RXFIFO_THR3_FULL 0x00000078 #define RXFIFO_THR3_HIGH 0x00000048 #define RXFIFO_THR3_OOB 0x0000005a #define RXFIFO_THR3_NORMAL 0x0110 /* PLA_TXFIFO_CTRL */ #define TXFIFO_THR_NORMAL 0x00400008 #define TXFIFO_THR_NORMAL2 0x01000008 /* PLA_DMY_REG0 */ #define ECM_ALDPS 0x0002 /* PLA_FMC */ #define FMC_FCR_MCU_EN 0x0001 /* PLA_EEEP_CR */ #define EEEP_CR_EEEP_TX 0x0002 /* PLA_WDT6_CTRL */ #define WDT6_SET_MODE 0x0010 /* PLA_TCR0 */ #define TCR0_TX_EMPTY 0x0800 #define TCR0_AUTO_FIFO 0x0080 /* PLA_TCR1 */ #define VERSION_MASK 0x7cf0 #define IFG_MASK (BIT(3) | BIT(9) | BIT(8)) #define IFG_144NS BIT(9) #define IFG_96NS (BIT(9) | BIT(8)) /* PLA_MTPS */ #define MTPS_JUMBO (12 * 1024 / 64) #define MTPS_DEFAULT (6 * 1024 / 64) /* PLA_RSTTALLY */ #define TALLY_RESET 0x0001 /* PLA_CR */ #define CR_RST 0x10 #define CR_RE 0x08 #define CR_TE 0x04 /* PLA_CRWECR */ #define CRWECR_NORAML 0x00 #define CRWECR_CONFIG 0xc0 /* PLA_OOB_CTRL */ #define NOW_IS_OOB 0x80 #define TXFIFO_EMPTY 0x20 #define RXFIFO_EMPTY 0x10 #define LINK_LIST_READY 0x02 #define DIS_MCU_CLROOB 0x01 #define FIFO_EMPTY (TXFIFO_EMPTY | RXFIFO_EMPTY) /* PLA_MISC_1 */ #define RXDY_GATED_EN 0x0008 /* PLA_SFF_STS_7 */ #define RE_INIT_LL 0x8000 #define MCU_BORW_EN 0x4000 /* PLA_CPCR */ #define FLOW_CTRL_EN BIT(0) #define CPCR_RX_VLAN 0x0040 /* PLA_CFG_WOL */ #define MAGIC_EN 0x0001 /* PLA_TEREDO_CFG */ #define TEREDO_SEL 0x8000 #define TEREDO_WAKE_MASK 0x7f00 #define TEREDO_RS_EVENT_MASK 0x00fe #define OOB_TEREDO_EN 0x0001 /* PLA_BDC_CR */ #define ALDPS_PROXY_MODE 0x0001 /* PLA_EFUSE_CMD */ #define EFUSE_READ_CMD BIT(15) #define EFUSE_DATA_BIT16 BIT(7) /* PLA_CONFIG34 */ #define LINK_ON_WAKE_EN 0x0010 #define LINK_OFF_WAKE_EN 0x0008 /* PLA_CONFIG6 */ #define LANWAKE_CLR_EN BIT(0) /* PLA_USB_CFG */ #define EN_XG_LIP BIT(1) #define EN_G_LIP BIT(2) /* PLA_CONFIG5 */ #define BWF_EN 0x0040 #define MWF_EN 0x0020 #define UWF_EN 0x0010 #define LAN_WAKE_EN 0x0002 /* PLA_LED_FEATURE */ #define LED_MODE_MASK 0x0700 /* PLA_PHY_PWR */ #define TX_10M_IDLE_EN 0x0080 #define PFM_PWM_SWITCH 0x0040 #define TEST_IO_OFF BIT(4) /* PLA_MAC_PWR_CTRL */ #define D3_CLK_GATED_EN 0x00004000 #define MCU_CLK_RATIO 0x07010f07 #define MCU_CLK_RATIO_MASK 0x0f0f0f0f #define ALDPS_SPDWN_RATIO 0x0f87 /* PLA_MAC_PWR_CTRL2 */ #define EEE_SPDWN_RATIO 0x8007 #define MAC_CLK_SPDWN_EN BIT(15) #define EEE_SPDWN_RATIO_MASK 0xff /* PLA_MAC_PWR_CTRL3 */ #define PLA_MCU_SPDWN_EN BIT(14) #define PKT_AVAIL_SPDWN_EN 0x0100 #define SUSPEND_SPDWN_EN 0x0004 #define U1U2_SPDWN_EN 0x0002 #define L1_SPDWN_EN 0x0001 /* PLA_MAC_PWR_CTRL4 */ #define PWRSAVE_SPDWN_EN 0x1000 #define RXDV_SPDWN_EN 0x0800 #define TX10MIDLE_EN 0x0100 #define IDLE_SPDWN_EN BIT(6) #define TP100_SPDWN_EN 0x0020 #define TP500_SPDWN_EN 0x0010 #define TP1000_SPDWN_EN 0x0008 #define EEE_SPDWN_EN 0x0001 /* PLA_GPHY_INTR_IMR */ #define GPHY_STS_MSK 0x0001 #define SPEED_DOWN_MSK 0x0002 #define SPDWN_RXDV_MSK 0x0004 #define SPDWN_LINKCHG_MSK 0x0008 /* PLA_PHYAR */ #define PHYAR_FLAG 0x80000000 /* PLA_EEE_CR */ #define EEE_RX_EN 0x0001 #define EEE_TX_EN 0x0002 /* PLA_BOOT_CTRL */ #define AUTOLOAD_DONE 0x0002 /* PLA_LWAKE_CTRL_REG */ #define LANWAKE_PIN BIT(7) /* PLA_SUSPEND_FLAG */ #define LINK_CHG_EVENT BIT(0) /* PLA_INDICATE_FALG */ #define 
UPCOMING_RUNTIME_D3 BIT(0) /* PLA_MACDBG_PRE and PLA_MACDBG_POST */ #define DEBUG_OE BIT(0) #define DEBUG_LTSSM 0x0082 /* PLA_EXTRA_STATUS */ #define CUR_LINK_OK BIT(15) #define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */ #define LINK_CHANGE_FLAG BIT(8) #define POLL_LINK_CHG BIT(0) /* PLA_GPHY_CTRL */ #define GPHY_FLASH BIT(1) /* PLA_POL_GPIO_CTRL */ #define DACK_DET_EN BIT(15) #define POL_GPHY_PATCH BIT(4) /* USB_USB2PHY */ #define USB2PHY_SUSPEND 0x0001 #define USB2PHY_L1 0x0002 /* USB_SSPHYLINK1 */ #define DELAY_PHY_PWR_CHG BIT(1) /* USB_SSPHYLINK2 */ #define pwd_dn_scale_mask 0x3ffe #define pwd_dn_scale(x) ((x) << 1) /* USB_CSR_DUMMY1 */ #define DYNAMIC_BURST 0x0001 /* USB_CSR_DUMMY2 */ #define EP4_FULL_FC 0x0001 /* USB_DEV_STAT */ #define STAT_SPEED_MASK 0x0006 #define STAT_SPEED_HIGH 0x0000 #define STAT_SPEED_FULL 0x0002 /* USB_FW_FIX_EN0 */ #define FW_FIX_SUSPEND BIT(14) /* USB_FW_FIX_EN1 */ #define FW_IP_RESET_EN BIT(9) /* USB_LPM_CONFIG */ #define LPM_U1U2_EN BIT(0) /* USB_TX_AGG */ #define TX_AGG_MAX_THRESHOLD 0x03 /* USB_RX_BUF_TH */ #define RX_THR_SUPPER 0x0c350180 #define RX_THR_HIGH 0x7a120180 #define RX_THR_SLOW 0xffff0180 #define RX_THR_B 0x00010001 /* USB_TX_DMA */ #define TEST_MODE_DISABLE 0x00000001 #define TX_SIZE_ADJUST1 0x00000100 /* USB_BMU_RESET */ #define BMU_RESET_EP_IN 0x01 #define BMU_RESET_EP_OUT 0x02 /* USB_BMU_CONFIG */ #define ACT_ODMA BIT(1) /* USB_UPT_RXDMA_OWN */ #define OWN_UPDATE BIT(0) #define OWN_CLEAR BIT(1) /* USB_FW_TASK */ #define FC_PATCH_TASK BIT(1) /* USB_RX_AGGR_NUM */ #define RX_AGGR_NUM_MASK 0x1ff /* USB_UPS_CTRL */ #define POWER_CUT 0x0100 /* USB_PM_CTRL_STATUS */ #define RESUME_INDICATE 0x0001 /* USB_ECM_OPTION */ #define BYPASS_MAC_RESET BIT(5) /* USB_CSTMR */ #define FORCE_SUPER BIT(0) /* USB_MISC_2 */ #define UPS_FORCE_PWR_DOWN BIT(0) /* USB_ECM_OP */ #define EN_ALL_SPEED BIT(0) /* USB_GPHY_CTRL */ #define GPHY_PATCH_DONE BIT(2) #define BYPASS_FLASH BIT(5) #define BACKUP_RESTRORE BIT(6) /* USB_SPEED_OPTION */ #define RG_PWRDN_EN BIT(8) #define ALL_SPEED_OFF BIT(9) /* USB_FW_CTRL */ #define FLOW_CTRL_PATCH_OPT BIT(1) #define AUTO_SPEEDUP BIT(3) #define FLOW_CTRL_PATCH_2 BIT(8) /* USB_FC_TIMER */ #define CTRL_TIMER_EN BIT(15) /* USB_USB_CTRL */ #define CDC_ECM_EN BIT(3) #define RX_AGG_DISABLE 0x0010 #define RX_ZERO_EN 0x0080 /* USB_U2P3_CTRL */ #define U2P3_ENABLE 0x0001 #define RX_DETECT8 BIT(3) /* USB_POWER_CUT */ #define PWR_EN 0x0001 #define PHASE2_EN 0x0008 #define UPS_EN BIT(4) #define USP_PREWAKE BIT(5) /* USB_MISC_0 */ #define PCUT_STATUS 0x0001 /* USB_RX_EARLY_TIMEOUT */ #define COALESCE_SUPER 85000U #define COALESCE_HIGH 250000U #define COALESCE_SLOW 524280U /* USB_WDT1_CTRL */ #define WTD1_EN BIT(0) /* USB_WDT11_CTRL */ #define TIMER11_EN 0x0001 /* USB_LPM_CTRL */ /* bit 4 ~ 5: fifo empty boundary */ #define FIFO_EMPTY_1FB 0x30 /* 0x1fb * 64 = 32448 bytes */ /* bit 2 ~ 3: LPM timer */ #define LPM_TIMER_MASK 0x0c #define LPM_TIMER_500MS 0x04 /* 500 ms */ #define LPM_TIMER_500US 0x0c /* 500 us */ #define ROK_EXIT_LPM 0x02 /* USB_AFE_CTRL2 */ #define SEN_VAL_MASK 0xf800 #define SEN_VAL_NORMAL 0xa000 #define SEL_RXIDLE 0x0100 /* USB_UPHY_XTAL */ #define OOBS_POLLING BIT(8) /* USB_UPS_CFG */ #define SAW_CNT_1MS_MASK 0x0fff #define MID_REVERSE BIT(5) /* RTL8156A */ /* USB_UPS_FLAGS */ #define UPS_FLAGS_R_TUNE BIT(0) #define UPS_FLAGS_EN_10M_CKDIV BIT(1) #define UPS_FLAGS_250M_CKDIV BIT(2) #define UPS_FLAGS_EN_ALDPS BIT(3) #define UPS_FLAGS_CTAP_SHORT_DIS BIT(4) #define UPS_FLAGS_SPEED_MASK (0xf << 16) #define ups_flags_speed(x) ((x)
<< 16) #define UPS_FLAGS_EN_EEE BIT(20) #define UPS_FLAGS_EN_500M_EEE BIT(21) #define UPS_FLAGS_EN_EEE_CKDIV BIT(22) #define UPS_FLAGS_EEE_PLLOFF_100 BIT(23) #define UPS_FLAGS_EEE_PLLOFF_GIGA BIT(24) #define UPS_FLAGS_EEE_CMOD_LV_EN BIT(25) #define UPS_FLAGS_EN_GREEN BIT(26) #define UPS_FLAGS_EN_FLOW_CTR BIT(27) enum spd_duplex { NWAY_10M_HALF, NWAY_10M_FULL, NWAY_100M_HALF, NWAY_100M_FULL, NWAY_1000M_FULL, FORCE_10M_HALF, FORCE_10M_FULL, FORCE_100M_HALF, FORCE_100M_FULL, FORCE_1000M_FULL, NWAY_2500M_FULL, }; /* OCP_ALDPS_CONFIG */ #define ENPWRSAVE 0x8000 #define ENPDNPS 0x0200 #define LINKENA 0x0100 #define DIS_SDSAVE 0x0010 /* OCP_PHY_STATUS */ #define PHY_STAT_MASK 0x0007 #define PHY_STAT_EXT_INIT 2 #define PHY_STAT_LAN_ON 3 #define PHY_STAT_PWRDN 5 /* OCP_INTR_EN */ #define INTR_SPEED_FORCE BIT(3) /* OCP_NCTL_CFG */ #define PGA_RETURN_EN BIT(1) /* OCP_POWER_CFG */ #define EEE_CLKDIV_EN 0x8000 #define EN_ALDPS 0x0004 #define EN_10M_PLLOFF 0x0001 /* OCP_EEE_CONFIG1 */ #define RG_TXLPI_MSK_HFDUP 0x8000 #define RG_MATCLR_EN 0x4000 #define EEE_10_CAP 0x2000 #define EEE_NWAY_EN 0x1000 #define TX_QUIET_EN 0x0200 #define RX_QUIET_EN 0x0100 #define sd_rise_time_mask 0x0070 #define sd_rise_time(x) (min(x, 7) << 4) /* bit 4 ~ 6 */ #define RG_RXLPI_MSK_HFDUP 0x0008 #define SDFALLTIME 0x0007 /* bit 0 ~ 2 */ /* OCP_EEE_CONFIG2 */ #define RG_LPIHYS_NUM 0x7000 /* bit 12 ~ 15 */ #define RG_DACQUIET_EN 0x0400 #define RG_LDVQUIET_EN 0x0200 #define RG_CKRSEL 0x0020 #define RG_EEEPRG_EN 0x0010 /* OCP_EEE_CONFIG3 */ #define fast_snr_mask 0xff80 #define fast_snr(x) (min(x, 0x1ff) << 7) /* bit 7 ~ 15 */ #define RG_LFS_SEL 0x0060 /* bit 6 ~ 5 */ #define MSK_PH 0x0006 /* bit 0 ~ 3 */ /* OCP_EEE_AR */ /* bit[15:14] function */ #define FUN_ADDR 0x0000 #define FUN_DATA 0x4000 /* bit[4:0] device addr */ /* OCP_EEE_CFG */ #define CTAP_SHORT_EN 0x0040 #define EEE10_EN 0x0010 /* OCP_DOWN_SPEED */ #define EN_EEE_CMODE BIT(14) #define EN_EEE_1000 BIT(13) #define EN_EEE_100 BIT(12) #define EN_10M_CLKDIV BIT(11) #define EN_10M_BGOFF 0x0080 /* OCP_10GBT_CTRL */ #define RTL_ADV2_5G_F_R BIT(5) /* Advertise 2.5GBASE-T fast-retrain */ /* OCP_PHY_STATE */ #define TXDIS_STATE 0x01 #define ABD_STATE 0x02 /* OCP_PHY_PATCH_STAT */ #define PATCH_READY BIT(6) /* OCP_PHY_PATCH_CMD */ #define PATCH_REQUEST BIT(4) /* OCP_PHY_LOCK */ #define PATCH_LOCK BIT(0) /* OCP_ADC_CFG */ #define CKADSEL_L 0x0100 #define ADC_EN 0x0080 #define EN_EMI_L 0x0040 /* OCP_SYSCLK_CFG */ #define sysclk_div_expo(x) (min(x, 5) << 8) #define clk_div_expo(x) (min(x, 5) << 4) /* SRAM_GREEN_CFG */ #define GREEN_ETH_EN BIT(15) #define R_TUNE_EN BIT(11) /* SRAM_LPF_CFG */ #define LPF_AUTO_TUNE 0x8000 /* SRAM_10M_AMP1 */ #define GDAC_IB_UPALL 0x0008 /* SRAM_10M_AMP2 */ #define AMP_DN 0x0200 /* SRAM_IMPEDANCE */ #define RX_DRIVING_MASK 0x6000 /* SRAM_PHY_LOCK */ #define PHY_PATCH_LOCK 0x0001 /* MAC PASSTHRU */ #define AD_MASK 0xfee0 #define BND_MASK 0x0004 #define BD_MASK 0x0001 #define EFUSE 0xcfdb #define PASS_THRU_MASK 0x1 #define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */ enum rtl_register_content { _2500bps = BIT(10), _1250bps = BIT(9), _500bps = BIT(8), _tx_flow = BIT(6), _rx_flow = BIT(5), _1000bps = 0x10, _100bps = 0x08, _10bps = 0x04, LINK_STATUS = 0x02, FULL_DUP = 0x01, }; #define is_speed_2500(_speed) (((_speed) & (_2500bps | LINK_STATUS)) == (_2500bps | LINK_STATUS)) #define is_flow_control(_speed) (((_speed) & (_tx_flow | _rx_flow)) == (_tx_flow | _rx_flow)) #define RTL8152_MAX_TX 4 #define RTL8152_MAX_RX 10 #define INTBUFSIZE 2 #define TX_ALIGN 4 
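/* Illustrative sketch (not from the original source): the speed bits and
 * the helper macros above are applied to the raw link status word, which
 * the driver reads from PLA_PHYSTATUS. Roughly:
 *
 *	u16 speed = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
 *
 *	if (speed & LINK_STATUS) {
 *		// is_speed_2500(speed): link is up at 2.5Gbps
 *		// is_flow_control(speed): both tx and rx pause
 *		//			   were negotiated
 *	}
 *
 * ocp_read_word() is the accessor defined further down in this file.
 */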
#define RX_ALIGN 8 #define RTL8152_RX_MAX_PENDING 4096 #define RTL8152_RXFG_HEADSZ 256 #define INTR_LINK 0x0004 #define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) #define RTL8153_RMS RTL8153_MAX_PACKET #define RTL8152_TX_TIMEOUT (5 * HZ) #define mtu_to_size(m) ((m) + VLAN_ETH_HLEN + ETH_FCS_LEN) #define size_to_mtu(s) ((s) - VLAN_ETH_HLEN - ETH_FCS_LEN) #define rx_reserved_size(x) (mtu_to_size(x) + sizeof(struct rx_desc) + RX_ALIGN) /* rtl8152 flags */ enum rtl8152_flags { RTL8152_INACCESSIBLE = 0, RTL8152_SET_RX_MODE, WORK_ENABLE, RTL8152_LINK_CHG, SELECTIVE_SUSPEND, PHY_RESET, SCHEDULE_TASKLET, GREEN_ETHERNET, RX_EPROTO, IN_PRE_RESET, PROBED_WITH_NO_ERRORS, PROBE_SHOULD_RETRY, }; #define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e #define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054 #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082 #define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387 #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062 struct tally_counter { __le64 tx_packets; __le64 rx_packets; __le64 tx_errors; __le32 rx_errors; __le16 rx_missed; __le16 align_errors; __le32 tx_one_collision; __le32 tx_multi_collision; __le64 rx_unicast; __le64 rx_broadcast; __le32 rx_multicast; __le16 tx_aborted; __le16 tx_underrun; }; struct rx_desc { __le32 opts1; #define RX_LEN_MASK 0x7fff __le32 opts2; #define RD_UDP_CS BIT(23) #define RD_TCP_CS BIT(22) #define RD_IPV6_CS BIT(20) #define RD_IPV4_CS BIT(19) __le32 opts3; #define IPF BIT(23) /* IP checksum fail */ #define UDPF BIT(22) /* UDP checksum fail */ #define TCPF BIT(21) /* TCP checksum fail */ #define RX_VLAN_TAG BIT(16) __le32 opts4; __le32 opts5; __le32 opts6; }; struct tx_desc { __le32 opts1; #define TX_FS BIT(31) /* First segment of a packet */ #define TX_LS BIT(30) /* Final segment of a packet */ #define GTSENDV4 BIT(28) #define GTSENDV6 BIT(27) #define GTTCPHO_SHIFT 18 #define GTTCPHO_MAX 0x7fU #define TX_LEN_MAX 0x3ffffU __le32 opts2; #define UDP_CS BIT(31) /* Calculate UDP/IP checksum */ #define TCP_CS BIT(30) /* Calculate TCP/IP checksum */ #define IPV4_CS BIT(29) /* Calculate IPv4 checksum */ #define IPV6_CS BIT(28) /* Calculate IPv6 checksum */ #define MSS_SHIFT 17 #define MSS_MAX 0x7ffU #define TCPHO_SHIFT 17 #define TCPHO_MAX 0x7ffU #define TX_VLAN_TAG BIT(16) }; struct r8152; struct rx_agg { struct list_head list, info_list; struct urb *urb; struct r8152 *context; struct page *page; void *buffer; }; struct tx_agg { struct list_head list; struct urb *urb; struct r8152 *context; void *buffer; void *head; u32 skb_num; u32 skb_len; }; struct r8152 { unsigned long flags; struct usb_device *udev; struct napi_struct napi; struct usb_interface *intf; struct net_device *netdev; struct urb *intr_urb; struct tx_agg tx_info[RTL8152_MAX_TX]; struct list_head rx_info, rx_used; struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; struct delayed_work schedule, hw_phy_work; struct mii_if_info mii; struct mutex control; /* use for hw setting */ #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notifier; #endif struct tasklet_struct tx_tl; struct rtl_ops { void (*init)(struct r8152 *tp); int (*enable)(struct r8152 *tp); void (*disable)(struct r8152 *tp); void (*up)(struct r8152 *tp); void (*down)(struct r8152 *tp); void (*unload)(struct r8152 *tp); int (*eee_get)(struct r8152 *tp, struct ethtool_keee *eee); int (*eee_set)(struct r8152 *tp, struct ethtool_keee *eee); bool (*in_nway)(struct r8152 *tp); void (*hw_phy_cfg)(struct r8152 *tp); void 
(*autosuspend_en)(struct r8152 *tp, bool enable); void (*change_mtu)(struct r8152 *tp); } rtl_ops; struct ups_info { u32 r_tune:1; u32 _10m_ckdiv:1; u32 _250m_ckdiv:1; u32 aldps:1; u32 lite_mode:2; u32 speed_duplex:4; u32 eee:1; u32 eee_lite:1; u32 eee_ckdiv:1; u32 eee_plloff_100:1; u32 eee_plloff_giga:1; u32 eee_cmod_lv:1; u32 green:1; u32 flow_control:1; u32 ctap_short_off:1; } ups_info; #define RTL_VER_SIZE 32 struct rtl_fw { const char *fw_name; const struct firmware *fw; char version[RTL_VER_SIZE]; int (*pre_fw)(struct r8152 *tp); int (*post_fw)(struct r8152 *tp); bool retry; } rtl_fw; atomic_t rx_count; bool eee_en; int intr_interval; u32 saved_wolopts; u32 msg_enable; u32 tx_qlen; u32 coalesce; u32 advertising; u32 rx_buf_sz; u32 rx_copybreak; u32 rx_pending; u32 fc_pause_on, fc_pause_off; unsigned int pipe_in, pipe_out, pipe_intr, pipe_ctrl_in, pipe_ctrl_out; u32 support_2500full:1; u32 lenovo_macpassthru:1; u32 dell_tb_rx_agg_bug:1; u16 ocp_base; u16 speed; u16 eee_adv; u8 *intr_buff; u8 version; u8 duplex; u8 autoneg; unsigned int reg_access_reset_count; }; /** * struct fw_block - block type and total length * @type: type of the current block, such as RTL_FW_END, RTL_FW_PLA, * RTL_FW_USB and so on. * @length: total length of the current block. */ struct fw_block { __le32 type; __le32 length; } __packed; /** * struct fw_header - header of the firmware file * @checksum: checksum of sha256 which is calculated from the whole file * except the checksum field of the file. That is, calculate sha256 * from the version field to the end of the file. * @version: version of this firmware. * @blocks: the first firmware block of the file */ struct fw_header { u8 checksum[32]; char version[RTL_VER_SIZE]; struct fw_block blocks[]; } __packed; enum rtl8152_fw_flags { FW_FLAGS_USB = 0, FW_FLAGS_PLA, FW_FLAGS_START, FW_FLAGS_STOP, FW_FLAGS_NC, FW_FLAGS_NC1, FW_FLAGS_NC2, FW_FLAGS_UC2, FW_FLAGS_UC, FW_FLAGS_SPEED_UP, FW_FLAGS_VER, }; enum rtl8152_fw_fixup_cmd { FW_FIXUP_AND = 0, FW_FIXUP_OR, FW_FIXUP_NOT, FW_FIXUP_XOR, }; struct fw_phy_set { __le16 addr; __le16 data; } __packed; struct fw_phy_speed_up { struct fw_block blk_hdr; __le16 fw_offset; __le16 version; __le16 fw_reg; __le16 reserved; char info[]; } __packed; struct fw_phy_ver { struct fw_block blk_hdr; struct fw_phy_set ver; __le32 reserved; } __packed; struct fw_phy_fixup { struct fw_block blk_hdr; struct fw_phy_set setting; __le16 bit_cmd; __le16 reserved; } __packed; struct fw_phy_union { struct fw_block blk_hdr; __le16 fw_offset; __le16 fw_reg; struct fw_phy_set pre_set[2]; struct fw_phy_set bp[8]; struct fw_phy_set bp_en; u8 pre_num; u8 bp_num; char info[]; } __packed; /** * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB. * The layout of the firmware block is: * <struct fw_mac> + <info> + <firmware data>. * @blk_hdr: firmware descriptor (type, length) * @fw_offset: offset of the firmware binary data. The start address of * the data would be the address of struct fw_mac + @fw_offset. * @fw_reg: the register to load the firmware. Depends on chip. * @bp_ba_addr: the register to write break point base address. Depends on * chip. * @bp_ba_value: break point base address. Depends on chip. * @bp_en_addr: the register to write break point enabled mask. Depends * on chip. * @bp_en_value: break point enabled mask. Depends on the firmware. * @bp_start: the start register of break points. Depends on chip. * @bp_num: the break point number which needs to be set for this firmware. * Depends on the firmware. * @bp: break points. 
Depends on firmware. * @reserved: reserved space (unused) * @fw_ver_reg: the register to store the fw version. * @fw_ver_data: the firmware version of the current type. * @info: additional information for debugging, and is followed by the * binary data of firmware. */ struct fw_mac { struct fw_block blk_hdr; __le16 fw_offset; __le16 fw_reg; __le16 bp_ba_addr; __le16 bp_ba_value; __le16 bp_en_addr; __le16 bp_en_value; __le16 bp_start; __le16 bp_num; __le16 bp[16]; /* any value determined by firmware */ __le32 reserved; __le16 fw_ver_reg; u8 fw_ver_data; char info[]; } __packed; /** * struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START. * This is used to set the patch key when loading the PHY firmware. * @blk_hdr: firmware descriptor (type, length) * @key_reg: the register to write the patch key. * @key_data: patch key. * @reserved: reserved space (unused) */ struct fw_phy_patch_key { struct fw_block blk_hdr; __le16 key_reg; __le16 key_data; __le32 reserved; } __packed; /** * struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC. * The layout of the firmware block is: * <struct fw_phy_nc> + <info> + <firmware data>. * @blk_hdr: firmware descriptor (type, length) * @fw_offset: offset of the firmware binary data. The start address of * the data would be the address of struct fw_phy_nc + @fw_offset. * @fw_reg: the register to load the firmware. Depends on chip. * @ba_reg: the register to write the base address. Depends on chip. * @ba_data: base address. Depends on chip. * @patch_en_addr: the register for enabling patch mode. Depends on chip. * @patch_en_value: patch mode enabled mask. Depends on the firmware. * @mode_reg: the register for switching the mode. * @mode_pre: the mode that needs to be set before loading the firmware. * @mode_post: the mode to set once the firmware load has finished. * @reserved: reserved space (unused) * @bp_start: the start register of break points. Depends on chip. * @bp_num: the break point number which needs to be set for this firmware. * Depends on the firmware. * @bp: break points. Depends on firmware. * @info: additional information for debugging, and is followed by the * binary data of firmware. */ struct fw_phy_nc { struct fw_block blk_hdr; __le16 fw_offset; __le16 fw_reg; __le16 ba_reg; __le16 ba_data; __le16 patch_en_addr; __le16 patch_en_value; __le16 mode_reg; __le16 mode_pre; __le16 mode_post; __le16 reserved; __le16 bp_start; __le16 bp_num; __le16 bp[4]; char info[]; } __packed; enum rtl_fw_type { RTL_FW_END = 0, RTL_FW_PLA, RTL_FW_USB, RTL_FW_PHY_START, RTL_FW_PHY_STOP, RTL_FW_PHY_NC, RTL_FW_PHY_FIXUP, RTL_FW_PHY_UNION_NC, RTL_FW_PHY_UNION_NC1, RTL_FW_PHY_UNION_NC2, RTL_FW_PHY_UNION_UC2, RTL_FW_PHY_UNION_UC, RTL_FW_PHY_UNION_MISC, RTL_FW_PHY_SPEED_UP, RTL_FW_PHY_VER, }; enum rtl_version { RTL_VER_UNKNOWN = 0, RTL_VER_01, RTL_VER_02, RTL_VER_03, RTL_VER_04, RTL_VER_05, RTL_VER_06, RTL_VER_07, RTL_VER_08, RTL_VER_09, RTL_TEST_01, RTL_VER_10, RTL_VER_11, RTL_VER_12, RTL_VER_13, RTL_VER_14, RTL_VER_15, RTL_VER_MAX }; enum tx_csum_stat { TX_CSUM_SUCCESS = 0, TX_CSUM_TSO, TX_CSUM_NONE }; #define RTL_ADVERTISED_10_HALF BIT(0) #define RTL_ADVERTISED_10_FULL BIT(1) #define RTL_ADVERTISED_100_HALF BIT(2) #define RTL_ADVERTISED_100_FULL BIT(3) #define RTL_ADVERTISED_1000_HALF BIT(4) #define RTL_ADVERTISED_1000_FULL BIT(5) #define RTL_ADVERTISED_2500_FULL BIT(6) /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). * The RTL chips use a 64-element hash table based on the Ethernet CRC.
*/ static const int multicast_filter_limit = 32; static unsigned int agg_buf_sz = 16384; #define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc)) /* If register access fails then we block access and issue a reset. If this * happens too many times in a row without a successful access then we stop * trying to reset and just leave access blocked. */ #define REGISTER_ACCESS_MAX_RESETS 3 static void rtl_set_inaccessible(struct r8152 *tp) { set_bit(RTL8152_INACCESSIBLE, &tp->flags); smp_mb__after_atomic(); } static void rtl_set_accessible(struct r8152 *tp) { clear_bit(RTL8152_INACCESSIBLE, &tp->flags); smp_mb__after_atomic(); } static int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, const char *msg_tag) { struct usb_device *udev = tp->udev; int ret; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; ret = usb_control_msg(udev, pipe, request, requesttype, value, index, data, size, USB_CTRL_GET_TIMEOUT); /* No need to issue a reset to report an error if the USB device got * unplugged; just return immediately. */ if (ret == -ENODEV) return ret; /* If the write was successful then we're done */ if (ret >= 0) { tp->reg_access_reset_count = 0; return ret; } dev_err(&udev->dev, "Failed to %s %d bytes at %#06x/%#06x (%d)\n", msg_tag, size, value, index, ret); /* Block all future register access until we reset. Much of the code * in the driver doesn't check for errors. Notably, many parts of the * driver do a read/modify/write of a register value without * confirming that the read succeeded. Writing back modified garbage * like this can fully wedge the adapter, requiring a power cycle. */ rtl_set_inaccessible(tp); /* If probe hasn't yet finished, then we'll request a retry of the * whole probe routine if we get any control transfer errors. We * never have to clear this bit since we free/reallocate the whole "tp" * structure if we retry probe. */ if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) { set_bit(PROBE_SHOULD_RETRY, &tp->flags); return ret; } /* Failing to access registers in pre-reset is not surprising since we * wouldn't be resetting if things were behaving normally. The register * access we do in pre-reset isn't truly mandatory--we're just reusing * the disable() function and trying to be nice by powering the * adapter down before resetting it. Thus, if we're in pre-reset, * we'll return right away and not try to queue up yet another reset. * We know the post-reset is already coming. 
*/ if (test_bit(IN_PRE_RESET, &tp->flags)) return ret; if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) { usb_queue_reset_device(tp->intf); tp->reg_access_reset_count++; } else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) { dev_err(&udev->dev, "Tried to reset %d times; giving up.\n", REGISTER_ACCESS_MAX_RESETS); } return ret; } static int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) { int ret; void *tmp; tmp = kmalloc(size, GFP_KERNEL); if (!tmp) return -ENOMEM; ret = r8152_control_msg(tp, tp->pipe_ctrl_in, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, value, index, tmp, size, "read"); if (ret < 0) memset(data, 0xff, size); else memcpy(data, tmp, size); kfree(tmp); return ret; } static int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) { int ret; void *tmp; tmp = kmemdup(data, size, GFP_KERNEL); if (!tmp) return -ENOMEM; ret = r8152_control_msg(tp, tp->pipe_ctrl_out, RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, value, index, tmp, size, "write"); kfree(tmp); return ret; } static void rtl_set_unplug(struct r8152 *tp) { if (tp->udev->state == USB_STATE_NOTATTACHED) rtl_set_inaccessible(tp); } static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data, u16 type) { u16 limit = 64; int ret = 0; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; /* both size and index must be 4-byte aligned */ if ((size & 3) || !size || (index & 3) || !data) return -EPERM; if ((u32)index + (u32)size > 0xffff) return -EPERM; while (size) { if (size > limit) { ret = get_registers(tp, index, type, limit, data); if (ret < 0) break; index += limit; data += limit; size -= limit; } else { ret = get_registers(tp, index, type, size, data); if (ret < 0) break; index += size; data += size; size = 0; break; } } if (ret == -ENODEV) rtl_set_unplug(tp); return ret; } static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data, u16 type) { int ret; u16 byteen_start, byteen_end, byen; u16 limit = 512; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; /* both size and index must be 4-byte aligned */ if ((size & 3) || !size || (index & 3) || !data) return -EPERM; if ((u32)index + (u32)size > 0xffff) return -EPERM; byteen_start = byteen & BYTE_EN_START_MASK; byteen_end = byteen & BYTE_EN_END_MASK; byen = byteen_start | (byteen_start << 4); /* Split the first DWORD if the byte_en is not 0xff */ if (byen != BYTE_EN_DWORD) { ret = set_registers(tp, index, type | byen, 4, data); if (ret < 0) goto error1; index += 4; data += 4; size -= 4; } if (size) { byen = byteen_end | (byteen_end >> 4); /* Split the last DWORD if the byte_en is not 0xff */ if (byen != BYTE_EN_DWORD) size -= 4; while (size) { if (size > limit) { ret = set_registers(tp, index, type | BYTE_EN_DWORD, limit, data); if (ret < 0) goto error1; index += limit; data += limit; size -= limit; } else { ret = set_registers(tp, index, type | BYTE_EN_DWORD, size, data); if (ret < 0) goto error1; index += size; data += size; size = 0; break; } } /* Set the last DWORD */ if (byen != BYTE_EN_DWORD) ret = set_registers(tp, index, type | byen, 4, data); } error1: if (ret == -ENODEV) rtl_set_unplug(tp); return ret; } static inline int pla_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data) { return generic_ocp_read(tp, index, size, data, MCU_TYPE_PLA); } static inline int pla_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) { return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_PLA); } static
inline int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) { return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_USB); } static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index) { __le32 data; generic_ocp_read(tp, index, sizeof(data), &data, type); return __le32_to_cpu(data); } static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data) { __le32 tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(tmp), &tmp, type); } static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index) { u32 data; __le32 tmp; u16 byen = BYTE_EN_WORD; u8 shift = index & 2; index &= ~3; byen <<= shift; generic_ocp_read(tp, index, sizeof(tmp), &tmp, type | byen); data = __le32_to_cpu(tmp); data >>= (shift * 8); data &= 0xffff; return (u16)data; } static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) { u32 mask = 0xffff; __le32 tmp; u16 byen = BYTE_EN_WORD; u8 shift = index & 2; data &= mask; if (index & 2) { byen <<= shift; mask <<= (shift * 8); data <<= (shift * 8); index &= ~3; } tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); } static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index) { u32 data; __le32 tmp; u8 shift = index & 3; index &= ~3; generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); data = __le32_to_cpu(tmp); data >>= (shift * 8); data &= 0xff; return (u8)data; } static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) { u32 mask = 0xff; __le32 tmp; u16 byen = BYTE_EN_BYTE; u8 shift = index & 3; data &= mask; if (index & 3) { byen <<= shift; mask <<= (shift * 8); data <<= (shift * 8); index &= ~3; } tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); } static u16 ocp_reg_read(struct r8152 *tp, u16 addr) { u16 ocp_base, ocp_index; ocp_base = addr & 0xf000; if (ocp_base != tp->ocp_base) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base); tp->ocp_base = ocp_base; } ocp_index = (addr & 0x0fff) | 0xb000; return ocp_read_word(tp, MCU_TYPE_PLA, ocp_index); } static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data) { u16 ocp_base, ocp_index; ocp_base = addr & 0xf000; if (ocp_base != tp->ocp_base) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base); tp->ocp_base = ocp_base; } ocp_index = (addr & 0x0fff) | 0xb000; ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data); } static inline void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value) { ocp_reg_write(tp, OCP_BASE_MII + reg_addr * 2, value); } static inline int r8152_mdio_read(struct r8152 *tp, u32 reg_addr) { return ocp_reg_read(tp, OCP_BASE_MII + reg_addr * 2); } static void sram_write(struct r8152 *tp, u16 addr, u16 data) { ocp_reg_write(tp, OCP_SRAM_ADDR, addr); ocp_reg_write(tp, OCP_SRAM_DATA, data); } static u16 sram_read(struct r8152 *tp, u16 addr) { ocp_reg_write(tp, OCP_SRAM_ADDR, addr); return ocp_reg_read(tp, OCP_SRAM_DATA); } static int read_mii_word(struct net_device *netdev, int phy_id, int reg) { struct r8152 *tp = netdev_priv(netdev); int ret; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; if (phy_id != R8152_PHY_ID) return -EINVAL; ret = r8152_mdio_read(tp, reg); return ret; } static void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) { struct r8152 *tp = netdev_priv(netdev); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (phy_id != R8152_PHY_ID) return; r8152_mdio_write(tp, reg, val); } static int r8152_submit_rx(struct r8152 *tp, struct 
rx_agg *agg, gfp_t mem_flags); static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising); static int __rtl8152_set_mac_address(struct net_device *netdev, void *p, bool in_resume) { struct r8152 *tp = netdev_priv(netdev); struct sockaddr *addr = p; int ret = -EADDRNOTAVAIL; if (!is_valid_ether_addr(addr->sa_data)) goto out1; if (!in_resume) { ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out1; } mutex_lock(&tp->control); eth_hw_addr_set(netdev, addr->sa_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); mutex_unlock(&tp->control); if (!in_resume) usb_autopm_put_interface(tp->intf); out1: return ret; } static int rtl8152_set_mac_address(struct net_device *netdev, void *p) { return __rtl8152_set_mac_address(netdev, p, false); } /* Devices containing proper chips can support a persistent * host system provided MAC address. * Examples of this are Dell TB15 and Dell WD15 docks */ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; int ret = -EINVAL; u32 ocp_data; unsigned char buf[6]; char *mac_obj_name; acpi_object_type mac_obj_type; int mac_strlen; if (tp->lenovo_macpassthru) { mac_obj_name = "\\MACA"; mac_obj_type = ACPI_TYPE_STRING; mac_strlen = 0x16; } else { /* test for -AD variant of RTL8153 */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); if ((ocp_data & AD_MASK) == 0x1000) { /* test for MAC address pass-through bit */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE); if ((ocp_data & PASS_THRU_MASK) != 1) { netif_dbg(tp, probe, tp->netdev, "No efuse for RTL8153-AD MAC pass through\n"); return -ENODEV; } } else { /* test for RTL8153-BND and RTL8153-BD */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) { netif_dbg(tp, probe, tp->netdev, "Invalid variant for MAC pass through\n"); return -ENODEV; } } mac_obj_name = "\\_SB.AMAC"; mac_obj_type = ACPI_TYPE_BUFFER; mac_strlen = 0x17; } /* returns _AUXMAC_#AABBCCDDEEFF# */ status = acpi_evaluate_object(NULL, mac_obj_name, NULL, &buffer); obj = (union acpi_object *)buffer.pointer; if (!ACPI_SUCCESS(status)) return -ENODEV; if (obj->type != mac_obj_type || obj->string.length != mac_strlen) { netif_warn(tp, probe, tp->netdev, "Invalid buffer for pass-thru MAC addr: (%d, %d)\n", obj->type, obj->string.length); goto amacout; } if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 || strncmp(obj->string.pointer + 0x15, "#", 1) != 0) { netif_warn(tp, probe, tp->netdev, "Invalid header when reading pass-thru MAC addr\n"); goto amacout; } ret = hex2bin(buf, obj->string.pointer + 9, 6); if (!(ret == 0 && is_valid_ether_addr(buf))) { netif_warn(tp, probe, tp->netdev, "Invalid MAC for pass-thru MAC addr: %d, %pM\n", ret, buf); ret = -EINVAL; goto amacout; } memcpy(sa->sa_data, buf, 6); tp->netdev->addr_assign_type = NET_ADDR_STOLEN; netif_info(tp, probe, tp->netdev, "Using pass-thru MAC addr %pM\n", sa->sa_data); amacout: kfree(obj); return ret; } static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa) { struct net_device *dev = tp->netdev; int ret; sa->sa_family = dev->type; ret = eth_platform_get_mac_address(&tp->udev->dev, sa->sa_data); if (ret < 0) { if (tp->version == RTL_VER_01) { ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data); } 
else {
		/* If the device doesn't support MAC pass-through, this is
		 * expected to return a non-zero (error) value.
		 */
		ret = vendor_mac_passthru_addr_read(tp, sa);
		if (ret < 0)
			ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa->sa_data);
		}
	}

	if (ret < 0) {
		netif_err(tp, probe, dev, "Get ether addr fail\n");
	} else if (!is_valid_ether_addr(sa->sa_data)) {
		netif_err(tp, probe, dev, "Invalid ether addr %pM\n",
			  sa->sa_data);
		eth_hw_addr_random(dev);
		ether_addr_copy(sa->sa_data, dev->dev_addr);
		netif_info(tp, probe, dev, "Random ether addr %pM\n",
			   sa->sa_data);
		return 0;
	}

	return ret;
}

static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
{
	struct net_device *dev = tp->netdev;
	struct sockaddr sa;
	int ret;

	ret = determine_ethernet_addr(tp, &sa);
	if (ret < 0)
		return ret;

	if (tp->version == RTL_VER_01)
		eth_hw_addr_set(dev, sa.sa_data);
	else
		ret = __rtl8152_set_mac_address(dev, &sa, in_resume);

	return ret;
}

static void read_bulk_callback(struct urb *urb)
{
	struct net_device *netdev;
	int status = urb->status;
	struct rx_agg *agg;
	struct r8152 *tp;
	unsigned long flags;

	agg = urb->context;
	if (!agg)
		return;

	tp = agg->context;
	if (!tp)
		return;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	if (!test_bit(WORK_ENABLE, &tp->flags))
		return;

	netdev = tp->netdev;

	/* When the link is down, the driver cancels all bulk transfers.
	 * This avoids re-submitting the bulk URB here.
	 */
	if (!netif_carrier_ok(netdev))
		return;

	usb_mark_last_busy(tp->udev);

	switch (status) {
	case 0:
		if (urb->actual_length < ETH_ZLEN)
			break;

		spin_lock_irqsave(&tp->rx_lock, flags);
		list_add_tail(&agg->list, &tp->rx_done);
		spin_unlock_irqrestore(&tp->rx_lock, flags);
		napi_schedule(&tp->napi);
		return;
	case -ESHUTDOWN:
		rtl_set_unplug(tp);
		netif_device_detach(tp->netdev);
		return;
	case -EPROTO:
		urb->actual_length = 0;
		spin_lock_irqsave(&tp->rx_lock, flags);
		list_add_tail(&agg->list, &tp->rx_done);
		spin_unlock_irqrestore(&tp->rx_lock, flags);
		set_bit(RX_EPROTO, &tp->flags);
		schedule_delayed_work(&tp->schedule, 1);
		return;
	case -ENOENT:
		return;	/* the urb is in unlink state */
	case -ETIME:
		if (net_ratelimit())
			netdev_warn(netdev, "maybe reset is needed?\n");
		break;
	default:
		if (net_ratelimit())
			netdev_warn(netdev, "Rx status %d\n", status);
		break;
	}

	r8152_submit_rx(tp, agg, GFP_ATOMIC);
}

static void write_bulk_callback(struct urb *urb)
{
	struct net_device_stats *stats;
	struct net_device *netdev;
	struct tx_agg *agg;
	struct r8152 *tp;
	unsigned long flags;
	int status = urb->status;

	agg = urb->context;
	if (!agg)
		return;

	tp = agg->context;
	if (!tp)
		return;

	netdev = tp->netdev;
	stats = &netdev->stats;

	if (status) {
		if (net_ratelimit())
			netdev_warn(netdev, "Tx status %d\n", status);
		stats->tx_errors += agg->skb_num;
	} else {
		stats->tx_packets += agg->skb_num;
		stats->tx_bytes += agg->skb_len;
	}

	spin_lock_irqsave(&tp->tx_lock, flags);
	list_add_tail(&agg->list, &tp->tx_free);
	spin_unlock_irqrestore(&tp->tx_lock, flags);

	usb_autopm_put_interface_async(tp->intf);

	if (!netif_carrier_ok(netdev))
		return;

	if (!test_bit(WORK_ENABLE, &tp->flags))
		return;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	if (!skb_queue_empty(&tp->tx_queue))
		tasklet_schedule(&tp->tx_tl);
}

static void intr_callback(struct urb *urb)
{
	struct r8152 *tp;
	__le16 *d;
	int status = urb->status;
	int res;

	tp = urb->context;
	if (!tp)
		return;

	if (!test_bit(WORK_ENABLE, &tp->flags))
		return;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	switch (status) {
	case 0:			/* success */
		break;
	case -ECONNRESET:	/* unlink */
	case -ESHUTDOWN:
		netif_device_detach(tp->netdev);
		fallthrough;
	case -ENOENT:
	case -EPROTO:
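		/* -ECONNRESET and -ENOENT mean the urb was unlinked, while
		 * -ESHUTDOWN (and typically -EPROTO) indicate the device or
		 * host controller is going away; none of these is worth a
		 * resubmit, so stop here.
		 */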
netif_info(tp, intr, tp->netdev, "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: if (net_ratelimit()) netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); goto resubmit; /* -EPIPE: should clear the halt */ default: netif_info(tp, intr, tp->netdev, "intr status %d\n", status); goto resubmit; } d = urb->transfer_buffer; if (INTR_LINK & __le16_to_cpu(d[0])) { if (!netif_carrier_ok(tp->netdev)) { set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } } else { if (netif_carrier_ok(tp->netdev)) { netif_stop_queue(tp->netdev); set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } } resubmit: res = usb_submit_urb(urb, GFP_ATOMIC); if (res == -ENODEV) { rtl_set_unplug(tp); netif_device_detach(tp->netdev); } else if (res) { netif_err(tp, intr, tp->netdev, "can't resubmit intr, status %d\n", res); } } static inline void *rx_agg_align(void *data) { return (void *)ALIGN((uintptr_t)data, RX_ALIGN); } static inline void *tx_agg_align(void *data) { return (void *)ALIGN((uintptr_t)data, TX_ALIGN); } static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg) { list_del(&agg->info_list); usb_free_urb(agg->urb); put_page(agg->page); kfree(agg); atomic_dec(&tp->rx_count); } static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags) { struct net_device *netdev = tp->netdev; int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1; unsigned int order = get_order(tp->rx_buf_sz); struct rx_agg *rx_agg; unsigned long flags; rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node); if (!rx_agg) return NULL; rx_agg->page = alloc_pages(mflags | __GFP_COMP | __GFP_NOWARN, order); if (!rx_agg->page) goto free_rx; rx_agg->buffer = page_address(rx_agg->page); rx_agg->urb = usb_alloc_urb(0, mflags); if (!rx_agg->urb) goto free_buf; rx_agg->context = tp; INIT_LIST_HEAD(&rx_agg->list); INIT_LIST_HEAD(&rx_agg->info_list); spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&rx_agg->info_list, &tp->rx_info); spin_unlock_irqrestore(&tp->rx_lock, flags); atomic_inc(&tp->rx_count); return rx_agg; free_buf: __free_pages(rx_agg->page, order); free_rx: kfree(rx_agg); return NULL; } static void free_all_mem(struct r8152 *tp) { struct rx_agg *agg, *agg_next; unsigned long flags; int i; spin_lock_irqsave(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list) free_rx_agg(tp, agg); spin_unlock_irqrestore(&tp->rx_lock, flags); WARN_ON(atomic_read(&tp->rx_count)); for (i = 0; i < RTL8152_MAX_TX; i++) { usb_free_urb(tp->tx_info[i].urb); tp->tx_info[i].urb = NULL; kfree(tp->tx_info[i].buffer); tp->tx_info[i].buffer = NULL; tp->tx_info[i].head = NULL; } usb_free_urb(tp->intr_urb); tp->intr_urb = NULL; kfree(tp->intr_buff); tp->intr_buff = NULL; } static int alloc_all_mem(struct r8152 *tp) { struct net_device *netdev = tp->netdev; struct usb_interface *intf = tp->intf; struct usb_host_interface *alt = intf->cur_altsetting; struct usb_host_endpoint *ep_intr = alt->endpoint + 2; int node, i; node = netdev->dev.parent ? 
dev_to_node(netdev->dev.parent) : -1; spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->tx_lock); INIT_LIST_HEAD(&tp->rx_info); INIT_LIST_HEAD(&tp->tx_free); INIT_LIST_HEAD(&tp->rx_done); skb_queue_head_init(&tp->tx_queue); skb_queue_head_init(&tp->rx_queue); atomic_set(&tp->rx_count, 0); for (i = 0; i < RTL8152_MAX_RX; i++) { if (!alloc_rx_agg(tp, GFP_KERNEL)) goto err1; } for (i = 0; i < RTL8152_MAX_TX; i++) { struct urb *urb; u8 *buf; buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; if (buf != tx_agg_align(buf)) { kfree(buf); buf = kmalloc_node(agg_buf_sz + TX_ALIGN, GFP_KERNEL, node); if (!buf) goto err1; } urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { kfree(buf); goto err1; } INIT_LIST_HEAD(&tp->tx_info[i].list); tp->tx_info[i].context = tp; tp->tx_info[i].urb = urb; tp->tx_info[i].buffer = buf; tp->tx_info[i].head = tx_agg_align(buf); list_add_tail(&tp->tx_info[i].list, &tp->tx_free); } tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!tp->intr_urb) goto err1; tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL); if (!tp->intr_buff) goto err1; tp->intr_interval = (int)ep_intr->desc.bInterval; usb_fill_int_urb(tp->intr_urb, tp->udev, tp->pipe_intr, tp->intr_buff, INTBUFSIZE, intr_callback, tp, tp->intr_interval); return 0; err1: free_all_mem(tp); return -ENOMEM; } static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp) { struct tx_agg *agg = NULL; unsigned long flags; if (list_empty(&tp->tx_free)) return NULL; spin_lock_irqsave(&tp->tx_lock, flags); if (!list_empty(&tp->tx_free)) { struct list_head *cursor; cursor = tp->tx_free.next; list_del_init(cursor); agg = list_entry(cursor, struct tx_agg, list); } spin_unlock_irqrestore(&tp->tx_lock, flags); return agg; } /* r8152_csum_workaround() * The hw limits the value of the transport offset. When the offset is out of * range, calculate the checksum by sw. 
*/ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb, struct sk_buff_head *list) { if (skb_shinfo(skb)->gso_size) { netdev_features_t features = tp->netdev->features; struct sk_buff *segs, *seg, *next; struct sk_buff_head seg_list; features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); segs = skb_gso_segment(skb, features); if (IS_ERR(segs) || !segs) goto drop; __skb_queue_head_init(&seg_list); skb_list_walk_safe(segs, seg, next) { skb_mark_not_on_list(seg); __skb_queue_tail(&seg_list, seg); } skb_queue_splice(&seg_list, list); dev_kfree_skb(skb); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb_checksum_help(skb) < 0) goto drop; __skb_queue_head(list, skb); } else { struct net_device_stats *stats; drop: stats = &tp->netdev->stats; stats->tx_dropped++; dev_kfree_skb(skb); } } static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) { if (skb_vlan_tag_present(skb)) { u32 opts2; opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb)); desc->opts2 |= cpu_to_le32(opts2); } } static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb) { u32 opts2 = le32_to_cpu(desc->opts2); if (opts2 & RX_VLAN_TAG) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb, u32 len) { u32 mss = skb_shinfo(skb)->gso_size; u32 opts1, opts2 = 0; int ret = TX_CSUM_SUCCESS; WARN_ON_ONCE(len > TX_LEN_MAX); opts1 = len | TX_FS | TX_LS; if (mss) { u32 transport_offset = (u32)skb_transport_offset(skb); if (transport_offset > GTTCPHO_MAX) { netif_warn(tp, tx_err, tp->netdev, "Invalid transport offset 0x%x for TSO\n", transport_offset); ret = TX_CSUM_TSO; goto unavailable; } switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts1 |= GTSENDV4; break; case htons(ETH_P_IPV6): if (skb_cow_head(skb, 0)) { ret = TX_CSUM_TSO; goto unavailable; } tcp_v6_gso_csum_prep(skb); opts1 |= GTSENDV6; break; default: WARN_ON_ONCE(1); break; } opts1 |= transport_offset << GTTCPHO_SHIFT; opts2 |= min(mss, MSS_MAX) << MSS_SHIFT; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u32 transport_offset = (u32)skb_transport_offset(skb); u8 ip_protocol; if (transport_offset > TCPHO_MAX) { netif_warn(tp, tx_err, tp->netdev, "Invalid transport offset 0x%x\n", transport_offset); ret = TX_CSUM_NONE; goto unavailable; } switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts2 |= IPV4_CS; ip_protocol = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): opts2 |= IPV6_CS; ip_protocol = ipv6_hdr(skb)->nexthdr; break; default: ip_protocol = IPPROTO_RAW; break; } if (ip_protocol == IPPROTO_TCP) opts2 |= TCP_CS; else if (ip_protocol == IPPROTO_UDP) opts2 |= UDP_CS; else WARN_ON_ONCE(1); opts2 |= transport_offset << TCPHO_SHIFT; } desc->opts2 = cpu_to_le32(opts2); desc->opts1 = cpu_to_le32(opts1); unavailable: return ret; } static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) { struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue; int remain, ret; u8 *tx_data; __skb_queue_head_init(&skb_head); spin_lock(&tx_queue->lock); skb_queue_splice_init(tx_queue, &skb_head); spin_unlock(&tx_queue->lock); tx_data = agg->head; agg->skb_num = 0; agg->skb_len = 0; remain = agg_buf_sz; while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) { struct tx_desc *tx_desc; struct sk_buff *skb; unsigned int len; skb = __skb_dequeue(&skb_head); if (!skb) break; len = skb->len + sizeof(*tx_desc); if (len > remain) { __skb_queue_head(&skb_head, skb); break; } tx_data = tx_agg_align(tx_data); 
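/* Each packet occupies a TX_ALIGN-aligned slot in the aggregation
 * buffer: a tx_desc header immediately followed by the frame bytes.
 */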
tx_desc = (struct tx_desc *)tx_data; if (r8152_tx_csum(tp, tx_desc, skb, skb->len)) { r8152_csum_workaround(tp, skb, &skb_head); continue; } rtl_tx_vlan_tag(tx_desc, skb); tx_data += sizeof(*tx_desc); len = skb->len; if (skb_copy_bits(skb, 0, tx_data, len) < 0) { struct net_device_stats *stats = &tp->netdev->stats; stats->tx_dropped++; dev_kfree_skb_any(skb); tx_data -= sizeof(*tx_desc); continue; } tx_data += len; agg->skb_len += len; agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; dev_kfree_skb_any(skb); remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); if (tp->dell_tb_rx_agg_bug) break; } if (!skb_queue_empty(&skb_head)) { spin_lock(&tx_queue->lock); skb_queue_splice(&skb_head, tx_queue); spin_unlock(&tx_queue->lock); } netif_tx_lock(tp->netdev); if (netif_queue_stopped(tp->netdev) && skb_queue_len(&tp->tx_queue) < tp->tx_qlen) netif_wake_queue(tp->netdev); netif_tx_unlock(tp->netdev); ret = usb_autopm_get_interface_async(tp->intf); if (ret < 0) goto out_tx_fill; usb_fill_bulk_urb(agg->urb, tp->udev, tp->pipe_out, agg->head, (int)(tx_data - (u8 *)agg->head), (usb_complete_t)write_bulk_callback, agg); ret = usb_submit_urb(agg->urb, GFP_ATOMIC); if (ret < 0) usb_autopm_put_interface_async(tp->intf); out_tx_fill: return ret; } static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) { u8 checksum = CHECKSUM_NONE; u32 opts2, opts3; if (!(tp->netdev->features & NETIF_F_RXCSUM)) goto return_result; opts2 = le32_to_cpu(rx_desc->opts2); opts3 = le32_to_cpu(rx_desc->opts3); if (opts2 & RD_IPV4_CS) { if (opts3 & IPF) checksum = CHECKSUM_NONE; else if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) checksum = CHECKSUM_UNNECESSARY; else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) checksum = CHECKSUM_UNNECESSARY; } else if (opts2 & RD_IPV6_CS) { if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) checksum = CHECKSUM_UNNECESSARY; else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) checksum = CHECKSUM_UNNECESSARY; } return_result: return checksum; } static inline bool rx_count_exceed(struct r8152 *tp) { return atomic_read(&tp->rx_count) > RTL8152_MAX_RX; } static inline int agg_offset(struct rx_agg *agg, void *addr) { return (int)(addr - agg->buffer); } static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags) { struct rx_agg *agg, *agg_next, *agg_free = NULL; unsigned long flags; spin_lock_irqsave(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) { if (page_count(agg->page) == 1) { if (!agg_free) { list_del_init(&agg->list); agg_free = agg; continue; } if (rx_count_exceed(tp)) { list_del_init(&agg->list); free_rx_agg(tp, agg); } break; } } spin_unlock_irqrestore(&tp->rx_lock, flags); if (!agg_free && atomic_read(&tp->rx_count) < tp->rx_pending) agg_free = alloc_rx_agg(tp, mflags); return agg_free; } static int rx_bottom(struct r8152 *tp, int budget) { unsigned long flags; struct list_head *cursor, *next, rx_queue; int ret = 0, work_done = 0; struct napi_struct *napi = &tp->napi; if (!skb_queue_empty(&tp->rx_queue)) { while (work_done < budget) { struct sk_buff *skb = __skb_dequeue(&tp->rx_queue); struct net_device *netdev = tp->netdev; struct net_device_stats *stats = &netdev->stats; unsigned int pkt_len; if (!skb) break; pkt_len = skb->len; napi_gro_receive(napi, skb); work_done++; stats->rx_packets++; stats->rx_bytes += pkt_len; } } if (list_empty(&tp->rx_done) || work_done >= budget) goto out1; clear_bit(RX_EPROTO, &tp->flags); INIT_LIST_HEAD(&rx_queue); spin_lock_irqsave(&tp->rx_lock, flags); list_splice_init(&tp->rx_done, &rx_queue); 
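/* Detach the completed buffers to a private list while holding the
 * lock; the per-packet work below then runs without rx_lock held.
 */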
spin_unlock_irqrestore(&tp->rx_lock, flags);

	list_for_each_safe(cursor, next, &rx_queue) {
		struct rx_desc *rx_desc;
		struct rx_agg *agg, *agg_free;
		int len_used = 0;
		struct urb *urb;
		u8 *rx_data;

		/* A bulk transfer of USB may contain many packets, so the
		 * total number of packets may exceed the budget. Deal with
		 * all packets in the current bulk transfer, and defer
		 * handling the next bulk transfer to the next schedule if
		 * the budget is exhausted.
		 */
		if (work_done >= budget)
			break;

		list_del_init(cursor);

		agg = list_entry(cursor, struct rx_agg, list);
		urb = agg->urb;
		if (urb->status != 0 || urb->actual_length < ETH_ZLEN)
			goto submit;

		agg_free = rtl_get_free_rx(tp, GFP_ATOMIC);

		rx_desc = agg->buffer;
		rx_data = agg->buffer;
		len_used += sizeof(struct rx_desc);

		while (urb->actual_length > len_used) {
			struct net_device *netdev = tp->netdev;
			struct net_device_stats *stats = &netdev->stats;
			unsigned int pkt_len, rx_frag_head_sz, len;
			struct sk_buff *skb;
			bool use_frags;

			WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);

			pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
			if (pkt_len < ETH_ZLEN)
				break;

			len_used += pkt_len;
			if (urb->actual_length < len_used)
				break;

			pkt_len -= ETH_FCS_LEN;
			len = pkt_len;
			rx_data += sizeof(struct rx_desc);

			if (!agg_free || tp->rx_copybreak > len)
				use_frags = false;
			else
				use_frags = true;

			if (use_frags) {
				/* If the budget is exhausted, the packet
				 * would be queued in the driver. That is,
				 * napi_gro_frags() wouldn't be called, so
				 * we couldn't use napi_get_frags().
				 */
				if (work_done >= budget) {
					rx_frag_head_sz = tp->rx_copybreak;
					skb = napi_alloc_skb(napi,
							     rx_frag_head_sz);
				} else {
					rx_frag_head_sz = 0;
					skb = napi_get_frags(napi);
				}
			} else {
				rx_frag_head_sz = 0;
				skb = napi_alloc_skb(napi, len);
			}

			if (!skb) {
				stats->rx_dropped++;
				goto find_next_rx;
			}

			skb->ip_summed = r8152_rx_csum(tp, rx_desc);
			rtl_rx_vlan_tag(rx_desc, skb);

			if (use_frags) {
				if (rx_frag_head_sz) {
					memcpy(skb->data, rx_data,
					       rx_frag_head_sz);
					skb_put(skb, rx_frag_head_sz);
					len -= rx_frag_head_sz;
					rx_data += rx_frag_head_sz;
					skb->protocol = eth_type_trans(skb,
								       netdev);
				}

				skb_add_rx_frag(skb, 0, agg->page,
						agg_offset(agg, rx_data),
						len, SKB_DATA_ALIGN(len));
				get_page(agg->page);
			} else {
				memcpy(skb->data, rx_data, len);
				skb_put(skb, len);
				skb->protocol = eth_type_trans(skb, netdev);
			}

			if (work_done < budget) {
				if (use_frags)
					napi_gro_frags(napi);
				else
					napi_gro_receive(napi, skb);

				work_done++;
				stats->rx_packets++;
				stats->rx_bytes += pkt_len;
			} else {
				__skb_queue_tail(&tp->rx_queue, skb);
			}

find_next_rx:
			rx_data = rx_agg_align(rx_data + len + ETH_FCS_LEN);
			rx_desc = (struct rx_desc *)rx_data;
			len_used = agg_offset(agg, rx_data);
			len_used += sizeof(struct rx_desc);
		}

		WARN_ON(!agg_free && page_count(agg->page) > 1);

		if (agg_free) {
			spin_lock_irqsave(&tp->rx_lock, flags);
			if (page_count(agg->page) == 1) {
				list_add(&agg_free->list, &tp->rx_used);
			} else {
				list_add_tail(&agg->list, &tp->rx_used);
				agg = agg_free;
				urb = agg->urb;
			}
			spin_unlock_irqrestore(&tp->rx_lock, flags);
		}

submit:
		if (!ret) {
			ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
		} else {
			urb->actual_length = 0;
			list_add_tail(&agg->list, next);
		}
	}

	/* Splice the remaining list back to rx_done for the next schedule */
	if (!list_empty(&rx_queue)) {
		spin_lock_irqsave(&tp->rx_lock, flags);
		list_splice(&rx_queue, &tp->rx_done);
		spin_unlock_irqrestore(&tp->rx_lock, flags);
	}

out1:
	return work_done;
}
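/* Illustrative sketch (not part of the driver): the stride used when
 * rx_bottom() above walks the packets inside one rx aggregation buffer.
 * Every frame is followed by the 4-byte FCS, and the next rx_desc starts
 * at the next RX_ALIGN boundary. The helper name is made up for
 * illustration only.
 */
static inline u8 *rx_agg_next_desc_example(u8 *rx_data, unsigned int pkt_len)
{
	/* skip the payload plus the FCS, then round up to the alignment */
	return rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN);
}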
static void tx_bottom(struct r8152 *tp)
{
	int res;

	do {
		struct net_device *netdev = tp->netdev;
		struct tx_agg *agg;

		if (skb_queue_empty(&tp->tx_queue))
			break;

		agg = r8152_get_tx_agg(tp);
		if (!agg)
			break;

		res = r8152_tx_agg_fill(tp, agg);
		if (!res)
			continue;

		if (res == -ENODEV) {
			rtl_set_unplug(tp);
			netif_device_detach(netdev);
		} else {
			struct net_device_stats *stats = &netdev->stats;
			unsigned long flags;

			netif_warn(tp, tx_err, netdev,
				   "failed tx_urb %d\n", res);
			stats->tx_dropped += agg->skb_num;

			spin_lock_irqsave(&tp->tx_lock, flags);
			list_add_tail(&agg->list, &tp->tx_free);
			spin_unlock_irqrestore(&tp->tx_lock, flags);
		}
	} while (res == 0);
}

static void bottom_half(struct tasklet_struct *t)
{
	struct r8152 *tp = from_tasklet(tp, t, tx_tl);

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	if (!test_bit(WORK_ENABLE, &tp->flags))
		return;

	/* When the link is down, the driver cancels all bulk transfers.
	 * This avoids re-submitting the bulk URB here.
	 */
	if (!netif_carrier_ok(tp->netdev))
		return;

	clear_bit(SCHEDULE_TASKLET, &tp->flags);

	tx_bottom(tp);
}

static int r8152_poll(struct napi_struct *napi, int budget)
{
	struct r8152 *tp = container_of(napi, struct r8152, napi);
	int work_done;

	if (!budget)
		return 0;

	work_done = rx_bottom(tp, budget);

	if (work_done < budget) {
		if (!napi_complete_done(napi, work_done))
			goto out;
		if (!list_empty(&tp->rx_done))
			napi_schedule(napi);
	}

out:
	return work_done;
}

static int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
{
	int ret;

	/* The rx would be stopped, so skip submitting */
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
	    !test_bit(WORK_ENABLE, &tp->flags) ||
	    !netif_carrier_ok(tp->netdev))
		return 0;

	usb_fill_bulk_urb(agg->urb, tp->udev, tp->pipe_in,
			  agg->buffer, tp->rx_buf_sz,
			  (usb_complete_t)read_bulk_callback, agg);

	ret = usb_submit_urb(agg->urb, mem_flags);
	if (ret == -ENODEV) {
		rtl_set_unplug(tp);
		netif_device_detach(tp->netdev);
	} else if (ret) {
		struct urb *urb = agg->urb;
		unsigned long flags;

		urb->actual_length = 0;
		spin_lock_irqsave(&tp->rx_lock, flags);
		list_add_tail(&agg->list, &tp->rx_done);
		spin_unlock_irqrestore(&tp->rx_lock, flags);

		netif_err(tp, rx_err, tp->netdev,
			  "Couldn't submit rx[%p], ret = %d\n", agg, ret);

		napi_schedule(&tp->napi);
	}

	return ret;
}

static void rtl_drop_queued_tx(struct r8152 *tp)
{
	struct net_device_stats *stats = &tp->netdev->stats;
	struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
	struct sk_buff *skb;

	if (skb_queue_empty(tx_queue))
		return;

	__skb_queue_head_init(&skb_head);
	spin_lock_bh(&tx_queue->lock);
	skb_queue_splice_init(tx_queue, &skb_head);
	spin_unlock_bh(&tx_queue->lock);

	while ((skb = __skb_dequeue(&skb_head))) {
		dev_kfree_skb(skb);
		stats->tx_dropped++;
	}
}

static void rtl8152_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct r8152 *tp = netdev_priv(netdev);

	netif_warn(tp, tx_err, netdev, "Tx timeout\n");

	usb_queue_reset_device(tp->intf);
}

static void rtl8152_set_rx_mode(struct net_device *netdev)
{
	struct r8152 *tp = netdev_priv(netdev);

	if (netif_carrier_ok(netdev)) {
		set_bit(RTL8152_SET_RX_MODE, &tp->flags);
		schedule_delayed_work(&tp->schedule, 0);
	}
}

static void _rtl8152_set_rx_mode(struct net_device *netdev)
{
	struct r8152 *tp = netdev_priv(netdev);
	u32 mc_filter[2];	/* Multicast hash filter */
	__le32 tmp[2];
	u32 ocp_data;

	netif_stop_queue(netdev);
	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
	ocp_data &= ~RCR_ACPT_ALL;
	ocp_data |= RCR_AB | RCR_APM;

	if (netdev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps.
*/ netif_notice(tp, link, netdev, "Promiscuous mode enabled\n"); ocp_data |= RCR_AM | RCR_AAP; mc_filter[1] = 0xffffffff; mc_filter[0] = 0xffffffff; } else if ((netdev->flags & IFF_MULTICAST && netdev_mc_count(netdev) > multicast_filter_limit) || (netdev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. */ ocp_data |= RCR_AM; mc_filter[1] = 0xffffffff; mc_filter[0] = 0xffffffff; } else { mc_filter[1] = 0; mc_filter[0] = 0; if (netdev->flags & IFF_MULTICAST) { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, netdev) { int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); ocp_data |= RCR_AM; } } } tmp[0] = __cpu_to_le32(swab32(mc_filter[1])); tmp[1] = __cpu_to_le32(swab32(mc_filter[0])); pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(tmp), tmp); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); netif_wake_queue(netdev); } static netdev_features_t rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { u32 mss = skb_shinfo(skb)->gso_size; int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX; if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && skb_transport_offset(skb) > max_offset) features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) features &= ~NETIF_F_GSO_MASK; return features; } static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); skb_tx_timestamp(skb); skb_queue_tail(&tp->tx_queue, skb); if (!list_empty(&tp->tx_free)) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { set_bit(SCHEDULE_TASKLET, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } else { usb_mark_last_busy(tp->udev); tasklet_schedule(&tp->tx_tl); } } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) { netif_stop_queue(netdev); } return NETDEV_TX_OK; } static void r8152b_reset_packet_filter(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC); ocp_data &= ~FMC_FCR_MCU_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data); ocp_data |= FMC_FCR_MCU_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data); } static void rtl8152_nic_reset(struct r8152 *tp) { u32 ocp_data; int i; switch (tp->version) { case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ocp_data &= ~CR_TE; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); ocp_data &= ~BMU_RESET_EP_IN; ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data |= CDC_ECM_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ocp_data &= ~CR_RE; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); ocp_data |= BMU_RESET_EP_IN; ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~CDC_ECM_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); break; default: ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) break; usleep_range(100, 400); } break; } } static void set_tx_qlen(struct r8152 *tp) { tp->tx_qlen = agg_buf_sz / (mtu_to_size(tp->netdev->mtu) + sizeof(struct tx_desc)); } 
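/* Worked example for set_tx_qlen() (illustrative; assumes the default
 * 16384-byte aggregation buffer, a 1500-byte MTU, an 8-byte tx_desc
 * (two 32-bit option words), and mtu_to_size() expanding to
 * MTU + VLAN Ethernet header + FCS = 1522):
 *
 *	tx_qlen = 16384 / (1522 + 8) = 10
 *
 * i.e. roughly ten maximum-sized frames fit into one tx URB.
 */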
static inline u16 rtl8152_get_speed(struct r8152 *tp)
{
	return ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
}

static void rtl_eee_plus_en(struct r8152 *tp, bool enable)
{
	u32 ocp_data;

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
	if (enable)
		ocp_data |= EEEP_CR_EEEP_TX;
	else
		ocp_data &= ~EEEP_CR_EEEP_TX;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
}

static void rtl_set_eee_plus(struct r8152 *tp)
{
	if (rtl8152_get_speed(tp) & _10bps)
		rtl_eee_plus_en(tp, true);
	else
		rtl_eee_plus_en(tp, false);
}

static void rxdy_gated_en(struct r8152 *tp, bool enable)
{
	u32 ocp_data;

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
	if (enable)
		ocp_data |= RXDY_GATED_EN;
	else
		ocp_data &= ~RXDY_GATED_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
}

static int rtl_start_rx(struct r8152 *tp)
{
	struct rx_agg *agg, *agg_next;
	struct list_head tmp_list;
	unsigned long flags;
	int ret = 0, i = 0;

	INIT_LIST_HEAD(&tmp_list);

	spin_lock_irqsave(&tp->rx_lock, flags);
	INIT_LIST_HEAD(&tp->rx_done);
	INIT_LIST_HEAD(&tp->rx_used);
	list_splice_init(&tp->rx_info, &tmp_list);
	spin_unlock_irqrestore(&tp->rx_lock, flags);

	list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) {
		INIT_LIST_HEAD(&agg->list);

		/* Only RTL8152_MAX_RX rx_agg need to be submitted. */
		if (++i > RTL8152_MAX_RX) {
			spin_lock_irqsave(&tp->rx_lock, flags);
			list_add_tail(&agg->list, &tp->rx_used);
			spin_unlock_irqrestore(&tp->rx_lock, flags);
		} else if (unlikely(ret < 0)) {
			spin_lock_irqsave(&tp->rx_lock, flags);
			list_add_tail(&agg->list, &tp->rx_done);
			spin_unlock_irqrestore(&tp->rx_lock, flags);
		} else {
			ret = r8152_submit_rx(tp, agg, GFP_KERNEL);
		}
	}

	spin_lock_irqsave(&tp->rx_lock, flags);
	WARN_ON(!list_empty(&tp->rx_info));
	list_splice(&tmp_list, &tp->rx_info);
	spin_unlock_irqrestore(&tp->rx_lock, flags);

	return ret;
}
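/* Illustrative sketch (not part of the driver): the pattern shared by
 * rtl_start_rx() above and rtl_stop_rx() below. The rx_info list is
 * spliced to a private list under rx_lock so that the following
 * (possibly sleeping) per-buffer work can iterate without the spinlock,
 * and the buffers are spliced back afterwards. The helper name and its
 * callback parameter are made up for illustration only.
 */
static void rtl_for_each_rx_agg_example(struct r8152 *tp,
					void (*fn)(struct rx_agg *agg))
{
	struct rx_agg *agg, *agg_next;
	struct list_head tmp_list;
	unsigned long flags;

	INIT_LIST_HEAD(&tmp_list);

	spin_lock_irqsave(&tp->rx_lock, flags);
	list_splice_init(&tp->rx_info, &tmp_list);
	spin_unlock_irqrestore(&tp->rx_lock, flags);

	list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list)
		fn(agg);	/* may sleep, e.g. usb_kill_urb() */

	spin_lock_irqsave(&tp->rx_lock, flags);
	list_splice(&tmp_list, &tp->rx_info);
	spin_unlock_irqrestore(&tp->rx_lock, flags);
}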
static int rtl_stop_rx(struct r8152 *tp)
{
	struct rx_agg *agg, *agg_next;
	struct list_head tmp_list;
	unsigned long flags;

	INIT_LIST_HEAD(&tmp_list);

	/* usb_kill_urb() cannot be called in atomic context.
	 * Therefore, move the list of rx_info to a temporary one so that
	 * list_for_each_entry_safe() can run without holding the spinlock.
	 */
	spin_lock_irqsave(&tp->rx_lock, flags);
	list_splice_init(&tp->rx_info, &tmp_list);
	spin_unlock_irqrestore(&tp->rx_lock, flags);

	list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) {
		/* At least RTL8152_MAX_RX rx_agg have a page_count of 1,
		 * so the other ones can be freed safely.
		 */
		if (page_count(agg->page) > 1)
			free_rx_agg(tp, agg);
		else
			usb_kill_urb(agg->urb);
	}

	/* Move the temporary list back to rx_info */
	spin_lock_irqsave(&tp->rx_lock, flags);
	WARN_ON(!list_empty(&tp->rx_info));
	list_splice(&tmp_list, &tp->rx_info);
	spin_unlock_irqrestore(&tp->rx_lock, flags);

	while (!skb_queue_empty(&tp->rx_queue))
		dev_kfree_skb(__skb_dequeue(&tp->rx_queue));

	return 0;
}

static void rtl_set_ifg(struct r8152 *tp, u16 speed)
{
	u32 ocp_data;

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1);
	ocp_data &= ~IFG_MASK;
	if ((speed & (_10bps | _100bps)) && !(speed & FULL_DUP)) {
		ocp_data |= IFG_144NS;
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data);

		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
		ocp_data &= ~TX10MIDLE_EN;
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);
	} else {
		ocp_data |= IFG_96NS;
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data);

		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
		ocp_data |= TX10MIDLE_EN;
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);
	}
}

static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp)
{
	ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN,
		       OWN_UPDATE | OWN_CLEAR);
}

static int rtl_enable(struct r8152 *tp)
{
	u32 ocp_data;

	r8152b_reset_packet_filter(tp);

	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR);
	ocp_data |= CR_RE | CR_TE;
	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);

	switch (tp->version) {
	case RTL_VER_01:
	case RTL_VER_02:
	case RTL_VER_03:
	case RTL_VER_04:
	case RTL_VER_05:
	case RTL_VER_06:
	case RTL_VER_07:
		break;
	default:
		r8153b_rx_agg_chg_indicate(tp);
		break;
	}

	rxdy_gated_en(tp, false);

	return 0;
}

static int rtl8152_enable(struct r8152 *tp)
{
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return -ENODEV;

	set_tx_qlen(tp);
	rtl_set_eee_plus(tp);

	return rtl_enable(tp);
}

static void r8153_set_rx_early_timeout(struct r8152 *tp)
{
	u32 ocp_data = tp->coalesce / 8;

	switch (tp->version) {
	case RTL_VER_03:
	case RTL_VER_04:
	case RTL_VER_05:
	case RTL_VER_06:
		ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT,
			       ocp_data);
		break;

	case RTL_VER_08:
	case RTL_VER_09:
	case RTL_VER_14:
		/* The RTL8153B uses USB_RX_EXTRA_AGGR_TMR for rx timeout
		 * primarily. For USB_RX_EARLY_TIMEOUT, we fix it to 128ns.
*/ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, 128 / 8); ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, ocp_data); break; case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, 640 / 8); ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, ocp_data); break; default: break; } } static void r8153_set_rx_early_size(struct r8152 *tp) { u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 4); break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 8); break; case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 8); break; default: WARN_ON_ONCE(1); break; } } static int rtl8153_enable(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); rtl_set_eee_plus(tp); r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); rtl_set_ifg(tp, rtl8152_get_speed(tp)); switch (tp->version) { case RTL_VER_09: case RTL_VER_14: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); break; default: break; } return rtl_enable(tp); } static void rtl_disable(struct r8152 *tp) { u32 ocp_data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl_drop_queued_tx(tp); for (i = 0; i < RTL8152_MAX_TX; i++) usb_kill_urb(tp->tx_info[i].urb); rxdy_gated_en(tp, true); for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY) break; usleep_range(1000, 2000); } for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY) break; usleep_range(1000, 2000); } rtl_stop_rx(tp); rtl8152_nic_reset(tp); } static void r8152_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL); if (enable) ocp_data |= POWER_CUT; else ocp_data &= ~POWER_CUT; ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS); ocp_data &= ~RESUME_INDICATE; ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data); } static void rtl_rx_vlan_en(struct r8152 *tp, bool enable) { u32 ocp_data; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); if (enable) ocp_data |= CPCR_RX_VLAN; else ocp_data &= ~CPCR_RX_VLAN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); break; case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: default: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR1); if (enable) ocp_data |= OUTER_VLAN 
| INNER_VLAN; else ocp_data &= ~(OUTER_VLAN | INNER_VLAN); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR1, ocp_data); break; } } static int rtl8152_set_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = features ^ dev->features; struct r8152 *tp = netdev_priv(dev); int ret; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; mutex_lock(&tp->control); if (changed & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) rtl_rx_vlan_en(tp, true); else rtl_rx_vlan_en(tp, false); } mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out: return ret; } #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl_get_wol(struct r8152 *tp) { u32 ocp_data; u32 wolopts = 0; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); if (ocp_data & LINK_ON_WAKE_EN) wolopts |= WAKE_PHY; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); if (ocp_data & UWF_EN) wolopts |= WAKE_UCAST; if (ocp_data & BWF_EN) wolopts |= WAKE_BCAST; if (ocp_data & MWF_EN) wolopts |= WAKE_MCAST; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL); if (ocp_data & MAGIC_EN) wolopts |= WAKE_MAGIC; return wolopts; } static void __rtl_set_wol(struct r8152 *tp, u32 wolopts) { u32 ocp_data; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data &= ~LINK_ON_WAKE_EN; if (wolopts & WAKE_PHY) ocp_data |= LINK_ON_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN); if (wolopts & WAKE_UCAST) ocp_data |= UWF_EN; if (wolopts & WAKE_BCAST) ocp_data |= BWF_EN; if (wolopts & WAKE_MCAST) ocp_data |= MWF_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL); ocp_data &= ~MAGIC_EN; if (wolopts & WAKE_MAGIC) ocp_data |= MAGIC_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data); if (wolopts & WAKE_ANY) device_set_wakeup_enable(&tp->udev->dev, true); else device_set_wakeup_enable(&tp->udev->dev, false); } static void r8153_mac_clk_speed_down(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); /* MAC clock speed down */ if (enable) ocp_data |= MAC_CLK_SPDWN_EN; else ocp_data &= ~MAC_CLK_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); } static void r8156_mac_clk_spd(struct r8152 *tp, bool enable) { u32 ocp_data; /* MAC clock speed down */ if (enable) { /* aldps_spdwn_ratio, tp10_spdwn_ratio */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0x0403); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); ocp_data &= ~EEE_SPDWN_RATIO_MASK; ocp_data |= MAC_CLK_SPDWN_EN | 0x03; /* eee_spdwn_ratio */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); } else { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); ocp_data &= ~MAC_CLK_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); } } static void r8153_u1u2en(struct r8152 *tp, bool enable) { u8 u1u2[8]; if (enable) memset(u1u2, 0xff, sizeof(u1u2)); else memset(u1u2, 0x00, sizeof(u1u2)); usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2); } static void r8153b_u1u2en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_LPM_CONFIG); if (enable) ocp_data |= LPM_U1U2_EN; else ocp_data &= 
~LPM_U1U2_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_LPM_CONFIG, ocp_data); } static void r8153_u2p3en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); if (enable) ocp_data |= U2P3_ENABLE; else ocp_data &= ~U2P3_ENABLE; ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); } static void r8153b_ups_flags(struct r8152 *tp) { u32 ups_flags = 0; if (tp->ups_info.green) ups_flags |= UPS_FLAGS_EN_GREEN; if (tp->ups_info.aldps) ups_flags |= UPS_FLAGS_EN_ALDPS; if (tp->ups_info.eee) ups_flags |= UPS_FLAGS_EN_EEE; if (tp->ups_info.flow_control) ups_flags |= UPS_FLAGS_EN_FLOW_CTR; if (tp->ups_info.eee_ckdiv) ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; if (tp->ups_info.eee_cmod_lv) ups_flags |= UPS_FLAGS_EEE_CMOD_LV_EN; if (tp->ups_info.r_tune) ups_flags |= UPS_FLAGS_R_TUNE; if (tp->ups_info._10m_ckdiv) ups_flags |= UPS_FLAGS_EN_10M_CKDIV; if (tp->ups_info.eee_plloff_100) ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; if (tp->ups_info.eee_plloff_giga) ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; if (tp->ups_info._250m_ckdiv) ups_flags |= UPS_FLAGS_250M_CKDIV; if (tp->ups_info.ctap_short_off) ups_flags |= UPS_FLAGS_CTAP_SHORT_DIS; switch (tp->ups_info.speed_duplex) { case NWAY_10M_HALF: ups_flags |= ups_flags_speed(1); break; case NWAY_10M_FULL: ups_flags |= ups_flags_speed(2); break; case NWAY_100M_HALF: ups_flags |= ups_flags_speed(3); break; case NWAY_100M_FULL: ups_flags |= ups_flags_speed(4); break; case NWAY_1000M_FULL: ups_flags |= ups_flags_speed(5); break; case FORCE_10M_HALF: ups_flags |= ups_flags_speed(6); break; case FORCE_10M_FULL: ups_flags |= ups_flags_speed(7); break; case FORCE_100M_HALF: ups_flags |= ups_flags_speed(8); break; case FORCE_100M_FULL: ups_flags |= ups_flags_speed(9); break; default: break; } ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } static void r8156_ups_flags(struct r8152 *tp) { u32 ups_flags = 0; if (tp->ups_info.green) ups_flags |= UPS_FLAGS_EN_GREEN; if (tp->ups_info.aldps) ups_flags |= UPS_FLAGS_EN_ALDPS; if (tp->ups_info.eee) ups_flags |= UPS_FLAGS_EN_EEE; if (tp->ups_info.flow_control) ups_flags |= UPS_FLAGS_EN_FLOW_CTR; if (tp->ups_info.eee_ckdiv) ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; if (tp->ups_info._10m_ckdiv) ups_flags |= UPS_FLAGS_EN_10M_CKDIV; if (tp->ups_info.eee_plloff_100) ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; if (tp->ups_info.eee_plloff_giga) ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; if (tp->ups_info._250m_ckdiv) ups_flags |= UPS_FLAGS_250M_CKDIV; switch (tp->ups_info.speed_duplex) { case FORCE_10M_HALF: ups_flags |= ups_flags_speed(0); break; case FORCE_10M_FULL: ups_flags |= ups_flags_speed(1); break; case FORCE_100M_HALF: ups_flags |= ups_flags_speed(2); break; case FORCE_100M_FULL: ups_flags |= ups_flags_speed(3); break; case NWAY_10M_HALF: ups_flags |= ups_flags_speed(4); break; case NWAY_10M_FULL: ups_flags |= ups_flags_speed(5); break; case NWAY_100M_HALF: ups_flags |= ups_flags_speed(6); break; case NWAY_100M_FULL: ups_flags |= ups_flags_speed(7); break; case NWAY_1000M_FULL: ups_flags |= ups_flags_speed(8); break; case NWAY_2500M_FULL: ups_flags |= ups_flags_speed(9); break; default: break; } switch (tp->ups_info.lite_mode) { case 1: ups_flags |= 0 << 5; break; case 2: ups_flags |= 2 << 5; break; case 0: default: ups_flags |= 1 << 5; break; } ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } static void rtl_green_en(struct r8152 *tp, bool enable) { u16 data; data = sram_read(tp, SRAM_GREEN_CFG); if (enable) data |= GREEN_ETH_EN; else data &= ~GREEN_ETH_EN; sram_write(tp, 
SRAM_GREEN_CFG, data); tp->ups_info.green = enable; } static void r8153b_green_en(struct r8152 *tp, bool enable) { if (enable) { sram_write(tp, 0x8045, 0); /* 10M abiq&ldvbias */ sram_write(tp, 0x804d, 0x1222); /* 100M short abiq&ldvbias */ sram_write(tp, 0x805d, 0x0022); /* 1000M short abiq&ldvbias */ } else { sram_write(tp, 0x8045, 0x2444); /* 10M abiq&ldvbias */ sram_write(tp, 0x804d, 0x2444); /* 100M short abiq&ldvbias */ sram_write(tp, 0x805d, 0x2444); /* 1000M short abiq&ldvbias */ } rtl_green_en(tp, true); } static u16 r8153_phy_status(struct r8152 *tp, u16 desired) { u16 data; int i; for (i = 0; i < 500; i++) { data = ocp_reg_read(tp, OCP_PHY_STATUS); data &= PHY_STAT_MASK; if (desired) { if (data == desired) break; } else if (data == PHY_STAT_LAN_ON || data == PHY_STAT_PWRDN || data == PHY_STAT_EXT_INIT) { break; } msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } return data; } static void r8153b_ups_en(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { r8153b_ups_flags(tp); ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data |= UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { int i; for (i = 0; i < 500; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); } tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } } } static void r8153c_ups_en(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { r8153b_ups_flags(tp); ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data |= UPS_FORCE_PWR_DOWN; ocp_data &= ~BIT(7); ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { int i; for (i = 0; i < 500; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); } tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data |= BIT(8); ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } } static void r8156_ups_en(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { r8156_ups_flags(tp); ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data |= 
UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); switch (tp->version) { case RTL_VER_13: case RTL_VER_15: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPHY_XTAL); ocp_data &= ~OOBS_POLLING; ocp_write_byte(tp, MCU_TYPE_USB, USB_UPHY_XTAL, ocp_data); break; default: break; } } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } } } static void r8153_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) ocp_data |= PWR_EN | PHASE2_EN; else ocp_data &= ~(PWR_EN | PHASE2_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } static void r8153b_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) ocp_data |= PWR_EN | PHASE2_EN; else ocp_data &= ~PWR_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } static void r8153_queue_wake(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG); if (enable) ocp_data |= UPCOMING_RUNTIME_D3; else ocp_data &= ~UPCOMING_RUNTIME_D3; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG); ocp_data &= ~LINK_CHG_EVENT; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); ocp_data &= ~LINK_CHANGE_FLAG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); } static bool rtl_can_wakeup(struct r8152 *tp) { struct usb_device *udev = tp->udev; return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP); } static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) { if (enable) { u32 ocp_data; __rtl_set_wol(tp, WAKE_ANY); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data |= LINK_OFF_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } else { u32 ocp_data; __rtl_set_wol(tp, tp->saved_wolopts); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data &= ~LINK_OFF_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } } static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); } else { rtl_runtime_suspend_enable(tp, false); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: break; case RTL_VER_05: case RTL_VER_06: default: r8153_u2p3en(tp, true); break; } r8153_u1u2en(tp, true); } } static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_queue_wake(tp, true); r8153b_u1u2en(tp, 
false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); r8153b_ups_en(tp, true); } else { r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } } static void rtl8153c_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_queue_wake(tp, true); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); r8153c_ups_en(tp, true); } else { r8153c_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); r8153b_u1u2en(tp, true); } } static void rtl8156_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_queue_wake(tp, true); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); } else { r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); r8153_u2p3en(tp, true); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } } static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN); ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data); break; case RTL_VER_08: case RTL_VER_09: case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_14: case RTL_VER_15: default: /* The bit 0 ~ 7 are relative with teredo settings. They are * W1C (write 1 to clear), so set all 1 to disable it. */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, 0xff); break; } ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE); ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); } static void rtl_reset_bmu(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET); ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT); ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT; ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); } /* Clear the bp to stop the firmware before loading a new one */ static void rtl_clear_bp(struct r8152 *tp, u16 type) { u16 bp[16] = {0}; u16 bp_num; switch (tp->version) { case RTL_VER_08: case RTL_VER_09: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: if (type == MCU_TYPE_USB) { ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0); bp_num = 16; break; } fallthrough; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_write_byte(tp, type, PLA_BP_EN, 0); fallthrough; case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: bp_num = 8; break; case RTL_VER_14: default: ocp_write_word(tp, type, USB_BP2_EN, 0); bp_num = 16; break; } generic_ocp_write(tp, PLA_BP_0, BYTE_EN_DWORD, bp_num << 1, bp, type); /* wait 3 ms to make sure the firmware is stopped */ usleep_range(3000, 6000); ocp_write_word(tp, type, PLA_BP_BA, 0); } static inline void rtl_reset_ocp_base(struct r8152 *tp) { tp->ocp_base = -1; } static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait) { u16 data, check; int i; data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD); if (request) { data |= PATCH_REQUEST; check = 0; } else { data &= ~PATCH_REQUEST; check = PATCH_READY; } ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data); for (i = 0; wait && i < 5000; i++) { u32 ocp_data; 
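		/* Poll until the PHY reports the expected patch state. With
		 * usleep_range(1000, 2000) per pass, 5000 iterations bound
		 * the wait to roughly 5-10 seconds; bail out early if the
		 * device has become inaccessible.
		 */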
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; usleep_range(1000, 2000); ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT); if ((ocp_data & PATCH_READY) ^ check) break; } if (request && wait && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) { dev_err(&tp->intf->dev, "PHY patch request fail\n"); rtl_phy_patch_request(tp, false, false); return -ETIME; } else { return 0; } } static void rtl_patch_key_set(struct r8152 *tp, u16 key_addr, u16 patch_key) { if (patch_key && key_addr) { sram_write(tp, key_addr, patch_key); sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK); } else if (key_addr) { u16 data; sram_write(tp, 0x0000, 0x0000); data = ocp_reg_read(tp, OCP_PHY_LOCK); data &= ~PATCH_LOCK; ocp_reg_write(tp, OCP_PHY_LOCK, data); sram_write(tp, key_addr, 0x0000); } else { WARN_ON_ONCE(1); } } static int rtl_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key, bool wait) { if (rtl_phy_patch_request(tp, true, wait)) return -ETIME; rtl_patch_key_set(tp, key_addr, patch_key); return 0; } static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait) { rtl_patch_key_set(tp, key_addr, 0); rtl_phy_patch_request(tp, false, wait); return 0; } static bool rtl8152_is_fw_phy_speed_up_ok(struct r8152 *tp, struct fw_phy_speed_up *phy) { u16 fw_offset; u32 length; bool rc = false; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: case RTL_VER_08: case RTL_VER_09: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_14: goto out; case RTL_VER_13: case RTL_VER_15: default: break; } fw_offset = __le16_to_cpu(phy->fw_offset); length = __le32_to_cpu(phy->blk_hdr.length); if (fw_offset < sizeof(*phy) || length <= fw_offset) { dev_err(&tp->intf->dev, "invalid fw_offset\n"); goto out; } length -= fw_offset; if (length & 3) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(phy->fw_reg) != 0x9A00) { dev_err(&tp->intf->dev, "invalid register to load firmware\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_ver_ok(struct r8152 *tp, struct fw_phy_ver *ver) { bool rc = false; switch (tp->version) { case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: break; default: goto out; } if (__le32_to_cpu(ver->blk_hdr.length) != sizeof(*ver)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(ver->ver.addr) != SRAM_GPHY_FW_VER) { dev_err(&tp->intf->dev, "invalid phy ver addr\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_fixup_ok(struct r8152 *tp, struct fw_phy_fixup *fix) { bool rc = false; switch (tp->version) { case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: break; default: goto out; } if (__le32_to_cpu(fix->blk_hdr.length) != sizeof(*fix)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(fix->setting.addr) != OCP_PHY_PATCH_CMD || __le16_to_cpu(fix->setting.data) != BIT(7)) { dev_err(&tp->intf->dev, "invalid phy fixup\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_union_ok(struct r8152 *tp, struct fw_phy_union *phy) { u16 fw_offset; u32 length; bool rc = false; switch (tp->version) { case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: break; default: goto out; } fw_offset = __le16_to_cpu(phy->fw_offset); length = __le32_to_cpu(phy->blk_hdr.length); if (fw_offset < sizeof(*phy) || length <= fw_offset) { 
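		/* The payload must start after this header
		 * (fw_offset >= sizeof(*phy)) and the block must extend
		 * past fw_offset; otherwise the offsets would point
		 * outside the block.
		 */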
dev_err(&tp->intf->dev, "invalid fw_offset\n"); goto out; } length -= fw_offset; if (length & 1) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (phy->pre_num > 2) { dev_err(&tp->intf->dev, "invalid pre_num %d\n", phy->pre_num); goto out; } if (phy->bp_num > 8) { dev_err(&tp->intf->dev, "invalid bp_num %d\n", phy->bp_num); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy) { u32 length; u16 fw_offset, fw_reg, ba_reg, patch_en_addr, mode_reg, bp_start; bool rc = false; switch (tp->version) { case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: fw_reg = 0xa014; ba_reg = 0xa012; patch_en_addr = 0xa01a; mode_reg = 0xb820; bp_start = 0xa000; break; default: goto out; } fw_offset = __le16_to_cpu(phy->fw_offset); if (fw_offset < sizeof(*phy)) { dev_err(&tp->intf->dev, "fw_offset too small\n"); goto out; } length = __le32_to_cpu(phy->blk_hdr.length); if (length < fw_offset) { dev_err(&tp->intf->dev, "invalid fw_offset\n"); goto out; } length -= __le16_to_cpu(phy->fw_offset); if (!length || (length & 1)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(phy->fw_reg) != fw_reg) { dev_err(&tp->intf->dev, "invalid register to load firmware\n"); goto out; } if (__le16_to_cpu(phy->ba_reg) != ba_reg) { dev_err(&tp->intf->dev, "invalid base address register\n"); goto out; } if (__le16_to_cpu(phy->patch_en_addr) != patch_en_addr) { dev_err(&tp->intf->dev, "invalid patch mode enabled register\n"); goto out; } if (__le16_to_cpu(phy->mode_reg) != mode_reg) { dev_err(&tp->intf->dev, "invalid register to switch the mode\n"); goto out; } if (__le16_to_cpu(phy->bp_start) != bp_start) { dev_err(&tp->intf->dev, "invalid start register of break point\n"); goto out; } if (__le16_to_cpu(phy->bp_num) > 4) { dev_err(&tp->intf->dev, "invalid break point number\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac) { u16 fw_reg, bp_ba_addr, bp_en_addr, bp_start, fw_offset; bool rc = false; u32 length, type; int i, max_bp; type = __le32_to_cpu(mac->blk_hdr.type); if (type == RTL_FW_PLA) { switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = 0; bp_start = PLA_BP_0; max_bp = 8; break; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = PLA_BP_EN; bp_start = PLA_BP_0; max_bp = 8; break; case RTL_VER_14: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = USB_BP2_EN; bp_start = PLA_BP_0; max_bp = 16; break; default: goto out; } } else if (type == RTL_FW_USB) { switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: fw_reg = 0xf800; bp_ba_addr = USB_BP_BA; bp_en_addr = USB_BP_EN; bp_start = USB_BP_0; max_bp = 8; break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_14: case RTL_VER_15: fw_reg = 0xe600; bp_ba_addr = USB_BP_BA; bp_en_addr = USB_BP2_EN; bp_start = USB_BP_0; max_bp = 16; break; case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: default: goto out; } } else { goto out; } fw_offset = __le16_to_cpu(mac->fw_offset); if (fw_offset < sizeof(*mac)) { dev_err(&tp->intf->dev, "fw_offset too small\n"); goto out; } length = __le32_to_cpu(mac->blk_hdr.length); if (length < fw_offset) { dev_err(&tp->intf->dev, "invalid 
fw_offset\n"); goto out; } length -= fw_offset; if (length < 4 || (length & 3)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(mac->fw_reg) != fw_reg) { dev_err(&tp->intf->dev, "invalid register to load firmware\n"); goto out; } if (__le16_to_cpu(mac->bp_ba_addr) != bp_ba_addr) { dev_err(&tp->intf->dev, "invalid base address register\n"); goto out; } if (__le16_to_cpu(mac->bp_en_addr) != bp_en_addr) { dev_err(&tp->intf->dev, "invalid enabled mask register\n"); goto out; } if (__le16_to_cpu(mac->bp_start) != bp_start) { dev_err(&tp->intf->dev, "invalid start register of break point\n"); goto out; } if (__le16_to_cpu(mac->bp_num) > max_bp) { dev_err(&tp->intf->dev, "invalid break point number\n"); goto out; } for (i = __le16_to_cpu(mac->bp_num); i < max_bp; i++) { if (mac->bp[i]) { dev_err(&tp->intf->dev, "unused bp%u is not zero\n", i); goto out; } } rc = true; out: return rc; } /* Verify the checksum for the firmware file. It is calculated from the version * field to the end of the file. Compare the result with the checksum field to * make sure the file is correct. */ static long rtl8152_fw_verify_checksum(struct r8152 *tp, struct fw_header *fw_hdr, size_t size) { unsigned char checksum[sizeof(fw_hdr->checksum)]; struct crypto_shash *alg; struct shash_desc *sdesc; size_t len; long rc; alg = crypto_alloc_shash("sha256", 0, 0); if (IS_ERR(alg)) { rc = PTR_ERR(alg); goto out; } if (crypto_shash_digestsize(alg) != sizeof(fw_hdr->checksum)) { rc = -EFAULT; dev_err(&tp->intf->dev, "digestsize incorrect (%u)\n", crypto_shash_digestsize(alg)); goto free_shash; } len = sizeof(*sdesc) + crypto_shash_descsize(alg); sdesc = kmalloc(len, GFP_KERNEL); if (!sdesc) { rc = -ENOMEM; goto free_shash; } sdesc->tfm = alg; len = size - sizeof(fw_hdr->checksum); rc = crypto_shash_digest(sdesc, fw_hdr->version, len, checksum); kfree(sdesc); if (rc) goto free_shash; if (memcmp(fw_hdr->checksum, checksum, sizeof(fw_hdr->checksum))) { dev_err(&tp->intf->dev, "checksum fail\n"); rc = -EFAULT; } free_shash: crypto_free_shash(alg); out: return rc; } static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) { const struct firmware *fw = rtl_fw->fw; struct fw_header *fw_hdr = (struct fw_header *)fw->data; unsigned long fw_flags = 0; long ret = -EFAULT; int i; if (fw->size < sizeof(*fw_hdr)) { dev_err(&tp->intf->dev, "file too small\n"); goto fail; } ret = rtl8152_fw_verify_checksum(tp, fw_hdr, fw->size); if (ret) goto fail; ret = -EFAULT; for (i = sizeof(*fw_hdr); i < fw->size;) { struct fw_block *block = (struct fw_block *)&fw->data[i]; u32 type; if ((i + sizeof(*block)) > fw->size) goto fail; type = __le32_to_cpu(block->type); switch (type) { case RTL_FW_END: if (__le32_to_cpu(block->length) != sizeof(*block)) goto fail; goto fw_end; case RTL_FW_PLA: if (test_bit(FW_FLAGS_PLA, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PLA firmware encountered"); goto fail; } if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check PLA firmware failed\n"); goto fail; } __set_bit(FW_FLAGS_PLA, &fw_flags); break; case RTL_FW_USB: if (test_bit(FW_FLAGS_USB, &fw_flags)) { dev_err(&tp->intf->dev, "multiple USB firmware encountered"); goto fail; } if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check USB firmware failed\n"); goto fail; } __set_bit(FW_FLAGS_USB, &fw_flags); break; case RTL_FW_PHY_START: if (test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC, &fw_flags) || test_bit(FW_FLAGS_NC1, &fw_flags) || 
test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_START fail\n"); goto fail; } if (__le32_to_cpu(block->length) != sizeof(struct fw_phy_patch_key)) { dev_err(&tp->intf->dev, "Invalid length for PHY_START\n"); goto fail; } __set_bit(FW_FLAGS_START, &fw_flags); break; case RTL_FW_PHY_STOP: if (test_bit(FW_FLAGS_STOP, &fw_flags) || !test_bit(FW_FLAGS_START, &fw_flags)) { dev_err(&tp->intf->dev, "Check PHY_STOP fail\n"); goto fail; } if (__le32_to_cpu(block->length) != sizeof(*block)) { dev_err(&tp->intf->dev, "Invalid length for PHY_STOP\n"); goto fail; } __set_bit(FW_FLAGS_STOP, &fw_flags); break; case RTL_FW_PHY_NC: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_NC fail\n"); goto fail; } if (test_bit(FW_FLAGS_NC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_nc_ok(tp, (struct fw_phy_nc *)block)) { dev_err(&tp->intf->dev, "check PHY NC firmware failed\n"); goto fail; } __set_bit(FW_FLAGS_NC, &fw_flags); break; case RTL_FW_PHY_UNION_NC: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC1, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_NC out of order\n"); goto fail; } if (test_bit(FW_FLAGS_NC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY_UNION_NC encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_NC failed\n"); goto fail; } __set_bit(FW_FLAGS_NC, &fw_flags); break; case RTL_FW_PHY_UNION_NC1: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_NC1 out of order\n"); goto fail; } if (test_bit(FW_FLAGS_NC1, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC1 encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_NC1 failed\n"); goto fail; } __set_bit(FW_FLAGS_NC1, &fw_flags); break; case RTL_FW_PHY_UNION_NC2: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_NC2 out of order\n"); goto fail; } if (test_bit(FW_FLAGS_NC2, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC2 encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_NC2 failed\n"); goto fail; } __set_bit(FW_FLAGS_NC2, &fw_flags); break; case RTL_FW_PHY_UNION_UC2: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_UC2 out of order\n"); goto fail; } if (test_bit(FW_FLAGS_UC2, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY UC2 encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_UC2 failed\n"); goto fail; } __set_bit(FW_FLAGS_UC2, &fw_flags); break; case RTL_FW_PHY_UNION_UC: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) 
{ dev_err(&tp->intf->dev, "PHY_UNION_UC out of order\n"); goto fail; } if (test_bit(FW_FLAGS_UC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY UC encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_UC failed\n"); goto fail; } __set_bit(FW_FLAGS_UC, &fw_flags); break; case RTL_FW_PHY_UNION_MISC: if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check RTL_FW_PHY_UNION_MISC failed\n"); goto fail; } break; case RTL_FW_PHY_FIXUP: if (!rtl8152_is_fw_phy_fixup_ok(tp, (struct fw_phy_fixup *)block)) { dev_err(&tp->intf->dev, "check PHY fixup failed\n"); goto fail; } break; case RTL_FW_PHY_SPEED_UP: if (test_bit(FW_FLAGS_SPEED_UP, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY firmware encountered"); goto fail; } if (!rtl8152_is_fw_phy_speed_up_ok(tp, (struct fw_phy_speed_up *)block)) { dev_err(&tp->intf->dev, "check PHY speed up failed\n"); goto fail; } __set_bit(FW_FLAGS_SPEED_UP, &fw_flags); break; case RTL_FW_PHY_VER: if (test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC, &fw_flags) || test_bit(FW_FLAGS_NC1, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "Invalid order to set PHY version\n"); goto fail; } if (test_bit(FW_FLAGS_VER, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY version encountered"); goto fail; } if (!rtl8152_is_fw_phy_ver_ok(tp, (struct fw_phy_ver *)block)) { dev_err(&tp->intf->dev, "check PHY version failed\n"); goto fail; } __set_bit(FW_FLAGS_VER, &fw_flags); break; default: dev_warn(&tp->intf->dev, "Unknown type %u is found\n", type); break; } /* next block */ i += ALIGN(__le32_to_cpu(block->length), 8); } fw_end: if (test_bit(FW_FLAGS_START, &fw_flags) && !test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "without PHY_STOP\n"); goto fail; } return 0; fail: return ret; } static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy, bool wait) { u32 len; u8 *data; rtl_reset_ocp_base(tp); if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) { dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); return; } len = __le32_to_cpu(phy->blk_hdr.length); len -= __le16_to_cpu(phy->fw_offset); data = (u8 *)phy + __le16_to_cpu(phy->fw_offset); if (rtl_phy_patch_request(tp, true, wait)) return; while (len) { u32 ocp_data, size; int i; if (len < 2048) size = len; else size = 2048; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL); ocp_data |= GPHY_PATCH_DONE | BACKUP_RESTRORE; ocp_write_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL, ocp_data); generic_ocp_write(tp, __le16_to_cpu(phy->fw_reg), 0xff, size, data, MCU_TYPE_USB); data += size; len -= size; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL); ocp_data |= POL_GPHY_PATCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL, ocp_data); for (i = 0; i < 1000; i++) { if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & POL_GPHY_PATCH)) break; } if (i == 1000) { dev_err(&tp->intf->dev, "ram code speedup mode timeout\n"); break; } } rtl_reset_ocp_base(tp); rtl_phy_patch_request(tp, false, wait); if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version)) dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); else dev_err(&tp->intf->dev, "ram code speedup mode fail\n"); } static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver) { u16 
ver_addr, ver; ver_addr = __le16_to_cpu(phy_ver->ver.addr); ver = __le16_to_cpu(phy_ver->ver.data); rtl_reset_ocp_base(tp); if (sram_read(tp, ver_addr) >= ver) { dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); return 0; } sram_write(tp, ver_addr, ver); dev_dbg(&tp->intf->dev, "PHY firmware version %x\n", ver); return ver; } static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix) { u16 addr, data; rtl_reset_ocp_base(tp); addr = __le16_to_cpu(fix->setting.addr); data = ocp_reg_read(tp, addr); switch (__le16_to_cpu(fix->bit_cmd)) { case FW_FIXUP_AND: data &= __le16_to_cpu(fix->setting.data); break; case FW_FIXUP_OR: data |= __le16_to_cpu(fix->setting.data); break; case FW_FIXUP_NOT: data &= ~__le16_to_cpu(fix->setting.data); break; case FW_FIXUP_XOR: data ^= __le16_to_cpu(fix->setting.data); break; default: return; } ocp_reg_write(tp, addr, data); dev_dbg(&tp->intf->dev, "applied ocp %x %x\n", addr, data); } static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *phy) { __le16 *data; u32 length; int i, num; rtl_reset_ocp_base(tp); num = phy->pre_num; for (i = 0; i < num; i++) sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr), __le16_to_cpu(phy->pre_set[i].data)); length = __le32_to_cpu(phy->blk_hdr.length); length -= __le16_to_cpu(phy->fw_offset); num = length / 2; data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset)); ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg)); for (i = 0; i < num; i++) ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i])); num = phy->bp_num; for (i = 0; i < num; i++) sram_write(tp, __le16_to_cpu(phy->bp[i].addr), __le16_to_cpu(phy->bp[i].data)); if (phy->bp_num && phy->bp_en.addr) sram_write(tp, __le16_to_cpu(phy->bp_en.addr), __le16_to_cpu(phy->bp_en.data)); dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); } static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy) { u16 mode_reg, bp_index; u32 length, i, num; __le16 *data; rtl_reset_ocp_base(tp); mode_reg = __le16_to_cpu(phy->mode_reg); sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre)); sram_write(tp, __le16_to_cpu(phy->ba_reg), __le16_to_cpu(phy->ba_data)); length = __le32_to_cpu(phy->blk_hdr.length); length -= __le16_to_cpu(phy->fw_offset); num = length / 2; data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset)); ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg)); for (i = 0; i < num; i++) ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i])); sram_write(tp, __le16_to_cpu(phy->patch_en_addr), __le16_to_cpu(phy->patch_en_value)); bp_index = __le16_to_cpu(phy->bp_start); num = __le16_to_cpu(phy->bp_num); for (i = 0; i < num; i++) { sram_write(tp, bp_index, __le16_to_cpu(phy->bp[i])); bp_index += 2; } sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_post)); dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); } static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) { u16 bp_en_addr, type, fw_ver_reg; u32 length; u8 *data; switch (__le32_to_cpu(mac->blk_hdr.type)) { case RTL_FW_PLA: type = MCU_TYPE_PLA; break; case RTL_FW_USB: type = MCU_TYPE_USB; break; default: return; } fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg); if (fw_ver_reg && ocp_read_byte(tp, MCU_TYPE_USB, fw_ver_reg) >= mac->fw_ver_data) { dev_dbg(&tp->intf->dev, "%s firmware has been the newest\n", type ? "PLA" : "USB"); return; } rtl_clear_bp(tp, type); /* Enable backup/restore of MACDBG. This is required after clearing PLA * break points and before applying the PLA firmware. 
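 * Note: when DEBUG_OE is already set in PLA_MACDBG_POST, the writes are
 * skipped, which appears to mean the debug path was configured earlier.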
*/ if (tp->version == RTL_VER_04 && type == MCU_TYPE_PLA && !(ocp_read_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST) & DEBUG_OE)) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_PRE, DEBUG_LTSSM); ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST, DEBUG_LTSSM); } length = __le32_to_cpu(mac->blk_hdr.length); length -= __le16_to_cpu(mac->fw_offset); data = (u8 *)mac; data += __le16_to_cpu(mac->fw_offset); if (generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data, type) < 0) { dev_err(&tp->intf->dev, "Write %s fw fail\n", type ? "PLA" : "USB"); return; } ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr), __le16_to_cpu(mac->bp_ba_value)); if (generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD, ALIGN(__le16_to_cpu(mac->bp_num) << 1, 4), mac->bp, type) < 0) { dev_err(&tp->intf->dev, "Write %s bp fail\n", type ? "PLA" : "USB"); return; } bp_en_addr = __le16_to_cpu(mac->bp_en_addr); if (bp_en_addr) ocp_write_word(tp, type, bp_en_addr, __le16_to_cpu(mac->bp_en_value)); if (fw_ver_reg) ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg, mac->fw_ver_data); dev_dbg(&tp->intf->dev, "successfully applied %s\n", mac->info); } static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut) { struct rtl_fw *rtl_fw = &tp->rtl_fw; const struct firmware *fw; struct fw_header *fw_hdr; struct fw_phy_patch_key *key; u16 key_addr = 0; int i, patch_phy = 1; if (IS_ERR_OR_NULL(rtl_fw->fw)) return; fw = rtl_fw->fw; fw_hdr = (struct fw_header *)fw->data; if (rtl_fw->pre_fw) rtl_fw->pre_fw(tp); for (i = offsetof(struct fw_header, blocks); i < fw->size;) { struct fw_block *block = (struct fw_block *)&fw->data[i]; switch (__le32_to_cpu(block->type)) { case RTL_FW_END: goto post_fw; case RTL_FW_PLA: case RTL_FW_USB: rtl8152_fw_mac_apply(tp, (struct fw_mac *)block); break; case RTL_FW_PHY_START: if (!patch_phy) break; key = (struct fw_phy_patch_key *)block; key_addr = __le16_to_cpu(key->key_reg); rtl_pre_ram_code(tp, key_addr, __le16_to_cpu(key->key_data), !power_cut); break; case RTL_FW_PHY_STOP: if (!patch_phy) break; WARN_ON(!key_addr); rtl_post_ram_code(tp, key_addr, !power_cut); break; case RTL_FW_PHY_NC: rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block); break; case RTL_FW_PHY_VER: patch_phy = rtl8152_fw_phy_ver(tp, (struct fw_phy_ver *)block); break; case RTL_FW_PHY_UNION_NC: case RTL_FW_PHY_UNION_NC1: case RTL_FW_PHY_UNION_NC2: case RTL_FW_PHY_UNION_UC2: case RTL_FW_PHY_UNION_UC: case RTL_FW_PHY_UNION_MISC: if (patch_phy) rtl8152_fw_phy_union_apply(tp, (struct fw_phy_union *)block); break; case RTL_FW_PHY_FIXUP: if (patch_phy) rtl8152_fw_phy_fixup(tp, (struct fw_phy_fixup *)block); break; case RTL_FW_PHY_SPEED_UP: rtl_ram_code_speed_up(tp, (struct fw_phy_speed_up *)block, !power_cut); break; default: break; } i += ALIGN(__le32_to_cpu(block->length), 8); } post_fw: if (rtl_fw->post_fw) rtl_fw->post_fw(tp); rtl_reset_ocp_base(tp); strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE); dev_dbg(&tp->intf->dev, "load %s successfully\n", rtl_fw->version); } static void rtl8152_release_firmware(struct r8152 *tp) { struct rtl_fw *rtl_fw = &tp->rtl_fw; if (!IS_ERR_OR_NULL(rtl_fw->fw)) { release_firmware(rtl_fw->fw); rtl_fw->fw = NULL; } } static int rtl8152_request_firmware(struct r8152 *tp) { struct rtl_fw *rtl_fw = &tp->rtl_fw; long rc; if (rtl_fw->fw || !rtl_fw->fw_name) { dev_info(&tp->intf->dev, "skip request firmware\n"); rc = 0; goto result; } rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, &tp->intf->dev); if (rc < 0) goto result; rc = rtl8152_check_firmware(tp, rtl_fw); if (rc < 
0) release_firmware(rtl_fw->fw); result: if (rc) { rtl_fw->fw = ERR_PTR(rc); dev_warn(&tp->intf->dev, "unable to load firmware patch %s (%ld)\n", rtl_fw->fw_name, rc); } return rc; } static void r8152_aldps_en(struct r8152 *tp, bool enable) { if (enable) { ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | LINKENA | DIS_SDSAVE); } else { ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE); msleep(20); } } static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg) { ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev); ocp_reg_write(tp, OCP_EEE_DATA, reg); ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev); } static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) { u16 data; r8152_mmd_indirect(tp, dev, reg); data = ocp_reg_read(tp, OCP_EEE_DATA); ocp_reg_write(tp, OCP_EEE_AR, 0x0000); return data; } static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data) { r8152_mmd_indirect(tp, dev, reg); ocp_reg_write(tp, OCP_EEE_DATA, data); ocp_reg_write(tp, OCP_EEE_AR, 0x0000); } static void r8152_eee_en(struct r8152 *tp, bool enable) { u16 config1, config2, config3; u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask; config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2); config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask; if (enable) { ocp_data |= EEE_RX_EN | EEE_TX_EN; config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN; config1 |= sd_rise_time(1); config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN; config3 |= fast_snr(42); } else { ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN); config1 |= sd_rise_time(7); config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN); config3 |= fast_snr(511); } ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); ocp_reg_write(tp, OCP_EEE_CONFIG1, config1); ocp_reg_write(tp, OCP_EEE_CONFIG2, config2); ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); } static void r8153_eee_en(struct r8152 *tp, bool enable) { u32 ocp_data; u16 config; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); config = ocp_reg_read(tp, OCP_EEE_CFG); if (enable) { ocp_data |= EEE_RX_EN | EEE_TX_EN; config |= EEE10_EN; } else { ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); config &= ~EEE10_EN; } ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); ocp_reg_write(tp, OCP_EEE_CFG, config); tp->ups_info.eee = enable; } static void r8156_eee_en(struct r8152 *tp, bool enable) { u16 config; r8153_eee_en(tp, enable); config = ocp_reg_read(tp, OCP_EEE_ADV2); if (enable) config |= MDIO_EEE_2_5GT; else config &= ~MDIO_EEE_2_5GT; ocp_reg_write(tp, OCP_EEE_ADV2, config); } static void rtl_eee_enable(struct r8152 *tp, bool enable) { switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: if (enable) { r8152_eee_en(tp, true); r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, tp->eee_adv); } else { r8152_eee_en(tp, false); r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); } break; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: if (enable) { r8153_eee_en(tp, true); ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); } else { r8153_eee_en(tp, false); ocp_reg_write(tp, OCP_EEE_ADV, 0); } break; case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: if (enable) { r8156_eee_en(tp, true); ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); } else { r8156_eee_en(tp, false); ocp_reg_write(tp, OCP_EEE_ADV, 0); } break; default: 
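		/* Unknown chip versions: leave the EEE setting untouched. */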
break; } } static void r8152b_enable_fc(struct r8152 *tp) { u16 anar; anar = r8152_mdio_read(tp, MII_ADVERTISE); anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; r8152_mdio_write(tp, MII_ADVERTISE, anar); tp->ups_info.flow_control = true; } static void rtl8152_disable(struct r8152 *tp) { r8152_aldps_en(tp, false); rtl_disable(tp); r8152_aldps_en(tp, true); } static void r8152b_hw_phy_cfg(struct r8152 *tp) { rtl8152_apply_firmware(tp, false); rtl_eee_enable(tp, tp->eee_en); r8152_aldps_en(tp, true); r8152b_enable_fc(tp); set_bit(PHY_RESET, &tp->flags); } static void wait_oob_link_list_ready(struct r8152 *tp) { u32 ocp_data; int i; for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; usleep_range(1000, 2000); } } static void r8156b_wait_loading_flash(struct r8152 *tp) { if ((ocp_read_word(tp, MCU_TYPE_PLA, PLA_GPHY_CTRL) & GPHY_FLASH) && !(ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & BYPASS_FLASH)) { int i; for (i = 0; i < 100; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE) break; usleep_range(1000, 2000); } } } static void r8152b_exit_oob(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); rtl8152_nic_reset(tp); /* rx share fifo credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL); if (tp->udev->speed == USB_SPEED_FULL || tp->udev->speed == USB_SPEED_LOW) { /* rx share fifo credit near full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_FULL); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_FULL); } else { /* rx share fifo credit near full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_HIGH); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_HIGH); } /* TX share fifo free credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH); ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA, TEST_MODE_DISABLE | TX_SIZE_ADJUST1); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); ocp_data |= TCR0_AUTO_FIFO; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data); } static void r8152b_enter_oob(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, 
RXFIFO_THR1_OOB); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); rtl_disable(tp); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR); ocp_data |= ALDPS_PROXY_MODE; ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); rxdy_gated_en(tp, false); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data |= RCR_APM | RCR_AM | RCR_AB; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); } static int r8153_pre_firmware_1(struct r8152 *tp) { int i; /* Wait till the WTD timer is ready. It would take at most 104 ms. */ for (i = 0; i < 104; i++) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; if (!(ocp_data & WTD1_EN)) break; usleep_range(1000, 2000); } return 0; } static int r8153_post_firmware_1(struct r8152 *tp) { /* set USB_BP_4 to support USB_SPEED_SUPER only */ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER) ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, BP4_SUPER_ONLY); /* reset UPHY timer to 36 ms */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16); return 0; } static int r8153_pre_firmware_2(struct r8152 *tp) { u32 ocp_data; r8153_pre_firmware_1(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0); ocp_data &= ~FW_FIX_SUSPEND; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data); return 0; } static int r8153_post_firmware_2(struct r8152 *tp) { u32 ocp_data; /* enable bp0 if support USB_SPEED_SUPER only */ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN); ocp_data |= BIT(0); ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data); } /* reset UPHY timer to 36 ms */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16); /* enable U3P3 check, set the counter to 4 */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, U3P3_CHECK_EN | 4); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0); ocp_data |= FW_FIX_SUSPEND; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY); ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND; ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data); return 0; } static int r8153_post_firmware_3(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY); ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND; ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); ocp_data |= FW_IP_RESET_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); return 0; } static int r8153b_pre_firmware_1(struct r8152 *tp) { /* enable fc timer and set timer to 1 second. 
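 * The 1000 / 8 below suggests the timer counts in units of 8 ms, so
 * this programs a one-second period.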
*/ ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER, CTRL_TIMER_EN | (1000 / 8)); return 0; } static int r8153b_post_firmware_1(struct r8152 *tp) { u32 ocp_data; /* enable bp0 for RTL8153-BND */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); if (ocp_data & BND_MASK) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN); ocp_data |= BIT(0); ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); ocp_data |= FLOW_CTRL_PATCH_OPT; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); ocp_data |= FW_IP_RESET_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); return 0; } static int r8153c_post_firmware_1(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); ocp_data |= FLOW_CTRL_PATCH_2; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); return 0; } static int r8156a_post_firmware_1(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); ocp_data |= FW_IP_RESET_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); /* Modify U3PHY parameter for compatibility issue */ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4026840e); ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4001acc9); return 0; } static void r8153_aldps_en(struct r8152 *tp, bool enable) { u16 data; data = ocp_reg_read(tp, OCP_POWER_CFG); if (enable) { data |= EN_ALDPS; ocp_reg_write(tp, OCP_POWER_CFG, data); } else { int i; data &= ~EN_ALDPS; ocp_reg_write(tp, OCP_POWER_CFG, data); for (i = 0; i < 20; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; usleep_range(1000, 2000); if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100) break; } } tp->ups_info.aldps = enable; } static void r8153_hw_phy_cfg(struct r8152 *tp) { u32 ocp_data; u16 data; /* disable ALDPS before updating the PHY parameters */ r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ rtl_eee_enable(tp, false); rtl8152_apply_firmware(tp, false); if (tp->version == RTL_VER_03) { data = ocp_reg_read(tp, OCP_EEE_CFG); data &= ~CTAP_SHORT_EN; ocp_reg_write(tp, OCP_EEE_CFG, data); } data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EEE_CLKDIV_EN; ocp_reg_write(tp, OCP_POWER_CFG, data); data = ocp_reg_read(tp, OCP_DOWN_SPEED); data |= EN_10M_BGOFF; ocp_reg_write(tp, OCP_DOWN_SPEED, data); data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EN_10M_PLLOFF; ocp_reg_write(tp, OCP_POWER_CFG, data); sram_write(tp, SRAM_IMPEDANCE, 0x0b13); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); /* Enable LPF corner auto tune */ sram_write(tp, SRAM_LPF_CFG, 0xf70f); /* Adjust 10M Amplitude */ sram_write(tp, SRAM_10M_AMP1, 0x00af); sram_write(tp, SRAM_10M_AMP2, 0x0208); if (tp->eee_en) rtl_eee_enable(tp, true); r8153_aldps_en(tp, true); r8152b_enable_fc(tp); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: break; case RTL_VER_05: case RTL_VER_06: default: r8153_u2p3en(tp, true); break; } set_bit(PHY_RESET, &tp->flags); } static u32 r8152_efuse_read(struct r8152 *tp, u8 addr) { u32 ocp_data; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EFUSE_CMD, 
EFUSE_READ_CMD | addr);
	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EFUSE_CMD);
	ocp_data = (ocp_data & EFUSE_DATA_BIT16) << 9;	/* data of bit16 */
	ocp_data |= ocp_read_word(tp, MCU_TYPE_PLA, PLA_EFUSE_DATA);

	return ocp_data;
}

static void r8153b_hw_phy_cfg(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
	if (ocp_data & PCUT_STATUS) {
		ocp_data &= ~PCUT_STATUS;
		ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
	}

	/* disable ALDPS before updating the PHY parameters */
	r8153_aldps_en(tp, false);

	/* disable EEE before updating the PHY parameters */
	rtl_eee_enable(tp, false);

	/* U1/U2/L1 idle timer. 500 us */
	ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);

	data = r8153_phy_status(tp, 0);

	switch (data) {
	case PHY_STAT_PWRDN:
	case PHY_STAT_EXT_INIT:
		rtl8152_apply_firmware(tp, true);

		data = r8152_mdio_read(tp, MII_BMCR);
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
		break;
	case PHY_STAT_LAN_ON:
	default:
		rtl8152_apply_firmware(tp, false);
		break;
	}

	r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));

	data = sram_read(tp, SRAM_GREEN_CFG);
	data |= R_TUNE_EN;
	sram_write(tp, SRAM_GREEN_CFG, data);
	data = ocp_reg_read(tp, OCP_NCTL_CFG);
	data |= PGA_RETURN_EN;
	ocp_reg_write(tp, OCP_NCTL_CFG, data);

	/* ADC Bias Calibration:
	 * read efuse offset 0x7d to get 17-bit data. Remove the dummy/fake
	 * bit (bit3) to rebuild the real 16-bit data. Write the data to the
	 * ADC ioffset.
	 */
	ocp_data = r8152_efuse_read(tp, 0x7d);
	data = (u16)(((ocp_data & 0x1fff0) >> 1) | (ocp_data & 0x7));
	if (data != 0xffff)
		ocp_reg_write(tp, OCP_ADC_IOFFSET, data);

	/* ups mode tx-link-pulse timing adjustment:
	 * rg_saw_cnt = OCP reg 0xC426 Bit[13:0]
	 * swr_cnt_1ms_ini = 16000000 / rg_saw_cnt
	 * e.g. rg_saw_cnt = 640 gives swr_cnt_1ms_ini = 16000000 / 640 = 25000
	 */
	ocp_data = ocp_reg_read(tp, 0xc426);
	ocp_data &= 0x3fff;
	if (ocp_data) {
		u32 swr_cnt_1ms_ini;

		swr_cnt_1ms_ini = (16000000 / ocp_data) & SAW_CNT_1MS_MASK;
		ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG);
		ocp_data = (ocp_data & ~SAW_CNT_1MS_MASK) | swr_cnt_1ms_ini;
		ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CFG, ocp_data);
	}

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
	ocp_data |= PFM_PWM_SWITCH;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);

	/* Advance EEE */
	if (!rtl_phy_patch_request(tp, true, true)) {
		data = ocp_reg_read(tp, OCP_POWER_CFG);
		data |= EEE_CLKDIV_EN;
		ocp_reg_write(tp, OCP_POWER_CFG, data);
		tp->ups_info.eee_ckdiv = true;

		data = ocp_reg_read(tp, OCP_DOWN_SPEED);
		data |= EN_EEE_CMODE | EN_EEE_1000 | EN_10M_CLKDIV;
		ocp_reg_write(tp, OCP_DOWN_SPEED, data);
		tp->ups_info.eee_cmod_lv = true;
		tp->ups_info._10m_ckdiv = true;
		tp->ups_info.eee_plloff_giga = true;

		ocp_reg_write(tp, OCP_SYSCLK_CFG, 0);
		ocp_reg_write(tp, OCP_SYSCLK_CFG, clk_div_expo(5));
		tp->ups_info._250m_ckdiv = true;

		rtl_phy_patch_request(tp, false, true);
	}

	if (tp->eee_en)
		rtl_eee_enable(tp, true);

	r8153_aldps_en(tp, true);
	r8152b_enable_fc(tp);

	set_bit(PHY_RESET, &tp->flags);
}

static void r8153c_hw_phy_cfg(struct r8152 *tp)
{
	r8153b_hw_phy_cfg(tp);

	tp->ups_info.r_tune = true;
}

static void rtl8153_change_mtu(struct r8152 *tp)
{
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
}

static void r8153_first_init(struct r8152 *tp)
{
	u32 ocp_data;

	rxdy_gated_en(tp, true);
	r8153_teredo_off(tp);

	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
	ocp_data &= ~RCR_ACPT_ALL;
	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);

	rtl8152_nic_reset(tp);
	rtl_reset_bmu(tp);

	ocp_data =
ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); rtl8153_change_mtu(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); ocp_data |= TCR0_AUTO_FIFO; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data); rtl8152_nic_reset(tp); /* rx share fifo credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); /* TX share fifo free credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); } static void r8153_enter_oob(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); /* RX FIFO settings for OOB */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); rtl_disable(tp); rtl_reset_bmu(tp); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); ocp_data &= ~TEREDO_WAKE_MASK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data); break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. Set them to all 1 to clear them. */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); break; default: break; } rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR); ocp_data |= ALDPS_PROXY_MODE; ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); rxdy_gated_en(tp, false); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data |= RCR_APM | RCR_AM | RCR_AB; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); } static void rtl8153_disable(struct r8152 *tp) { r8153_aldps_en(tp, false); rtl_disable(tp); rtl_reset_bmu(tp); r8153_aldps_en(tp, true); } static u32 fc_pause_on_auto(struct r8152 *tp) { return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024); } static u32 fc_pause_off_auto(struct r8152 *tp) { return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024); } static void r8156_fc_parameter(struct r8152 *tp) { u32 pause_on = tp->fc_pause_on ? 
tp->fc_pause_on : fc_pause_on_auto(tp); u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16); } static int rtl8156_enable(struct r8152 *tp) { u32 ocp_data; u16 speed; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; r8156_fc_parameter(tp); set_tx_qlen(tp); rtl_set_eee_plus(tp); r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); speed = rtl8152_get_speed(tp); rtl_set_ifg(tp, speed); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); if (speed & _2500bps) ocp_data &= ~IDLE_SPDWN_EN; else ocp_data |= IDLE_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); if (speed & _1000bps) ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x11); else if (speed & _500bps) ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x3d); if (tp->udev->speed == USB_SPEED_HIGH) { /* USB 0xb45e[3:0] l1_nyet_hird */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); ocp_data &= ~0xf; if (is_flow_control(speed)) ocp_data |= 0xf; else ocp_data |= 0x1; ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); return rtl_enable(tp); } static void rtl8156_disable(struct r8152 *tp) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 0); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 0); rtl8153_disable(tp); } static int rtl8156b_enable(struct r8152 *tp) { u32 ocp_data; u16 speed; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); rtl_set_eee_plus(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM); ocp_data &= ~RX_AGGR_NUM_MASK; ocp_write_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM, ocp_data); r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); speed = rtl8152_get_speed(tp); rtl_set_ifg(tp, speed); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); if (speed & _2500bps) ocp_data &= ~IDLE_SPDWN_EN; else ocp_data |= IDLE_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); if (tp->udev->speed == USB_SPEED_HIGH) { ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); ocp_data &= ~0xf; if (is_flow_control(speed)) ocp_data |= 0xf; else ocp_data |= 0x1; ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); return rtl_enable(tp); } static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising) { u16 bmcr; int ret = 0; if (autoneg == AUTONEG_DISABLE) { if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL) return -EINVAL; switch (speed) { case SPEED_10: bmcr = BMCR_SPEED10; if (duplex == DUPLEX_FULL) { bmcr |= BMCR_FULLDPLX; tp->ups_info.speed_duplex = FORCE_10M_FULL; } else { tp->ups_info.speed_duplex = FORCE_10M_HALF; } break; case SPEED_100: bmcr = BMCR_SPEED100; if (duplex == DUPLEX_FULL) { bmcr |= BMCR_FULLDPLX; tp->ups_info.speed_duplex = FORCE_100M_FULL; } else { tp->ups_info.speed_duplex = FORCE_100M_HALF; } break; case SPEED_1000: if (tp->mii.supports_gmii) { bmcr = BMCR_SPEED1000 | 
BMCR_FULLDPLX; tp->ups_info.speed_duplex = NWAY_1000M_FULL; break; } fallthrough; default: ret = -EINVAL; goto out; } if (duplex == DUPLEX_FULL) tp->mii.full_duplex = 1; else tp->mii.full_duplex = 0; tp->mii.force_media = 1; } else { u16 orig, new1; u32 support; support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; if (tp->mii.supports_gmii) { support |= RTL_ADVERTISED_1000_FULL; if (tp->support_2500full) support |= RTL_ADVERTISED_2500_FULL; } if (!(advertising & support)) return -EINVAL; orig = r8152_mdio_read(tp, MII_ADVERTISE); new1 = orig & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); if (advertising & RTL_ADVERTISED_10_HALF) { new1 |= ADVERTISE_10HALF; tp->ups_info.speed_duplex = NWAY_10M_HALF; } if (advertising & RTL_ADVERTISED_10_FULL) { new1 |= ADVERTISE_10FULL; tp->ups_info.speed_duplex = NWAY_10M_FULL; } if (advertising & RTL_ADVERTISED_100_HALF) { new1 |= ADVERTISE_100HALF; tp->ups_info.speed_duplex = NWAY_100M_HALF; } if (advertising & RTL_ADVERTISED_100_FULL) { new1 |= ADVERTISE_100FULL; tp->ups_info.speed_duplex = NWAY_100M_FULL; } if (orig != new1) { r8152_mdio_write(tp, MII_ADVERTISE, new1); tp->mii.advertising = new1; } if (tp->mii.supports_gmii) { orig = r8152_mdio_read(tp, MII_CTRL1000); new1 = orig & ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); if (advertising & RTL_ADVERTISED_1000_FULL) { new1 |= ADVERTISE_1000FULL; tp->ups_info.speed_duplex = NWAY_1000M_FULL; } if (orig != new1) r8152_mdio_write(tp, MII_CTRL1000, new1); } if (tp->support_2500full) { orig = ocp_reg_read(tp, OCP_10GBT_CTRL); new1 = orig & ~MDIO_AN_10GBT_CTRL_ADV2_5G; if (advertising & RTL_ADVERTISED_2500_FULL) { new1 |= MDIO_AN_10GBT_CTRL_ADV2_5G; tp->ups_info.speed_duplex = NWAY_2500M_FULL; } if (orig != new1) ocp_reg_write(tp, OCP_10GBT_CTRL, new1); } bmcr = BMCR_ANENABLE | BMCR_ANRESTART; tp->mii.force_media = 0; } if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET; r8152_mdio_write(tp, MII_BMCR, bmcr); if (bmcr & BMCR_RESET) { int i; for (i = 0; i < 50; i++) { msleep(20); if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) break; } } out: return ret; } static void rtl8152_up(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8152_aldps_en(tp, false); r8152b_exit_oob(tp); r8152_aldps_en(tp, true); } static void rtl8152_down(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } r8152_power_cut_en(tp, false); r8152_aldps_en(tp, false); r8152b_enter_oob(tp); r8152_aldps_en(tp, true); } static void rtl8153_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); ocp_data |= LANWAKE_CLR_EN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); ocp_data &= ~LANWAKE_PIN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1); ocp_data &= ~DELAY_PHY_PWR_CHG; ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data); r8153_aldps_en(tp, true); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: break; case RTL_VER_05: case RTL_VER_06: default: r8153_u2p3en(tp, true); break; } r8153_u1u2en(tp, true); } static void rtl8153_down(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, 
&tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); ocp_data &= ~LANWAKE_CLR_EN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); r8153_aldps_en(tp, false); r8153_enter_oob(tp); r8153_aldps_en(tp, true); } static void rtl8153b_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153_aldps_en(tp, true); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } static void rtl8153b_down(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data |= PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); r8153_aldps_en(tp, false); r8153_enter_oob(tp); r8153_aldps_en(tp, true); } static void rtl8153c_change_mtu(struct r8152 *tp) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, 10 * 1024 / 64); ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); /* Adjust the tx fifo free credit full threshold, otherwise * the fifo would be too small to send a jumbo frame packet. */ if (tp->netdev->mtu < 8000) ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 2048 / 8); else ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 900 / 8); } static void rtl8153c_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl8152_nic_reset(tp); rtl_reset_bmu(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); rtl8153c_change_mtu(tp); rtl8152_nic_reset(tp); /* rx share fifo credit full threshold */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, 0x02); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 0x08); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data |= BIT(8); ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 
PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153_aldps_en(tp, true); r8153b_u1u2en(tp, true); } static void rtl8156_change_mtu(struct r8152 *tp) { u32 rx_max_size = mtu_to_size(tp->netdev->mtu); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rx_max_size); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); r8156_fc_parameter(tp); /* TX share fifo free credit full threshold */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, ALIGN(rx_max_size + sizeof(struct tx_desc), 1024) / 16); } static void rtl8156_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl8152_nic_reset(tp); rtl_reset_bmu(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); rtl8156_change_mtu(tp); switch (tp->version) { case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG); ocp_data |= ACT_ODMA; ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); break; default: break; } /* share FIFO settings */ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL); ocp_data &= ~RXFIFO_FULL_MASK; ocp_data |= 0x08; ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION); ocp_data &= ~(RG_PWRDN_EN | ALL_SPEED_OFF); ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, ocp_data); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, 0x00600400); if (tp->saved_wolopts != __rtl_get_wol(tp)) { netif_warn(tp, ifup, tp->netdev, "wol setting is changed\n"); __rtl_set_wol(tp, tp->saved_wolopts); } r8153_aldps_en(tp, true); r8153_u2p3en(tp, true); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } static void rtl8156_down(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data |= PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); r8153_aldps_en(tp, false); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); /* RX FIFO settings for OOB */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16); rtl_disable(tp); rtl_reset_bmu(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. 
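 * (W1C: writing 1 clears a bit, writing 0 leaves it unchanged.)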
Set them to all 1 to clear them. */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data |= NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); rtl_rx_vlan_en(tp, true); rxdy_gated_en(tp, false); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data |= RCR_APM | RCR_AM | RCR_AB; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); r8153_aldps_en(tp, true); } static bool rtl8152_in_nway(struct r8152 *tp) { u16 nway_state; ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, 0x2000); tp->ocp_base = 0x2000; ocp_write_byte(tp, MCU_TYPE_PLA, 0xb014, 0x4c); /* phy state */ nway_state = ocp_read_word(tp, MCU_TYPE_PLA, 0xb01a); /* bit 15: TXDIS_STATE, bit 14: ABD_STATE */ if (nway_state & 0xc000) return false; else return true; } static bool rtl8153_in_nway(struct r8152 *tp) { u16 phy_state = ocp_reg_read(tp, OCP_PHY_STATE) & 0xff; if (phy_state == TXDIS_STATE || phy_state == ABD_STATE) return false; else return true; } static void r8156_mdio_force_mode(struct r8152 *tp) { u16 data; /* Select force mode through 0xa5b4 bit 15 * 0: MDIO force mode * 1: MMD force mode */ data = ocp_reg_read(tp, 0xa5b4); if (data & BIT(15)) { data &= ~BIT(15); ocp_reg_write(tp, 0xa5b4, data); } } static void set_carrier(struct r8152 *tp) { struct net_device *netdev = tp->netdev; struct napi_struct *napi = &tp->napi; u16 speed; speed = rtl8152_get_speed(tp); if (speed & LINK_STATUS) { if (!netif_carrier_ok(netdev)) { tp->rtl_ops.enable(tp); netif_stop_queue(netdev); napi_disable(napi); netif_carrier_on(netdev); rtl_start_rx(tp); clear_bit(RTL8152_SET_RX_MODE, &tp->flags); _rtl8152_set_rx_mode(netdev); napi_enable(napi); netif_wake_queue(netdev); netif_info(tp, link, netdev, "carrier on\n"); } else if (netif_queue_stopped(netdev) && skb_queue_len(&tp->tx_queue) < tp->tx_qlen) { netif_wake_queue(netdev); } } else { if (netif_carrier_ok(netdev)) { netif_carrier_off(netdev); tasklet_disable(&tp->tx_tl); napi_disable(napi); tp->rtl_ops.disable(tp); napi_enable(napi); tasklet_enable(&tp->tx_tl); netif_info(tp, link, netdev, "carrier off\n"); } } } static void rtl_work_func_t(struct work_struct *work) { struct r8152 *tp = container_of(work, struct r8152, schedule.work); /* If the device is unplugged or !netif_running(), the workqueue * doesn't need to wake the device, and could return directly. 
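 * Otherwise, usb_autopm_get_interface() below would needlessly resume
 * a runtime-suspended device.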
static void rtl_work_func_t(struct work_struct *work)
{
	struct r8152 *tp = container_of(work, struct r8152, schedule.work);

	/* If the device is unplugged or !netif_running(), the workqueue
	 * doesn't need to wake the device, and could return directly.
	 */
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
	    !netif_running(tp->netdev))
		return;

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	if (!test_bit(WORK_ENABLE, &tp->flags))
		goto out1;

	if (!mutex_trylock(&tp->control)) {
		schedule_delayed_work(&tp->schedule, 0);
		goto out1;
	}

	if (test_and_clear_bit(RTL8152_LINK_CHG, &tp->flags))
		set_carrier(tp);

	if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags))
		_rtl8152_set_rx_mode(tp->netdev);

	/* don't schedule tasklet before linking */
	if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) &&
	    netif_carrier_ok(tp->netdev))
		tasklet_schedule(&tp->tx_tl);

	if (test_and_clear_bit(RX_EPROTO, &tp->flags) &&
	    !list_empty(&tp->rx_done))
		napi_schedule(&tp->napi);

	mutex_unlock(&tp->control);

out1:
	usb_autopm_put_interface(tp->intf);
}

static void rtl_hw_phy_work_func_t(struct work_struct *work)
{
	struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	mutex_lock(&tp->control);

	if (rtl8152_request_firmware(tp) == -ENODEV && tp->rtl_fw.retry) {
		tp->rtl_fw.retry = false;
		tp->rtl_fw.fw = NULL;

		/* Delay execution in case request_firmware() is not ready
		 * yet.
		 */
		queue_delayed_work(system_long_wq, &tp->hw_phy_work, HZ * 10);
		goto ignore_once;
	}

	tp->rtl_ops.hw_phy_cfg(tp);

	rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex,
			  tp->advertising);

ignore_once:
	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);
}

#ifdef CONFIG_PM_SLEEP
static int rtl_notifier(struct notifier_block *nb, unsigned long action,
			void *data)
{
	struct r8152 *tp = container_of(nb, struct r8152, pm_notifier);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		usb_autopm_get_interface(tp->intf);
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		usb_autopm_put_interface(tp->intf);
		break;

	case PM_POST_RESTORE:
	case PM_RESTORE_PREPARE:
	default:
		break;
	}

	return NOTIFY_DONE;
}
#endif

static int rtl8152_open(struct net_device *netdev)
{
	struct r8152 *tp = netdev_priv(netdev);
	int res = 0;

	if (work_busy(&tp->hw_phy_work.work) & WORK_BUSY_PENDING) {
		cancel_delayed_work_sync(&tp->hw_phy_work);
		rtl_hw_phy_work_func_t(&tp->hw_phy_work.work);
	}

	res = alloc_all_mem(tp);
	if (res)
		goto out;

	res = usb_autopm_get_interface(tp->intf);
	if (res < 0)
		goto out_free;

	mutex_lock(&tp->control);

	tp->rtl_ops.up(tp);

	netif_carrier_off(netdev);
	netif_start_queue(netdev);
	set_bit(WORK_ENABLE, &tp->flags);

	res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
	if (res) {
		if (res == -ENODEV)
			netif_device_detach(tp->netdev);
		netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
			   res);
		goto out_unlock;
	}
	napi_enable(&tp->napi);
	tasklet_enable(&tp->tx_tl);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

#ifdef CONFIG_PM_SLEEP
	tp->pm_notifier.notifier_call = rtl_notifier;
	register_pm_notifier(&tp->pm_notifier);
#endif
	return 0;

out_unlock:
	mutex_unlock(&tp->control);
	usb_autopm_put_interface(tp->intf);
out_free:
	free_all_mem(tp);
out:
	return res;
}

static int rtl8152_close(struct net_device *netdev)
{
	struct r8152 *tp = netdev_priv(netdev);
	int res = 0;

#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&tp->pm_notifier);
#endif
	tasklet_disable(&tp->tx_tl);
	clear_bit(WORK_ENABLE, &tp->flags);
	usb_kill_urb(tp->intr_urb);
	cancel_delayed_work_sync(&tp->schedule);
	napi_disable(&tp->napi);
	netif_stop_queue(netdev);

	res = usb_autopm_get_interface(tp->intf);
	if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
		rtl_drop_queued_tx(tp);
		rtl_stop_rx(tp);
	} else {
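		/* The device is still reachable: take the control mutex and
		 * run the normal down sequence.
		 */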
		mutex_lock(&tp->control);
		tp->rtl_ops.down(tp);
		mutex_unlock(&tp->control);
	}

	if (!res)
		usb_autopm_put_interface(tp->intf);

	free_all_mem(tp);

	return res;
}

static void rtl_tally_reset(struct r8152 *tp)
{
	u32 ocp_data;

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY);
	ocp_data |= TALLY_RESET;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY, ocp_data);
}

static void r8152b_init(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	r8152_aldps_en(tp, false);

	if (tp->version == RTL_VER_01) {
		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
		ocp_data &= ~LED_MODE_MASK;
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
	}

	r8152_power_cut_en(tp, false);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
	ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);

	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL);
	ocp_data &= ~MCU_CLK_RATIO_MASK;
	ocp_data |= MCU_CLK_RATIO | D3_CLK_GATED_EN;
	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ocp_data);

	ocp_data = GPHY_STS_MSK | SPEED_DOWN_MSK | SPDWN_RXDV_MSK |
		   SPDWN_LINKCHG_MSK;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);

	rtl_tally_reset(tp);

	/* enable rx aggregation */
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}

static void r8153_init(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;
	int i;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	r8153_u1u2en(tp, false);

	for (i = 0; i < 500; i++) {
		if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
		    AUTOLOAD_DONE)
			break;

		msleep(20);
		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
			break;
	}

	data = r8153_phy_status(tp, 0);

	if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
	    tp->version == RTL_VER_05)
		ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);

	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);

	r8153_u2p3en(tp, false);

	if (tp->version == RTL_VER_04) {
		ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2);
		ocp_data &= ~pwd_dn_scale_mask;
		ocp_data |= pwd_dn_scale(96);
		ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2, ocp_data);

		ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
		ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
		ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);
	} else if (tp->version == RTL_VER_05) {
		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0);
		ocp_data &= ~ECM_ALDPS;
		ocp_write_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0, ocp_data);

		ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
		if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
			ocp_data &= ~DYNAMIC_BURST;
		else
			ocp_data |= DYNAMIC_BURST;
		ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
	} else if (tp->version == RTL_VER_06) {
		ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
		if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
			ocp_data &= ~DYNAMIC_BURST;
		else
			ocp_data |= DYNAMIC_BURST;
		ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);

		r8153_queue_wake(tp, false);

		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
		if (rtl8152_get_speed(tp) & LINK_STATUS)
			ocp_data |= CUR_LINK_OK;
		else
			ocp_data &= ~CUR_LINK_OK;
		ocp_data |= POLL_LINK_CHG;
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
	}

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
	ocp_data |= EP4_FULL_FC;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
	ocp_data &= ~TIMER11_EN;
	ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
	ocp_data &= ~LED_MODE_MASK;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);

	ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM;
	if (tp->version == RTL_VER_04 && tp->udev->speed < USB_SPEED_SUPER)
		ocp_data |= LPM_TIMER_500MS;
	else
		ocp_data |= LPM_TIMER_500US;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2);
	ocp_data &= ~SEN_VAL_MASK;
	ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
	ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);

	ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);

	r8153_power_cut_en(tp, false);
	rtl_runtime_suspend_enable(tp, false);
	r8153_mac_clk_speed_down(tp, false);
	r8153_u1u2en(tp, true);
	usb_enable_lpm(tp->udev);

	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
	ocp_data |= LANWAKE_CLR_EN;
	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);

	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
	ocp_data &= ~LANWAKE_PIN;
	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);

	/* rx aggregation */
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
	if (tp->dell_tb_rx_agg_bug)
		ocp_data |= RX_AGG_DISABLE;
	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);

	rtl_tally_reset(tp);

	switch (tp->udev->speed) {
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		tp->coalesce = COALESCE_SUPER;
		break;
	case USB_SPEED_HIGH:
		tp->coalesce = COALESCE_HIGH;
		break;
	default:
		tp->coalesce = COALESCE_SLOW;
		break;
	}
}

static void r8153b_init(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;
	int i;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	r8153b_u1u2en(tp, false);

	for (i = 0; i < 500; i++) {
		if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
		    AUTOLOAD_DONE)
			break;

		msleep(20);
		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
			break;
	}

	data = r8153_phy_status(tp, 0);
	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);

	r8153_u2p3en(tp, false);

	/* MSC timer = 0xfff * 8ms = 32760 ms */
	ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);

	r8153b_power_cut_en(tp, false);
	r8153b_ups_en(tp, false);
	r8153_queue_wake(tp, false);
	rtl_runtime_suspend_enable(tp, false);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
	if (rtl8152_get_speed(tp) & LINK_STATUS)
		ocp_data |= CUR_LINK_OK;
	else
		ocp_data &= ~CUR_LINK_OK;
	ocp_data |= POLL_LINK_CHG;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);

	if (tp->udev->speed >= USB_SPEED_SUPER)
		r8153b_u1u2en(tp, true);

	usb_enable_lpm(tp->udev);

	/* MAC clock speed down */
	r8153_mac_clk_speed_down(tp, true);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
	ocp_data &= ~PLA_MCU_SPDWN_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);

	if (tp->version == RTL_VER_09) {
		/* Disable Test IO for 32QFN */
		if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
			ocp_data = ocp_read_word(tp, MCU_TYPE_PLA,
						 PLA_PHY_PWR);
			ocp_data |= TEST_IO_OFF;
			ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR,
				       ocp_data);
		}
	}
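	/* Request green ethernet; it is actually programmed later through
	 * rtl_green_en() in the hw_phy_cfg path.
	 */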
	set_bit(GREEN_ETHERNET, &tp->flags);

	/* rx aggregation */
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);

	rtl_tally_reset(tp);

	tp->coalesce = 15000;	/* 15 us */
}

static void r8153c_init(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;
	int i;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	r8153b_u1u2en(tp, false);

	/* Disable spi_en */
	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
	ocp_data &= ~BIT(3);
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcbf0);
	ocp_data |= BIT(1);
	ocp_write_word(tp, MCU_TYPE_USB, 0xcbf0, ocp_data);

	for (i = 0; i < 500; i++) {
		if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
		    AUTOLOAD_DONE)
			break;

		msleep(20);
		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
			return;
	}

	data = r8153_phy_status(tp, 0);

	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);

	r8153_u2p3en(tp, false);

	/* MSC timer = 0xfff * 8ms = 32760 ms */
	ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);

	r8153b_power_cut_en(tp, false);
	r8153c_ups_en(tp, false);
	r8153_queue_wake(tp, false);
	rtl_runtime_suspend_enable(tp, false);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
	if (rtl8152_get_speed(tp) & LINK_STATUS)
		ocp_data |= CUR_LINK_OK;
	else
		ocp_data &= ~CUR_LINK_OK;
	ocp_data |= POLL_LINK_CHG;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);

	r8153b_u1u2en(tp, true);

	usb_enable_lpm(tp->udev);

	/* MAC clock speed down */
	r8153_mac_clk_speed_down(tp, true);

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2);
	ocp_data &= ~BIT(7);
	ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data);

	set_bit(GREEN_ETHERNET, &tp->flags);

	/* rx aggregation */
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);

	rtl_tally_reset(tp);

	tp->coalesce = 15000;	/* 15 us */
}

static void r8156_hw_phy_cfg(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
	if (ocp_data & PCUT_STATUS) {
		ocp_data &= ~PCUT_STATUS;
		ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
	}

	data = r8153_phy_status(tp, 0);
	switch (data) {
	case PHY_STAT_EXT_INIT:
		rtl8152_apply_firmware(tp, true);

		data = ocp_reg_read(tp, 0xa468);
		data &= ~(BIT(3) | BIT(1));
		ocp_reg_write(tp, 0xa468, data);
		break;
	case PHY_STAT_LAN_ON:
	case PHY_STAT_PWRDN:
	default:
		rtl8152_apply_firmware(tp, false);
		break;
	}

	/* disable ALDPS before updating the PHY parameters */
	r8153_aldps_en(tp, false);

	/* disable EEE before updating the PHY parameters */
	rtl_eee_enable(tp, false);

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
	WARN_ON_ONCE(data != PHY_STAT_LAN_ON);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
	ocp_data |= PFM_PWM_SWITCH;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
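	/* The version-specific register and SRAM writes below follow
	 * Realtek's initialization tables; the individual fields are not
	 * publicly documented and are treated as opaque PHY parameters.
	 */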
	switch (tp->version) {
	case RTL_VER_10:
		data = ocp_reg_read(tp, 0xad40);
		data &= ~0x3ff;
		data |= BIT(7) | BIT(2);
		ocp_reg_write(tp, 0xad40, data);

		data = ocp_reg_read(tp, 0xad4e);
		data |= BIT(4);
		ocp_reg_write(tp, 0xad4e, data);
		data = ocp_reg_read(tp, 0xad16);
		data &= ~0x3ff;
		data |= 0x6;
		ocp_reg_write(tp, 0xad16, data);
		data = ocp_reg_read(tp, 0xad32);
		data &= ~0x3f;
		data |= 6;
		ocp_reg_write(tp, 0xad32, data);
		data = ocp_reg_read(tp, 0xac08);
		data &= ~(BIT(12) | BIT(8));
		ocp_reg_write(tp, 0xac08, data);
		data = ocp_reg_read(tp, 0xac8a);
		data |= BIT(12) | BIT(13) | BIT(14);
		data &= ~BIT(15);
		ocp_reg_write(tp, 0xac8a, data);
		data = ocp_reg_read(tp, 0xad18);
		data |= BIT(10);
		ocp_reg_write(tp, 0xad18, data);
		data = ocp_reg_read(tp, 0xad1a);
		data |= 0x3ff;
		ocp_reg_write(tp, 0xad1a, data);
		data = ocp_reg_read(tp, 0xad1c);
		data |= 0x3ff;
		ocp_reg_write(tp, 0xad1c, data);

		data = sram_read(tp, 0x80ea);
		data &= ~0xff00;
		data |= 0xc400;
		sram_write(tp, 0x80ea, data);
		data = sram_read(tp, 0x80eb);
		data &= ~0x0700;
		data |= 0x0300;
		sram_write(tp, 0x80eb, data);
		data = sram_read(tp, 0x80f8);
		data &= ~0xff00;
		data |= 0x1c00;
		sram_write(tp, 0x80f8, data);
		data = sram_read(tp, 0x80f1);
		data &= ~0xff00;
		data |= 0x3000;
		sram_write(tp, 0x80f1, data);

		data = sram_read(tp, 0x80fe);
		data &= ~0xff00;
		data |= 0xa500;
		sram_write(tp, 0x80fe, data);
		data = sram_read(tp, 0x8102);
		data &= ~0xff00;
		data |= 0x5000;
		sram_write(tp, 0x8102, data);
		data = sram_read(tp, 0x8015);
		data &= ~0xff00;
		data |= 0x3300;
		sram_write(tp, 0x8015, data);
		data = sram_read(tp, 0x8100);
		data &= ~0xff00;
		data |= 0x7000;
		sram_write(tp, 0x8100, data);
		data = sram_read(tp, 0x8014);
		data &= ~0xff00;
		data |= 0xf000;
		sram_write(tp, 0x8014, data);
		data = sram_read(tp, 0x8016);
		data &= ~0xff00;
		data |= 0x6500;
		sram_write(tp, 0x8016, data);
		data = sram_read(tp, 0x80dc);
		data &= ~0xff00;
		data |= 0xed00;
		sram_write(tp, 0x80dc, data);
		data = sram_read(tp, 0x80df);
		data |= BIT(8);
		sram_write(tp, 0x80df, data);
		data = sram_read(tp, 0x80e1);
		data &= ~BIT(8);
		sram_write(tp, 0x80e1, data);

		data = ocp_reg_read(tp, 0xbf06);
		data &= ~0x003f;
		data |= 0x0038;
		ocp_reg_write(tp, 0xbf06, data);

		sram_write(tp, 0x819f, 0xddb6);

		ocp_reg_write(tp, 0xbc34, 0x5555);
		data = ocp_reg_read(tp, 0xbf0a);
		data &= ~0x0e00;
		data |= 0x0a00;
		ocp_reg_write(tp, 0xbf0a, data);

		data = ocp_reg_read(tp, 0xbd2c);
		data &= ~BIT(13);
		ocp_reg_write(tp, 0xbd2c, data);
		break;
	case RTL_VER_11:
		data = ocp_reg_read(tp, 0xad16);
		data |= 0x3ff;
		ocp_reg_write(tp, 0xad16, data);
		data = ocp_reg_read(tp, 0xad32);
		data &= ~0x3f;
		data |= 6;
		ocp_reg_write(tp, 0xad32, data);
		data = ocp_reg_read(tp, 0xac08);
		data &= ~(BIT(12) | BIT(8));
		ocp_reg_write(tp, 0xac08, data);
		data = ocp_reg_read(tp, 0xacc0);
		data &= ~0x3;
		data |= BIT(1);
		ocp_reg_write(tp, 0xacc0, data);
		data = ocp_reg_read(tp, 0xad40);
		data &= ~0xe7;
		data |= BIT(6) | BIT(2);
		ocp_reg_write(tp, 0xad40, data);
		data = ocp_reg_read(tp, 0xac14);
		data &= ~BIT(7);
		ocp_reg_write(tp, 0xac14, data);
		data = ocp_reg_read(tp, 0xac80);
		data &= ~(BIT(8) | BIT(9));
		ocp_reg_write(tp, 0xac80, data);
		data = ocp_reg_read(tp, 0xac5e);
		data &= ~0x7;
		data |= BIT(1);
		ocp_reg_write(tp, 0xac5e, data);
		ocp_reg_write(tp, 0xad4c, 0x00a8);
		ocp_reg_write(tp, 0xac5c, 0x01ff);
		data = ocp_reg_read(tp, 0xac8a);
		data &= ~0xf0;
		data |= BIT(4) | BIT(5);
		ocp_reg_write(tp, 0xac8a, data);
		ocp_reg_write(tp, 0xb87c, 0x8157);
		data = ocp_reg_read(tp, 0xb87e);
		data &= ~0xff00;
		data |= 0x0500;
		ocp_reg_write(tp, 0xb87e, data);
		ocp_reg_write(tp, 0xb87c, 0x8159);
		data = ocp_reg_read(tp, 0xb87e);
		data &= ~0xff00;
		data |= 0x0700;
		ocp_reg_write(tp, 0xb87e, data);

		/* AAGC */
		ocp_reg_write(tp, 0xb87c, 0x80a2);
		ocp_reg_write(tp, 0xb87e, 0x0153);
		ocp_reg_write(tp, 0xb87c, 0x809c);
		ocp_reg_write(tp, 0xb87e, 0x0153);

		/* EEE parameter */
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS_2P5G, 0x0056);

		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_USB_CFG);
		ocp_data |= EN_XG_LIP | EN_G_LIP;
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data);
		sram_write(tp, 0x8257, 0x020f);	/* XG PLL */
		sram_write(tp, 0x80ea, 0x7843);	/* GIGA Master */

		if (rtl_phy_patch_request(tp, true, true))
			return;

		/* Advance EEE */
		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
		ocp_data |= EEE_SPDWN_EN;
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);

		data = ocp_reg_read(tp, OCP_DOWN_SPEED);
		data &= ~(EN_EEE_100 | EN_EEE_1000);
		data |= EN_10M_CLKDIV;
		ocp_reg_write(tp, OCP_DOWN_SPEED, data);
		tp->ups_info._10m_ckdiv = true;
		tp->ups_info.eee_plloff_100 = false;
		tp->ups_info.eee_plloff_giga = false;

		data = ocp_reg_read(tp, OCP_POWER_CFG);
		data &= ~EEE_CLKDIV_EN;
		ocp_reg_write(tp, OCP_POWER_CFG, data);
		tp->ups_info.eee_ckdiv = false;

		ocp_reg_write(tp, OCP_SYSCLK_CFG, 0);
		ocp_reg_write(tp, OCP_SYSCLK_CFG, sysclk_div_expo(5));
		tp->ups_info._250m_ckdiv = false;

		rtl_phy_patch_request(tp, false, true);

		/* enable ADC Ibias Cal */
		data = ocp_reg_read(tp, 0xd068);
		data |= BIT(13);
		ocp_reg_write(tp, 0xd068, data);

		/* enable Thermal Sensor */
		data = sram_read(tp, 0x81a2);
		data &= ~BIT(8);
		sram_write(tp, 0x81a2, data);
		data = ocp_reg_read(tp, 0xb54c);
		data &= ~0xff00;
		data |= 0xdb00;
		ocp_reg_write(tp, 0xb54c, data);

		/* Nway 2.5G Lite */
		data = ocp_reg_read(tp, 0xa454);
		data &= ~BIT(0);
		ocp_reg_write(tp, 0xa454, data);

		/* CS DSP solution */
		data = ocp_reg_read(tp, OCP_10GBT_CTRL);
		data |= RTL_ADV2_5G_F_R;
		ocp_reg_write(tp, OCP_10GBT_CTRL, data);
		data = ocp_reg_read(tp, 0xad4e);
		data &= ~BIT(4);
		ocp_reg_write(tp, 0xad4e, data);
		data = ocp_reg_read(tp, 0xa86a);
		data &= ~BIT(0);
		ocp_reg_write(tp, 0xa86a, data);

		/* MDI SWAP */
		if ((ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG) &
		     MID_REVERSE) &&
		    (ocp_reg_read(tp, 0xd068) & BIT(1))) {
			u16 swap_a, swap_b;

			data = ocp_reg_read(tp, 0xd068);
			data &= ~0x1f;
			data |= 0x1; /* p0 */
			ocp_reg_write(tp, 0xd068, data);
			swap_a = ocp_reg_read(tp, 0xd06a);
			data &= ~0x18;
			data |= 0x18; /* p3 */
			ocp_reg_write(tp, 0xd068, data);
			swap_b = ocp_reg_read(tp, 0xd06a);
			data &= ~0x18; /* p0 */
			ocp_reg_write(tp, 0xd068, data);
			ocp_reg_write(tp, 0xd06a,
				      (swap_a & ~0x7ff) | (swap_b & 0x7ff));
			data |= 0x18; /* p3 */
			ocp_reg_write(tp, 0xd068, data);
			ocp_reg_write(tp, 0xd06a,
				      (swap_b & ~0x7ff) | (swap_a & 0x7ff));
			data &= ~0x18;
			data |= 0x08; /* p1 */
			ocp_reg_write(tp, 0xd068, data);
			swap_a = ocp_reg_read(tp, 0xd06a);
			data &= ~0x18;
			data |= 0x10; /* p2 */
			ocp_reg_write(tp, 0xd068, data);
			swap_b = ocp_reg_read(tp, 0xd06a);
			data &= ~0x18;
			data |= 0x08; /* p1 */
			ocp_reg_write(tp, 0xd068, data);
			ocp_reg_write(tp, 0xd06a,
				      (swap_a & ~0x7ff) | (swap_b & 0x7ff));
			data &= ~0x18;
			data |= 0x10; /* p2 */
			ocp_reg_write(tp, 0xd068, data);
			ocp_reg_write(tp, 0xd06a,
				      (swap_b & ~0x7ff) | (swap_a & 0x7ff));
			swap_a = ocp_reg_read(tp, 0xbd5a);
			swap_b = ocp_reg_read(tp, 0xbd5c);
			ocp_reg_write(tp, 0xbd5a,
				      (swap_a & ~0x1f1f) |
				      ((swap_b & 0x1f) << 8) |
				      ((swap_b >> 8) & 0x1f));
			ocp_reg_write(tp, 0xbd5c,
				      (swap_b & ~0x1f1f) |
				      ((swap_a & 0x1f) << 8) |
				      ((swap_a >> 8) & 0x1f));
			swap_a = ocp_reg_read(tp, 0xbc18);
			swap_b = ocp_reg_read(tp, 0xbc1a);
			ocp_reg_write(tp, 0xbc18,
				      (swap_a & ~0x1f1f) |
				      ((swap_b & 0x1f) << 8) |
				      ((swap_b >> 8) & 0x1f));
			ocp_reg_write(tp, 0xbc1a,
				      (swap_b & ~0x1f1f) |
				      ((swap_a & 0x1f) << 8) |
				      ((swap_a >> 8) & 0x1f));
		}
		/* Notify the MAC when the speed is changed to force mode. */
		data = ocp_reg_read(tp, OCP_INTR_EN);
		data |= INTR_SPEED_FORCE;
		ocp_reg_write(tp, OCP_INTR_EN, data);
		break;
	default:
		break;
	}

	rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));

	data = ocp_reg_read(tp, 0xa428);
	data &= ~BIT(9);
	ocp_reg_write(tp, 0xa428, data);
	data = ocp_reg_read(tp, 0xa5ea);
	data &= ~BIT(0);
	ocp_reg_write(tp, 0xa5ea, data);
	tp->ups_info.lite_mode = 0;

	if (tp->eee_en)
		rtl_eee_enable(tp, true);

	r8153_aldps_en(tp, true);
	r8152b_enable_fc(tp);
	r8153_u2p3en(tp, true);

	set_bit(PHY_RESET, &tp->flags);
}

static void r8156b_hw_phy_cfg(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;

	switch (tp->version) {
	case RTL_VER_12:
		ocp_reg_write(tp, 0xbf86, 0x9000);
		data = ocp_reg_read(tp, 0xc402);
		data |= BIT(10);
		ocp_reg_write(tp, 0xc402, data);
		data &= ~BIT(10);
		ocp_reg_write(tp, 0xc402, data);
		ocp_reg_write(tp, 0xbd86, 0x1010);
		ocp_reg_write(tp, 0xbd88, 0x1010);
		data = ocp_reg_read(tp, 0xbd4e);
		data &= ~(BIT(10) | BIT(11));
		data |= BIT(11);
		ocp_reg_write(tp, 0xbd4e, data);
		data = ocp_reg_read(tp, 0xbf46);
		data &= ~0xf00;
		data |= 0x700;
		ocp_reg_write(tp, 0xbf46, data);
		break;
	case RTL_VER_13:
	case RTL_VER_15:
		r8156b_wait_loading_flash(tp);
		break;
	default:
		break;
	}

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
	if (ocp_data & PCUT_STATUS) {
		ocp_data &= ~PCUT_STATUS;
		ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
	}

	data = r8153_phy_status(tp, 0);
	switch (data) {
	case PHY_STAT_EXT_INIT:
		rtl8152_apply_firmware(tp, true);

		data = ocp_reg_read(tp, 0xa466);
		data &= ~BIT(0);
		ocp_reg_write(tp, 0xa466, data);

		data = ocp_reg_read(tp, 0xa468);
		data &= ~(BIT(3) | BIT(1));
		ocp_reg_write(tp, 0xa468, data);
		break;
	case PHY_STAT_LAN_ON:
	case PHY_STAT_PWRDN:
	default:
		rtl8152_apply_firmware(tp, false);
		break;
	}

	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	/* disable ALDPS before updating the PHY parameters */
	r8153_aldps_en(tp, false);

	/* disable EEE before updating the PHY parameters */
	rtl_eee_enable(tp, false);

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
	WARN_ON_ONCE(data != PHY_STAT_LAN_ON);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
	ocp_data |= PFM_PWM_SWITCH;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);

	switch (tp->version) {
	case RTL_VER_12:
		data = ocp_reg_read(tp, 0xbc08);
		data |= BIT(3) | BIT(2);
		ocp_reg_write(tp, 0xbc08, data);

		data = sram_read(tp, 0x8fff);
		data &= ~0xff00;
		data |= 0x0400;
		sram_write(tp, 0x8fff, data);

		data = ocp_reg_read(tp, 0xacda);
		data |= 0xff00;
		ocp_reg_write(tp, 0xacda, data);
		data = ocp_reg_read(tp, 0xacde);
		data |= 0xf000;
		ocp_reg_write(tp, 0xacde, data);
		ocp_reg_write(tp, 0xac8c, 0x0ffc);
		ocp_reg_write(tp, 0xac46, 0xb7b4);
		ocp_reg_write(tp, 0xac50, 0x0fbc);
		ocp_reg_write(tp, 0xac3c, 0x9240);
		ocp_reg_write(tp, 0xac4e, 0x0db4);
		ocp_reg_write(tp, 0xacc6, 0x0707);
		ocp_reg_write(tp, 0xacc8, 0xa0d3);
		ocp_reg_write(tp, 0xad08, 0x0007);

		ocp_reg_write(tp, 0xb87c, 0x8560);
		ocp_reg_write(tp, 0xb87e, 0x19cc);
		ocp_reg_write(tp, 0xb87c, 0x8562);
		ocp_reg_write(tp, 0xb87e, 0x19cc);
		ocp_reg_write(tp, 0xb87c, 0x8564);
		ocp_reg_write(tp, 0xb87e, 0x19cc);
		ocp_reg_write(tp, 0xb87c, 0x8566);
		ocp_reg_write(tp, 0xb87e, 0x147d);
		ocp_reg_write(tp, 0xb87c, 0x8568);
		ocp_reg_write(tp, 0xb87e, 0x147d);
		ocp_reg_write(tp, 0xb87c, 0x856a);
		ocp_reg_write(tp, 0xb87e, 0x147d);
		ocp_reg_write(tp, 0xb87c, 0x8ffe);
		ocp_reg_write(tp, 0xb87e, 0x0907);
		ocp_reg_write(tp, 0xb87c, 0x80d6);
		ocp_reg_write(tp, 0xb87e, 0x2801);
		ocp_reg_write(tp, 0xb87c, 0x80f2);
		ocp_reg_write(tp, 0xb87e, 0x2801);
		ocp_reg_write(tp, 0xb87c, 0x80f4);
		ocp_reg_write(tp, 0xb87e, 0x6077);
		ocp_reg_write(tp, 0xb506, 0x01e7);

		ocp_reg_write(tp, 0xb87c, 0x8013);
		ocp_reg_write(tp, 0xb87e, 0x0700);
		ocp_reg_write(tp, 0xb87c, 0x8fb9);
		ocp_reg_write(tp, 0xb87e, 0x2801);
		ocp_reg_write(tp, 0xb87c, 0x8fba);
		ocp_reg_write(tp, 0xb87e, 0x0100);
		ocp_reg_write(tp, 0xb87c, 0x8fbc);
		ocp_reg_write(tp, 0xb87e, 0x1900);
		ocp_reg_write(tp, 0xb87c, 0x8fbe);
		ocp_reg_write(tp, 0xb87e, 0xe100);
		ocp_reg_write(tp, 0xb87c, 0x8fc0);
		ocp_reg_write(tp, 0xb87e, 0x0800);
		ocp_reg_write(tp, 0xb87c, 0x8fc2);
		ocp_reg_write(tp, 0xb87e, 0xe500);
		ocp_reg_write(tp, 0xb87c, 0x8fc4);
		ocp_reg_write(tp, 0xb87e, 0x0f00);
		ocp_reg_write(tp, 0xb87c, 0x8fc6);
		ocp_reg_write(tp, 0xb87e, 0xf100);
		ocp_reg_write(tp, 0xb87c, 0x8fc8);
		ocp_reg_write(tp, 0xb87e, 0x0400);
		ocp_reg_write(tp, 0xb87c, 0x8fca);
		ocp_reg_write(tp, 0xb87e, 0xf300);
		ocp_reg_write(tp, 0xb87c, 0x8fcc);
		ocp_reg_write(tp, 0xb87e, 0xfd00);
		ocp_reg_write(tp, 0xb87c, 0x8fce);
		ocp_reg_write(tp, 0xb87e, 0xff00);
		ocp_reg_write(tp, 0xb87c, 0x8fd0);
		ocp_reg_write(tp, 0xb87e, 0xfb00);
		ocp_reg_write(tp, 0xb87c, 0x8fd2);
		ocp_reg_write(tp, 0xb87e, 0x0100);
		ocp_reg_write(tp, 0xb87c, 0x8fd4);
		ocp_reg_write(tp, 0xb87e, 0xf400);
		ocp_reg_write(tp, 0xb87c, 0x8fd6);
		ocp_reg_write(tp, 0xb87e, 0xff00);
		ocp_reg_write(tp, 0xb87c, 0x8fd8);
		ocp_reg_write(tp, 0xb87e, 0xf600);

		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG);
		ocp_data |= EN_XG_LIP | EN_G_LIP;
		ocp_write_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data);
		ocp_reg_write(tp, 0xb87c, 0x813d);
		ocp_reg_write(tp, 0xb87e, 0x390e);
		ocp_reg_write(tp, 0xb87c, 0x814f);
		ocp_reg_write(tp, 0xb87e, 0x790e);
		ocp_reg_write(tp, 0xb87c, 0x80b0);
		ocp_reg_write(tp, 0xb87e, 0x0f31);
		data = ocp_reg_read(tp, 0xbf4c);
		data |= BIT(1);
		ocp_reg_write(tp, 0xbf4c, data);
		data = ocp_reg_read(tp, 0xbcca);
		data |= BIT(9) | BIT(8);
		ocp_reg_write(tp, 0xbcca, data);
		ocp_reg_write(tp, 0xb87c, 0x8141);
		ocp_reg_write(tp, 0xb87e, 0x320e);
		ocp_reg_write(tp, 0xb87c, 0x8153);
		ocp_reg_write(tp, 0xb87e, 0x720e);
		ocp_reg_write(tp, 0xb87c, 0x8529);
		ocp_reg_write(tp, 0xb87e, 0x050e);
		data = ocp_reg_read(tp, OCP_EEE_CFG);
		data &= ~CTAP_SHORT_EN;
		ocp_reg_write(tp, OCP_EEE_CFG, data);

		sram_write(tp, 0x816c, 0xc4a0);
		sram_write(tp, 0x8170, 0xc4a0);
		sram_write(tp, 0x8174, 0x04a0);
		sram_write(tp, 0x8178, 0x04a0);
		sram_write(tp, 0x817c, 0x0719);
		sram_write(tp, 0x8ff4, 0x0400);
		sram_write(tp, 0x8ff1, 0x0404);

		ocp_reg_write(tp, 0xbf4a, 0x001b);
		ocp_reg_write(tp, 0xb87c, 0x8033);
		ocp_reg_write(tp, 0xb87e, 0x7c13);
		ocp_reg_write(tp, 0xb87c, 0x8037);
		ocp_reg_write(tp, 0xb87e, 0x7c13);
		ocp_reg_write(tp, 0xb87c, 0x803b);
		ocp_reg_write(tp, 0xb87e, 0xfc32);
		ocp_reg_write(tp, 0xb87c, 0x803f);
		ocp_reg_write(tp, 0xb87e, 0x7c13);
		ocp_reg_write(tp, 0xb87c, 0x8043);
		ocp_reg_write(tp, 0xb87e, 0x7c13);
		ocp_reg_write(tp, 0xb87c, 0x8047);
		ocp_reg_write(tp, 0xb87e, 0x7c13);

		ocp_reg_write(tp, 0xb87c, 0x8145);
		ocp_reg_write(tp, 0xb87e, 0x370e);
		ocp_reg_write(tp, 0xb87c, 0x8157);
		ocp_reg_write(tp, 0xb87e, 0x770e);
		ocp_reg_write(tp, 0xb87c, 0x8169);
		ocp_reg_write(tp, 0xb87e, 0x0d0a);
		ocp_reg_write(tp, 0xb87c, 0x817b);
		ocp_reg_write(tp, 0xb87e, 0x1d0a);

		data = sram_read(tp, 0x8217);
		data &= ~0xff00;
		data |= 0x5000;
		sram_write(tp, 0x8217, data);
		data = sram_read(tp, 0x821a);
		data &= ~0xff00;
		data |= 0x5000;
		sram_write(tp, 0x821a, data);
		sram_write(tp, 0x80da, 0x0403);
		data = sram_read(tp, 0x80dc);
		data &= ~0xff00;
		data |= 0x1000;
		sram_write(tp, 0x80dc, data);
		sram_write(tp, 0x80b3, 0x0384);
		sram_write(tp, 0x80b7, 0x2007);
		data = sram_read(tp, 0x80ba);
		data &= ~0xff00;
		data |= 0x6c00;
		sram_write(tp, 0x80ba, data);
		sram_write(tp, 0x80b5, 0xf009);
		data = sram_read(tp, 0x80bd);
		data &= ~0xff00;
		data |= 0x9f00;
		sram_write(tp, 0x80bd, data);
		sram_write(tp, 0x80c7, 0xf083);
		sram_write(tp, 0x80dd, 0x03f0);
		data = sram_read(tp, 0x80df);
		data &= ~0xff00;
		data |= 0x1000;
		sram_write(tp, 0x80df, data);
		sram_write(tp, 0x80cb, 0x2007);
		data = sram_read(tp, 0x80ce);
		data &= ~0xff00;
		data |= 0x6c00;
		sram_write(tp, 0x80ce, data);
		sram_write(tp, 0x80c9, 0x8009);
		data = sram_read(tp, 0x80d1);
		data &= ~0xff00;
		data |= 0x8000;
		sram_write(tp, 0x80d1, data);
		sram_write(tp, 0x80a3, 0x200a);
		sram_write(tp, 0x80a5, 0xf0ad);
		sram_write(tp, 0x809f, 0x6073);
		sram_write(tp, 0x80a1, 0x000b);
		data = sram_read(tp, 0x80a9);
		data &= ~0xff00;
		data |= 0xc000;
		sram_write(tp, 0x80a9, data);

		if (rtl_phy_patch_request(tp, true, true))
			return;

		data = ocp_reg_read(tp, 0xb896);
		data &= ~BIT(0);
		ocp_reg_write(tp, 0xb896, data);
		data = ocp_reg_read(tp, 0xb892);
		data &= ~0xff00;
		ocp_reg_write(tp, 0xb892, data);
		ocp_reg_write(tp, 0xb88e, 0xc23e);
		ocp_reg_write(tp, 0xb890, 0x0000);
		ocp_reg_write(tp, 0xb88e, 0xc240);
		ocp_reg_write(tp, 0xb890, 0x0103);
		ocp_reg_write(tp, 0xb88e, 0xc242);
		ocp_reg_write(tp, 0xb890, 0x0507);
		ocp_reg_write(tp, 0xb88e, 0xc244);
		ocp_reg_write(tp, 0xb890, 0x090b);
		ocp_reg_write(tp, 0xb88e, 0xc246);
		ocp_reg_write(tp, 0xb890, 0x0c0e);
		ocp_reg_write(tp, 0xb88e, 0xc248);
		ocp_reg_write(tp, 0xb890, 0x1012);
		ocp_reg_write(tp, 0xb88e, 0xc24a);
		ocp_reg_write(tp, 0xb890, 0x1416);
		data = ocp_reg_read(tp, 0xb896);
		data |= BIT(0);
		ocp_reg_write(tp, 0xb896, data);

		rtl_phy_patch_request(tp, false, true);

		data = ocp_reg_read(tp, 0xa86a);
		data |= BIT(0);
		ocp_reg_write(tp, 0xa86a, data);
		data = ocp_reg_read(tp, 0xa6f0);
		data |= BIT(0);
		ocp_reg_write(tp, 0xa6f0, data);

		ocp_reg_write(tp, 0xbfa0, 0xd70d);
		ocp_reg_write(tp, 0xbfa2, 0x4100);
		ocp_reg_write(tp, 0xbfa4, 0xe868);
		ocp_reg_write(tp, 0xbfa6, 0xdc59);
		ocp_reg_write(tp, 0xb54c, 0x3c18);
		data = ocp_reg_read(tp, 0xbfa4);
		data &= ~BIT(5);
		ocp_reg_write(tp, 0xbfa4, data);
		data = sram_read(tp, 0x817d);
		data |= BIT(12);
		sram_write(tp, 0x817d, data);
		break;
	case RTL_VER_13:
		/* 2.5G INRX */
		data = ocp_reg_read(tp, 0xac46);
		data &= ~0x00f0;
		data |= 0x0090;
		ocp_reg_write(tp, 0xac46, data);
		data = ocp_reg_read(tp, 0xad30);
		data &= ~0x0003;
		data |= 0x0001;
		ocp_reg_write(tp, 0xad30, data);
		fallthrough;
	case RTL_VER_15:
		/* EEE parameter */
		ocp_reg_write(tp, 0xb87c, 0x80f5);
		ocp_reg_write(tp, 0xb87e, 0x760e);
		ocp_reg_write(tp, 0xb87c, 0x8107);
		ocp_reg_write(tp, 0xb87e, 0x360e);
		ocp_reg_write(tp, 0xb87c, 0x8551);
		data = ocp_reg_read(tp, 0xb87e);
		data &= ~0xff00;
		data |= 0x0800;
		ocp_reg_write(tp, 0xb87e, data);

		/* ADC_PGA parameter */
		data = ocp_reg_read(tp, 0xbf00);
		data &= ~0xe000;
		data |= 0xa000;
		ocp_reg_write(tp, 0xbf00, data);
		data = ocp_reg_read(tp, 0xbf46);
		data &= ~0x0f00;
		data |= 0x0300;
		ocp_reg_write(tp, 0xbf46, data);

		/* Green Table-PGA, 1G full viterbi */
		sram_write(tp, 0x8044, 0x2417);
		sram_write(tp, 0x804a, 0x2417);
		sram_write(tp, 0x8050, 0x2417);
		sram_write(tp, 0x8056, 0x2417);
		sram_write(tp, 0x805c, 0x2417);
		sram_write(tp, 0x8062, 0x2417);
		sram_write(tp, 0x8068, 0x2417);
		sram_write(tp, 0x806e, 0x2417);
		sram_write(tp, 0x8074, 0x2417);
		sram_write(tp, 0x807a, 0x2417);

		/* XG PLL */
		data = ocp_reg_read(tp, 0xbf84);
		data &= ~0xe000;
		data |= 0xa000;
		ocp_reg_write(tp, 0xbf84, data);
		break;
	default:
		break;
	}
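	/* From here on the configuration is common to all chip versions
	 * handled in the switch above.
	 */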
	/* Notify the MAC when the speed is changed to force mode. */
	data = ocp_reg_read(tp, OCP_INTR_EN);
	data |= INTR_SPEED_FORCE;
	ocp_reg_write(tp, OCP_INTR_EN, data);

	if (rtl_phy_patch_request(tp, true, true))
		return;

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
	ocp_data |= EEE_SPDWN_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);

	data = ocp_reg_read(tp, OCP_DOWN_SPEED);
	data &= ~(EN_EEE_100 | EN_EEE_1000);
	data |= EN_10M_CLKDIV;
	ocp_reg_write(tp, OCP_DOWN_SPEED, data);
	tp->ups_info._10m_ckdiv = true;
	tp->ups_info.eee_plloff_100 = false;
	tp->ups_info.eee_plloff_giga = false;

	data = ocp_reg_read(tp, OCP_POWER_CFG);
	data &= ~EEE_CLKDIV_EN;
	ocp_reg_write(tp, OCP_POWER_CFG, data);
	tp->ups_info.eee_ckdiv = false;

	rtl_phy_patch_request(tp, false, true);

	rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));

	data = ocp_reg_read(tp, 0xa428);
	data &= ~BIT(9);
	ocp_reg_write(tp, 0xa428, data);
	data = ocp_reg_read(tp, 0xa5ea);
	data &= ~BIT(0);
	ocp_reg_write(tp, 0xa5ea, data);
	tp->ups_info.lite_mode = 0;

	if (tp->eee_en)
		rtl_eee_enable(tp, true);

	r8153_aldps_en(tp, true);
	r8152b_enable_fc(tp);
	r8153_u2p3en(tp, true);

	set_bit(PHY_RESET, &tp->flags);
}

static void r8156_init(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;
	int i;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
	ocp_data &= ~EN_ALL_SPEED;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data);

	ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION);
	ocp_data |= BYPASS_MAC_RESET;
	ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data);

	r8153b_u1u2en(tp, false);

	for (i = 0; i < 500; i++) {
		if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
		    AUTOLOAD_DONE)
			break;

		msleep(20);
		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
			return;
	}

	data = r8153_phy_status(tp, 0);
	if (data == PHY_STAT_EXT_INIT) {
		data = ocp_reg_read(tp, 0xa468);
		data &= ~(BIT(3) | BIT(1));
		ocp_reg_write(tp, 0xa468, data);
	}

	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
	WARN_ON_ONCE(data != PHY_STAT_LAN_ON);

	r8153_u2p3en(tp, false);

	/* MSC timer = 0xfff * 8ms = 32760 ms */
	ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);
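	/* U1/U2 are the USB 3.x low-power link states; L1 is the USB 2.0
	 * LPM equivalent.
	 */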
	/* U1/U2/L1 idle timer. 500 us */
	ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);

	r8153b_power_cut_en(tp, false);
	r8156_ups_en(tp, false);
	r8153_queue_wake(tp, false);
	rtl_runtime_suspend_enable(tp, false);

	if (tp->udev->speed >= USB_SPEED_SUPER)
		r8153b_u1u2en(tp, true);

	usb_enable_lpm(tp->udev);

	r8156_mac_clk_spd(tp, true);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
	ocp_data &= ~PLA_MCU_SPDWN_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
	if (rtl8152_get_speed(tp) & LINK_STATUS)
		ocp_data |= CUR_LINK_OK;
	else
		ocp_data &= ~CUR_LINK_OK;
	ocp_data |= POLL_LINK_CHG;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);

	set_bit(GREEN_ETHERNET, &tp->flags);

	/* rx aggregation */
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG);
	ocp_data |= ACT_ODMA;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data);

	r8156_mdio_force_mode(tp);
	rtl_tally_reset(tp);

	tp->coalesce = 15000;	/* 15 us */
}

static void r8156b_init(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;
	int i;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
	ocp_data &= ~EN_ALL_SPEED;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data);

	ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION);
	ocp_data |= BYPASS_MAC_RESET;
	ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
	ocp_data |= RX_DETECT8;
	ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);

	r8153b_u1u2en(tp, false);

	switch (tp->version) {
	case RTL_VER_13:
	case RTL_VER_15:
		r8156b_wait_loading_flash(tp);
		break;
	default:
		break;
	}

	for (i = 0; i < 500; i++) {
		if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
		    AUTOLOAD_DONE)
			break;

		msleep(20);
		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
			return;
	}

	data = r8153_phy_status(tp, 0);
	if (data == PHY_STAT_EXT_INIT) {
		data = ocp_reg_read(tp, 0xa468);
		data &= ~(BIT(3) | BIT(1));
		ocp_reg_write(tp, 0xa468, data);

		data = ocp_reg_read(tp, 0xa466);
		data &= ~BIT(0);
		ocp_reg_write(tp, 0xa466, data);
	}

	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);

	r8153_u2p3en(tp, false);

	/* MSC timer = 0xfff * 8ms = 32760 ms */
	ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);

	/* U1/U2/L1 idle timer. 500 us */
	ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);

	r8153b_power_cut_en(tp, false);
	r8156_ups_en(tp, false);
	r8153_queue_wake(tp, false);
	rtl_runtime_suspend_enable(tp, false);

	if (tp->udev->speed >= USB_SPEED_SUPER)
		r8153b_u1u2en(tp, true);

	usb_enable_lpm(tp->udev);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR);
	ocp_data &= ~SLOT_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
	ocp_data |= FLOW_CTRL_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
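	/* The "600 / 8" below suggests the flow-control timer counts in
	 * 8 ms units (75 ticks for 600 ms), matching the MSC timer
	 * granularity; this is an inference from the arithmetic, not a
	 * documented fact.
	 */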
	/* enable fc timer and set timer to 600 ms. */
	ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER,
		       CTRL_TIMER_EN | (600 / 8));

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL);
	if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) &
	      DACK_DET_EN))
		ocp_data |= FLOW_CTRL_PATCH_2;
	ocp_data &= ~AUTO_SPEEDUP;
	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
	ocp_data |= FC_PATCH_TASK;
	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);

	r8156_mac_clk_spd(tp, true);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
	ocp_data &= ~PLA_MCU_SPDWN_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
	if (rtl8152_get_speed(tp) & LINK_STATUS)
		ocp_data |= CUR_LINK_OK;
	else
		ocp_data &= ~CUR_LINK_OK;
	ocp_data |= POLL_LINK_CHG;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);

	set_bit(GREEN_ETHERNET, &tp->flags);

	/* rx aggregation */
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);

	r8156_mdio_force_mode(tp);
	rtl_tally_reset(tp);

	tp->coalesce = 15000;	/* 15 us */
}

static bool rtl_check_vendor_ok(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *in, *out, *intr;

	if (usb_find_common_endpoints(alt, &in, &out, &intr, NULL) < 0) {
		dev_err(&intf->dev, "Expected endpoints are not found\n");
		return false;
	}

	/* Check Rx endpoint address */
	if (usb_endpoint_num(in) != 1) {
		dev_err(&intf->dev, "Invalid Rx endpoint address\n");
		return false;
	}

	/* Check Tx endpoint address */
	if (usb_endpoint_num(out) != 2) {
		dev_err(&intf->dev, "Invalid Tx endpoint address\n");
		return false;
	}

	/* Check interrupt endpoint address */
	if (usb_endpoint_num(intr) != 3) {
		dev_err(&intf->dev, "Invalid interrupt endpoint address\n");
		return false;
	}

	return true;
}

static int rtl8152_pre_reset(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);
	struct net_device *netdev;

	rtnl_lock();

	if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
		return 0;

	netdev = tp->netdev;
	if (!netif_running(netdev))
		return 0;

	netif_stop_queue(netdev);
	tasklet_disable(&tp->tx_tl);
	clear_bit(WORK_ENABLE, &tp->flags);
	usb_kill_urb(tp->intr_urb);
	cancel_delayed_work_sync(&tp->schedule);
	napi_disable(&tp->napi);
	if (netif_carrier_ok(netdev)) {
		mutex_lock(&tp->control);
		set_bit(IN_PRE_RESET, &tp->flags);
		tp->rtl_ops.disable(tp);
		clear_bit(IN_PRE_RESET, &tp->flags);
		mutex_unlock(&tp->control);
	}

	return 0;
}

static int rtl8152_post_reset(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);
	struct net_device *netdev;
	struct sockaddr sa;

	if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
		goto exit;

	rtl_set_accessible(tp);

	/* reset the MAC address in case of policy change */
	if (determine_ethernet_addr(tp, &sa) >= 0)
		dev_set_mac_address(tp->netdev, &sa, NULL);

	netdev = tp->netdev;
	if (!netif_running(netdev))
		goto exit;

	set_bit(WORK_ENABLE, &tp->flags);
	if (netif_carrier_ok(netdev)) {
		mutex_lock(&tp->control);
		tp->rtl_ops.enable(tp);
		rtl_start_rx(tp);
		_rtl8152_set_rx_mode(netdev);
		mutex_unlock(&tp->control);
	}

	napi_enable(&tp->napi);
	tasklet_enable(&tp->tx_tl);
	netif_wake_queue(netdev);
	usb_submit_urb(tp->intr_urb, GFP_KERNEL);

	if (!list_empty(&tp->rx_done))
		napi_schedule(&tp->napi);

exit:
	rtnl_unlock();
	return 0;
}
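
/* Decide whether autosuspend should be postponed: stay awake while the
 * software and hardware link states disagree, while an nway renegotiation
 * is in flight, or while TX packets are still queued, since a suspended
 * device could miss the corresponding wake event.
 */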
static bool delay_autosuspend(struct r8152 *tp)
{
	bool sw_linking = !!netif_carrier_ok(tp->netdev);
	bool hw_linking = !!(rtl8152_get_speed(tp) & LINK_STATUS);

	/* A link change may have occurred that the driver has not handled
	 * yet. If the driver has disabled tx/rx and the hardware link is
	 * up, the device wouldn't wake up on receiving any packet.
	 */
	if (work_busy(&tp->schedule.work) || sw_linking != hw_linking)
		return true;

	/* If the link went down because of nway renegotiation, the device
	 * may miss the link change event and wouldn't wake on link-up.
	 */
	if (!sw_linking && tp->rtl_ops.in_nway(tp))
		return true;
	else if (!skb_queue_empty(&tp->tx_queue))
		return true;
	else
		return false;
}

static int rtl8152_runtime_resume(struct r8152 *tp)
{
	struct net_device *netdev = tp->netdev;

	if (netif_running(netdev) && netdev->flags & IFF_UP) {
		struct napi_struct *napi = &tp->napi;

		tp->rtl_ops.autosuspend_en(tp, false);
		napi_disable(napi);
		set_bit(WORK_ENABLE, &tp->flags);

		if (netif_carrier_ok(netdev)) {
			if (rtl8152_get_speed(tp) & LINK_STATUS) {
				rtl_start_rx(tp);
			} else {
				netif_carrier_off(netdev);
				tp->rtl_ops.disable(tp);
				netif_info(tp, link, netdev, "linking down\n");
			}
		}

		napi_enable(napi);
		clear_bit(SELECTIVE_SUSPEND, &tp->flags);
		smp_mb__after_atomic();

		if (!list_empty(&tp->rx_done))
			napi_schedule(&tp->napi);

		usb_submit_urb(tp->intr_urb, GFP_NOIO);
	} else {
		if (netdev->flags & IFF_UP)
			tp->rtl_ops.autosuspend_en(tp, false);

		clear_bit(SELECTIVE_SUSPEND, &tp->flags);
	}

	return 0;
}

static int rtl8152_system_resume(struct r8152 *tp)
{
	struct net_device *netdev = tp->netdev;

	netif_device_attach(netdev);

	if (netif_running(netdev) && (netdev->flags & IFF_UP)) {
		tp->rtl_ops.up(tp);
		netif_carrier_off(netdev);
		set_bit(WORK_ENABLE, &tp->flags);
		usb_submit_urb(tp->intr_urb, GFP_NOIO);
	}

	/* If the device is RTL8152_INACCESSIBLE here then we should do a
	 * reset. This is important because the usb_lock_device_for_reset()
	 * that happens as a result of usb_queue_reset_device() will silently
	 * fail if the device was suspended or if too much time passed.
	 *
	 * NOTE: The device is locked here so we can directly do the reset.
	 * We don't need usb_lock_device_for_reset() because that's just a
	 * wrapper over device_lock() and device_resume() (which calls us)
	 * does that for us.
	 */
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		usb_reset_device(tp->udev);

	return 0;
}

static int rtl8152_runtime_suspend(struct r8152 *tp)
{
	struct net_device *netdev = tp->netdev;
	int ret = 0;

	if (!tp->rtl_ops.autosuspend_en)
		return -EBUSY;

	set_bit(SELECTIVE_SUSPEND, &tp->flags);
	smp_mb__after_atomic();

	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
		u32 rcr = 0;

		if (netif_carrier_ok(netdev)) {
			u32 ocp_data;

			rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
			ocp_data = rcr & ~RCR_ACPT_ALL;
			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
			rxdy_gated_en(tp, true);
			ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
						 PLA_OOB_CTRL);
			if (!(ocp_data & RXFIFO_EMPTY)) {
				rxdy_gated_en(tp, false);
				ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR,
						rcr);
				clear_bit(SELECTIVE_SUSPEND, &tp->flags);
				smp_mb__after_atomic();
				ret = -EBUSY;
				goto out1;
			}
		}

		clear_bit(WORK_ENABLE, &tp->flags);
		usb_kill_urb(tp->intr_urb);

		tp->rtl_ops.autosuspend_en(tp, true);

		if (netif_carrier_ok(netdev)) {
			struct napi_struct *napi = &tp->napi;

			napi_disable(napi);
			rtl_stop_rx(tp);
			rxdy_gated_en(tp, false);
			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
			napi_enable(napi);
		}

		if (delay_autosuspend(tp)) {
			rtl8152_runtime_resume(tp);
			ret = -EBUSY;
		}
	}

out1:
	return ret;
}

static int rtl8152_system_suspend(struct r8152 *tp)
{
	struct net_device *netdev = tp->netdev;

	netif_device_detach(netdev);

	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
		struct napi_struct *napi = &tp->napi;

		clear_bit(WORK_ENABLE, &tp->flags);
		usb_kill_urb(tp->intr_urb);
		tasklet_disable(&tp->tx_tl);
		napi_disable(napi);
		cancel_delayed_work_sync(&tp->schedule);
		tp->rtl_ops.down(tp);
		napi_enable(napi);
		tasklet_enable(&tp->tx_tl);
	}

	/* If we're inaccessible here then some of the work that we did to
	 * get the adapter ready for suspend didn't work. Queue up a wakeup
	 * event so we can try again.
	 */
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		pm_wakeup_event(&tp->udev->dev, 0);

	return 0;
}

static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct r8152 *tp = usb_get_intfdata(intf);
	int ret;

	mutex_lock(&tp->control);

	if (PMSG_IS_AUTO(message))
		ret = rtl8152_runtime_suspend(tp);
	else
		ret = rtl8152_system_suspend(tp);

	mutex_unlock(&tp->control);

	return ret;
}

static int rtl8152_resume(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);
	int ret;

	mutex_lock(&tp->control);

	rtl_reset_ocp_base(tp);

	if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
		ret = rtl8152_runtime_resume(tp);
	else
		ret = rtl8152_system_resume(tp);

	mutex_unlock(&tp->control);

	return ret;
}

static int rtl8152_reset_resume(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);

	clear_bit(SELECTIVE_SUSPEND, &tp->flags);
	rtl_reset_ocp_base(tp);
	tp->rtl_ops.init(tp);
	queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
	set_ethernet_addr(tp, true);
	return rtl8152_resume(intf);
}

static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct r8152 *tp = netdev_priv(dev);

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	if (!rtl_can_wakeup(tp)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		mutex_lock(&tp->control);
		wol->supported = WAKE_ANY;
		wol->wolopts = __rtl_get_wol(tp);
		mutex_unlock(&tp->control);
	}

	usb_autopm_put_interface(tp->intf);
}

static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct r8152 *tp = netdev_priv(dev);
	int ret;

	if (!rtl_can_wakeup(tp))
		return -EOPNOTSUPP;

	if (wol->wolopts & ~WAKE_ANY)
		return -EINVAL;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out_set_wol;

	mutex_lock(&tp->control);

	__rtl_set_wol(tp, wol->wolopts);
	tp->saved_wolopts = wol->wolopts & WAKE_ANY;

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out_set_wol:
	return ret;
}

static u32 rtl8152_get_msglevel(struct net_device *dev)
{
	struct r8152 *tp = netdev_priv(dev);

	return tp->msg_enable;
}

static void rtl8152_set_msglevel(struct net_device *dev, u32 value)
{
	struct r8152 *tp = netdev_priv(dev);

	tp->msg_enable = value;
}

static void rtl8152_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct r8152 *tp = netdev_priv(netdev);

	strscpy(info->driver, MODULENAME, sizeof(info->driver));
	strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
	if (!IS_ERR_OR_NULL(tp->rtl_fw.fw))
		strscpy(info->fw_version, tp->rtl_fw.version,
			sizeof(info->fw_version));
}

static int rtl8152_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct r8152 *tp = netdev_priv(netdev);
	int ret;

	if (!tp->mii.mdio_read)
		return -EOPNOTSUPP;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	mutex_lock(&tp->control);

	mii_ethtool_get_link_ksettings(&tp->mii, cmd);

	linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
			 cmd->link_modes.supported, tp->support_2500full);

	if (tp->support_2500full) {
		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
				 cmd->link_modes.advertising,
				 ocp_reg_read(tp, OCP_10GBT_CTRL) &
				 MDIO_AN_10GBT_CTRL_ADV2_5G);
		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
				 cmd->link_modes.lp_advertising,
				 ocp_reg_read(tp, OCP_10GBT_STAT) &
				 MDIO_AN_10GBT_STAT_LP2_5G);

		if (is_speed_2500(rtl8152_get_speed(tp)))
			cmd->base.speed = SPEED_2500;
	}

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}
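
/* Translate the ethtool advertising bitmap into the driver's internal
 * RTL_ADVERTISED_* mask before programming the PHY via
 * rtl8152_set_speed().
 */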
static int
rtl8152_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *cmd)
{
	struct r8152 *tp = netdev_priv(dev);
	u32 advertising = 0;
	int ret;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	if (test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_10_HALF;

	if (test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_10_FULL;

	if (test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_100_HALF;

	if (test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_100_FULL;

	if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_1000_HALF;

	if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_1000_FULL;

	if (test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_2500_FULL;

	mutex_lock(&tp->control);

	ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed,
				cmd->base.duplex, advertising);
	if (!ret) {
		tp->autoneg = cmd->base.autoneg;
		tp->speed = cmd->base.speed;
		tp->duplex = cmd->base.duplex;
		tp->advertising = advertising;
	}

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}

static const char rtl8152_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"rx_unicast",
	"rx_broadcast",
	"rx_multicast",
	"tx_aborted",
	"tx_underrun",
};

static int rtl8152_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rtl8152_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void rtl8152_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct r8152 *tp = netdev_priv(dev);
	struct tally_counter tally;

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);

	usb_autopm_put_interface(tp->intf);

	data[0] = le64_to_cpu(tally.tx_packets);
	data[1] = le64_to_cpu(tally.rx_packets);
	data[2] = le64_to_cpu(tally.tx_errors);
	data[3] = le32_to_cpu(tally.rx_errors);
	data[4] = le16_to_cpu(tally.rx_missed);
	data[5] = le16_to_cpu(tally.align_errors);
	data[6] = le32_to_cpu(tally.tx_one_collision);
	data[7] = le32_to_cpu(tally.tx_multi_collision);
	data[8] = le64_to_cpu(tally.rx_unicast);
	data[9] = le64_to_cpu(tally.rx_broadcast);
	data[10] = le32_to_cpu(tally.rx_multicast);
	data[11] = le16_to_cpu(tally.tx_aborted);
	data[12] = le16_to_cpu(tally.tx_underrun);
}

static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
		break;
	}
}

static int r8152_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
	u16 val;

	val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	mii_eee_cap1_mod_linkmode_t(eee->supported, val);

	val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	mii_eee_cap1_mod_linkmode_t(eee->advertised, val);

	val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
	mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);

	eee->eee_enabled = tp->eee_en;

	linkmode_and(common, eee->advertised, eee->lp_advertised);
	eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);

	return 0;
}
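
/* EEE accessors: the RTL8152 reaches its EEE registers through MMD
 * (r8152_mmd_read() above), while the r8153_get_eee() variant below uses
 * direct OCP reads; both feed the same ethtool_keee layout.
 */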
static int r8152_set_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
	u16 val = linkmode_to_mii_eee_cap1_t(eee->advertised);

	tp->eee_en = eee->eee_enabled;
	tp->eee_adv = val;

	rtl_eee_enable(tp, tp->eee_en);

	return 0;
}

static int r8153_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
	u16 val;

	val = ocp_reg_read(tp, OCP_EEE_ABLE);
	mii_eee_cap1_mod_linkmode_t(eee->supported, val);

	val = ocp_reg_read(tp, OCP_EEE_ADV);
	mii_eee_cap1_mod_linkmode_t(eee->advertised, val);

	val = ocp_reg_read(tp, OCP_EEE_LPABLE);
	mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);

	eee->eee_enabled = tp->eee_en;

	linkmode_and(common, eee->advertised, eee->lp_advertised);
	eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);

	return 0;
}

static int
rtl_ethtool_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct r8152 *tp = netdev_priv(net);
	int ret;

	if (!tp->rtl_ops.eee_get) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	mutex_lock(&tp->control);

	ret = tp->rtl_ops.eee_get(tp, edata);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}

static int
rtl_ethtool_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct r8152 *tp = netdev_priv(net);
	int ret;

	if (!tp->rtl_ops.eee_set) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	mutex_lock(&tp->control);

	ret = tp->rtl_ops.eee_set(tp, edata);
	if (!ret)
		ret = mii_nway_restart(&tp->mii);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}

static int rtl8152_nway_reset(struct net_device *dev)
{
	struct r8152 *tp = netdev_priv(dev);
	int ret;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	mutex_lock(&tp->control);

	ret = mii_nway_restart(&tp->mii);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}

static int rtl8152_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coalesce,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct r8152 *tp = netdev_priv(netdev);

	switch (tp->version) {
	case RTL_VER_01:
	case RTL_VER_02:
	case RTL_VER_07:
		return -EOPNOTSUPP;
	default:
		break;
	}

	coalesce->rx_coalesce_usecs = tp->coalesce;

	return 0;
}

static int rtl8152_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coalesce,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct r8152 *tp = netdev_priv(netdev);
	int ret;

	switch (tp->version) {
	case RTL_VER_01:
	case RTL_VER_02:
	case RTL_VER_07:
		return -EOPNOTSUPP;
	default:
		break;
	}

	if (coalesce->rx_coalesce_usecs > COALESCE_SLOW)
		return -EINVAL;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&tp->control);

	if (tp->coalesce != coalesce->rx_coalesce_usecs) {
		tp->coalesce = coalesce->rx_coalesce_usecs;

		if (netif_running(netdev) && netif_carrier_ok(netdev)) {
			netif_stop_queue(netdev);
			napi_disable(&tp->napi);
			tp->rtl_ops.disable(tp);
			tp->rtl_ops.enable(tp);
			rtl_start_rx(tp);
			clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
			_rtl8152_set_rx_mode(netdev);
			napi_enable(&tp->napi);
			netif_wake_queue(netdev);
		}
	}

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

	return ret;
}

static int rtl8152_get_tunable(struct net_device *netdev,
			       const struct ethtool_tunable *tunable, void *d)
{
	struct r8152 *tp = netdev_priv(netdev);

	switch (tunable->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)d = tp->rx_copybreak;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
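
/* rx_copybreak tunable: receive packets shorter than this threshold are
 * copied into a freshly allocated skb instead of consuming a full RX
 * buffer (the usual copybreak semantics).
 */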
static int rtl8152_set_tunable(struct net_device *netdev,
			       const struct ethtool_tunable *tunable,
			       const void *d)
{
	struct r8152 *tp = netdev_priv(netdev);
	u32 val;

	switch (tunable->id) {
	case ETHTOOL_RX_COPYBREAK:
		val = *(u32 *)d;
		if (val < ETH_ZLEN) {
			netif_err(tp, rx_err, netdev,
				  "Invalid rx copy break value\n");
			return -EINVAL;
		}

		if (tp->rx_copybreak != val) {
			if (netdev->flags & IFF_UP) {
				mutex_lock(&tp->control);
				napi_disable(&tp->napi);
				tp->rx_copybreak = val;
				napi_enable(&tp->napi);
				mutex_unlock(&tp->control);
			} else {
				tp->rx_copybreak = val;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void rtl8152_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct r8152 *tp = netdev_priv(netdev);

	ring->rx_max_pending = RTL8152_RX_MAX_PENDING;
	ring->rx_pending = tp->rx_pending;
}

static int rtl8152_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct r8152 *tp = netdev_priv(netdev);

	if (ring->rx_pending < (RTL8152_MAX_RX * 2))
		return -EINVAL;

	if (tp->rx_pending != ring->rx_pending) {
		if (netdev->flags & IFF_UP) {
			mutex_lock(&tp->control);
			napi_disable(&tp->napi);
			tp->rx_pending = ring->rx_pending;
			napi_enable(&tp->napi);
			mutex_unlock(&tp->control);
		} else {
			tp->rx_pending = ring->rx_pending;
		}
	}

	return 0;
}

static void rtl8152_get_pauseparam(struct net_device *netdev,
				   struct ethtool_pauseparam *pause)
{
	struct r8152 *tp = netdev_priv(netdev);
	u16 bmcr, lcladv, rmtadv;
	u8 cap;

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	mutex_lock(&tp->control);

	bmcr = r8152_mdio_read(tp, MII_BMCR);
	lcladv = r8152_mdio_read(tp, MII_ADVERTISE);
	rmtadv = r8152_mdio_read(tp, MII_LPA);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

	if (!(bmcr & BMCR_ANENABLE)) {
		pause->autoneg = 0;
		pause->rx_pause = 0;
		pause->tx_pause = 0;
		return;
	}

	pause->autoneg = 1;

	cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	if (cap & FLOW_CTRL_RX)
		pause->rx_pause = 1;

	if (cap & FLOW_CTRL_TX)
		pause->tx_pause = 1;
}

static int rtl8152_set_pauseparam(struct net_device *netdev,
				  struct ethtool_pauseparam *pause)
{
	struct r8152 *tp = netdev_priv(netdev);
	u16 old, new1;
	u8 cap = 0;
	int ret;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&tp->control);

	if (pause->autoneg &&
	    !(r8152_mdio_read(tp, MII_BMCR) & BMCR_ANENABLE)) {
		ret = -EINVAL;
		goto out;
	}

	if (pause->rx_pause)
		cap |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		cap |= FLOW_CTRL_TX;

	old = r8152_mdio_read(tp, MII_ADVERTISE);
	new1 = (old & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
	       mii_advertise_flowctrl(cap);
	if (old != new1)
		r8152_mdio_write(tp, MII_ADVERTISE, new1);

out:
	mutex_unlock(&tp->control);
	usb_autopm_put_interface(tp->intf);

	return ret;
}

static const struct ethtool_ops ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo = rtl8152_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.nway_reset = rtl8152_nway_reset,
	.get_msglevel = rtl8152_get_msglevel,
	.set_msglevel = rtl8152_set_msglevel,
	.get_wol = rtl8152_get_wol,
	.set_wol = rtl8152_set_wol,
	.get_strings = rtl8152_get_strings,
	.get_sset_count = rtl8152_get_sset_count,
	.get_ethtool_stats = rtl8152_get_ethtool_stats,
	.get_coalesce = rtl8152_get_coalesce,
	.set_coalesce = rtl8152_set_coalesce,
	.get_eee = rtl_ethtool_get_eee,
	.set_eee = rtl_ethtool_set_eee,
	.get_link_ksettings = rtl8152_get_link_ksettings,
	.set_link_ksettings = rtl8152_set_link_ksettings,
rtl8152_set_link_ksettings, .get_tunable = rtl8152_get_tunable, .set_tunable = rtl8152_set_tunable, .get_ringparam = rtl8152_get_ringparam, .set_ringparam = rtl8152_set_ringparam, .get_pauseparam = rtl8152_get_pauseparam, .set_pauseparam = rtl8152_set_pauseparam, }; static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct r8152 *tp = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(rq); int res; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; res = usb_autopm_get_interface(tp->intf); if (res < 0) goto out; switch (cmd) { case SIOCGMIIPHY: data->phy_id = R8152_PHY_ID; /* Internal PHY */ break; case SIOCGMIIREG: mutex_lock(&tp->control); data->val_out = r8152_mdio_read(tp, data->reg_num); mutex_unlock(&tp->control); break; case SIOCSMIIREG: if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } mutex_lock(&tp->control); r8152_mdio_write(tp, data->reg_num, data->val_in); mutex_unlock(&tp->control); break; default: res = -EOPNOTSUPP; } usb_autopm_put_interface(tp->intf); out: return res; } static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) { struct r8152 *tp = netdev_priv(dev); int ret; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: WRITE_ONCE(dev->mtu, new_mtu); return 0; default: break; } ret = usb_autopm_get_interface(tp->intf); if (ret < 0) return ret; mutex_lock(&tp->control); WRITE_ONCE(dev->mtu, new_mtu); if (netif_running(dev)) { if (tp->rtl_ops.change_mtu) tp->rtl_ops.change_mtu(tp); if (netif_carrier_ok(dev)) { netif_stop_queue(dev); napi_disable(&tp->napi); tasklet_disable(&tp->tx_tl); tp->rtl_ops.disable(tp); tp->rtl_ops.enable(tp); rtl_start_rx(tp); tasklet_enable(&tp->tx_tl); napi_enable(&tp->napi); rtl8152_set_rx_mode(dev); netif_wake_queue(dev); } } mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); return ret; } static const struct net_device_ops rtl8152_netdev_ops = { .ndo_open = rtl8152_open, .ndo_stop = rtl8152_close, .ndo_eth_ioctl = rtl8152_ioctl, .ndo_start_xmit = rtl8152_start_xmit, .ndo_tx_timeout = rtl8152_tx_timeout, .ndo_set_features = rtl8152_set_features, .ndo_set_rx_mode = rtl8152_set_rx_mode, .ndo_set_mac_address = rtl8152_set_mac_address, .ndo_change_mtu = rtl8152_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_features_check = rtl8152_features_check, }; static void rtl8152_unload(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (tp->version != RTL_VER_01) r8152_power_cut_en(tp, true); } static void rtl8153_unload(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_power_cut_en(tp, false); } static void rtl8153b_unload(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_power_cut_en(tp, false); } static int rtl_ops_init(struct r8152 *tp) { struct rtl_ops *ops = &tp->rtl_ops; int ret = 0; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: ops->init = r8152b_init; ops->enable = rtl8152_enable; ops->disable = rtl8152_disable; ops->up = rtl8152_up; ops->down = rtl8152_down; ops->unload = rtl8152_unload; ops->eee_get = r8152_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8152_in_nway; ops->hw_phy_cfg = r8152b_hw_phy_cfg; ops->autosuspend_en = rtl_runtime_suspend_enable; tp->rx_buf_sz = 16 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_100TX; break; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ops->init = r8153_init; ops->enable = rtl8153_enable; ops->disable = rtl8153_disable; ops->up = rtl8153_up; 
ops->down = rtl8153_down; ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; ops->change_mtu = rtl8153_change_mtu; if (tp->udev->speed < USB_SPEED_SUPER) tp->rx_buf_sz = 16 * 1024; else tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; case RTL_VER_08: case RTL_VER_09: ops->init = r8153b_init; ops->enable = rtl8153_enable; ops->disable = rtl8153_disable; ops->up = rtl8153b_up; ops->down = rtl8153b_down; ops->unload = rtl8153b_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153b_hw_phy_cfg; ops->autosuspend_en = rtl8153b_runtime_enable; ops->change_mtu = rtl8153_change_mtu; tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; case RTL_VER_11: tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; fallthrough; case RTL_VER_10: ops->init = r8156_init; ops->enable = rtl8156_enable; ops->disable = rtl8156_disable; ops->up = rtl8156_up; ops->down = rtl8156_down; ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8156_hw_phy_cfg; ops->autosuspend_en = rtl8156_runtime_enable; ops->change_mtu = rtl8156_change_mtu; tp->rx_buf_sz = 48 * 1024; tp->support_2500full = 1; break; case RTL_VER_12: case RTL_VER_13: tp->support_2500full = 1; fallthrough; case RTL_VER_15: tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; ops->init = r8156b_init; ops->enable = rtl8156b_enable; ops->disable = rtl8153_disable; ops->up = rtl8156_up; ops->down = rtl8156_down; ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8156b_hw_phy_cfg; ops->autosuspend_en = rtl8156_runtime_enable; ops->change_mtu = rtl8156_change_mtu; tp->rx_buf_sz = 48 * 1024; break; case RTL_VER_14: ops->init = r8153c_init; ops->enable = rtl8153_enable; ops->disable = rtl8153_disable; ops->up = rtl8153c_up; ops->down = rtl8153b_down; ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153c_hw_phy_cfg; ops->autosuspend_en = rtl8153c_runtime_enable; ops->change_mtu = rtl8153c_change_mtu; tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; default: ret = -ENODEV; dev_err(&tp->intf->dev, "Unknown Device\n"); break; } return ret; } #define FIRMWARE_8153A_2 "rtl_nic/rtl8153a-2.fw" #define FIRMWARE_8153A_3 "rtl_nic/rtl8153a-3.fw" #define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw" #define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw" #define FIRMWARE_8153C_1 "rtl_nic/rtl8153c-1.fw" #define FIRMWARE_8156A_2 "rtl_nic/rtl8156a-2.fw" #define FIRMWARE_8156B_2 "rtl_nic/rtl8156b-2.fw" MODULE_FIRMWARE(FIRMWARE_8153A_2); MODULE_FIRMWARE(FIRMWARE_8153A_3); MODULE_FIRMWARE(FIRMWARE_8153A_4); MODULE_FIRMWARE(FIRMWARE_8153B_2); MODULE_FIRMWARE(FIRMWARE_8153C_1); MODULE_FIRMWARE(FIRMWARE_8156A_2); MODULE_FIRMWARE(FIRMWARE_8156B_2); static int rtl_fw_init(struct r8152 *tp) { struct rtl_fw *rtl_fw = &tp->rtl_fw; switch (tp->version) { case RTL_VER_04: rtl_fw->fw_name = FIRMWARE_8153A_2; rtl_fw->pre_fw = r8153_pre_firmware_1; rtl_fw->post_fw = r8153_post_firmware_1; break; case RTL_VER_05: rtl_fw->fw_name = FIRMWARE_8153A_3; 
rtl_fw->pre_fw = r8153_pre_firmware_2; rtl_fw->post_fw = r8153_post_firmware_2; break; case RTL_VER_06: rtl_fw->fw_name = FIRMWARE_8153A_4; rtl_fw->post_fw = r8153_post_firmware_3; break; case RTL_VER_09: rtl_fw->fw_name = FIRMWARE_8153B_2; rtl_fw->pre_fw = r8153b_pre_firmware_1; rtl_fw->post_fw = r8153b_post_firmware_1; break; case RTL_VER_11: rtl_fw->fw_name = FIRMWARE_8156A_2; rtl_fw->post_fw = r8156a_post_firmware_1; break; case RTL_VER_13: case RTL_VER_15: rtl_fw->fw_name = FIRMWARE_8156B_2; break; case RTL_VER_14: rtl_fw->fw_name = FIRMWARE_8153C_1; rtl_fw->pre_fw = r8153b_pre_firmware_1; rtl_fw->post_fw = r8153c_post_firmware_1; break; default: break; } return 0; } static u8 __rtl_get_hw_ver(struct usb_device *udev) { u32 ocp_data = 0; __le32 *tmp; u8 version; int ret; int i; tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return 0; /* Retry up to 3 times in case there is a transitory error. We do this * since retrying a read of the version is always safe and this * function doesn't take advantage of r8152_control_msg(). */ for (i = 0; i < 3; i++) { ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), USB_CTRL_GET_TIMEOUT); if (ret > 0) { ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK; break; } } if (i != 0 && ret > 0) dev_warn(&udev->dev, "Needed %d retries to read version\n", i); kfree(tmp); switch (ocp_data) { case 0x4c00: version = RTL_VER_01; break; case 0x4c10: version = RTL_VER_02; break; case 0x5c00: version = RTL_VER_03; break; case 0x5c10: version = RTL_VER_04; break; case 0x5c20: version = RTL_VER_05; break; case 0x5c30: version = RTL_VER_06; break; case 0x4800: version = RTL_VER_07; break; case 0x6000: version = RTL_VER_08; break; case 0x6010: version = RTL_VER_09; break; case 0x7010: version = RTL_TEST_01; break; case 0x7020: version = RTL_VER_10; break; case 0x7030: version = RTL_VER_11; break; case 0x7400: version = RTL_VER_12; break; case 0x7410: version = RTL_VER_13; break; case 0x6400: version = RTL_VER_14; break; case 0x7420: version = RTL_VER_15; break; default: version = RTL_VER_UNKNOWN; dev_info(&udev->dev, "Unknown version 0x%04x\n", ocp_data); break; } return version; } u8 rtl8152_get_version(struct usb_interface *intf) { u8 version; version = __rtl_get_hw_ver(interface_to_usbdev(intf)); dev_dbg(&intf->dev, "Detected version 0x%04x\n", version); return version; } EXPORT_SYMBOL_GPL(rtl8152_get_version); static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev) { int parent_vendor_id = le16_to_cpu(udev->parent->descriptor.idVendor); int product_id = le16_to_cpu(udev->descriptor.idProduct); int vendor_id = le16_to_cpu(udev->descriptor.idVendor); if (vendor_id == VENDOR_ID_LENOVO) { switch (product_id) { case DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB: case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK: case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2: case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3: case DEVICE_ID_THINKPAD_USB_C_DONGLE: return 1; } } else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) { switch (product_id) { case 0x8153: return 1; } } return 0; } static int rtl8152_probe_once(struct usb_interface *intf, const struct usb_device_id *id, u8 version) { struct usb_device *udev = interface_to_usbdev(intf); struct r8152 *tp; struct net_device *netdev; int ret; usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { dev_err(&intf->dev, "Out of memory\n"); return -ENOMEM; } 
SET_NETDEV_DEV(netdev, &intf->dev); tp = netdev_priv(netdev); tp->msg_enable = 0x7FFF; tp->udev = udev; tp->netdev = netdev; tp->intf = intf; tp->version = version; tp->pipe_ctrl_in = usb_rcvctrlpipe(udev, 0); tp->pipe_ctrl_out = usb_sndctrlpipe(udev, 0); tp->pipe_in = usb_rcvbulkpipe(udev, 1); tp->pipe_out = usb_sndbulkpipe(udev, 2); tp->pipe_intr = usb_rcvintpipe(udev, 3); switch (version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: tp->mii.supports_gmii = 0; break; default: tp->mii.supports_gmii = 1; break; } ret = rtl_ops_init(tp); if (ret) goto out; rtl_fw_init(tp); mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t); tasklet_setup(&tp->tx_tl, bottom_half); tasklet_disable(&tp->tx_tl); netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6; if (tp->version == RTL_VER_01) { netdev->features &= ~NETIF_F_RXCSUM; netdev->hw_features &= ~NETIF_F_RXCSUM; } tp->lenovo_macpassthru = rtl8152_supports_lenovo_macpassthru(udev); if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) { dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); tp->dell_tb_rx_agg_bug = 1; } netdev->ethtool_ops = &ops; netif_set_tso_max_size(netdev, RTL_LIMITED_TSO_SIZE); /* MTU range: 68 - 1500 or 9194 */ netdev->min_mtu = ETH_MIN_MTU; switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: netdev->max_mtu = size_to_mtu(9 * 1024); break; case RTL_VER_10: case RTL_VER_11: netdev->max_mtu = size_to_mtu(15 * 1024); break; case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: netdev->max_mtu = size_to_mtu(16 * 1024); break; case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: default: netdev->max_mtu = ETH_DATA_LEN; break; } tp->mii.dev = netdev; tp->mii.mdio_read = read_mii_word; tp->mii.mdio_write = write_mii_word; tp->mii.phy_id_mask = 0x3f; tp->mii.reg_num_mask = 0x1f; tp->mii.phy_id = R8152_PHY_ID; tp->autoneg = AUTONEG_ENABLE; tp->speed = SPEED_100; tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; if (tp->mii.supports_gmii) { if (tp->support_2500full && tp->udev->speed >= USB_SPEED_SUPER) { tp->speed = SPEED_2500; tp->advertising |= RTL_ADVERTISED_2500_FULL; } else { tp->speed = SPEED_1000; } tp->advertising |= RTL_ADVERTISED_1000_FULL; } tp->duplex = DUPLEX_FULL; tp->rx_copybreak = RTL8152_RXFG_HEADSZ; tp->rx_pending = 10 * RTL8152_MAX_RX; intf->needs_remote_wakeup = 1; if (!rtl_can_wakeup(tp)) __rtl_set_wol(tp, 0); else tp->saved_wolopts = __rtl_get_wol(tp); tp->rtl_ops.init(tp); #if IS_BUILTIN(CONFIG_USB_RTL8152) /* Retry in case request_firmware() is not ready yet. 
*/ tp->rtl_fw.retry = true; #endif queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp, false); usb_set_intfdata(intf, tp); netif_napi_add(netdev, &tp->napi, r8152_poll); ret = register_netdev(netdev); if (ret != 0) { dev_err(&intf->dev, "couldn't register the device\n"); goto out1; } if (tp->saved_wolopts) device_set_wakeup_enable(&udev->dev, true); else device_set_wakeup_enable(&udev->dev, false); /* If we saw a control transfer error while probing then we may * want to try probe() again. Consider this an error. */ if (test_bit(PROBE_SHOULD_RETRY, &tp->flags)) goto out2; set_bit(PROBED_WITH_NO_ERRORS, &tp->flags); netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION); return 0; out2: unregister_netdev(netdev); out1: tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); if (tp->rtl_ops.unload) tp->rtl_ops.unload(tp); rtl8152_release_firmware(tp); usb_set_intfdata(intf, NULL); out: if (test_bit(PROBE_SHOULD_RETRY, &tp->flags)) ret = -EAGAIN; free_netdev(netdev); return ret; } #define RTL8152_PROBE_TRIES 3 static int rtl8152_probe(struct usb_interface *intf, const struct usb_device_id *id) { u8 version; int ret; int i; if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) return -ENODEV; if (!rtl_check_vendor_ok(intf)) return -ENODEV; version = rtl8152_get_version(intf); if (version == RTL_VER_UNKNOWN) return -ENODEV; for (i = 0; i < RTL8152_PROBE_TRIES; i++) { ret = rtl8152_probe_once(intf, id, version); if (ret != -EAGAIN) break; } if (ret == -EAGAIN) { dev_err(&intf->dev, "r8152 failed probe after %d tries; giving up\n", i); return -ENODEV; } return ret; } static void rtl8152_disconnect(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (tp) { rtl_set_unplug(tp); unregister_netdev(tp->netdev); tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); if (tp->rtl_ops.unload) tp->rtl_ops.unload(tp); rtl8152_release_firmware(tp); free_netdev(tp->netdev); } } /* table of devices that work with this driver */ static const struct usb_device_id rtl8152_table[] = { /* Realtek */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8050) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8053) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8152) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8153) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8155) }, { USB_DEVICE(VENDOR_ID_REALTEK, 0x8156) }, /* Microsoft */ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab) }, { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6) }, { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) }, { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) }, { USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3098) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) }, { USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) }, { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) }, { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) }, {} }; MODULE_DEVICE_TABLE(usb, rtl8152_table); static struct usb_driver rtl8152_driver = { .name = MODULENAME, .id_table = rtl8152_table, .probe = rtl8152_probe, .disconnect = rtl8152_disconnect, .suspend = rtl8152_suspend, .resume = rtl8152_resume, 
	.reset_resume =	rtl8152_reset_resume,
	.pre_reset =	rtl8152_pre_reset,
	.post_reset =	rtl8152_post_reset,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};

static int rtl8152_cfgselector_choose_configuration(struct usb_device *udev)
{
	struct usb_host_config *c;
	int i, num_configs;

	/* Switch the device to vendor mode, if and only if the vendor mode
	 * driver supports it.
	 */
	if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
		return -ENODEV;

	/* The vendor mode is not always config #1, so scan for it. */
	c = udev->config;
	num_configs = udev->descriptor.bNumConfigurations;
	for (i = 0; i < num_configs; (i++, c++)) {
		struct usb_interface_descriptor *desc = NULL;

		if (!c->desc.bNumInterfaces)
			continue;
		desc = &c->intf_cache[0]->altsetting->desc;
		if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC)
			break;
	}

	if (i == num_configs)
		return -ENODEV;

	return c->desc.bConfigurationValue;
}

static struct usb_device_driver rtl8152_cfgselector_driver = {
	.name =		MODULENAME "-cfgselector",
	.choose_configuration = rtl8152_cfgselector_choose_configuration,
	.id_table =	rtl8152_table,
	.generic_subclass = 1,
	.supports_autosuspend = 1,
};

static int __init rtl8152_driver_init(void)
{
	int ret;

	ret = usb_register_device_driver(&rtl8152_cfgselector_driver,
					 THIS_MODULE);
	if (ret)
		return ret;
	return usb_register(&rtl8152_driver);
}

static void __exit rtl8152_driver_exit(void)
{
	usb_deregister(&rtl8152_driver);
	usb_deregister_device_driver(&rtl8152_cfgselector_driver);
}

module_init(rtl8152_driver_init);
module_exit(rtl8152_driver_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
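/*
 * A minimal, self-contained userspace sketch (not part of the driver
 * above) of the retry-on-EAGAIN idiom that rtl8152_probe() applies to
 * rtl8152_probe_once(): retry a bounded number of times while the
 * failure looks transient, then translate a persistent -EAGAIN into
 * -ENODEV.  fake_probe() and its failure counter are hypothetical
 * stand-ins used only for illustration.
 */
#include <errno.h>
#include <stdio.h>

#define PROBE_TRIES 3	/* mirrors RTL8152_PROBE_TRIES above */

static int fake_probe(void)
{
	static int transient_failures = 2;

	/* Pretend the first two attempts hit a transient error. */
	if (transient_failures-- > 0)
		return -EAGAIN;
	return 0;
}

int main(void)
{
	int ret = -EAGAIN;
	int i;

	for (i = 0; i < PROBE_TRIES; i++) {
		ret = fake_probe();
		if (ret != -EAGAIN)
			break;
	}
	if (ret == -EAGAIN)
		ret = -ENODEV;	/* give up, as rtl8152_probe() does */

	printf("probe result after %d attempt(s): %d\n",
	       i < PROBE_TRIES ? i + 1 : PROBE_TRIES, ret);
	return 0;
}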
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_iter.h"
#include "error.h"
#include "journal.h"
#include "recovery_passes.h"
#include "super.h"
#include "thread_with_file.h"

#define FSCK_ERR_RATELIMIT_NR	10

bool bch2_inconsistent_error(struct bch_fs *c)
{
	set_bit(BCH_FS_error, &c->flags);

	switch (c->opts.errors) {
	case BCH_ON_ERROR_continue:
		return false;
	case BCH_ON_ERROR_fix_safe:
	case BCH_ON_ERROR_ro:
		if (bch2_fs_emergency_read_only(c))
			bch_err(c, "inconsistency detected - emergency read only at journal seq %llu",
				journal_cur_seq(&c->journal));
		return true;
	case BCH_ON_ERROR_panic:
		panic(bch2_fmt(c, "panic after error"));
		return true;
	default:
		BUG();
	}
}

int bch2_topology_error(struct bch_fs *c)
{
	set_bit(BCH_FS_topology_error, &c->flags);
	if (!test_bit(BCH_FS_fsck_running, &c->flags)) {
		bch2_inconsistent_error(c);
		return -BCH_ERR_btree_need_topology_repair;
	} else {
		return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?:
			-BCH_ERR_btree_node_read_validate_error;
	}
}

void bch2_fatal_error(struct bch_fs *c)
{
	if (bch2_fs_emergency_read_only(c))
		bch_err(c, "fatal error - emergency read only");
}

void bch2_io_error_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, io_error_work);
	struct bch_fs *c = ca->fs;
	bool dev;

	down_write(&c->state_lock);
	dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_ro,
				     BCH_FORCE_IF_DEGRADED);
	if (dev
	    ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro,
				   BCH_FORCE_IF_DEGRADED)
	    : bch2_fs_emergency_read_only(c))
		bch_err(ca, "too many IO errors, setting %s RO", dev ?
"device" : "filesystem"); up_write(&c->state_lock); } void bch2_io_error(struct bch_dev *ca, enum bch_member_error_type type) { atomic64_inc(&ca->errors[type]); //queue_work(system_long_wq, &ca->io_error_work); } enum ask_yn { YN_NO, YN_YES, YN_ALLNO, YN_ALLYES, }; static enum ask_yn parse_yn_response(char *buf) { buf = strim(buf); if (strlen(buf) == 1) switch (buf[0]) { case 'n': return YN_NO; case 'y': return YN_YES; case 'N': return YN_ALLNO; case 'Y': return YN_ALLYES; } return -1; } #ifdef __KERNEL__ static enum ask_yn bch2_fsck_ask_yn(struct bch_fs *c, struct btree_trans *trans) { struct stdio_redirect *stdio = c->stdio; if (c->stdio_filter && c->stdio_filter != current) stdio = NULL; if (!stdio) return YN_NO; if (trans) bch2_trans_unlock(trans); unsigned long unlock_long_at = trans ? jiffies + HZ * 2 : 0; darray_char line = {}; int ret; do { unsigned long t; bch2_print(c, " (y,n, or Y,N for all errors of this type) "); rewait: t = unlock_long_at ? max_t(long, unlock_long_at - jiffies, 0) : MAX_SCHEDULE_TIMEOUT; int r = bch2_stdio_redirect_readline_timeout(stdio, &line, t); if (r == -ETIME) { bch2_trans_unlock_long(trans); unlock_long_at = 0; goto rewait; } if (r < 0) { ret = YN_NO; break; } darray_last(line) = '\0'; } while ((ret = parse_yn_response(line.data)) < 0); darray_exit(&line); return ret; } #else #include "tools-util.h" static enum ask_yn bch2_fsck_ask_yn(struct bch_fs *c, struct btree_trans *trans) { char *buf = NULL; size_t buflen = 0; int ret; do { fputs(" (y,n, or Y,N for all errors of this type) ", stdout); fflush(stdout); if (getline(&buf, &buflen, stdin) < 0) die("error reading from standard input"); } while ((ret = parse_yn_response(buf)) < 0); free(buf); return ret; } #endif static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt) { struct fsck_err_state *s; if (!test_bit(BCH_FS_fsck_running, &c->flags)) return NULL; list_for_each_entry(s, &c->fsck_error_msgs, list) if (s->fmt == fmt) { /* * move it to the head of the list: repeated fsck errors * are common */ list_move(&s->list, &c->fsck_error_msgs); return s; } s = kzalloc(sizeof(*s), GFP_NOFS); if (!s) { if (!c->fsck_alloc_msgs_err) bch_err(c, "kmalloc err, cannot ratelimit fsck errs"); c->fsck_alloc_msgs_err = true; return NULL; } INIT_LIST_HEAD(&s->list); s->fmt = fmt; list_add(&s->list, &c->fsck_error_msgs); return s; } /* s/fix?/fixing/ s/recreate?/recreating/ */ static void prt_actioning(struct printbuf *out, const char *action) { unsigned len = strlen(action); BUG_ON(action[len - 1] != '?'); --len; if (action[len - 1] == 'e') --len; prt_bytes(out, action, len); prt_str(out, "ing"); } static const u8 fsck_flags_extra[] = { #define x(t, n, flags) [BCH_FSCK_ERR_##t] = flags, BCH_SB_ERRS() #undef x }; int __bch2_fsck_err(struct bch_fs *c, struct btree_trans *trans, enum bch_fsck_flags flags, enum bch_sb_error_id err, const char *fmt, ...) { struct fsck_err_state *s = NULL; va_list args; bool print = true, suppressing = false, inconsistent = false; struct printbuf buf = PRINTBUF, *out = &buf; int ret = -BCH_ERR_fsck_ignore; const char *action_orig = "fix?", *action = action_orig; might_sleep(); if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra))) flags |= fsck_flags_extra[err]; if (!c) c = trans->c; /* * Ugly: if there's a transaction in the current task it has to be * passed in to unlock if we prompt for user input. * * But, plumbing a transaction and transaction restarts into * bkey_validate() is problematic. 
* * So: * - make all bkey errors AUTOFIX, they're simple anyways (we just * delete the key) * - and we don't need to warn if we're not prompting */ WARN_ON((flags & FSCK_CAN_FIX) && !(flags & FSCK_AUTOFIX) && !trans && bch2_current_has_btree_trans(c)); if ((flags & FSCK_CAN_FIX) && test_bit(err, c->sb.errors_silent)) return -BCH_ERR_fsck_fix; bch2_sb_error_count(c, err); va_start(args, fmt); prt_vprintf(out, fmt, args); va_end(args); /* Custom fix/continue/recreate/etc.? */ if (out->buf[out->pos - 1] == '?') { const char *p = strrchr(out->buf, ','); if (p) { out->pos = p - out->buf; action = kstrdup(p + 2, GFP_KERNEL); if (!action) { ret = -ENOMEM; goto err; } } } mutex_lock(&c->fsck_error_msgs_lock); s = fsck_err_get(c, fmt); if (s) { /* * We may be called multiple times for the same error on * transaction restart - this memoizes instead of asking the user * multiple times for the same error: */ if (s->last_msg && !strcmp(buf.buf, s->last_msg)) { ret = s->ret; mutex_unlock(&c->fsck_error_msgs_lock); goto err; } kfree(s->last_msg); s->last_msg = kstrdup(buf.buf, GFP_KERNEL); if (!s->last_msg) { mutex_unlock(&c->fsck_error_msgs_lock); ret = -ENOMEM; goto err; } if (c->opts.ratelimit_errors && !(flags & FSCK_NO_RATELIMIT) && s->nr >= FSCK_ERR_RATELIMIT_NR) { if (s->nr == FSCK_ERR_RATELIMIT_NR) suppressing = true; else print = false; } s->nr++; } #ifdef BCACHEFS_LOG_PREFIX if (!strncmp(fmt, "bcachefs:", 9)) prt_printf(out, bch2_log_msg(c, "")); #endif if ((flags & FSCK_CAN_FIX) && (flags & FSCK_AUTOFIX) && (c->opts.errors == BCH_ON_ERROR_continue || c->opts.errors == BCH_ON_ERROR_fix_safe)) { prt_str(out, ", "); prt_actioning(out, action); ret = -BCH_ERR_fsck_fix; } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) { if (c->opts.errors != BCH_ON_ERROR_continue || !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) { prt_str(out, ", shutting down"); inconsistent = true; ret = -BCH_ERR_fsck_errors_not_fixed; } else if (flags & FSCK_CAN_FIX) { prt_str(out, ", "); prt_actioning(out, action); ret = -BCH_ERR_fsck_fix; } else { prt_str(out, ", continuing"); ret = -BCH_ERR_fsck_ignore; } } else if (c->opts.fix_errors == FSCK_FIX_exit) { prt_str(out, ", exiting"); ret = -BCH_ERR_fsck_errors_not_fixed; } else if (flags & FSCK_CAN_FIX) { int fix = s && s->fix ? s->fix : c->opts.fix_errors; if (fix == FSCK_FIX_ask) { prt_str(out, ", "); prt_str(out, action); if (bch2_fs_stdio_redirect(c)) bch2_print(c, "%s", out->buf); else bch2_print_string_as_lines(KERN_ERR, out->buf); print = false; int ask = bch2_fsck_ask_yn(c, trans); if (trans) { ret = bch2_trans_relock(trans); if (ret) { mutex_unlock(&c->fsck_error_msgs_lock); goto err; } } if (ask >= YN_ALLNO && s) s->fix = ask == YN_ALLNO ? FSCK_FIX_no : FSCK_FIX_yes; ret = ask & 1 ? 
			-BCH_ERR_fsck_fix : -BCH_ERR_fsck_ignore;
		} else if (fix == FSCK_FIX_yes ||
			   (c->opts.nochanges &&
			    !(flags & FSCK_CAN_IGNORE))) {
			prt_str(out, ", ");
			prt_actioning(out, action);
			ret = -BCH_ERR_fsck_fix;
		} else {
			prt_str(out, ", not ");
			prt_actioning(out, action);
		}
	} else if (flags & FSCK_NEED_FSCK) {
		prt_str(out, " (run fsck to correct)");
	} else {
		prt_str(out, " (repair unimplemented)");
	}

	if (ret == -BCH_ERR_fsck_ignore &&
	    (c->opts.fix_errors == FSCK_FIX_exit ||
	     !(flags & FSCK_CAN_IGNORE)))
		ret = -BCH_ERR_fsck_errors_not_fixed;

	bool exiting = test_bit(BCH_FS_fsck_running, &c->flags) &&
		(ret != -BCH_ERR_fsck_fix &&
		 ret != -BCH_ERR_fsck_ignore);

	if (exiting)
		print = true;

	if (print) {
		if (bch2_fs_stdio_redirect(c))
			bch2_print(c, "%s\n", out->buf);
		else
			bch2_print_string_as_lines(KERN_ERR, out->buf);
	}

	if (exiting)
		bch_err(c, "Unable to continue, halting");
	else if (suppressing)
		bch_err(c, "Ratelimiting new instances of previous error");

	if (s)
		s->ret = ret;

	mutex_unlock(&c->fsck_error_msgs_lock);

	if (inconsistent)
		bch2_inconsistent_error(c);

	if (ret == -BCH_ERR_fsck_fix) {
		set_bit(BCH_FS_errors_fixed, &c->flags);
	} else {
		set_bit(BCH_FS_errors_not_fixed, &c->flags);
		set_bit(BCH_FS_error, &c->flags);
	}
err:
	if (action != action_orig)
		kfree(action);
	printbuf_exit(&buf);
	return ret;
}

int __bch2_bkey_fsck_err(struct bch_fs *c,
			 struct bkey_s_c k,
			 enum bch_validate_flags validate_flags,
			 enum bch_sb_error_id err,
			 const char *fmt, ...)
{
	if (validate_flags & BCH_VALIDATE_silent)
		return -BCH_ERR_fsck_delete_bkey;

	unsigned fsck_flags = 0;
	if (!(validate_flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)))
		fsck_flags |= FSCK_AUTOFIX|FSCK_CAN_FIX;

	struct printbuf buf = PRINTBUF;
	va_list args;

	prt_str(&buf, "invalid bkey ");
	bch2_bkey_val_to_text(&buf, c, k);
	prt_str(&buf, "\n ");
	va_start(args, fmt);
	prt_vprintf(&buf, fmt, args);
	va_end(args);
	prt_str(&buf, ": delete?");

	int ret = __bch2_fsck_err(c, NULL, fsck_flags, err, "%s", buf.buf);
	printbuf_exit(&buf);
	return ret;
}

void bch2_flush_fsck_errs(struct bch_fs *c)
{
	struct fsck_err_state *s, *n;

	mutex_lock(&c->fsck_error_msgs_lock);

	list_for_each_entry_safe(s, n, &c->fsck_error_msgs, list) {
		if (s->ratelimited && s->last_msg)
			bch_err(c, "Saw %llu errors like:\n %s", s->nr, s->last_msg);

		list_del(&s->list);
		kfree(s->last_msg);
		kfree(s);
	}

	mutex_unlock(&c->fsck_error_msgs_lock);
}
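/*
 * A standalone sketch (not bcachefs code) of the y/n/Y/N answer
 * handling used by the fsck prompt above: lowercase answers apply to a
 * single error, uppercase answers stick for every future error of the
 * same type, and anything else makes the caller re-prompt.  Note that
 * YN_YES and YN_ALLYES are both odd, which is what lets
 * __bch2_fsck_err() decide with "ask & 1" whether to fix.  The main()
 * harness is illustrative only.
 */
#include <stdio.h>
#include <string.h>

enum ask_yn {
	YN_NO,		/* 0: no, this error only */
	YN_YES,		/* 1: yes, this error only */
	YN_ALLNO,	/* 2: no to all errors of this type */
	YN_ALLYES,	/* 3: yes to all errors of this type */
};

static int parse_yn_response(const char *buf)
{
	if (strlen(buf) == 1)
		switch (buf[0]) {
		case 'n': return YN_NO;
		case 'y': return YN_YES;
		case 'N': return YN_ALLNO;
		case 'Y': return YN_ALLYES;
		}
	return -1;	/* invalid: caller asks again */
}

int main(void)
{
	const char *answers[] = { "y", "N", "maybe", "Y" };

	for (size_t i = 0; i < sizeof(answers) / sizeof(answers[0]); i++)
		printf("%-6s -> %d\n", answers[i],
		       parse_yn_response(answers[i]));
	return 0;
}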
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);

	return -EMSGSIZE;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return
genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; int err = 0; rtnl_lock(); for_each_netdev_dump(net, netdev, ctx->ifindex) { err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb)); if (err < 0) break; } rtnl_unlock(); return err; } static int netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, const struct genl_info *info) { unsigned long irq_suspend_timeout; unsigned long gro_flush_timeout; u32 napi_defer_hard_irqs; void *hdr; pid_t pid; if (!(napi->dev->flags & IFF_UP)) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id)) goto nla_put_failure; if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex)) goto nla_put_failure; if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq)) goto nla_put_failure; if (napi->thread) { pid = task_pid_nr(napi->thread); if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid)) goto nla_put_failure; } napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi); if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS, napi_defer_hard_irqs)) goto nla_put_failure; irq_suspend_timeout = napi_get_irq_suspend_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, irq_suspend_timeout)) goto nla_put_failure; gro_flush_timeout = napi_get_gro_flush_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, gro_flush_timeout)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; struct sk_buff *rsp; u32 napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; rtnl_lock(); rcu_read_lock(); napi = netdev_napi_by_id(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_fill_one(rsp, napi, info); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } rcu_read_unlock(); rtnl_unlock(); if (err) { goto err_free_msg; } else if (!rsp->len) { err = -ENOENT; goto err_free_msg; } return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { struct napi_struct *napi; int err = 0; if (!(netdev->flags & IFF_UP)) return err; list_for_each_entry(napi, &netdev->napi_list, dev_list) { if (napi->napi_id < MIN_NAPI_ID) continue; if (ctx->napi_id && napi->napi_id >= ctx->napi_id) continue; err = netdev_nl_napi_fill_one(rsp, napi, info); if (err) return err; ctx->napi_id = napi->napi_id; } return err; } int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_NAPI_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]); rtnl_lock(); if (ifindex) { netdev = __dev_get_by_index(net, ifindex); if (netdev) err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); else err = -ENODEV; } else 
{ for_each_netdev_dump(net, netdev, ctx->ifindex) { err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->napi_id = 0; } } rtnl_unlock(); return err; } static int netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info) { u64 irq_suspend_timeout = 0; u64 gro_flush_timeout = 0; u32 defer = 0; if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) { defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]); napi_set_defer_hard_irqs(napi, defer); } if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) { irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]); napi_set_irq_suspend_timeout(napi, irq_suspend_timeout); } if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) { gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]); napi_set_gro_flush_timeout(napi, gro_flush_timeout); } return 0; } int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; unsigned int napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); rtnl_lock(); rcu_read_lock(); napi = netdev_napi_by_id(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_set_config(napi, info); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } rcu_read_unlock(); rtnl_unlock(); return err; } static int netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { struct net_devmem_dmabuf_binding *binding; struct netdev_rx_queue *rxq; struct netdev_queue *txq; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) || nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: rxq = __netif_get_rx_queue(netdev, q_idx); if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID, rxq->napi->napi_id)) goto nla_put_failure; binding = rxq->mp_params.mp_priv; if (binding && nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id)) goto nla_put_failure; break; case NETDEV_QUEUE_TYPE_TX: txq = netdev_get_tx_queue(netdev, q_idx); if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID, txq->napi->napi_id)) goto nla_put_failure; } genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id, u32 q_type) { switch (q_type) { case NETDEV_QUEUE_TYPE_RX: if (q_id >= netdev->real_num_rx_queues) return -EINVAL; return 0; case NETDEV_QUEUE_TYPE_TX: if (q_id >= netdev->real_num_tx_queues) return -EINVAL; } return 0; } static int netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { int err; if (!(netdev->flags & IFF_UP)) return -ENOENT; err = netdev_nl_queue_validate(netdev, q_idx, q_type); if (err) return err; return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info); } int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info) { u32 q_id, q_type, ifindex; struct net_device *netdev; struct sk_buff *rsp; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX)) return -EINVAL; q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]); q_type = 
nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]); ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; rtnl_lock(); netdev = __dev_get_by_index(genl_info_net(info), ifindex); if (netdev) err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info); else err = -ENODEV; rtnl_unlock(); if (err) goto err_free_msg; return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { int err = 0; if (!(netdev->flags & IFF_UP)) return err; for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx, NETDEV_QUEUE_TYPE_RX, info); if (err) return err; } for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx, NETDEV_QUEUE_TYPE_TX, info); if (err) return err; } return err; } int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_QUEUE_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); rtnl_lock(); if (ifindex) { netdev = __dev_get_by_index(net, ifindex); if (netdev) err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); else err = -ENODEV; } else { for_each_netdev_dump(net, netdev, ctx->ifindex) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->rxq_idx = 0; ctx->txq_idx = 0; } } rtnl_unlock(); return err; } #define NETDEV_STAT_NOT_SET (~0ULL) static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size) { const u64 *add = _add; u64 *sum = _sum; while (size) { if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET) *sum += *add; sum++; add++; size -= 8; } } static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value) { if (value == NETDEV_STAT_NOT_SET) return 0; return nla_put_uint(rsp, attr_id, value); } static int netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, 
tx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp, u32 q_type, int i, const struct genl_info *info) { const struct netdev_stat_ops *ops = netdev->stat_ops; struct netdev_queue_stats_rx rx; struct netdev_queue_stats_tx tx; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: memset(&rx, 0xff, sizeof(rx)); ops->get_queue_stats_rx(netdev, i, &rx); if (!memchr_inv(&rx, 0xff, sizeof(rx))) goto nla_cancel; if (netdev_nl_stats_write_rx(rsp, &rx)) goto nla_put_failure; break; case NETDEV_QUEUE_TYPE_TX: memset(&tx, 0xff, sizeof(tx)); ops->get_queue_stats_tx(netdev, i, &tx); if (!memchr_inv(&tx, 0xff, sizeof(tx))) goto nla_cancel; if (netdev_nl_stats_write_tx(rsp, &tx)) goto nla_put_failure; break; } genlmsg_end(rsp, hdr); return 0; nla_cancel: genlmsg_cancel(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { const struct netdev_stat_ops *ops = netdev->stat_ops; int i, err; if (!(netdev->flags & IFF_UP)) return 0; i = ctx->rxq_idx; while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX, i, info); if (err) return err; ctx->rxq_idx = ++i; } i = ctx->txq_idx; while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX, i, info); if (err) return err; ctx->txq_idx = ++i; } ctx->rxq_idx = 0; ctx->txq_idx = 0; return 0; } static int netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { struct netdev_queue_stats_rx rx_sum, rx; struct netdev_queue_stats_tx tx_sum, tx; const struct netdev_stat_ops *ops; void *hdr; int i; ops = netdev->stat_ops; /* Netdev can't guarantee any complete counters */ if (!ops->get_base_stats) return 0; memset(&rx_sum, 0xff, sizeof(rx_sum)); memset(&tx_sum, 0xff, sizeof(tx_sum)); ops->get_base_stats(netdev, &rx_sum, &tx_sum); /* The op was there, but nothing reported, don't bother */ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) && !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum))) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex)) goto nla_put_failure; for (i = 0; i < netdev->real_num_rx_queues; i++) { memset(&rx, 
0xff, sizeof(rx)); if (ops->get_queue_stats_rx) ops->get_queue_stats_rx(netdev, i, &rx); netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx)); } for (i = 0; i < netdev->real_num_tx_queues; i++) { memset(&tx, 0xff, sizeof(tx)); if (ops->get_queue_stats_tx) ops->get_queue_stats_tx(netdev, i, &tx); netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx)); } if (netdev_nl_stats_write_rx(rsp, &rx_sum) || netdev_nl_stats_write_tx(rsp, &tx_sum)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope, struct sk_buff *skb, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { if (!netdev->stat_ops) return 0; switch (scope) { case 0: return netdev_nl_stats_by_netdev(netdev, skb, info); case NETDEV_QSTATS_SCOPE_QUEUE: return netdev_nl_stats_by_queue(netdev, skb, info, ctx); } return -EINVAL; /* Should not happen, per netlink policy */ } int netdev_nl_qstats_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; unsigned int ifindex; unsigned int scope; int err = 0; scope = 0; if (info->attrs[NETDEV_A_QSTATS_SCOPE]) scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]); ifindex = 0; if (info->attrs[NETDEV_A_QSTATS_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]); rtnl_lock(); if (ifindex) { netdev = __dev_get_by_index(net, ifindex); if (netdev && netdev->stat_ops) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QSTATS_IFINDEX]); err = netdev ? 
-EOPNOTSUPP : -ENODEV; } } else { for_each_netdev_dump(net, netdev, ctx->ifindex) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); if (err < 0) break; } } rtnl_unlock(); return err; } int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)]; struct net_devmem_dmabuf_binding *binding; struct list_head *sock_binding_list; u32 ifindex, dmabuf_fd, rxq_idx; struct net_device *netdev; struct sk_buff *rsp; struct nlattr *attr; int rem, err = 0; void *hdr; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]); sock_binding_list = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk); if (IS_ERR(sock_binding_list)) return PTR_ERR(sock_binding_list); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; hdr = genlmsg_iput(rsp, info); if (!hdr) { err = -EMSGSIZE; goto err_genlmsg_free; } rtnl_lock(); netdev = __dev_get_by_index(genl_info_net(info), ifindex); if (!netdev || !netif_device_present(netdev)) { err = -ENODEV; goto err_unlock; } if (dev_xdp_prog_count(netdev)) { NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached"); err = -EEXIST; goto err_unlock; } binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack); if (IS_ERR(binding)) { err = PTR_ERR(binding); goto err_unlock; } nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES, genlmsg_data(info->genlhdr), genlmsg_len(info->genlhdr), rem) { err = nla_parse_nested( tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr, netdev_queue_id_nl_policy, info->extack); if (err < 0) goto err_unbind; if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) || NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) { err = -EINVAL; goto err_unbind; } if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) { NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]); err = -EINVAL; goto err_unbind; } rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]); err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding, info->extack); if (err) goto err_unbind; } list_add(&binding->list, sock_binding_list); nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id); genlmsg_end(rsp, hdr); err = genlmsg_reply(rsp, info); if (err) goto err_unbind; rtnl_unlock(); return 0; err_unbind: net_devmem_unbind_dmabuf(binding); err_unlock: rtnl_unlock(); err_genlmsg_free: nlmsg_free(rsp); return err; } void netdev_nl_sock_priv_init(struct list_head *priv) { INIT_LIST_HEAD(priv); } void netdev_nl_sock_priv_destroy(struct list_head *priv) { struct net_devmem_dmabuf_binding *binding; struct net_devmem_dmabuf_binding *temp; list_for_each_entry_safe(binding, temp, priv, list) { rtnl_lock(); net_devmem_unbind_dmabuf(binding); rtnl_unlock(); } } static int netdev_genl_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_REGISTER: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF); break; case NETDEV_UNREGISTER: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF); break; case NETDEV_XDP_FEAT_CHANGE: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF); break; } return NOTIFY_OK; } static struct notifier_block netdev_genl_nb = { .notifier_call = 
netdev_genl_netdevice_event, }; static int __init netdev_genl_init(void) { int err; err = register_netdevice_notifier(&netdev_genl_nb); if (err) return err; err = genl_register_family(&netdev_nl_family); if (err) goto err_unreg_ntf; return 0; err_unreg_ntf: unregister_netdevice_notifier(&netdev_genl_nb); return err; } subsys_initcall(netdev_genl_init);
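/*
 * Illustrative driver-side sketch (not part of this file): a driver opts into
 * the qstats dump above by publishing netdev_stat_ops. The core pre-fills each
 * stats struct with 0xff (all-ones == "not reported"), so a driver only writes
 * the counters it actually tracks. The "foo" names below are hypothetical:
 *
 *	static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
 *					   struct netdev_queue_stats_rx *rx)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		rx->packets = priv->rx_ring[idx].packets;
 *		rx->bytes = priv->rx_ring[idx].bytes;
 *		// alloc_fail is left at all-ones: not collected by this driver
 *	}
 *
 *	static const struct netdev_stat_ops foo_stat_ops = {
 *		.get_queue_stats_rx = foo_get_queue_stats_rx,
 *	};
 *
 *	// at probe time: netdev->stat_ops = &foo_stat_ops;
 */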
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 1999-2021 Petko Manolov (petkan@nucleusys.com) * */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/module.h> #include <asm/byteorder.h> #include <linux/uaccess.h> #include "pegasus.h" /* * Version Information */ #define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>" #define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver" static const char driver_name[] = "pegasus"; #undef PEGASUS_WRITE_EEPROM #define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \ BMSR_100FULL | BMSR_ANEGCAPABLE) #define CARRIER_CHECK_DELAY (2 * HZ) static bool loopback; static bool mii_mode; static char *devid; static struct usb_eth_dev usb_dev_id[] = { #define PEGASUS_DEV(pn, vid, pid, flags) \ {.name = pn, .vendor = vid, .device = pid, .private = flags}, #define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ PEGASUS_DEV(pn, vid, pid, flags) #include "pegasus.h" #undef PEGASUS_DEV #undef PEGASUS_DEV_CLASS {NULL, 0, 0, 0}, {NULL, 0, 0, 0} }; static struct usb_device_id pegasus_ids[] = { #define PEGASUS_DEV(pn, vid, pid, flags) \ {.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid}, /* * The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product * IDs as
the Belkin F5D5050, so we need to teach the pegasus driver to * ignore adaptors belonging to the "Wireless" class 0xE0. For this one * case anyway, seeing as the pegasus is for "Wired" adaptors. */ #define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ {.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \ .idVendor = vid, .idProduct = pid, .bDeviceClass = dclass}, #include "pegasus.h" #undef PEGASUS_DEV #undef PEGASUS_DEV_CLASS {}, {} }; MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(loopback, bool, 0); module_param(mii_mode, bool, 0); module_param(devid, charp, 0); MODULE_PARM_DESC(loopback, "Enable MAC loopback mode (bit 0)"); MODULE_PARM_DESC(mii_mode, "Enable HomePNA mode (bit 0), default=MII mode = 0"); MODULE_PARM_DESC(devid, "The format is: 'DEV_name:VendorID:DeviceID:Flags'"); /* use ethtool to change the level for any given device */ static int msg_level = -1; module_param(msg_level, int, 0); MODULE_PARM_DESC(msg_level, "Override default message level"); MODULE_DEVICE_TABLE(usb, pegasus_ids); static const struct net_device_ops pegasus_netdev_ops; /*****/ static void async_ctrl_callback(struct urb *urb) { struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context; int status = urb->status; if (status < 0) dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status); kfree(req); usb_free_urb(urb); } static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) { return usb_control_msg_recv(pegasus->usb, 0, PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0, indx, data, size, 1000, GFP_NOIO); } static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, const void *data) { int ret; ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, indx, data, size, 1000, GFP_NOIO); if (ret < 0) netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret); return ret; } /* * There is only one way to write to a single ADM8511 register and this is via * a specific control request. 'data' is ignored by the device, but it is here to * not break the API.
*/ static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) { void *buf = &data; int ret; ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, indx, buf, 1, 1000, GFP_NOIO); if (ret < 0) netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret); return ret; } static int update_eth_regs_async(pegasus_t *pegasus) { int ret = -ENOMEM; struct urb *async_urb; struct usb_ctrlrequest *req; req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); if (req == NULL) return ret; async_urb = usb_alloc_urb(0, GFP_ATOMIC); if (async_urb == NULL) { kfree(req); return ret; } req->bRequestType = PEGASUS_REQT_WRITE; req->bRequest = PEGASUS_REQ_SET_REGS; req->wValue = cpu_to_le16(0); req->wIndex = cpu_to_le16(EthCtrl0); req->wLength = cpu_to_le16(3); usb_fill_control_urb(async_urb, pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), (void *)req, pegasus->eth_regs, 3, async_ctrl_callback, req); ret = usb_submit_urb(async_urb, GFP_ATOMIC); if (ret) { if (ret == -ENODEV) netif_device_detach(pegasus->net); netif_err(pegasus, drv, pegasus->net, "%s returned %d\n", __func__, ret); } return ret; } static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd) { int i, ret; __le16 regdi; __u8 data[4] = { phy, 0, 0, indx }; if (cmd & PHY_WRITE) { __le16 *t = (__le16 *) & data[1]; *t = cpu_to_le16(*regd); } set_register(p, PhyCtrl, 0); set_registers(p, PhyAddr, sizeof(data), data); set_register(p, PhyCtrl, (indx | cmd)); for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(p, PhyCtrl, 1, data); if (ret < 0) goto fail; if (data[0] & PHY_DONE) break; } if (i >= REG_TIMEOUT) { ret = -ETIMEDOUT; goto fail; } if (cmd & PHY_READ) { ret = get_registers(p, PhyData, 2, &regdi); if (ret < 0) goto fail; *regd = le16_to_cpu(regdi); } return 0; fail: netif_dbg(p, drv, p->net, "%s failed\n", __func__); return ret; } /* Returns non-negative int on success, error on failure */ static int read_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd) { return __mii_op(pegasus, phy, indx, regd, PHY_READ); } /* Returns zero on success, error on failure */ static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd) { return __mii_op(pegasus, phy, indx, regd, PHY_WRITE); } static int mdio_read(struct net_device *dev, int phy_id, int loc) { pegasus_t *pegasus = netdev_priv(dev); int ret; u16 res; ret = read_mii_word(pegasus, phy_id, loc, &res); if (ret < 0) return ret; return (int)res; } static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) { pegasus_t *pegasus = netdev_priv(dev); u16 data = val; write_mii_word(pegasus, phy_id, loc, &data); } static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) { int ret, i; __le16 retdatai; __u8 tmp = 0; set_register(pegasus, EpromCtrl, 0); set_register(pegasus, EpromOffset, index); set_register(pegasus, EpromCtrl, EPROM_READ); for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(pegasus, EpromCtrl, 1, &tmp); if (ret < 0) goto fail; if (tmp & EPROM_DONE) break; } if (i >= REG_TIMEOUT) { ret = -ETIMEDOUT; goto fail; } ret = get_registers(pegasus, EpromData, 2, &retdatai); if (ret < 0) goto fail; *retdata = le16_to_cpu(retdatai); return ret; fail: netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return ret; } #ifdef PEGASUS_WRITE_EEPROM static inline void enable_eprom_write(pegasus_t *pegasus) { __u8 tmp; get_registers(pegasus, EthCtrl2, 1, &tmp); set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE); } static inline void
disable_eprom_write(pegasus_t *pegasus) { __u8 tmp; get_registers(pegasus, EthCtrl2, 1, &tmp); set_register(pegasus, EpromCtrl, 0); set_register(pegasus, EthCtrl2, tmp & ~EPROM_WR_ENABLE); } static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data) { int i; __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE }; int ret; __le16 le_data = cpu_to_le16(data); set_registers(pegasus, EpromOffset, 4, d); enable_eprom_write(pegasus); set_register(pegasus, EpromOffset, index); set_registers(pegasus, EpromData, 2, &le_data); set_register(pegasus, EpromCtrl, EPROM_WRITE); for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(pegasus, EpromCtrl, 1, &tmp); if (ret == -ESHUTDOWN) goto fail; if (tmp & EPROM_DONE) break; } disable_eprom_write(pegasus); if (i >= REG_TIMEOUT) goto fail; return ret; fail: netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return -ETIMEDOUT; } #endif /* PEGASUS_WRITE_EEPROM */ static inline int get_node_id(pegasus_t *pegasus, u8 *id) { int i, ret; u16 w16; for (i = 0; i < 3; i++) { ret = read_eprom_word(pegasus, i, &w16); if (ret < 0) return ret; ((__le16 *) id)[i] = cpu_to_le16(w16); } return 0; } static void set_ethernet_addr(pegasus_t *pegasus) { int ret; u8 node_id[6]; if (pegasus->features & PEGASUS_II) { ret = get_registers(pegasus, 0x10, sizeof(node_id), node_id); if (ret < 0) goto err; } else { ret = get_node_id(pegasus, node_id); if (ret < 0) goto err; ret = set_registers(pegasus, EthID, sizeof(node_id), node_id); if (ret < 0) goto err; } eth_hw_addr_set(pegasus->net, node_id); return; err: eth_hw_addr_random(pegasus->net); netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n"); return; } static inline int reset_mac(pegasus_t *pegasus) { int ret, i; __u8 data = 0x8; set_register(pegasus, EthCtrl1, data); for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(pegasus, EthCtrl1, 1, &data); if (ret < 0) goto fail; if (~data & 0x08) { if (loopback) break; if (mii_mode && (pegasus->features & HAS_HOME_PNA)) set_register(pegasus, Gpio1, 0x34); else set_register(pegasus, Gpio1, 0x26); set_register(pegasus, Gpio0, pegasus->features); set_register(pegasus, Gpio0, DEFAULT_GPIO_SET); break; } } if (i == REG_TIMEOUT) return -ETIMEDOUT; if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS || usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) { set_register(pegasus, Gpio0, 0x24); set_register(pegasus, Gpio0, 0x26); } if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) { __u16 auxmode; ret = read_mii_word(pegasus, 3, 0x1b, &auxmode); if (ret < 0) goto fail; auxmode |= 4; write_mii_word(pegasus, 3, 0x1b, &auxmode); } return 0; fail: netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return ret; } static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) { pegasus_t *pegasus = netdev_priv(dev); int ret; __u16 linkpart; __u8 data[4]; ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); if (ret < 0) goto fail; data[0] = 0xc8; /* TX & RX enable, append status, no CRC */ data[1] = 0; if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL)) data[1] |= 0x20; /* set full duplex */ if (linkpart & (ADVERTISE_100FULL | ADVERTISE_100HALF)) data[1] |= 0x10; /* set 100 Mbps */ if (mii_mode) data[1] = 0; data[2] = loopback ? 
0x09 : 0x01; memcpy(pegasus->eth_regs, data, sizeof(data)); ret = set_registers(pegasus, EthCtrl0, 3, data); if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS || usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 || usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) { u16 auxmode; ret = read_mii_word(pegasus, 0, 0x1b, &auxmode); if (ret < 0) goto fail; auxmode |= 4; write_mii_word(pegasus, 0, 0x1b, &auxmode); } return ret; fail: netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return ret; } static void read_bulk_callback(struct urb *urb) { pegasus_t *pegasus = urb->context; struct net_device *net; u8 *buf = urb->transfer_buffer; int rx_status, count = urb->actual_length; int status = urb->status; __u16 pkt_len; if (!pegasus) return; net = pegasus->net; if (!netif_device_present(net) || !netif_running(net)) return; switch (status) { case 0: break; case -ETIME: netif_dbg(pegasus, rx_err, net, "reset MAC\n"); pegasus->flags &= ~PEGASUS_RX_BUSY; break; case -EPIPE: /* stall, or disconnect from TT */ /* FIXME schedule work to clear the halt */ netif_warn(pegasus, rx_err, net, "no rx stall recovery\n"); return; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: netif_dbg(pegasus, ifdown, net, "rx unlink, %d\n", status); return; default: netif_dbg(pegasus, rx_err, net, "RX status %d\n", status); goto goon; } if (count < 4) goto goon; rx_status = buf[count - 2]; if (rx_status & 0x1c) { netif_dbg(pegasus, rx_err, net, "RX packet error %x\n", rx_status); net->stats.rx_errors++; if (rx_status & 0x04) /* runt */ net->stats.rx_length_errors++; if (rx_status & 0x08) net->stats.rx_crc_errors++; if (rx_status & 0x10) /* extra bits */ net->stats.rx_frame_errors++; goto goon; } if (pegasus->chip == 0x8513) { pkt_len = le32_to_cpu(*(__le32 *)urb->transfer_buffer); pkt_len &= 0x0fff; pegasus->rx_skb->data += 2; } else { pkt_len = buf[count - 3] << 8; pkt_len += buf[count - 4]; pkt_len &= 0xfff; pkt_len -= 4; } /* * If the packet is unreasonably long, quietly drop it rather than * kernel panicking by calling skb_put. */ if (pkt_len > PEGASUS_MTU) goto goon; /* * at this point we are sure pegasus->rx_skb != NULL * so we go ahead and pass up the packet.
*/ skb_put(pegasus->rx_skb, pkt_len); pegasus->rx_skb->protocol = eth_type_trans(pegasus->rx_skb, net); netif_rx(pegasus->rx_skb); net->stats.rx_packets++; net->stats.rx_bytes += pkt_len; if (pegasus->flags & PEGASUS_UNPLUG) return; pegasus->rx_skb = __netdev_alloc_skb_ip_align(pegasus->net, PEGASUS_MTU, GFP_ATOMIC); if (pegasus->rx_skb == NULL) goto tl_sched; goon: usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), pegasus->rx_skb->data, PEGASUS_MTU, read_bulk_callback, pegasus); rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); if (rx_status == -ENODEV) netif_device_detach(pegasus->net); else if (rx_status) { pegasus->flags |= PEGASUS_RX_URB_FAIL; goto tl_sched; } else { pegasus->flags &= ~PEGASUS_RX_URB_FAIL; } return; tl_sched: tasklet_schedule(&pegasus->rx_tl); } static void rx_fixup(struct tasklet_struct *t) { pegasus_t *pegasus = from_tasklet(pegasus, t, rx_tl); int status; if (pegasus->flags & PEGASUS_UNPLUG) return; if (pegasus->flags & PEGASUS_RX_URB_FAIL) if (pegasus->rx_skb) goto try_again; if (pegasus->rx_skb == NULL) pegasus->rx_skb = __netdev_alloc_skb_ip_align(pegasus->net, PEGASUS_MTU, GFP_ATOMIC); if (pegasus->rx_skb == NULL) { netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n"); tasklet_schedule(&pegasus->rx_tl); return; } usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), pegasus->rx_skb->data, PEGASUS_MTU, read_bulk_callback, pegasus); try_again: status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); if (status == -ENODEV) netif_device_detach(pegasus->net); else if (status) { pegasus->flags |= PEGASUS_RX_URB_FAIL; tasklet_schedule(&pegasus->rx_tl); } else { pegasus->flags &= ~PEGASUS_RX_URB_FAIL; } } static void write_bulk_callback(struct urb *urb) { pegasus_t *pegasus = urb->context; struct net_device *net; int status = urb->status; if (!pegasus) return; net = pegasus->net; if (!netif_device_present(net) || !netif_running(net)) return; switch (status) { case -EPIPE: /* FIXME schedule_work() to clear the tx halt */ netif_stop_queue(net); netif_warn(pegasus, tx_err, net, "no tx stall recovery\n"); return; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: netif_dbg(pegasus, ifdown, net, "tx unlink, %d\n", status); return; default: netif_info(pegasus, tx_err, net, "TX status %d\n", status); fallthrough; case 0: break; } netif_trans_update(net); /* prevent tx timeout */ netif_wake_queue(net); } static void intr_callback(struct urb *urb) { pegasus_t *pegasus = urb->context; struct net_device *net; int res, status = urb->status; if (!pegasus) return; net = pegasus->net; switch (status) { case 0: break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; default: /* some Pegasus-I products report LOTS of data * toggle errors... avoid log spamming */ netif_dbg(pegasus, timer, net, "intr status %d\n", status); } if (urb->actual_length >= 6) { u8 *d = urb->transfer_buffer; /* byte 0 == tx_status1, reg 2B */ if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL |LATE_COL|JABBER_TIMEOUT)) { net->stats.tx_errors++; if (d[0] & TX_UNDERRUN) net->stats.tx_fifo_errors++; if (d[0] & (EXCESSIVE_COL | JABBER_TIMEOUT)) net->stats.tx_aborted_errors++; if (d[0] & LATE_COL) net->stats.tx_window_errors++; } /* d[5].LINK_STATUS lies on some adapters. * d[0].NO_CARRIER kicks in only with failed TX. * ... so monitoring with MII may be safest. 
*/ /* bytes 3-4 == rx_lostpkt, reg 2E/2F */ net->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4]; } res = usb_submit_urb(urb, GFP_ATOMIC); if (res == -ENODEV) netif_device_detach(pegasus->net); if (res) netif_err(pegasus, timer, net, "can't resubmit interrupt urb, %d\n", res); } static void pegasus_tx_timeout(struct net_device *net, unsigned int txqueue) { pegasus_t *pegasus = netdev_priv(net); netif_warn(pegasus, timer, net, "tx timeout\n"); usb_unlink_urb(pegasus->tx_urb); net->stats.tx_errors++; } static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb, struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); int count = ((skb->len + 2) & 0x3f) ? skb->len + 2 : skb->len + 3; int res; __u16 l16 = skb->len; netif_stop_queue(net); ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16); skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len); usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb, usb_sndbulkpipe(pegasus->usb, 2), pegasus->tx_buff, count, write_bulk_callback, pegasus); if ((res = usb_submit_urb(pegasus->tx_urb, GFP_ATOMIC))) { netif_warn(pegasus, tx_err, net, "fail tx, %d\n", res); switch (res) { case -EPIPE: /* stall, or disconnect from TT */ /* cleanup should already have been scheduled */ break; case -ENODEV: /* disconnect() upcoming */ case -EPERM: netif_device_detach(pegasus->net); break; default: net->stats.tx_errors++; netif_start_queue(net); } } else { net->stats.tx_packets++; net->stats.tx_bytes += skb->len; } dev_kfree_skb(skb); return NETDEV_TX_OK; } static inline void disable_net_traffic(pegasus_t *pegasus) { __le16 tmp = cpu_to_le16(0); set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); } static inline int get_interrupt_interval(pegasus_t *pegasus) { u16 data; u8 interval; int ret; ret = read_eprom_word(pegasus, 4, &data); if (ret < 0) return ret; interval = data >> 8; if (pegasus->usb->speed != USB_SPEED_HIGH) { if (interval < 0x80) { netif_info(pegasus, timer, pegasus->net, "intr interval changed from %ums to %ums\n", interval, 0x80); interval = 0x80; data = (data & 0x00FF) | ((u16)interval << 8); #ifdef PEGASUS_WRITE_EEPROM write_eprom_word(pegasus, 4, data); #endif } } pegasus->intr_interval = interval; return 0; } static void set_carrier(struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); u16 tmp; if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp)) return; if (tmp & BMSR_LSTATUS) netif_carrier_on(net); else netif_carrier_off(net); } static void free_all_urbs(pegasus_t *pegasus) { usb_free_urb(pegasus->intr_urb); usb_free_urb(pegasus->tx_urb); usb_free_urb(pegasus->rx_urb); } static void unlink_all_urbs(pegasus_t *pegasus) { usb_kill_urb(pegasus->intr_urb); usb_kill_urb(pegasus->tx_urb); usb_kill_urb(pegasus->rx_urb); } static int alloc_urbs(pegasus_t *pegasus) { int res = -ENOMEM; pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pegasus->rx_urb) { return res; } pegasus->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pegasus->tx_urb) { usb_free_urb(pegasus->rx_urb); return res; } pegasus->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pegasus->intr_urb) { usb_free_urb(pegasus->tx_urb); usb_free_urb(pegasus->rx_urb); return res; } return 0; } static int pegasus_open(struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); int res=-ENOMEM; if (pegasus->rx_skb == NULL) pegasus->rx_skb = __netdev_alloc_skb_ip_align(pegasus->net, PEGASUS_MTU, GFP_KERNEL); if (!pegasus->rx_skb) goto exit; set_registers(pegasus, EthID, 6, net->dev_addr); usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), 
pegasus->rx_skb->data, PEGASUS_MTU, read_bulk_callback, pegasus); if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(pegasus->net); netif_dbg(pegasus, ifup, net, "failed rx_urb, %d\n", res); goto exit; } usb_fill_int_urb(pegasus->intr_urb, pegasus->usb, usb_rcvintpipe(pegasus->usb, 3), pegasus->intr_buff, sizeof(pegasus->intr_buff), intr_callback, pegasus, pegasus->intr_interval); if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(pegasus->net); netif_dbg(pegasus, ifup, net, "failed intr_urb, %d\n", res); usb_kill_urb(pegasus->rx_urb); goto exit; } res = enable_net_traffic(net, pegasus->usb); if (res < 0) { netif_dbg(pegasus, ifup, net, "can't enable_net_traffic() - %d\n", res); res = -EIO; usb_kill_urb(pegasus->rx_urb); usb_kill_urb(pegasus->intr_urb); goto exit; } set_carrier(net); netif_start_queue(net); netif_dbg(pegasus, ifup, net, "open\n"); res = 0; exit: return res; } static int pegasus_close(struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); netif_stop_queue(net); if (!(pegasus->flags & PEGASUS_UNPLUG)) disable_net_traffic(pegasus); tasklet_kill(&pegasus->rx_tl); unlink_all_urbs(pegasus); return 0; } static void pegasus_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { pegasus_t *pegasus = netdev_priv(dev); strscpy(info->driver, driver_name, sizeof(info->driver)); usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info)); } /* also handles three patterns of some kind in hardware */ #define WOL_SUPPORTED (WAKE_MAGIC|WAKE_PHY) static void pegasus_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { pegasus_t *pegasus = netdev_priv(dev); wol->supported = WAKE_MAGIC | WAKE_PHY; wol->wolopts = pegasus->wolopts; } static int pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { pegasus_t *pegasus = netdev_priv(dev); u8 reg78 = 0x04; int ret; if (wol->wolopts & ~WOL_SUPPORTED) return -EINVAL; if (wol->wolopts & WAKE_MAGIC) reg78 |= 0x80; if (wol->wolopts & WAKE_PHY) reg78 |= 0x40; /* FIXME this 0x10 bit still needs to get set in the chip... 
*/ if (wol->wolopts) pegasus->eth_regs[0] |= 0x10; else pegasus->eth_regs[0] &= ~0x10; pegasus->wolopts = wol->wolopts; ret = set_register(pegasus, WakeupControl, reg78); if (!ret) ret = device_set_wakeup_enable(&pegasus->usb->dev, wol->wolopts); return ret; } static inline void pegasus_reset_wol(struct net_device *dev) { struct ethtool_wolinfo wol; memset(&wol, 0, sizeof wol); (void) pegasus_set_wol(dev, &wol); } static int pegasus_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *ecmd) { pegasus_t *pegasus; pegasus = netdev_priv(dev); mii_ethtool_get_link_ksettings(&pegasus->mii, ecmd); return 0; } static int pegasus_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *ecmd) { pegasus_t *pegasus = netdev_priv(dev); return mii_ethtool_set_link_ksettings(&pegasus->mii, ecmd); } static int pegasus_nway_reset(struct net_device *dev) { pegasus_t *pegasus = netdev_priv(dev); return mii_nway_restart(&pegasus->mii); } static u32 pegasus_get_link(struct net_device *dev) { pegasus_t *pegasus = netdev_priv(dev); return mii_link_ok(&pegasus->mii); } static u32 pegasus_get_msglevel(struct net_device *dev) { pegasus_t *pegasus = netdev_priv(dev); return pegasus->msg_enable; } static void pegasus_set_msglevel(struct net_device *dev, u32 v) { pegasus_t *pegasus = netdev_priv(dev); pegasus->msg_enable = v; } static const struct ethtool_ops ops = { .get_drvinfo = pegasus_get_drvinfo, .nway_reset = pegasus_nway_reset, .get_link = pegasus_get_link, .get_msglevel = pegasus_get_msglevel, .set_msglevel = pegasus_set_msglevel, .get_wol = pegasus_get_wol, .set_wol = pegasus_set_wol, .get_link_ksettings = pegasus_get_link_ksettings, .set_link_ksettings = pegasus_set_link_ksettings, }; static int pegasus_siocdevprivate(struct net_device *net, struct ifreq *rq, void __user *udata, int cmd) { __u16 *data = (__u16 *) &rq->ifr_ifru; pegasus_t *pegasus = netdev_priv(net); int res; switch (cmd) { case SIOCDEVPRIVATE: data[0] = pegasus->phy; fallthrough; case SIOCDEVPRIVATE + 1: res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); break; case SIOCDEVPRIVATE + 2: if (!capable(CAP_NET_ADMIN)) return -EPERM; write_mii_word(pegasus, pegasus->phy, data[1] & 0x1f, &data[2]); res = 0; break; default: res = -EOPNOTSUPP; } return res; } static void pegasus_set_multicast(struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); if (net->flags & IFF_PROMISC) { pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS; netif_info(pegasus, link, net, "Promiscuous mode enabled\n"); } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) { pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST; pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; netif_dbg(pegasus, link, net, "set allmulti\n"); } else { pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST; pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; } update_eth_regs_async(pegasus); } static __u8 mii_phy_probe(pegasus_t *pegasus) { int i, ret; __u16 tmp; for (i = 0; i < 32; i++) { ret = read_mii_word(pegasus, i, MII_BMSR, &tmp); if (ret < 0) goto fail; if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0) continue; else return i; } fail: return 0xff; } static inline void setup_pegasus_II(pegasus_t *pegasus) { int ret; __u8 data = 0xa5; set_register(pegasus, Reg1d, 0); set_register(pegasus, Reg7b, 1); msleep(100); if ((pegasus->features & HAS_HOME_PNA) && mii_mode) set_register(pegasus, Reg7b, 0); else set_register(pegasus, Reg7b, 2); set_register(pegasus, 0x83, data); ret = get_registers(pegasus, 0x83, 1, &data); if (ret < 0) goto 
fail; if (data == 0xa5) pegasus->chip = 0x8513; else pegasus->chip = 0; set_register(pegasus, 0x80, 0xc0); set_register(pegasus, 0x83, 0xff); set_register(pegasus, 0x84, 0x01); if (pegasus->features & HAS_HOME_PNA && mii_mode) set_register(pegasus, Reg81, 6); else set_register(pegasus, Reg81, 2); return; fail: netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); } static void check_carrier(struct work_struct *work) { pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work); set_carrier(pegasus->net); if (!(pegasus->flags & PEGASUS_UNPLUG)) { queue_delayed_work(system_long_wq, &pegasus->carrier_check, CARRIER_CHECK_DELAY); } } static int pegasus_blacklisted(struct usb_device *udev) { struct usb_device_descriptor *udd = &udev->descriptor; /* Special quirk to keep the driver from handling the Belkin Bluetooth * dongle which happens to have the same ID. */ if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) && (udd->idProduct == cpu_to_le16(0x0121)) && (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) && (udd->bDeviceProtocol == 1)) return 1; return 0; } static int pegasus_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct net_device *net; pegasus_t *pegasus; int dev_index = id - pegasus_ids; int res = -ENOMEM; if (pegasus_blacklisted(dev)) return -ENODEV; net = alloc_etherdev(sizeof(struct pegasus)); if (!net) goto out; pegasus = netdev_priv(net); pegasus->dev_index = dev_index; res = alloc_urbs(pegasus); if (res < 0) { dev_err(&intf->dev, "can't allocate %s\n", "urbs"); goto out1; } tasklet_setup(&pegasus->rx_tl, rx_fixup); INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier); pegasus->intf = intf; pegasus->usb = dev; pegasus->net = net; net->watchdog_timeo = PEGASUS_TX_TIMEOUT; net->netdev_ops = &pegasus_netdev_ops; net->ethtool_ops = &ops; pegasus->mii.dev = net; pegasus->mii.mdio_read = mdio_read; pegasus->mii.mdio_write = mdio_write; pegasus->mii.phy_id_mask = 0x1f; pegasus->mii.reg_num_mask = 0x1f; pegasus->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); pegasus->features = usb_dev_id[dev_index].private; res = get_interrupt_interval(pegasus); if (res) goto out2; if (reset_mac(pegasus)) { dev_err(&intf->dev, "can't reset MAC\n"); res = -EIO; goto out2; } set_ethernet_addr(pegasus); if (pegasus->features & PEGASUS_II) { dev_info(&intf->dev, "setup Pegasus II specific registers\n"); setup_pegasus_II(pegasus); } pegasus->phy = mii_phy_probe(pegasus); if (pegasus->phy == 0xff) { dev_warn(&intf->dev, "can't locate MII phy, using default\n"); pegasus->phy = 1; } pegasus->mii.phy_id = pegasus->phy; usb_set_intfdata(intf, pegasus); SET_NETDEV_DEV(net, &intf->dev); pegasus_reset_wol(net); res = register_netdev(net); if (res) goto out3; queue_delayed_work(system_long_wq, &pegasus->carrier_check, CARRIER_CHECK_DELAY); dev_info(&intf->dev, "%s, %s, %pM\n", net->name, usb_dev_id[dev_index].name, net->dev_addr); return 0; out3: usb_set_intfdata(intf, NULL); out2: free_all_urbs(pegasus); out1: free_netdev(net); out: return res; } static void pegasus_disconnect(struct usb_interface *intf) { struct pegasus *pegasus = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!pegasus) { dev_dbg(&intf->dev, "unregistering non-bound device?\n"); return; } pegasus->flags |= PEGASUS_UNPLUG; cancel_delayed_work_sync(&pegasus->carrier_check); unregister_netdev(pegasus->net); unlink_all_urbs(pegasus); free_all_urbs(pegasus); if (pegasus->rx_skb != NULL) { 
dev_kfree_skb(pegasus->rx_skb); pegasus->rx_skb = NULL; } free_netdev(pegasus->net); } static int pegasus_suspend(struct usb_interface *intf, pm_message_t message) { struct pegasus *pegasus = usb_get_intfdata(intf); netif_device_detach(pegasus->net); cancel_delayed_work_sync(&pegasus->carrier_check); if (netif_running(pegasus->net)) { usb_kill_urb(pegasus->rx_urb); usb_kill_urb(pegasus->intr_urb); } return 0; } static int pegasus_resume(struct usb_interface *intf) { struct pegasus *pegasus = usb_get_intfdata(intf); netif_device_attach(pegasus->net); if (netif_running(pegasus->net)) { pegasus->rx_urb->status = 0; pegasus->rx_urb->actual_length = 0; read_bulk_callback(pegasus->rx_urb); pegasus->intr_urb->status = 0; pegasus->intr_urb->actual_length = 0; intr_callback(pegasus->intr_urb); } queue_delayed_work(system_long_wq, &pegasus->carrier_check, CARRIER_CHECK_DELAY); return 0; } static const struct net_device_ops pegasus_netdev_ops = { .ndo_open = pegasus_open, .ndo_stop = pegasus_close, .ndo_siocdevprivate = pegasus_siocdevprivate, .ndo_start_xmit = pegasus_start_xmit, .ndo_set_rx_mode = pegasus_set_multicast, .ndo_tx_timeout = pegasus_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static struct usb_driver pegasus_driver = { .name = driver_name, .probe = pegasus_probe, .disconnect = pegasus_disconnect, .id_table = pegasus_ids, .suspend = pegasus_suspend, .resume = pegasus_resume, .disable_hub_initiated_lpm = 1, }; static void __init parse_id(char *id) { unsigned int vendor_id = 0, device_id = 0, flags = 0, i = 0; char *token, *name = NULL; if ((token = strsep(&id, ":")) != NULL) name = token; /* name now points to a null terminated string*/ if ((token = strsep(&id, ":")) != NULL) vendor_id = simple_strtoul(token, NULL, 16); if ((token = strsep(&id, ":")) != NULL) device_id = simple_strtoul(token, NULL, 16); flags = simple_strtoul(id, NULL, 16); pr_info("%s: new device %s, vendor ID 0x%04x, device ID 0x%04x, flags: 0x%x\n", driver_name, name, vendor_id, device_id, flags); if (vendor_id > 0x10000 || vendor_id == 0) return; if (device_id > 0x10000 || device_id == 0) return; for (i = 0; usb_dev_id[i].name; i++); usb_dev_id[i].name = name; usb_dev_id[i].vendor = vendor_id; usb_dev_id[i].device = device_id; usb_dev_id[i].private = flags; pegasus_ids[i].match_flags = USB_DEVICE_ID_MATCH_DEVICE; pegasus_ids[i].idVendor = vendor_id; pegasus_ids[i].idProduct = device_id; } static int __init pegasus_init(void) { pr_info("%s: " DRIVER_DESC "\n", driver_name); if (devid) parse_id(devid); return usb_register(&pegasus_driver); } static void __exit pegasus_exit(void) { usb_deregister(&pegasus_driver); } module_init(pegasus_init); module_exit(pegasus_exit);
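/*
 * Usage sketch (illustrative): parse_id() above extends the device tables at
 * module load time from the 'devid' parameter, which takes the
 * 'DEV_name:VendorID:DeviceID:Flags' form described in MODULE_PARM_DESC().
 * The IDs below are made-up example values, not a real adapter:
 *
 *	modprobe pegasus devid=MyAdapter:0a46:ffff:0
 *
 * This appends {.name = "MyAdapter", .vendor = 0x0a46, .device = 0xffff,
 * .private = 0} to usb_dev_id[] and a matching USB_DEVICE_ID_MATCH_DEVICE
 * entry to pegasus_ids[], so the named hardware binds to this driver.
 */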
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de> #include <linux/spinlock.h> #include <linux/seq_file.h> #include <linux/bitmap.h> #include <linux/percpu.h> #include <linux/cpu.h> #include <linux/irq.h> struct cpumap { unsigned int available; unsigned int allocated; unsigned int managed; unsigned int managed_allocated; bool initialized; bool online; unsigned long *managed_map; unsigned long alloc_map[]; }; struct irq_matrix { unsigned int matrix_bits; unsigned int alloc_start; unsigned int alloc_end; unsigned int alloc_size; unsigned int global_available; unsigned int global_reserved; unsigned int systembits_inalloc; unsigned int total_allocated; unsigned int online_maps; struct cpumap __percpu *maps; unsigned long *system_map; unsigned long scratch_map[]; }; #define CREATE_TRACE_POINTS #include <trace/events/irq_matrix.h> /** * irq_alloc_matrix - Allocate an irq_matrix structure and initialize it * @matrix_bits: Number of matrix bits must be <= IRQ_MATRIX_BITS * @alloc_start: From which bit the allocation search starts * @alloc_end: At which bit the allocation search ends, i.e. first * invalid bit */ __init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, unsigned int alloc_start, unsigned int alloc_end) { unsigned int cpu, matrix_size = BITS_TO_LONGS(matrix_bits); struct irq_matrix *m; m = kzalloc(struct_size(m, scratch_map, matrix_size * 2), GFP_KERNEL); if (!m) return NULL; m->system_map =
&m->scratch_map[matrix_size]; m->matrix_bits = matrix_bits; m->alloc_start = alloc_start; m->alloc_end = alloc_end; m->alloc_size = alloc_end - alloc_start; m->maps = __alloc_percpu(struct_size(m->maps, alloc_map, matrix_size * 2), __alignof__(*m->maps)); if (!m->maps) { kfree(m); return NULL; } for_each_possible_cpu(cpu) { struct cpumap *cm = per_cpu_ptr(m->maps, cpu); cm->managed_map = &cm->alloc_map[matrix_size]; } return m; } /** * irq_matrix_online - Bring the local CPU matrix online * @m: Matrix pointer */ void irq_matrix_online(struct irq_matrix *m) { struct cpumap *cm = this_cpu_ptr(m->maps); BUG_ON(cm->online); if (!cm->initialized) { cm->available = m->alloc_size; cm->available -= cm->managed + m->systembits_inalloc; cm->initialized = true; } m->global_available += cm->available; cm->online = true; m->online_maps++; trace_irq_matrix_online(m); } /** * irq_matrix_offline - Bring the local CPU matrix offline * @m: Matrix pointer */ void irq_matrix_offline(struct irq_matrix *m) { struct cpumap *cm = this_cpu_ptr(m->maps); /* Update the global available size */ m->global_available -= cm->available; cm->online = false; m->online_maps--; trace_irq_matrix_offline(m); } static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm, unsigned int num, bool managed) { unsigned int area, start = m->alloc_start; unsigned int end = m->alloc_end; bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end); bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end); area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0); if (area >= end) return area; if (managed) bitmap_set(cm->managed_map, area, num); else bitmap_set(cm->alloc_map, area, num); return area; } /* Find the best CPU which has the lowest vector allocation count */ static unsigned int matrix_find_best_cpu(struct irq_matrix *m, const struct cpumask *msk) { unsigned int cpu, best_cpu, maxavl = 0; struct cpumap *cm; best_cpu = UINT_MAX; for_each_cpu(cpu, msk) { cm = per_cpu_ptr(m->maps, cpu); if (!cm->online || cm->available <= maxavl) continue; best_cpu = cpu; maxavl = cm->available; } return best_cpu; } /* Find the best CPU which has the lowest number of managed IRQs allocated */ static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m, const struct cpumask *msk) { unsigned int cpu, best_cpu, allocated = UINT_MAX; struct cpumap *cm; best_cpu = UINT_MAX; for_each_cpu(cpu, msk) { cm = per_cpu_ptr(m->maps, cpu); if (!cm->online || cm->managed_allocated > allocated) continue; best_cpu = cpu; allocated = cm->managed_allocated; } return best_cpu; } /** * irq_matrix_assign_system - Assign system wide entry in the matrix * @m: Matrix pointer * @bit: Which bit to reserve * @replace: Replace an already allocated vector with a system * vector at the same bit position. * * The BUG_ON()s below are on purpose. If this goes wrong in the * early boot process, then the chance to survive is about zero. * If this happens when the system is live, it's not much better.
*/ void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace) { struct cpumap *cm = this_cpu_ptr(m->maps); BUG_ON(bit > m->matrix_bits); BUG_ON(m->online_maps > 1 || (m->online_maps && !replace)); set_bit(bit, m->system_map); if (replace) { BUG_ON(!test_and_clear_bit(bit, cm->alloc_map)); cm->allocated--; m->total_allocated--; } if (bit >= m->alloc_start && bit < m->alloc_end) m->systembits_inalloc++; trace_irq_matrix_assign_system(bit, m); } /** * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map * @m: Matrix pointer * @msk: On which CPUs the bits should be reserved. * * Can be called for offline CPUs. Note, this will only reserve one bit * on all CPUs in @msk, but it's not guaranteed that the bits are at the * same offset on all CPUs. */ int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk) { unsigned int cpu, failed_cpu; for_each_cpu(cpu, msk) { struct cpumap *cm = per_cpu_ptr(m->maps, cpu); unsigned int bit; bit = matrix_alloc_area(m, cm, 1, true); if (bit >= m->alloc_end) goto cleanup; cm->managed++; if (cm->online) { cm->available--; m->global_available--; } trace_irq_matrix_reserve_managed(bit, cpu, m, cm); } return 0; cleanup: failed_cpu = cpu; for_each_cpu(cpu, msk) { if (cpu == failed_cpu) break; irq_matrix_remove_managed(m, cpumask_of(cpu)); } return -ENOSPC; } /** * irq_matrix_remove_managed - Remove managed interrupts in a CPU map * @m: Matrix pointer * @msk: On which CPUs the bits should be removed * * Can be called for offline CPUs. * * This removes unallocated managed interrupts from the map. It does * not matter which one because the managed interrupts free their * allocation when they shut down. If not, the accounting is screwed, * but all that can be done at this point is to warn about it.
*/ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk) { unsigned int cpu; for_each_cpu(cpu, msk) { struct cpumap *cm = per_cpu_ptr(m->maps, cpu); unsigned int bit, end = m->alloc_end; if (WARN_ON_ONCE(!cm->managed)) continue; /* Get managed bits which are not allocated */ bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end); bit = find_first_bit(m->scratch_map, end); if (WARN_ON_ONCE(bit >= end)) continue; clear_bit(bit, cm->managed_map); cm->managed--; if (cm->online) { cm->available++; m->global_available++; } trace_irq_matrix_remove_managed(bit, cpu, m, cm); } } /** * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map * @m: Matrix pointer * @msk: Which CPUs to search in * @mapped_cpu: Pointer to store the CPU for which the irq was allocated */ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, unsigned int *mapped_cpu) { unsigned int bit, cpu, end; struct cpumap *cm; if (cpumask_empty(msk)) return -EINVAL; cpu = matrix_find_best_cpu_managed(m, msk); if (cpu == UINT_MAX) return -ENOSPC; cm = per_cpu_ptr(m->maps, cpu); end = m->alloc_end; /* Get managed bits which are not allocated */ bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end); bit = find_first_bit(m->scratch_map, end); if (bit >= end) return -ENOSPC; set_bit(bit, cm->alloc_map); cm->allocated++; cm->managed_allocated++; m->total_allocated++; *mapped_cpu = cpu; trace_irq_matrix_alloc_managed(bit, cpu, m, cm); return bit; } /** * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map * @m: Matrix pointer * @bit: Which bit to mark * * This should only be used to mark preallocated vectors */ void irq_matrix_assign(struct irq_matrix *m, unsigned int bit) { struct cpumap *cm = this_cpu_ptr(m->maps); if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end)) return; if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map))) return; cm->allocated++; m->total_allocated++; cm->available--; m->global_available--; trace_irq_matrix_assign(bit, smp_processor_id(), m, cm); } /** * irq_matrix_reserve - Reserve interrupts * @m: Matrix pointer * * This is merely a book keeping call. It increments the number of globally * reserved interrupt bits w/o actually allocating them. This allows interrupt * descriptors to be set up w/o assigning low level resources to them. * The actual allocation happens when the interrupt gets activated. */ void irq_matrix_reserve(struct irq_matrix *m) { if (m->global_reserved == m->global_available) pr_warn("Interrupt reservation exceeds available resources\n"); m->global_reserved++; trace_irq_matrix_reserve(m); } /** * irq_matrix_remove_reserved - Remove interrupt reservation * @m: Matrix pointer * * This is merely a book keeping call. It decrements the number of globally * reserved interrupt bits. This is used to undo irq_matrix_reserve() when the * interrupt was never put to use, i.e. no real vector was allocated (an * allocation would have consumed the reservation).
*/ void irq_matrix_remove_reserved(struct irq_matrix *m) { m->global_reserved--; trace_irq_matrix_remove_reserved(m); } /** * irq_matrix_alloc - Allocate a regular interrupt in a CPU map * @m: Matrix pointer * @msk: Which CPUs to search in * @reserved: Allocate previously reserved interrupts * @mapped_cpu: Pointer to store the CPU for which the irq was allocated */ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, bool reserved, unsigned int *mapped_cpu) { unsigned int cpu, bit; struct cpumap *cm; /* * Not required in theory, but matrix_find_best_cpu() uses * for_each_cpu() which ignores the cpumask on UP. */ if (cpumask_empty(msk)) return -EINVAL; cpu = matrix_find_best_cpu(m, msk); if (cpu == UINT_MAX) return -ENOSPC; cm = per_cpu_ptr(m->maps, cpu); bit = matrix_alloc_area(m, cm, 1, false); if (bit >= m->alloc_end) return -ENOSPC; cm->allocated++; cm->available--; m->total_allocated++; m->global_available--; if (reserved) m->global_reserved--; *mapped_cpu = cpu; trace_irq_matrix_alloc(bit, cpu, m, cm); return bit; } /** * irq_matrix_free - Free allocated interrupt in the matrix * @m: Matrix pointer * @cpu: Which CPU map needs to be updated * @bit: The bit to remove * @managed: If true, the interrupt is managed and not accounted * as available. */ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, unsigned int bit, bool managed) { struct cpumap *cm = per_cpu_ptr(m->maps, cpu); if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end)) return; if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map))) return; cm->allocated--; if (managed) cm->managed_allocated--; if (cm->online) m->total_allocated--; if (!managed) { cm->available++; if (cm->online) m->global_available++; } trace_irq_matrix_free(bit, cpu, m, cm); } /** * irq_matrix_available - Get the number of globally available irqs * @m: Pointer to the matrix to query * @cpudown: If true, the local CPU is about to go down, adjust * the number of available irqs accordingly */ unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown) { struct cpumap *cm = this_cpu_ptr(m->maps); if (!cpudown) return m->global_available; return m->global_available - cm->available; } /** * irq_matrix_reserved - Get the number of globally reserved irqs * @m: Pointer to the matrix to query */ unsigned int irq_matrix_reserved(struct irq_matrix *m) { return m->global_reserved; } /** * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU * @m: Pointer to the matrix to search * * This returns the number of allocated non-managed interrupts. */ unsigned int irq_matrix_allocated(struct irq_matrix *m) { struct cpumap *cm = this_cpu_ptr(m->maps); return cm->allocated - cm->managed_allocated; } #ifdef CONFIG_GENERIC_IRQ_DEBUGFS /** * irq_matrix_debug_show - Show detailed allocation information * @sf: Pointer to the seq_file to print to * @m: Pointer to the matrix allocator * @ind: Indentation for the print format * * Note, this is a lockless snapshot.
*/ void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind) { unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits); int cpu; seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps); seq_printf(sf, "Global available: %6u\n", m->global_available); seq_printf(sf, "Global reserved: %6u\n", m->global_reserved); seq_printf(sf, "Total allocated: %6u\n", m->total_allocated); seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits, m->system_map); seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " "); cpus_read_lock(); for_each_online_cpu(cpu) { struct cpumap *cm = per_cpu_ptr(m->maps, cpu); seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ", cpu, cm->available, cm->managed, cm->managed_allocated, cm->allocated, m->matrix_bits, cm->alloc_map); } cpus_read_unlock(); } #endif
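/*
 * Usage sketch (illustrative; loosely mirrors how the x86 vector allocator
 * drives this code -- the constants and variables here are placeholders):
 *
 *	struct irq_matrix *m;
 *	unsigned int cpu;
 *	int vec;
 *
 *	m = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
 *			     FIRST_SYSTEM_VECTOR);
 *	irq_matrix_online(m);		// on each CPU as it comes up
 *
 *	vec = irq_matrix_alloc(m, cpu_online_mask, false, &cpu);
 *	if (vec >= 0) {
 *		// program the interrupt to use vector 'vec' on CPU 'cpu',
 *		// then release it again when the interrupt is torn down:
 *		irq_matrix_free(m, cpu, vec, false);
 *	}
 */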
// SPDX-License-Identifier: GPL-2.0-only /* * fs/eventfd.c * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * */ #include <linux/file.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/sched/signal.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/anon_inodes.h> #include <linux/syscalls.h> #include <linux/export.h> #include <linux/kref.h> #include <linux/eventfd.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/idr.h> #include <linux/uio.h> static DEFINE_IDA(eventfd_ida); struct eventfd_ctx { struct kref kref; wait_queue_head_t wqh; /* * Every time that a write(2) is performed on an eventfd, the * value of the __u64 being written is added to "count" and a * wakeup is performed on "wqh". If the EFD_SEMAPHORE flag was not * specified, a read(2) will return the "count" value to userspace, * and will reset "count" to zero. The kernel side eventfd_signal() * also adds to the "count" counter and issues a wakeup. */ __u64 count; unsigned int flags; int id; }; /** * eventfd_signal_mask - Increment the event counter * @ctx: [in] Pointer to the eventfd context. * @mask: [in] poll mask * * This function is supposed to be called by the kernel in paths that do not * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX * value, and we signal this as an overflow condition by returning an EPOLLERR * to poll(2). */ void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask) { unsigned long flags; /* * Deadlock or stack overflow issues can happen if we recurse here * through waitqueue wakeup handlers.
If the caller uses potentially * nested waitqueues with custom wakeup handlers, then it should * check eventfd_signal_allowed() before calling this function. If * it returns false, the eventfd_signal() call should be deferred to a * safe context. */ if (WARN_ON_ONCE(current->in_eventfd)) return; spin_lock_irqsave(&ctx->wqh.lock, flags); current->in_eventfd = 1; if (ctx->count < ULLONG_MAX) ctx->count++; if (waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask); current->in_eventfd = 0; spin_unlock_irqrestore(&ctx->wqh.lock, flags); } EXPORT_SYMBOL_GPL(eventfd_signal_mask); static void eventfd_free_ctx(struct eventfd_ctx *ctx) { if (ctx->id >= 0) ida_free(&eventfd_ida, ctx->id); kfree(ctx); } static void eventfd_free(struct kref *kref) { struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref); eventfd_free_ctx(ctx); } /** * eventfd_ctx_put - Releases a reference to the internal eventfd context. * @ctx: [in] Pointer to eventfd context. * * The eventfd context reference must have been previously acquired either * with eventfd_ctx_fdget() or eventfd_ctx_fileget(). */ void eventfd_ctx_put(struct eventfd_ctx *ctx) { kref_put(&ctx->kref, eventfd_free); } EXPORT_SYMBOL_GPL(eventfd_ctx_put); static int eventfd_release(struct inode *inode, struct file *file) { struct eventfd_ctx *ctx = file->private_data; wake_up_poll(&ctx->wqh, EPOLLHUP); eventfd_ctx_put(ctx); return 0; } static __poll_t eventfd_poll(struct file *file, poll_table *wait) { struct eventfd_ctx *ctx = file->private_data; __poll_t events = 0; u64 count; poll_wait(file, &ctx->wqh, wait); /* * All writes to ctx->count occur within ctx->wqh.lock. This read * can be done outside ctx->wqh.lock because we know that poll_wait * takes that lock (through add_wait_queue) if our caller will sleep. * * The read _can_ therefore seep into add_wait_queue's critical * section, but cannot move above it! add_wait_queue's spin_lock acts * as an acquire barrier and ensures that the read be ordered properly * against the writes. The following CAN happen and is safe: * * poll write * ----------------- ------------ * lock ctx->wqh.lock (in poll_wait) * count = ctx->count * __add_wait_queue * unlock ctx->wqh.lock * lock ctx->wqh.lock * ctx->count += n * if (waitqueue_active) * wake_up_locked_poll * unlock ctx->wqh.lock * eventfd_poll returns 0 * * but the following, which would miss a wakeup, cannot happen: * * poll write * ----------------- ------------ * count = ctx->count (INVALID!) * lock ctx->wqh.lock * ctx->count += n * **waitqueue_active is false** * **no wake_up_locked_poll!** * unlock ctx->wqh.lock * lock ctx->wqh.lock (in poll_wait) * __add_wait_queue * unlock ctx->wqh.lock * eventfd_poll returns 0 */ count = READ_ONCE(ctx->count); if (count > 0) events |= EPOLLIN; if (count == ULLONG_MAX) events |= EPOLLERR; if (ULLONG_MAX - 1 > count) events |= EPOLLOUT; return events; } void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) { lockdep_assert_held(&ctx->wqh.lock); *cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count; ctx->count -= *cnt; } EXPORT_SYMBOL_GPL(eventfd_ctx_do_read); /** * eventfd_ctx_remove_wait_queue - Read the current counter and remove the wait queue. * @ctx: [in] Pointer to eventfd context. * @wait: [in] Wait queue to be removed. * @cnt: [out] Pointer to the 64-bit counter value. * * Returns %0 if successful, or the following error codes: * * -EAGAIN : The operation would have blocked.
* * This is used to atomically remove a wait queue entry from the eventfd wait * queue head, and read/reset the counter value. */ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt) { unsigned long flags; spin_lock_irqsave(&ctx->wqh.lock, flags); eventfd_ctx_do_read(ctx, cnt); __remove_wait_queue(&ctx->wqh, wait); if (*cnt != 0 && waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, EPOLLOUT); spin_unlock_irqrestore(&ctx->wqh.lock, flags); return *cnt != 0 ? 0 : -EAGAIN; } EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct eventfd_ctx *ctx = file->private_data; __u64 ucnt = 0; if (iov_iter_count(to) < sizeof(ucnt)) return -EINVAL; spin_lock_irq(&ctx->wqh.lock); if (!ctx->count) { if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) { spin_unlock_irq(&ctx->wqh.lock); return -EAGAIN; } if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) { spin_unlock_irq(&ctx->wqh.lock); return -ERESTARTSYS; } } eventfd_ctx_do_read(ctx, &ucnt); current->in_eventfd = 1; if (waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, EPOLLOUT); current->in_eventfd = 0; spin_unlock_irq(&ctx->wqh.lock); if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt))) return -EFAULT; return sizeof(ucnt); } static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct eventfd_ctx *ctx = file->private_data; ssize_t res; __u64 ucnt; if (count != sizeof(ucnt)) return -EINVAL; if (copy_from_user(&ucnt, buf, sizeof(ucnt))) return -EFAULT; if (ucnt == ULLONG_MAX) return -EINVAL; spin_lock_irq(&ctx->wqh.lock); res = -EAGAIN; if (ULLONG_MAX - ctx->count > ucnt) res = sizeof(ucnt); else if (!(file->f_flags & O_NONBLOCK)) { res = wait_event_interruptible_locked_irq(ctx->wqh, ULLONG_MAX - ctx->count > ucnt); if (!res) res = sizeof(ucnt); } if (likely(res > 0)) { ctx->count += ucnt; current->in_eventfd = 1; if (waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, EPOLLIN); current->in_eventfd = 0; } spin_unlock_irq(&ctx->wqh.lock); return res; } #ifdef CONFIG_PROC_FS static void eventfd_show_fdinfo(struct seq_file *m, struct file *f) { struct eventfd_ctx *ctx = f->private_data; __u64 cnt; spin_lock_irq(&ctx->wqh.lock); cnt = ctx->count; spin_unlock_irq(&ctx->wqh.lock); seq_printf(m, "eventfd-count: %16llx\n" "eventfd-id: %d\n" "eventfd-semaphore: %d\n", cnt, ctx->id, !!(ctx->flags & EFD_SEMAPHORE)); } #endif static const struct file_operations eventfd_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = eventfd_show_fdinfo, #endif .release = eventfd_release, .poll = eventfd_poll, .read_iter = eventfd_read, .write = eventfd_write, .llseek = noop_llseek, }; /** * eventfd_fget - Acquire a reference of an eventfd file descriptor. * @fd: [in] Eventfd file descriptor. * * Returns a pointer to the eventfd file structure in case of success, or the * following error pointer: * * -EBADF : Invalid @fd file descriptor. * -EINVAL : The @fd file descriptor is not an eventfd file. */ struct file *eventfd_fget(int fd) { struct file *file; file = fget(fd); if (!file) return ERR_PTR(-EBADF); if (file->f_op != &eventfd_fops) { fput(file); return ERR_PTR(-EINVAL); } return file; } EXPORT_SYMBOL_GPL(eventfd_fget); /** * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context. * @fd: [in] Eventfd file descriptor. 
* * Returns a pointer to the internal eventfd context, otherwise the error * pointers returned by the following functions: * * eventfd_fget */ struct eventfd_ctx *eventfd_ctx_fdget(int fd) { CLASS(fd, f)(fd); if (fd_empty(f)) return ERR_PTR(-EBADF); return eventfd_ctx_fileget(fd_file(f)); } EXPORT_SYMBOL_GPL(eventfd_ctx_fdget); /** * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context. * @file: [in] Eventfd file pointer. * * Returns a pointer to the internal eventfd context, otherwise the error * pointer: * * -EINVAL : The @file pointer is not an eventfd file. */ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file) { struct eventfd_ctx *ctx; if (file->f_op != &eventfd_fops) return ERR_PTR(-EINVAL); ctx = file->private_data; kref_get(&ctx->kref); return ctx; } EXPORT_SYMBOL_GPL(eventfd_ctx_fileget); static int do_eventfd(unsigned int count, int flags) { struct eventfd_ctx *ctx; struct file *file; int fd; /* Check the EFD_* constants for consistency. */ BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK); BUILD_BUG_ON(EFD_SEMAPHORE != (1 << 0)); if (flags & ~EFD_FLAGS_SET) return -EINVAL; ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; kref_init(&ctx->kref); init_waitqueue_head(&ctx->wqh); ctx->count = count; ctx->flags = flags; ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL); flags &= EFD_SHARED_FCNTL_FLAGS; flags |= O_RDWR; fd = get_unused_fd_flags(flags); if (fd < 0) goto err; file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags); if (IS_ERR(file)) { put_unused_fd(fd); fd = PTR_ERR(file); goto err; } file->f_mode |= FMODE_NOWAIT; fd_install(fd, file); return fd; err: eventfd_free_ctx(ctx); return fd; } SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) { return do_eventfd(count, flags); } SYSCALL_DEFINE1(eventfd, unsigned int, count) { return do_eventfd(count, 0); }
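/*
 * Editorial note: a minimal userspace sketch (not part of fs/eventfd.c)
 * illustrating the counter semantics implemented above. Each write(2) adds
 * the supplied 64-bit value to "count"; a read(2) returns the accumulated
 * total and resets it to zero (EFD_SEMAPHORE mode would instead return 1
 * and decrement).
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	uint64_t val;
	int efd = eventfd(0, 0);	/* initial count 0, no flags */

	if (efd < 0)
		return 1;
	val = 3;
	if (write(efd, &val, sizeof(val)) != sizeof(val))	/* count: 0 -> 3 */
		return 1;
	val = 4;
	if (write(efd, &val, sizeof(val)) != sizeof(val))	/* count: 3 -> 7 */
		return 1;
	if (read(efd, &val, sizeof(val)) != sizeof(val))	/* returns 7, resets to 0 */
		return 1;
	printf("read %llu\n", (unsigned long long)val);		/* prints "read 7" */
	close(efd);
	return 0;
}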
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <drm/drm_edid.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" #include "drm_displayid_internal.h" static const struct displayid_header * displayid_get_header(const u8 *displayid, int length, int index) { const struct displayid_header *base; if (sizeof(*base) > length - index) return ERR_PTR(-EINVAL); base = (const struct displayid_header *)&displayid[index]; return base; } static const struct displayid_header * validate_displayid(const u8 *displayid, int length, int idx) { int i, dispid_length; u8 csum = 0; const struct displayid_header *base; base = displayid_get_header(displayid, length, idx); if (IS_ERR(base)) return base; /* +1 for DispID checksum */ dispid_length = sizeof(*base) + base->bytes + 1; if (dispid_length > length - idx) return ERR_PTR(-EINVAL); for (i = 0; i < dispid_length; i++) csum += displayid[idx + i]; if (csum) { DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); return ERR_PTR(-EINVAL); } return base; } static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid, int *length, int *idx, int *ext_index) { const struct displayid_header *base; const u8 *displayid; displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index); if (!displayid) return NULL; /* EDID extensions block checksum isn't for us */ *length = EDID_LENGTH - 1; *idx = 1; base = validate_displayid(displayid, *length, *idx); if (IS_ERR(base)) return NULL; *length = *idx + sizeof(*base) + base->bytes; return displayid; } void displayid_iter_edid_begin(const struct drm_edid *drm_edid, struct displayid_iter *iter) { memset(iter, 0, sizeof(*iter)); iter->drm_edid = drm_edid; } static const struct displayid_block * displayid_iter_block(const struct displayid_iter *iter) { const struct displayid_block *block; if (!iter->section) return NULL; block = (const struct displayid_block *)&iter->section[iter->idx]; if (iter->idx + sizeof(*block) <= iter->length && iter->idx + sizeof(*block) + block->num_bytes <= iter->length) return block; return NULL; } const struct displayid_block * __displayid_iter_next(struct displayid_iter *iter) { const struct displayid_block *block; if (!iter->drm_edid) return NULL; if (iter->section) { /* current block should always be valid */ block = displayid_iter_block(iter); if (WARN_ON(!block)) { iter->section = NULL; iter->drm_edid = NULL; return NULL; } /* next block in section */ iter->idx += sizeof(*block) + block->num_bytes; block = displayid_iter_block(iter); if (block) return block; } for (;;) { /* The first section we encounter is the base section */ bool base_section = !iter->section; iter->section = drm_find_displayid_extension(iter->drm_edid, &iter->length, &iter->idx, &iter->ext_index); if (!iter->section) { iter->drm_edid = NULL; return NULL; } /* Save the structure version and primary use case.
*/ if (base_section) { const struct displayid_header *base; base = displayid_get_header(iter->section, iter->length, iter->idx); if (!IS_ERR(base)) { iter->version = base->rev; iter->primary_use = base->prod_id; } } iter->idx += sizeof(struct displayid_header); block = displayid_iter_block(iter); if (block) return block; } } void displayid_iter_end(struct displayid_iter *iter) { memset(iter, 0, sizeof(*iter)); } /* DisplayID Structure Version/Revision from the Base Section. */ u8 displayid_version(const struct displayid_iter *iter) { return iter->version; } /* * DisplayID Primary Use Case (2.0+) or Product Type Identifier (1.0-1.3) from * the Base Section. */ u8 displayid_primary_use(const struct displayid_iter *iter) { return iter->primary_use; }
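/*
 * Editorial note: a hypothetical caller sketch (not part of this file)
 * showing the begin/next/end life cycle of the iterator above; in-tree
 * users wrap the __displayid_iter_next() loop in a helper macro, but the
 * shape is the same.
 */
static void example_walk_displayid(const struct drm_edid *drm_edid)
{
	struct displayid_iter iter;
	const struct displayid_block *block;

	displayid_iter_edid_begin(drm_edid, &iter);
	while ((block = __displayid_iter_next(&iter))) {
		/* each data block carries a tag, revision and payload size */
		DRM_DEBUG_KMS("DisplayID block tag 0x%02x, %u payload bytes\n",
			      block->tag, block->num_bytes);
	}
	displayid_iter_end(&iter);
}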
// SPDX-License-Identifier: GPL-2.0-only /* * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/slab.h> #include <net/ip.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("iptables filter table"); #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \ (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT)) static const struct xt_table packet_filter = { .name = "filter", .valid_hooks = FILTER_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV4, .priority = NF_IP_PRI_FILTER, }; static struct nf_hook_ops *filter_ops __read_mostly; /* Default to forward because I got too much mail already. */ static bool forward __read_mostly = true; module_param(forward, bool, 0000); static int iptable_filter_table_init(struct net *net) { struct ipt_replace *repl; int err; repl = ipt_alloc_initial_table(&packet_filter); if (repl == NULL) return -ENOMEM; /* Entry 1 is the FORWARD hook */ ((struct ipt_standard *)repl->entries)[1].target.verdict = forward ? -NF_ACCEPT - 1 : -NF_DROP - 1; err = ipt_register_table(net, &packet_filter, repl, filter_ops); kfree(repl); return err; } static int __net_init iptable_filter_net_init(struct net *net) { if (!forward) return iptable_filter_table_init(net); return 0; } static void __net_exit iptable_filter_net_pre_exit(struct net *net) { ipt_unregister_table_pre_exit(net, "filter"); } static void __net_exit iptable_filter_net_exit(struct net *net) { ipt_unregister_table_exit(net, "filter"); } static struct pernet_operations iptable_filter_net_ops = { .init = iptable_filter_net_init, .pre_exit = iptable_filter_net_pre_exit, .exit = iptable_filter_net_exit, }; static int __init iptable_filter_init(void) { int ret = xt_register_template(&packet_filter, iptable_filter_table_init); if (ret < 0) return ret; filter_ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table); if (IS_ERR(filter_ops)) { xt_unregister_template(&packet_filter); return PTR_ERR(filter_ops); } ret = register_pernet_subsys(&iptable_filter_net_ops); if (ret < 0) { xt_unregister_template(&packet_filter); kfree(filter_ops); return ret; } return 0; } static void __exit iptable_filter_fini(void) { unregister_pernet_subsys(&iptable_filter_net_ops); xt_unregister_template(&packet_filter); kfree(filter_ops); } module_init(iptable_filter_init); module_exit(iptable_filter_fini);
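/*
 * Editorial aside (not from the original file): standard iptables verdicts
 * are stored as -(verdict) - 1 so that non-negative values stay available
 * as jump offsets within the ruleset; hence "-NF_ACCEPT - 1" for the
 * FORWARD entry above. A standalone sketch of that encoding, with the
 * constant values mirroring linux/netfilter.h:
 */
#include <stdio.h>

#define NF_DROP   0	/* values as in linux/netfilter.h */
#define NF_ACCEPT 1

static int encode_verdict(int verdict)
{
	return -verdict - 1;	/* NF_ACCEPT -> -2, NF_DROP -> -1 */
}

static int decode_verdict(int stored)
{
	return -stored - 1;	/* the mapping is its own inverse */
}

int main(void)
{
	printf("accept encodes to %d, drop to %d\n",
	       encode_verdict(NF_ACCEPT), encode_verdict(NF_DROP));
	printf("round-trip: %d\n", decode_verdict(encode_verdict(NF_ACCEPT)));
	return 0;
}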
// SPDX-License-Identifier: GPL-2.0-only /* * VMware vSockets Driver * * Copyright (C) 2007-2012 VMware, Inc. All rights reserved. */ #include <linux/types.h> #include <linux/socket.h> #include <linux/stddef.h> #include <net/sock.h> #include <net/vsock_addr.h> void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port) { memset(addr, 0, sizeof(*addr)); addr->svm_family = AF_VSOCK; addr->svm_cid = cid; addr->svm_port = port; } EXPORT_SYMBOL_GPL(vsock_addr_init); int vsock_addr_validate(const struct sockaddr_vm *addr) { __u8 svm_valid_flags = VMADDR_FLAG_TO_HOST; if (!addr) return -EFAULT; if (addr->svm_family != AF_VSOCK) return -EAFNOSUPPORT; if (addr->svm_flags & ~svm_valid_flags) return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(vsock_addr_validate); bool vsock_addr_bound(const struct sockaddr_vm *addr) { return addr->svm_port != VMADDR_PORT_ANY; } EXPORT_SYMBOL_GPL(vsock_addr_bound); void vsock_addr_unbind(struct sockaddr_vm *addr) { vsock_addr_init(addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); } EXPORT_SYMBOL_GPL(vsock_addr_unbind); bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, const struct sockaddr_vm *other) { return addr->svm_cid == other->svm_cid && addr->svm_port == other->svm_port; } EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); int vsock_addr_cast(const struct sockaddr *addr, size_t len, struct sockaddr_vm **out_addr) { if (len < sizeof(**out_addr)) return -EFAULT; *out_addr = (struct sockaddr_vm *)addr; return vsock_addr_validate(*out_addr); } EXPORT_SYMBOL_GPL(vsock_addr_cast);
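/*
 * Editorial note: a minimal userspace sketch (assumes running in a guest
 * with a vsock transport loaded; the port number is made up) showing the
 * sockaddr_vm layout that the helpers above initialize and validate.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr;
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&addr, 0, sizeof(addr));	/* mirrors vsock_addr_init() */
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_HOST;	/* CID 2: the host side */
	addr.svm_port = 1234;		/* hypothetical service port */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");
	close(fd);
	return 0;
}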
/* * Copyright (c) Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_COMMON_CPU_H #define ZSTD_COMMON_CPU_H /* * Implementation taken from folly/CpuId.h * https://github.com/facebook/folly/blob/master/folly/CpuId.h */ #include "mem.h" typedef struct { U32 f1c; U32 f1d; U32 f7b; U32 f7c; } ZSTD_cpuid_t; MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { U32 f1c = 0; U32 f1d = 0; U32 f7b = 0; U32 f7c = 0; #if defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__) /* The following block is like the normal cpuid branch below, but gcc * reserves ebx for use of its pic register so we must specially * handle the save and restore to avoid clobbering the register */ U32 n; __asm__( "pushl %%ebx\n\t" "cpuid\n\t" "popl %%ebx\n\t" : "=a"(n) : "a"(0) : "ecx", "edx"); if (n >= 1) { U32 f1a; __asm__( "pushl %%ebx\n\t" "cpuid\n\t" "popl %%ebx\n\t" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1)); } if (n >= 7) { __asm__( "pushl %%ebx\n\t" "cpuid\n\t" "movl %%ebx, %%eax\n\t" "popl %%ebx" : "=a"(f7b), "=c"(f7c) : "a"(7), "c"(0) : "edx"); } #elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__) U32 n; __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx"); if (n >= 1) { U32 f1a; __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx"); } if (n >= 7) { U32 f7a; __asm__("cpuid" : "=a"(f7a), "=b"(f7b), "=c"(f7c) : "a"(7), "c"(0) : "edx"); } #endif { ZSTD_cpuid_t cpuid; cpuid.f1c = f1c; cpuid.f1d = f1d; cpuid.f7b = f7b; cpuid.f7c = f7c; return cpuid; } } #define X(name, r, bit) \ MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \ return ((cpuid.r) & (1U << bit)) != 0; \ } /* cpuid(1): Processor Info and Feature Bits. */ #define C(name, bit) X(name, f1c, bit) C(sse3, 0) C(pclmuldq, 1) C(dtes64, 2) C(monitor, 3) C(dscpl, 4) C(vmx, 5) C(smx, 6) C(eist, 7) C(tm2, 8) C(ssse3, 9) C(cnxtid, 10) C(fma, 12) C(cx16, 13) C(xtpr, 14) C(pdcm, 15) C(pcid, 17) C(dca, 18) C(sse41, 19) C(sse42, 20) C(x2apic, 21) C(movbe, 22) C(popcnt, 23) C(tscdeadline, 24) C(aes, 25) C(xsave, 26) C(osxsave, 27) C(avx, 28) C(f16c, 29) C(rdrand, 30) #undef C #define D(name, bit) X(name, f1d, bit) D(fpu, 0) D(vme, 1) D(de, 2) D(pse, 3) D(tsc, 4) D(msr, 5) D(pae, 6) D(mce, 7) D(cx8, 8) D(apic, 9) D(sep, 11) D(mtrr, 12) D(pge, 13) D(mca, 14) D(cmov, 15) D(pat, 16) D(pse36, 17) D(psn, 18) D(clfsh, 19) D(ds, 21) D(acpi, 22) D(mmx, 23) D(fxsr, 24) D(sse, 25) D(sse2, 26) D(ss, 27) D(htt, 28) D(tm, 29) D(pbe, 31) #undef D /* cpuid(7): Extended Features.
*/ #define B(name, bit) X(name, f7b, bit) B(bmi1, 3) B(hle, 4) B(avx2, 5) B(smep, 7) B(bmi2, 8) B(erms, 9) B(invpcid, 10) B(rtm, 11) B(mpx, 14) B(avx512f, 16) B(avx512dq, 17) B(rdseed, 18) B(adx, 19) B(smap, 20) B(avx512ifma, 21) B(pcommit, 22) B(clflushopt, 23) B(clwb, 24) B(avx512pf, 26) B(avx512er, 27) B(avx512cd, 28) B(sha, 29) B(avx512bw, 30) B(avx512vl, 31) #undef B #define C(name, bit) X(name, f7c, bit) C(prefetchwt1, 0) C(avx512vbmi, 1) #undef C #undef X #endif /* ZSTD_COMMON_CPU_H */
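/*
 * Editorial note: a small standalone sketch (not part of the header)
 * exercising the predicates generated by the X()/C()/D()/B() macros above;
 * e.g. B(avx2, 5) expands to a ZSTD_cpuid_avx2() accessor testing bit 5
 * of the cpuid(7) EBX word.
 */
#include <stdio.h>
#include "cpu.h"	/* this header; pulls in mem.h for U32/MEM_STATIC */

int main(void)
{
	ZSTD_cpuid_t const cpuid = ZSTD_cpuid();

	printf("sse2: %d\n", ZSTD_cpuid_sse2(cpuid));
	printf("avx2: %d\n", ZSTD_cpuid_avx2(cpuid));
	printf("bmi2: %d\n", ZSTD_cpuid_bmi2(cpuid));
	return 0;
}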
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/usb.h> #include <linux/usb/hcd.h> #include "usb.h" static int uas_is_interface(struct usb_host_interface *intf) { return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE && intf->desc.bInterfaceSubClass == USB_SC_SCSI && intf->desc.bInterfaceProtocol == USB_PR_UAS); } static struct usb_host_interface *uas_find_uas_alt_setting( struct usb_interface *intf) { int i; for (i = 0; i < intf->num_altsetting; i++) { struct usb_host_interface *alt = &intf->altsetting[i]; if (uas_is_interface(alt)) return alt; } return NULL; } static int uas_find_endpoints(struct usb_host_interface *alt, struct usb_host_endpoint *eps[]) { struct usb_host_endpoint *endpoint = alt->endpoint; unsigned i, n_endpoints = alt->desc.bNumEndpoints; for (i = 0; i < n_endpoints; i++) { unsigned char *extra = endpoint[i].extra; int len = endpoint[i].extralen; while (len >= 3) { if (extra[1] == USB_DT_PIPE_USAGE) { unsigned pipe_id = extra[2]; if (pipe_id > 0 && pipe_id < 5) eps[pipe_id - 1] = &endpoint[i]; break; } len -= extra[0]; extra += extra[0]; } } if (!eps[0] || !eps[1] || !eps[2] || !eps[3]) return -ENODEV; return 0; } static int uas_use_uas_driver(struct usb_interface *intf, const struct usb_device_id *id, u64 *flags_ret) { struct usb_host_endpoint *eps[4] = { }; struct usb_device *udev = interface_to_usbdev(intf); struct usb_hcd *hcd = bus_to_hcd(udev->bus); u64 flags = id->driver_info; struct usb_host_interface *alt; int r; alt = uas_find_uas_alt_setting(intf); if (!alt) return 0; r = uas_find_endpoints(alt, eps); if (r < 0) return 0; /* * ASMedia has a number of usb3 to sata bridge chips, at the time of * this writing the following versions exist: * ASM1051 - no uas support version * ASM1051 - with broken (*) uas support * ASM1053 - with working uas support, but problems with large xfers * ASM1153 - with working uas support * * Devices with these chips re-use a number of device-ids over the * entire line, so the device-id is useless to determine if we're * dealing with an ASM1051 (which we want to avoid). * * The ASM1153 can be identified by config.MaxPower == 0, * whereas the ASM105x models have config.MaxPower == 36. * * Differentiating between the ASM1053 and ASM1051 is trickier, when * connected over USB-3 we can look at the number of streams supported, * ASM1051 supports 32 streams, whereas early ASM1053 versions support * 16 streams, newer ASM1053-s also support 32 streams, but have a * different prod-id.
* * (*) ASM1051 chips do work with UAS with some disks (with the * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 || le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) { if (udev->actconfig->desc.bMaxPower == 0) { /* ASM1153, do nothing */ } else if (udev->speed < USB_SPEED_SUPER) { /* No streams info, assume ASM1051 */ flags |= US_FL_IGNORE_UAS; } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { /* Possibly an ASM1051, disable uas */ flags |= US_FL_IGNORE_UAS; } else { /* ASM1053, these have issues with large transfers */ flags |= US_FL_MAX_SECTORS_240; } } /* All Seagate disk enclosures have broken ATA pass-through support */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) flags |= US_FL_NO_ATA_1X; /* * The RTL9210-based MD202 enclosure from HIKSEMI reportedly has issues * with UAS. This isn't distinguishable with just idVendor and * idProduct, use manufacturer and product too. * * Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda && le16_to_cpu(udev->descriptor.idProduct) == 0x9210 && (udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) && (udev->product && !strcmp(udev->product, "MD202"))) flags |= US_FL_IGNORE_UAS; usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { dev_warn(&udev->dev, "UAS is ignored for this device, using usb-storage instead\n"); return 0; } if (udev->bus->sg_tablesize == 0) { dev_warn(&udev->dev, "The driver for the USB controller %s does not support scatter-gather which is\n", hcd->driver->description); dev_warn(&udev->dev, "required by the UAS driver. Please try another USB controller if you wish to use UAS.\n"); return 0; } if (udev->speed >= USB_SPEED_SUPER && !hcd->can_do_streams) { dev_warn(&udev->dev, "USB controller %s does not support streams, which are required by the UAS driver.\n", hcd_to_bus(hcd)->bus_name); dev_warn(&udev->dev, "Please try another USB controller if you wish to use UAS.\n"); return 0; } if (flags_ret) *flags_ret = flags; return 1; }
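/*
 * Editorial aside: a standalone sketch (with a made-up descriptor blob) of
 * the extra-descriptor walk used by uas_find_endpoints() above. Byte 0 of
 * every USB descriptor is bLength and byte 1 is bDescriptorType; per the
 * UAS spec, the Pipe Usage descriptor carries the pipe id in byte 2
 * (1 = command, 2 = status, 3 = data-in, 4 = data-out). The type value is
 * redefined locally here so the sketch builds without kernel headers.
 */
#include <stdio.h>

#define USB_DT_PIPE_USAGE 0x24

int main(void)
{
	/* two concatenated descriptors: a dummy one, then a pipe usage one */
	const unsigned char extra[] = {
		0x05, 0x30, 0x00, 0x00, 0x00,		/* 5 bytes, type 0x30 */
		0x04, USB_DT_PIPE_USAGE, 0x02, 0x00,	/* pipe id 2: status */
	};
	int len = sizeof(extra);
	const unsigned char *p = extra;

	while (len >= 3) {
		if (p[1] == USB_DT_PIPE_USAGE) {
			printf("pipe id %u\n", p[2]);	/* prints "pipe id 2" */
			break;
		}
		len -= p[0];	/* skip to the next descriptor */
		p += p[0];
	}
	return 0;
}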
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/befs/debug.c * * Copyright (C) 2001 Will Dyson (will_dyson at pobox.com) * * With help from the ntfs-tng driver by Anton Altparmakov * * Copyright (C) 1999 Makoto Kato (m_kato@ga2.so-net.ne.jp) * * debug functions */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #ifdef __KERNEL__ #include <linux/stdarg.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/slab.h> #endif /* __KERNEL__ */ #include "befs.h" void befs_error(const struct super_block *sb, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_err("(%s): %pV\n", sb->s_id, &vaf); va_end(args); } void befs_warning(const struct super_block *sb, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_warn("(%s): %pV\n", sb->s_id, &vaf); va_end(args); } void befs_debug(const struct super_block *sb, const char *fmt, ...)
{ #ifdef CONFIG_BEFS_DEBUG struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_debug("(%s): %pV\n", sb->s_id, &vaf); va_end(args); #endif //CONFIG_BEFS_DEBUG } void befs_dump_inode(const struct super_block *sb, befs_inode *inode) { #ifdef CONFIG_BEFS_DEBUG befs_block_run tmp_run; befs_debug(sb, "befs_inode information"); befs_debug(sb, " magic1 %08x", fs32_to_cpu(sb, inode->magic1)); tmp_run = fsrun_to_cpu(sb, inode->inode_num); befs_debug(sb, " inode_num %u, %hu, %hu", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " uid %u", fs32_to_cpu(sb, inode->uid)); befs_debug(sb, " gid %u", fs32_to_cpu(sb, inode->gid)); befs_debug(sb, " mode %08x", fs32_to_cpu(sb, inode->mode)); befs_debug(sb, " flags %08x", fs32_to_cpu(sb, inode->flags)); befs_debug(sb, " create_time %llu", fs64_to_cpu(sb, inode->create_time)); befs_debug(sb, " last_modified_time %llu", fs64_to_cpu(sb, inode->last_modified_time)); tmp_run = fsrun_to_cpu(sb, inode->parent); befs_debug(sb, " parent [%u, %hu, %hu]", tmp_run.allocation_group, tmp_run.start, tmp_run.len); tmp_run = fsrun_to_cpu(sb, inode->attributes); befs_debug(sb, " attributes [%u, %hu, %hu]", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " type %08x", fs32_to_cpu(sb, inode->type)); befs_debug(sb, " inode_size %u", fs32_to_cpu(sb, inode->inode_size)); if (S_ISLNK(fs32_to_cpu(sb, inode->mode))) { befs_debug(sb, " Symbolic link [%s]", inode->data.symlink); } else { int i; for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; i++) { tmp_run = fsrun_to_cpu(sb, inode->data.datastream.direct[i]); befs_debug(sb, " direct %d [%u, %hu, %hu]", i, tmp_run.allocation_group, tmp_run.start, tmp_run.len); } befs_debug(sb, " max_direct_range %llu", fs64_to_cpu(sb, inode->data.datastream. max_direct_range)); tmp_run = fsrun_to_cpu(sb, inode->data.datastream.indirect); befs_debug(sb, " indirect [%u, %hu, %hu]", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " max_indirect_range %llu", fs64_to_cpu(sb, inode->data.datastream. max_indirect_range)); tmp_run = fsrun_to_cpu(sb, inode->data.datastream.double_indirect); befs_debug(sb, " double indirect [%u, %hu, %hu]", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " max_double_indirect_range %llu", fs64_to_cpu(sb, inode->data.datastream. max_double_indirect_range)); befs_debug(sb, " size %llu", fs64_to_cpu(sb, inode->data.datastream.size)); } #endif //CONFIG_BEFS_DEBUG } /* * Display super block structure for debug. 
*/ void befs_dump_super_block(const struct super_block *sb, befs_super_block *sup) { #ifdef CONFIG_BEFS_DEBUG befs_block_run tmp_run; befs_debug(sb, "befs_super_block information"); befs_debug(sb, " name %s", sup->name); befs_debug(sb, " magic1 %08x", fs32_to_cpu(sb, sup->magic1)); befs_debug(sb, " fs_byte_order %08x", fs32_to_cpu(sb, sup->fs_byte_order)); befs_debug(sb, " block_size %u", fs32_to_cpu(sb, sup->block_size)); befs_debug(sb, " block_shift %u", fs32_to_cpu(sb, sup->block_shift)); befs_debug(sb, " num_blocks %llu", fs64_to_cpu(sb, sup->num_blocks)); befs_debug(sb, " used_blocks %llu", fs64_to_cpu(sb, sup->used_blocks)); befs_debug(sb, " inode_size %u", fs32_to_cpu(sb, sup->inode_size)); befs_debug(sb, " magic2 %08x", fs32_to_cpu(sb, sup->magic2)); befs_debug(sb, " blocks_per_ag %u", fs32_to_cpu(sb, sup->blocks_per_ag)); befs_debug(sb, " ag_shift %u", fs32_to_cpu(sb, sup->ag_shift)); befs_debug(sb, " num_ags %u", fs32_to_cpu(sb, sup->num_ags)); befs_debug(sb, " flags %08x", fs32_to_cpu(sb, sup->flags)); tmp_run = fsrun_to_cpu(sb, sup->log_blocks); befs_debug(sb, " log_blocks %u, %hu, %hu", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " log_start %lld", fs64_to_cpu(sb, sup->log_start)); befs_debug(sb, " log_end %lld", fs64_to_cpu(sb, sup->log_end)); befs_debug(sb, " magic3 %08x", fs32_to_cpu(sb, sup->magic3)); tmp_run = fsrun_to_cpu(sb, sup->root_dir); befs_debug(sb, " root_dir %u, %hu, %hu", tmp_run.allocation_group, tmp_run.start, tmp_run.len); tmp_run = fsrun_to_cpu(sb, sup->indices); befs_debug(sb, " indices %u, %hu, %hu", tmp_run.allocation_group, tmp_run.start, tmp_run.len); #endif //CONFIG_BEFS_DEBUG } #if 0 /* unused */ void befs_dump_small_data(const struct super_block *sb, befs_small_data *sd) { } /* unused */ void befs_dump_run(const struct super_block *sb, befs_disk_block_run run) { #ifdef CONFIG_BEFS_DEBUG befs_block_run n = fsrun_to_cpu(sb, run); befs_debug(sb, "[%u, %hu, %hu]", n.allocation_group, n.start, n.len); #endif //CONFIG_BEFS_DEBUG } #endif /* 0 */ void befs_dump_index_entry(const struct super_block *sb, befs_disk_btree_super *super) { #ifdef CONFIG_BEFS_DEBUG befs_debug(sb, "Btree super structure"); befs_debug(sb, " magic %08x", fs32_to_cpu(sb, super->magic)); befs_debug(sb, " node_size %u", fs32_to_cpu(sb, super->node_size)); befs_debug(sb, " max_depth %08x", fs32_to_cpu(sb, super->max_depth)); befs_debug(sb, " data_type %08x", fs32_to_cpu(sb, super->data_type)); befs_debug(sb, " root_node_pointer %016LX", fs64_to_cpu(sb, super->root_node_ptr)); befs_debug(sb, " free_node_pointer %016LX", fs64_to_cpu(sb, super->free_node_ptr)); befs_debug(sb, " maximum size %016LX", fs64_to_cpu(sb, super->max_size)); #endif //CONFIG_BEFS_DEBUG } void befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead *node) { #ifdef CONFIG_BEFS_DEBUG befs_debug(sb, "Btree node structure"); befs_debug(sb, " left %016LX", fs64_to_cpu(sb, node->left)); befs_debug(sb, " right %016LX", fs64_to_cpu(sb, node->right)); befs_debug(sb, " overflow %016LX", fs64_to_cpu(sb, node->overflow)); befs_debug(sb, " all_key_count %hu", fs16_to_cpu(sb, node->all_key_count)); befs_debug(sb, " all_key_length %hu", fs16_to_cpu(sb, node->all_key_length)); #endif //CONFIG_BEFS_DEBUG }
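/*
 * Editorial note: the helpers above all follow one pattern -- capture the
 * caller's varargs once and hand the (fmt, va_list) pair to a single back
 * end, in the kernel via struct va_format and the %pV specifier. A
 * userspace analog of that forwarding technique (vfprintf standing in
 * for %pV):
 */
#include <stdarg.h>
#include <stdio.h>

static void my_error(const char *id, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fprintf(stderr, "(%s): ", id);	/* prefix, as in pr_err("(%s): %pV\n") */
	vfprintf(stderr, fmt, args);	/* one back end consumes the va_list */
	fputc('\n', stderr);
	va_end(args);
}

int main(void)
{
	my_error("sda1", "bad inode %d", 42);
	return 0;
}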
// SPDX-License-Identifier: GPL-2.0 /* * drivers/base/devres.c - device resource management * * Copyright (c) 2006 SUSE Linux Products GmbH * Copyright (c) 2006 Tejun Heo <teheo@suse.de> */ #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/percpu.h> #include <asm/sections.h> #include "base.h" #include "trace.h" struct devres_node { struct list_head entry; dr_release_t release; const char *name; size_t size; }; struct devres { struct devres_node node; /* * Some archs want to perform DMA into kmalloc caches * and need a guaranteed alignment larger than * the alignment of a 64-bit integer. * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same * alignment for struct devres when allocated by kmalloc().
*/ u8 __aligned(ARCH_DMA_MINALIGN) data[]; }; struct devres_group { struct devres_node node[2]; void *id; int color; /* -- 8 pointers */ }; static void set_node_dbginfo(struct devres_node *node, const char *name, size_t size) { node->name = name; node->size = size; } #ifdef CONFIG_DEBUG_DEVRES static int log_devres = 0; module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR); static void devres_dbg(struct device *dev, struct devres_node *node, const char *op) { if (unlikely(log_devres)) dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n", op, node, node->name, node->size); } #else /* CONFIG_DEBUG_DEVRES */ #define devres_dbg(dev, node, op) do {} while (0) #endif /* CONFIG_DEBUG_DEVRES */ static void devres_log(struct device *dev, struct devres_node *node, const char *op) { trace_devres_log(dev, op, node, node->name, node->size); devres_dbg(dev, node, op); } /* * Release functions for devres group. These callbacks are used only * for identification. */ static void group_open_release(struct device *dev, void *res) { /* noop */ } static void group_close_release(struct device *dev, void *res) { /* noop */ } static struct devres_group *node_to_group(struct devres_node *node) { if (node->release == &group_open_release) return container_of(node, struct devres_group, node[0]); if (node->release == &group_close_release) return container_of(node, struct devres_group, node[1]); return NULL; } static bool check_dr_size(size_t size, size_t *tot_size) { /* We must catch any near-SIZE_MAX cases that could overflow. */ if (unlikely(check_add_overflow(sizeof(struct devres), size, tot_size))) return false; /* Actually allocate the full kmalloc bucket size. */ *tot_size = kmalloc_size_roundup(*tot_size); return true; } static __always_inline struct devres *alloc_dr(dr_release_t release, size_t size, gfp_t gfp, int nid) { size_t tot_size; struct devres *dr; if (!check_dr_size(size, &tot_size)) return NULL; dr = kmalloc_node_track_caller(tot_size, gfp, nid); if (unlikely(!dr)) return NULL; /* No need to clear memory twice */ if (!(gfp & __GFP_ZERO)) memset(dr, 0, offsetof(struct devres, data)); INIT_LIST_HEAD(&dr->node.entry); dr->node.release = release; return dr; } static void add_dr(struct device *dev, struct devres_node *node) { devres_log(dev, node, "ADD"); BUG_ON(!list_empty(&node->entry)); list_add_tail(&node->entry, &dev->devres_head); } static void replace_dr(struct device *dev, struct devres_node *old, struct devres_node *new) { devres_log(dev, old, "REPLACE"); BUG_ON(!list_empty(&new->entry)); list_replace(&old->entry, &new->entry); } /** * __devres_alloc_node - Allocate device resource data * @release: Release function devres will be associated with * @size: Allocation size * @gfp: Allocation flags * @nid: NUMA node * @name: Name of the resource * * Allocate devres of @size bytes. The allocated area is zeroed, then * associated with @release. The returned pointer can be passed to * other devres_*() functions. * * RETURNS: * Pointer to allocated devres on success, NULL on failure. 
*/ void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name) { struct devres *dr; dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid); if (unlikely(!dr)) return NULL; set_node_dbginfo(&dr->node, name, size); return dr->data; } EXPORT_SYMBOL_GPL(__devres_alloc_node); /** * devres_for_each_res - Resource iterator * @dev: Device to iterate resource from * @release: Look for resources associated with this release function * @match: Match function (optional) * @match_data: Data for the match function * @fn: Function to be called for each matched resource. * @data: Data for @fn, the 3rd parameter of @fn * * Call @fn for each devres of @dev which is associated with @release * and for which @match returns 1. * * RETURNS: * void */ void devres_for_each_res(struct device *dev, dr_release_t release, dr_match_t match, void *match_data, void (*fn)(struct device *, void *, void *), void *data) { struct devres_node *node; struct devres_node *tmp; unsigned long flags; if (!fn) return; spin_lock_irqsave(&dev->devres_lock, flags); list_for_each_entry_safe_reverse(node, tmp, &dev->devres_head, entry) { struct devres *dr = container_of(node, struct devres, node); if (node->release != release) continue; if (match && !match(dev, dr->data, match_data)) continue; fn(dev, dr->data, data); } spin_unlock_irqrestore(&dev->devres_lock, flags); } EXPORT_SYMBOL_GPL(devres_for_each_res); /** * devres_free - Free device resource data * @res: Pointer to devres data to free * * Free devres created with devres_alloc(). */ void devres_free(void *res) { if (res) { struct devres *dr = container_of(res, struct devres, data); BUG_ON(!list_empty(&dr->node.entry)); kfree(dr); } } EXPORT_SYMBOL_GPL(devres_free); /** * devres_add - Register device resource * @dev: Device to add resource to * @res: Resource to register * * Register devres @res to @dev. @res should have been allocated * using devres_alloc(). On driver detach, the associated release * function will be invoked and devres will be freed automatically. */ void devres_add(struct device *dev, void *res) { struct devres *dr = container_of(res, struct devres, data); unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); add_dr(dev, &dr->node); spin_unlock_irqrestore(&dev->devres_lock, flags); } EXPORT_SYMBOL_GPL(devres_add); static struct devres *find_dr(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { struct devres_node *node; list_for_each_entry_reverse(node, &dev->devres_head, entry) { struct devres *dr = container_of(node, struct devres, node); if (node->release != release) continue; if (match && !match(dev, dr->data, match_data)) continue; return dr; } return NULL; } /** * devres_find - Find device resource * @dev: Device to lookup resource from * @release: Look for resources associated with this release function * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev which is associated with @release * and for which @match returns 1. If @match is NULL, it's considered * to match all. * * RETURNS: * Pointer to found devres, NULL if not found. 
*/ void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { struct devres *dr; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); dr = find_dr(dev, release, match, match_data); spin_unlock_irqrestore(&dev->devres_lock, flags); if (dr) return dr->data; return NULL; } EXPORT_SYMBOL_GPL(devres_find); /** * devres_get - Find devres, if non-existent, add one atomically * @dev: Device to lookup or add devres for * @new_res: Pointer to new initialized devres to add if not found * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev which has the same release function * as @new_res and for which @match return 1. If found, @new_res is * freed; otherwise, @new_res is added atomically. * * RETURNS: * Pointer to found or added devres. */ void *devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data) { struct devres *new_dr = container_of(new_res, struct devres, data); struct devres *dr; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); dr = find_dr(dev, new_dr->node.release, match, match_data); if (!dr) { add_dr(dev, &new_dr->node); dr = new_dr; new_res = NULL; } spin_unlock_irqrestore(&dev->devres_lock, flags); devres_free(new_res); return dr->data; } EXPORT_SYMBOL_GPL(devres_get); /** * devres_remove - Find a device resource and remove it * @dev: Device to find resource from * @release: Look for resources associated with this release function * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev associated with @release and for * which @match returns 1. If @match is NULL, it's considered to * match all. If found, the resource is removed atomically and * returned. * * RETURNS: * Pointer to removed devres on success, NULL if not found. */ void *devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { struct devres *dr; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); dr = find_dr(dev, release, match, match_data); if (dr) { list_del_init(&dr->node.entry); devres_log(dev, &dr->node, "REM"); } spin_unlock_irqrestore(&dev->devres_lock, flags); if (dr) return dr->data; return NULL; } EXPORT_SYMBOL_GPL(devres_remove); /** * devres_destroy - Find a device resource and destroy it * @dev: Device to find resource from * @release: Look for resources associated with this release function * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev associated with @release and for * which @match returns 1. If @match is NULL, it's considered to * match all. If found, the resource is removed atomically and freed. * * Note that the release function for the resource will not be called, * only the devres-allocated data will be freed. The caller becomes * responsible for freeing any other data. * * RETURNS: * 0 if devres is found and freed, -ENOENT if not found. 
*/ int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { void *res; res = devres_remove(dev, release, match, match_data); if (unlikely(!res)) return -ENOENT; devres_free(res); return 0; } EXPORT_SYMBOL_GPL(devres_destroy); /** * devres_release - Find a device resource and destroy it, calling release * @dev: Device to find resource from * @release: Look for resources associated with this release function * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev associated with @release and for * which @match returns 1. If @match is NULL, it's considered to * match all. If found, the resource is removed atomically, the * release function called and the resource freed. * * RETURNS: * 0 if devres is found and freed, -ENOENT if not found. */ int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { void *res; res = devres_remove(dev, release, match, match_data); if (unlikely(!res)) return -ENOENT; (*release)(dev, res); devres_free(res); return 0; } EXPORT_SYMBOL_GPL(devres_release); static int remove_nodes(struct device *dev, struct list_head *first, struct list_head *end, struct list_head *todo) { struct devres_node *node, *n; int cnt = 0, nr_groups = 0; /* First pass - move normal devres entries to @todo and clear * devres_group colors. */ node = list_entry(first, struct devres_node, entry); list_for_each_entry_safe_from(node, n, end, entry) { struct devres_group *grp; grp = node_to_group(node); if (grp) { /* clear color of group markers in the first pass */ grp->color = 0; nr_groups++; } else { /* regular devres entry */ if (&node->entry == first) first = first->next; list_move_tail(&node->entry, todo); cnt++; } } if (!nr_groups) return cnt; /* Second pass - Scan groups and color them. A group gets * color value of two iff the group is wholly contained in * [current node, end). That is, for a closed group, both opening * and closing markers should be in the range, while just the * opening marker is enough for an open group. */ node = list_entry(first, struct devres_node, entry); list_for_each_entry_safe_from(node, n, end, entry) { struct devres_group *grp; grp = node_to_group(node); BUG_ON(!grp || list_empty(&grp->node[0].entry)); grp->color++; if (list_empty(&grp->node[1].entry)) grp->color++; BUG_ON(grp->color <= 0 || grp->color > 2); if (grp->color == 2) { /* No need to update current node or end. The removed * nodes are always before both. */ list_move_tail(&grp->node[0].entry, todo); list_del_init(&grp->node[1].entry); } } return cnt; } static void release_nodes(struct device *dev, struct list_head *todo) { struct devres *dr, *tmp; /* Release. Note that both devres and devres_group are * handled as devres in the following loop. This is safe. */ list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) { devres_log(dev, &dr->node, "REL"); dr->node.release(dev, dr->data); kfree(dr); } } /** * devres_release_all - Release all managed resources * @dev: Device to release resources for * * Release all resources associated with @dev. This function is * called on driver detach. 
*/ int devres_release_all(struct device *dev) { unsigned long flags; LIST_HEAD(todo); int cnt; /* Looks like an uninitialized device structure */ if (WARN_ON(dev->devres_head.next == NULL)) return -ENODEV; /* Nothing to release if list is empty */ if (list_empty(&dev->devres_head)) return 0; spin_lock_irqsave(&dev->devres_lock, flags); cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo); spin_unlock_irqrestore(&dev->devres_lock, flags); release_nodes(dev, &todo); return cnt; } /** * devres_open_group - Open a new devres group * @dev: Device to open devres group for * @id: Separator ID * @gfp: Allocation flags * * Open a new devres group for @dev with @id. For @id, using a * pointer to an object which won't be used for another group is * recommended. If @id is NULL, address-wise unique ID is created. * * RETURNS: * ID of the new group, NULL on failure. */ void *devres_open_group(struct device *dev, void *id, gfp_t gfp) { struct devres_group *grp; unsigned long flags; grp = kmalloc(sizeof(*grp), gfp); if (unlikely(!grp)) return NULL; grp->node[0].release = &group_open_release; grp->node[1].release = &group_close_release; INIT_LIST_HEAD(&grp->node[0].entry); INIT_LIST_HEAD(&grp->node[1].entry); set_node_dbginfo(&grp->node[0], "grp<", 0); set_node_dbginfo(&grp->node[1], "grp>", 0); grp->id = grp; if (id) grp->id = id; grp->color = 0; spin_lock_irqsave(&dev->devres_lock, flags); add_dr(dev, &grp->node[0]); spin_unlock_irqrestore(&dev->devres_lock, flags); return grp->id; } EXPORT_SYMBOL_GPL(devres_open_group); /* Find devres group with ID @id. If @id is NULL, look for the latest. */ static struct devres_group *find_group(struct device *dev, void *id) { struct devres_node *node; list_for_each_entry_reverse(node, &dev->devres_head, entry) { struct devres_group *grp; if (node->release != &group_open_release) continue; grp = container_of(node, struct devres_group, node[0]); if (id) { if (grp->id == id) return grp; } else if (list_empty(&grp->node[1].entry)) return grp; } return NULL; } /** * devres_close_group - Close a devres group * @dev: Device to close devres group for * @id: ID of target group, can be NULL * * Close the group identified by @id. If @id is NULL, the latest open * group is selected. */ void devres_close_group(struct device *dev, void *id) { struct devres_group *grp; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); grp = find_group(dev, id); if (grp) add_dr(dev, &grp->node[1]); else WARN_ON(1); spin_unlock_irqrestore(&dev->devres_lock, flags); } EXPORT_SYMBOL_GPL(devres_close_group); /** * devres_remove_group - Remove a devres group * @dev: Device to remove group for * @id: ID of target group, can be NULL * * Remove the group identified by @id. If @id is NULL, the latest * open group is selected. Note that removing a group doesn't affect * any other resources. */ void devres_remove_group(struct device *dev, void *id) { struct devres_group *grp; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); grp = find_group(dev, id); if (grp) { list_del_init(&grp->node[0].entry); list_del_init(&grp->node[1].entry); devres_log(dev, &grp->node[0], "REM"); } else WARN_ON(1); spin_unlock_irqrestore(&dev->devres_lock, flags); kfree(grp); } EXPORT_SYMBOL_GPL(devres_remove_group); /** * devres_release_group - Release resources in a devres group * @dev: Device to release group for * @id: ID of target group, can be NULL * * Release all resources in the group identified by @id. If @id is * NULL, the latest open group is selected. 
The selected group and * groups properly nested inside the selected group are removed. * * RETURNS: * The number of released non-group resources. */ int devres_release_group(struct device *dev, void *id) { struct devres_group *grp; unsigned long flags; LIST_HEAD(todo); int cnt = 0; spin_lock_irqsave(&dev->devres_lock, flags); grp = find_group(dev, id); if (grp) { struct list_head *first = &grp->node[0].entry; struct list_head *end = &dev->devres_head; if (!list_empty(&grp->node[1].entry)) end = grp->node[1].entry.next; cnt = remove_nodes(dev, first, end, &todo); spin_unlock_irqrestore(&dev->devres_lock, flags); release_nodes(dev, &todo); } else { WARN_ON(1); spin_unlock_irqrestore(&dev->devres_lock, flags); } return cnt; } EXPORT_SYMBOL_GPL(devres_release_group); /* * Custom devres actions allow inserting a simple function call * into the teardown sequence. */ struct action_devres { void *data; void (*action)(void *); }; static int devm_action_match(struct device *dev, void *res, void *p) { struct action_devres *devres = res; struct action_devres *target = p; return devres->action == target->action && devres->data == target->data; } static void devm_action_release(struct device *dev, void *res) { struct action_devres *devres = res; devres->action(devres->data); } /** * __devm_add_action() - add a custom action to list of managed resources * @dev: Device that owns the action * @action: Function that should be called * @data: Pointer to data passed to @action implementation * @name: Name of the resource (for debugging purposes) * * This adds a custom action to the list of managed resources so that * it gets executed as part of standard resource unwinding. */ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name) { struct action_devres *devres; devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres), GFP_KERNEL, NUMA_NO_NODE, name); if (!devres) return -ENOMEM; devres->data = data; devres->action = action; devres_add(dev, devres); return 0; } EXPORT_SYMBOL_GPL(__devm_add_action); /** * devm_remove_action() - removes previously added custom action * @dev: Device that owns the action * @action: Function implementing the action * @data: Pointer to data passed to @action implementation * * Removes instance of @action previously added by devm_add_action(). * Both action and data should match one of the existing entries. */ void devm_remove_action(struct device *dev, void (*action)(void *), void *data) { struct action_devres devres = { .data = data, .action = action, }; WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match, &devres)); } EXPORT_SYMBOL_GPL(devm_remove_action); /** * devm_release_action() - release previously added custom action * @dev: Device that owns the action * @action: Function implementing the action * @data: Pointer to data passed to @action implementation * * Releases and removes instance of @action previously added by * devm_add_action(). Both action and data should match one of the * existing entries. 
*/ void devm_release_action(struct device *dev, void (*action)(void *), void *data) { struct action_devres devres = { .data = data, .action = action, }; WARN_ON(devres_release(dev, devm_action_release, devm_action_match, &devres)); } EXPORT_SYMBOL_GPL(devm_release_action); /* * Managed kmalloc/kfree */ static void devm_kmalloc_release(struct device *dev, void *res) { /* noop */ } static int devm_kmalloc_match(struct device *dev, void *res, void *data) { return res == data; } /** * devm_kmalloc - Resource-managed kmalloc * @dev: Device to allocate memory for * @size: Allocation size * @gfp: Allocation gfp flags * * Managed kmalloc. Memory allocated with this function is * automatically freed on driver detach. Like all other devres * resources, guaranteed alignment is unsigned long long. * * RETURNS: * Pointer to allocated memory on success, NULL on failure. */ void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) { struct devres *dr; if (unlikely(!size)) return ZERO_SIZE_PTR; /* use raw alloc_dr for kmalloc caller tracing */ dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev)); if (unlikely(!dr)) return NULL; /* * This is named devm_kzalloc_release for historical reasons * The initial implementation did not support kmalloc, only kzalloc */ set_node_dbginfo(&dr->node, "devm_kzalloc_release", size); devres_add(dev, dr->data); return dr->data; } EXPORT_SYMBOL_GPL(devm_kmalloc); /** * devm_krealloc - Resource-managed krealloc() * @dev: Device to re-allocate memory for * @ptr: Pointer to the memory chunk to re-allocate * @new_size: New allocation size * @gfp: Allocation gfp flags * * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc(). * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR, * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't * change the order in which the release callback for the re-alloc'ed devres * will be called (except when falling back to devm_kmalloc() or when freeing * resources when new_size is zero). The contents of the memory are preserved * up to the lesser of new and old sizes. */ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp) { size_t total_new_size, total_old_size; struct devres *old_dr, *new_dr; unsigned long flags; if (unlikely(!new_size)) { devm_kfree(dev, ptr); return ZERO_SIZE_PTR; } if (unlikely(ZERO_OR_NULL_PTR(ptr))) return devm_kmalloc(dev, new_size, gfp); if (WARN_ON(is_kernel_rodata((unsigned long)ptr))) /* * We cannot reliably realloc a const string returned by * devm_kstrdup_const(). */ return NULL; if (!check_dr_size(new_size, &total_new_size)) return NULL; total_old_size = ksize(container_of(ptr, struct devres, data)); if (total_old_size == 0) { WARN(1, "Pointer doesn't point to dynamically allocated memory."); return NULL; } /* * If new size is smaller or equal to the actual number of bytes * allocated previously - just return the same pointer. */ if (total_new_size <= total_old_size) return ptr; /* * Otherwise: allocate new, larger chunk. We need to allocate before * taking the lock as most probably the caller uses GFP_KERNEL. * alloc_dr() will call check_dr_size() to reserve extra memory * for struct devres automatically, so size @new_size user request * is delivered to it directly as devm_kmalloc() does. 
*/ new_dr = alloc_dr(devm_kmalloc_release, new_size, gfp, dev_to_node(dev)); if (!new_dr) return NULL; /* * The spinlock protects the linked list against concurrent * modifications but not the resource itself. */ spin_lock_irqsave(&dev->devres_lock, flags); old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr); if (!old_dr) { spin_unlock_irqrestore(&dev->devres_lock, flags); kfree(new_dr); WARN(1, "Memory chunk not managed or managed by a different device."); return NULL; } replace_dr(dev, &old_dr->node, &new_dr->node); spin_unlock_irqrestore(&dev->devres_lock, flags); /* * We can copy the memory contents after releasing the lock as we're * no longer modifying the list links. */ memcpy(new_dr->data, old_dr->data, total_old_size - offsetof(struct devres, data)); /* * Same for releasing the old devres - it's now been removed from the * list. This is also the reason why we must not use devm_kfree() - the * links are no longer valid. */ kfree(old_dr); return new_dr->data; } EXPORT_SYMBOL_GPL(devm_krealloc); /** * devm_kstrdup - Allocate resource managed space and * copy an existing string into that. * @dev: Device to allocate memory for * @s: the string to duplicate * @gfp: the GFP mask used in the devm_kmalloc() call when * allocating memory * RETURNS: * Pointer to allocated string on success, NULL on failure. */ char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) { size_t size; char *buf; if (!s) return NULL; size = strlen(s) + 1; buf = devm_kmalloc(dev, size, gfp); if (buf) memcpy(buf, s, size); return buf; } EXPORT_SYMBOL_GPL(devm_kstrdup); /** * devm_kstrdup_const - resource managed conditional string duplication * @dev: device for which to duplicate the string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Strings allocated by devm_kstrdup_const will be automatically freed when * the associated device is detached. * * RETURNS: * Source string if it is in .rodata section otherwise it falls back to * devm_kstrdup. */ const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp) { if (is_kernel_rodata((unsigned long)s)) return s; return devm_kstrdup(dev, s, gfp); } EXPORT_SYMBOL_GPL(devm_kstrdup_const); /** * devm_kvasprintf - Allocate resource managed space and format a string * into that. * @dev: Device to allocate memory for * @gfp: the GFP mask used in the devm_kmalloc() call when * allocating memory * @fmt: The printf()-style format string * @ap: Arguments for the format string * RETURNS: * Pointer to allocated string on success, NULL on failure. */ char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) { unsigned int len; char *p; va_list aq; va_copy(aq, ap); len = vsnprintf(NULL, 0, fmt, aq); va_end(aq); p = devm_kmalloc(dev, len+1, gfp); if (!p) return NULL; vsnprintf(p, len+1, fmt, ap); return p; } EXPORT_SYMBOL(devm_kvasprintf); /** * devm_kasprintf - Allocate resource managed space and format a string * into that. * @dev: Device to allocate memory for * @gfp: the GFP mask used in the devm_kmalloc() call when * allocating memory * @fmt: The printf()-style format string * @...: Arguments for the format string * RETURNS: * Pointer to allocated string on success, NULL on failure. */ char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 
{ va_list ap; char *p; va_start(ap, fmt); p = devm_kvasprintf(dev, gfp, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL_GPL(devm_kasprintf); /** * devm_kfree - Resource-managed kfree * @dev: Device this memory belongs to * @p: Memory to free * * Free memory allocated with devm_kmalloc(). */ void devm_kfree(struct device *dev, const void *p) { int rc; /* * Special cases: pointer to a string in .rodata returned by * devm_kstrdup_const() or NULL/ZERO ptr. */ if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p))) return; rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, (void *)p); WARN_ON(rc); } EXPORT_SYMBOL_GPL(devm_kfree); /** * devm_kmemdup - Resource-managed kmemdup * @dev: Device this memory belongs to * @src: Memory region to duplicate * @len: Memory region length * @gfp: GFP mask to use * * Duplicate region of a memory using resource managed kmalloc */ void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) { void *p; p = devm_kmalloc(dev, len, gfp); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL_GPL(devm_kmemdup); struct pages_devres { unsigned long addr; unsigned int order; }; static int devm_pages_match(struct device *dev, void *res, void *p) { struct pages_devres *devres = res; struct pages_devres *target = p; return devres->addr == target->addr; } static void devm_pages_release(struct device *dev, void *res) { struct pages_devres *devres = res; free_pages(devres->addr, devres->order); } /** * devm_get_free_pages - Resource-managed __get_free_pages * @dev: Device to allocate memory for * @gfp_mask: Allocation gfp flags * @order: Allocation size is (1 << order) pages * * Managed get_free_pages. Memory allocated with this function is * automatically freed on driver detach. * * RETURNS: * Address of allocated memory on success, 0 on failure. */ unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order) { struct pages_devres *devres; unsigned long addr; addr = __get_free_pages(gfp_mask, order); if (unlikely(!addr)) return 0; devres = devres_alloc(devm_pages_release, sizeof(struct pages_devres), GFP_KERNEL); if (unlikely(!devres)) { free_pages(addr, order); return 0; } devres->addr = addr; devres->order = order; devres_add(dev, devres); return addr; } EXPORT_SYMBOL_GPL(devm_get_free_pages); /** * devm_free_pages - Resource-managed free_pages * @dev: Device this memory belongs to * @addr: Memory to free * * Free memory allocated with devm_get_free_pages(). Unlike free_pages, * there is no need to supply the @order. */ void devm_free_pages(struct device *dev, unsigned long addr) { struct pages_devres devres = { .addr = addr }; WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match, &devres)); } EXPORT_SYMBOL_GPL(devm_free_pages); static void devm_percpu_release(struct device *dev, void *pdata) { void __percpu *p; p = *(void __percpu **)pdata; free_percpu(p); } static int devm_percpu_match(struct device *dev, void *data, void *p) { struct devres *devr = container_of(data, struct devres, data); return *(void **)devr->data == p; } /** * __devm_alloc_percpu - Resource-managed alloc_percpu * @dev: Device to allocate per-cpu memory for * @size: Size of per-cpu memory to allocate * @align: Alignment of per-cpu memory to allocate * * Managed alloc_percpu. Per-cpu memory allocated with this function is * automatically freed on driver detach. * * RETURNS: * Pointer to allocated memory on success, NULL on failure. 
*/ void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, size_t align) { void *p; void __percpu *pcpu; pcpu = __alloc_percpu(size, align); if (!pcpu) return NULL; p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL); if (!p) { free_percpu(pcpu); return NULL; } *(void __percpu **)p = pcpu; devres_add(dev, p); return pcpu; } EXPORT_SYMBOL_GPL(__devm_alloc_percpu); /** * devm_free_percpu - Resource-managed free_percpu * @dev: Device this memory belongs to * @pdata: Per-cpu memory to free * * Free memory allocated with devm_alloc_percpu(). */ void devm_free_percpu(struct device *dev, void __percpu *pdata) { /* * Use devres_release() to prevent memory leakage as * devm_free_pages() does. */ WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match, (void *)(__force unsigned long)pdata)); } EXPORT_SYMBOL_GPL(devm_free_percpu);
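/*
 * Editor's note - a minimal, illustrative sketch (not part of this file)
 * of how a driver typically consumes the devres APIs above. The names
 * my_chip, my_chip_poweroff() and my_probe() are hypothetical; the calls
 * devm_kzalloc() and devm_add_action_or_release() are the real managed
 * APIs implemented/declared above and in <linux/device.h>.
 */
#if 0	/* example only, never compiled */
struct my_chip {
	void __iomem *regs;
};

static void my_chip_poweroff(void *data)
{
	struct my_chip *chip = data;

	/* Hypothetical cleanup step that must run on driver detach. */
	writel(0, chip->regs);
}

static int my_probe(struct device *dev)
{
	struct my_chip *chip;

	/* Freed automatically when @dev is detached - no kfree() needed. */
	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/*
	 * Runs my_chip_poweroff(chip) during devres teardown, in reverse
	 * registration order; if registering the action fails, the action
	 * is invoked immediately and an error is returned.
	 */
	return devm_add_action_or_release(dev, my_chip_poweroff, chip);
}
#endif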
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #include <linux/unaligned.h> /* * Pressure-threshold module param code from Alex Perry <alex.perry@ieee.org> */ MODULE_AUTHOR("Josh Myer <josh@joshisanerd.com>"); MODULE_DESCRIPTION("USB KB Gear JamStudio Tablet driver"); MODULE_LICENSE("GPL"); #define USB_VENDOR_ID_KBGEAR 0x084e static int kb_pressure_click = 0x10; module_param(kb_pressure_click, int, 0); MODULE_PARM_DESC(kb_pressure_click, "pressure threshold for clicks"); struct kbtab { unsigned char *data; dma_addr_t data_dma; struct input_dev *dev; struct usb_interface *intf; struct urb *irq; char phys[32]; }; static void kbtab_irq(struct urb *urb) { struct kbtab *kbtab = urb->context; unsigned char *data = kbtab->data; struct input_dev *dev = kbtab->dev; int pressure; int retval; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&kbtab->intf->dev, "%s - urb shutting down with status: %d\n", __func__, urb->status); return; default: dev_dbg(&kbtab->intf->dev, "%s - nonzero urb status received: %d\n", __func__, urb->status); goto exit; } input_report_key(dev, BTN_TOOL_PEN, 1); input_report_abs(dev, ABS_X, get_unaligned_le16(&data[1])); input_report_abs(dev, ABS_Y, get_unaligned_le16(&data[3])); /*input_report_key(dev, BTN_TOUCH , data[0] & 0x01);*/ input_report_key(dev, BTN_RIGHT, data[0] & 0x02); pressure = data[5]; if (kb_pressure_click == -1) input_report_abs(dev, ABS_PRESSURE, pressure); else input_report_key(dev, BTN_LEFT, pressure > kb_pressure_click ?
1 : 0); input_sync(dev); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&kbtab->intf->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static const struct usb_device_id kbtab_ids[] = { { USB_DEVICE(USB_VENDOR_ID_KBGEAR, 0x1001), .driver_info = 0 }, { } }; MODULE_DEVICE_TABLE(usb, kbtab_ids); static int kbtab_open(struct input_dev *dev) { struct kbtab *kbtab = input_get_drvdata(dev); struct usb_device *udev = interface_to_usbdev(kbtab->intf); kbtab->irq->dev = udev; if (usb_submit_urb(kbtab->irq, GFP_KERNEL)) return -EIO; return 0; } static void kbtab_close(struct input_dev *dev) { struct kbtab *kbtab = input_get_drvdata(dev); usb_kill_urb(kbtab->irq); } static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct kbtab *kbtab; struct input_dev *input_dev; int error = -ENOMEM; if (intf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; endpoint = &intf->cur_altsetting->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) return -ENODEV; kbtab = kzalloc(sizeof(*kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) goto fail1; kbtab->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &kbtab->data_dma); if (!kbtab->data) goto fail1; kbtab->irq = usb_alloc_urb(0, GFP_KERNEL); if (!kbtab->irq) goto fail2; kbtab->intf = intf; kbtab->dev = input_dev; usb_make_path(dev, kbtab->phys, sizeof(kbtab->phys)); strlcat(kbtab->phys, "/input0", sizeof(kbtab->phys)); input_dev->name = "KB Gear Tablet"; input_dev->phys = kbtab->phys; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, kbtab); input_dev->open = kbtab_open; input_dev->close = kbtab_close; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 0, 0x2000, 4, 0); input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0); usb_fill_int_urb(kbtab->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), kbtab->data, 8, kbtab_irq, kbtab, endpoint->bInterval); kbtab->irq->transfer_dma = kbtab->data_dma; kbtab->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = input_register_device(kbtab->dev); if (error) goto fail3; usb_set_intfdata(intf, kbtab); return 0; fail3: usb_free_urb(kbtab->irq); fail2: usb_free_coherent(dev, 8, kbtab->data, kbtab->data_dma); fail1: input_free_device(input_dev); kfree(kbtab); return error; } static void kbtab_disconnect(struct usb_interface *intf) { struct kbtab *kbtab = usb_get_intfdata(intf); struct usb_device *udev = interface_to_usbdev(intf); usb_set_intfdata(intf, NULL); input_unregister_device(kbtab->dev); usb_free_urb(kbtab->irq); usb_free_coherent(udev, 8, kbtab->data, kbtab->data_dma); kfree(kbtab); } static struct usb_driver kbtab_driver = { .name = "kbtab", .probe = kbtab_probe, .disconnect = kbtab_disconnect, .id_table = kbtab_ids, }; module_usb_driver(kbtab_driver);
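/*
 * Editor's note - usage sketch (not part of this file): kb_pressure_click
 * is a plain module parameter, so the click threshold can be chosen at
 * load time, e.g.:
 *
 *	modprobe kbtab kb_pressure_click=32
 *
 * Passing -1 makes kbtab_irq() report the raw ABS_PRESSURE axis instead
 * of synthesizing BTN_LEFT click events from the threshold comparison.
 */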
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM nilfs2 #if !defined(_TRACE_NILFS2_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_NILFS2_H #include <linux/tracepoint.h> struct nilfs_sc_info; #define show_collection_stage(type) \ __print_symbolic(type, \ { NILFS_ST_INIT, "ST_INIT" }, \ { NILFS_ST_GC, "ST_GC" }, \ { NILFS_ST_FILE, "ST_FILE" }, \ { NILFS_ST_IFILE, "ST_IFILE" }, \ { NILFS_ST_CPFILE, "ST_CPFILE" }, \ { NILFS_ST_SUFILE, "ST_SUFILE" }, \ { NILFS_ST_DAT, "ST_DAT" }, \ { NILFS_ST_SR, "ST_SR" }, \ { NILFS_ST_DSYNC, "ST_DSYNC" }, \ { NILFS_ST_DONE, "ST_DONE"}) TRACE_EVENT(nilfs2_collection_stage_transition, TP_PROTO(struct nilfs_sc_info *sci), TP_ARGS(sci), TP_STRUCT__entry( __field(void *, sci) __field(int, stage) ), TP_fast_assign( __entry->sci = sci; __entry->stage = sci->sc_stage.scnt; ), TP_printk("sci = %p stage = %s", __entry->sci, show_collection_stage(__entry->stage)) ); #ifndef TRACE_HEADER_MULTI_READ enum nilfs2_transaction_transition_state { TRACE_NILFS2_TRANSACTION_BEGIN, TRACE_NILFS2_TRANSACTION_COMMIT, TRACE_NILFS2_TRANSACTION_ABORT, TRACE_NILFS2_TRANSACTION_TRYLOCK, TRACE_NILFS2_TRANSACTION_LOCK, TRACE_NILFS2_TRANSACTION_UNLOCK, }; #endif #define show_transaction_state(type) \ __print_symbolic(type, \ { TRACE_NILFS2_TRANSACTION_BEGIN, "BEGIN" }, \ { TRACE_NILFS2_TRANSACTION_COMMIT, "COMMIT" }, \ { TRACE_NILFS2_TRANSACTION_ABORT, "ABORT" }, \ { TRACE_NILFS2_TRANSACTION_TRYLOCK, "TRYLOCK" }, \ { TRACE_NILFS2_TRANSACTION_LOCK, "LOCK" }, \ { TRACE_NILFS2_TRANSACTION_UNLOCK, "UNLOCK" }) TRACE_EVENT(nilfs2_transaction_transition, TP_PROTO(struct super_block *sb, struct nilfs_transaction_info *ti, int count, unsigned int flags, enum nilfs2_transaction_transition_state state), TP_ARGS(sb, ti, count, flags, state), TP_STRUCT__entry( __field(void *, sb) __field(void *, ti) __field(int, count) __field(unsigned int, flags) __field(int, state) ), TP_fast_assign( __entry->sb = sb; __entry->ti = ti; __entry->count = count; __entry->flags = flags; __entry->state = state; ), TP_printk("sb = %p ti = %p count = %d flags = %x state = %s", __entry->sb, __entry->ti, __entry->count, __entry->flags, show_transaction_state(__entry->state)) ); TRACE_EVENT(nilfs2_segment_usage_check, TP_PROTO(struct inode *sufile, __u64 segnum, unsigned long cnt), TP_ARGS(sufile, segnum, cnt), TP_STRUCT__entry( __field(struct inode *, sufile) __field(__u64, segnum) __field(unsigned long, cnt) ), TP_fast_assign( __entry->sufile = sufile; __entry->segnum = segnum; __entry->cnt = cnt; ), TP_printk("sufile = %p segnum = %llu cnt = %lu", __entry->sufile, __entry->segnum, __entry->cnt) ); TRACE_EVENT(nilfs2_segment_usage_allocated, TP_PROTO(struct inode *sufile,
__u64 segnum), TP_ARGS(sufile, segnum), TP_STRUCT__entry( __field(struct inode *, sufile) __field(__u64, segnum) ), TP_fast_assign( __entry->sufile = sufile; __entry->segnum = segnum; ), TP_printk("sufile = %p segnum = %llu", __entry->sufile, __entry->segnum) ); TRACE_EVENT(nilfs2_segment_usage_freed, TP_PROTO(struct inode *sufile, __u64 segnum), TP_ARGS(sufile, segnum), TP_STRUCT__entry( __field(struct inode *, sufile) __field(__u64, segnum) ), TP_fast_assign( __entry->sufile = sufile; __entry->segnum = segnum; ), TP_printk("sufile = %p segnum = %llu", __entry->sufile, __entry->segnum) ); TRACE_EVENT(nilfs2_mdt_insert_new_block, TP_PROTO(struct inode *inode, unsigned long ino, unsigned long block), TP_ARGS(inode, ino, block), TP_STRUCT__entry( __field(struct inode *, inode) __field(unsigned long, ino) __field(unsigned long, block) ), TP_fast_assign( __entry->inode = inode; __entry->ino = ino; __entry->block = block; ), TP_printk("inode = %p ino = %lu block = %lu", __entry->inode, __entry->ino, __entry->block) ); TRACE_EVENT(nilfs2_mdt_submit_block, TP_PROTO(struct inode *inode, unsigned long ino, unsigned long blkoff, enum req_op mode), TP_ARGS(inode, ino, blkoff, mode), TP_STRUCT__entry( __field(struct inode *, inode) __field(unsigned long, ino) __field(unsigned long, blkoff) /* * Use field_struct() to avoid is_signed_type() on the * bitwise type enum req_op. */ __field_struct(enum req_op, mode) ), TP_fast_assign( __entry->inode = inode; __entry->ino = ino; __entry->blkoff = blkoff; __entry->mode = mode; ), TP_printk("inode = %p ino = %lu blkoff = %lu mode = %x", __entry->inode, __entry->ino, __entry->blkoff, __entry->mode) ); #endif /* _TRACE_NILFS2_H */ /* This part must be outside protection */ #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE nilfs2 #include <trace/define_trace.h>
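/*
 * Editor's note - illustrative call site (not part of this header): each
 * TRACE_EVENT(name, ...) above expands to a trace_<name>() helper once
 * <trace/define_trace.h> is processed. A hypothetical caller in the
 * nilfs2 metadata code would emit the nilfs2_mdt_submit_block event like
 * this, with the arguments matching TP_PROTO() exactly:
 */
#if 0	/* example only, never compiled */
static void example_emit(struct inode *inode, unsigned long ino,
			 unsigned long blkoff, enum req_op mode)
{
	/* No-op unless the tracepoint is enabled at runtime. */
	trace_nilfs2_mdt_submit_block(inode, ino, blkoff, mode);
}
#endif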
// SPDX-License-Identifier:
GPL-2.0 /* * linux/fs/file.c * * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes * * Manage the dynamic fd arrays in the process files_struct. */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/close_range.h> #include <linux/file_ref.h> #include <net/sock.h> #include <linux/init_task.h> #include "internal.h" /** * __file_ref_put - Slowpath of file_ref_put() * @ref: Pointer to the reference count * @cnt: Current reference count * * Invoked when the reference count is outside of the valid zone. * * Return: * True if this was the last reference with no future references * possible. This signals the caller that it can safely schedule the * object, which is protected by the reference counter, for * deconstruction. * * False if there are still active references or the put() raced * with a concurrent get()/put() pair. Caller is not allowed to * deconstruct the protected object. */ bool __file_ref_put(file_ref_t *ref, unsigned long cnt) { /* Did this drop the last reference? */ if (likely(cnt == FILE_REF_NOREF)) { /* * Carefully try to set the reference count to FILE_REF_DEAD. * * This can fail if a concurrent get() operation has * elevated it again or the corresponding put() even marked * it dead already. Both are valid situations and do not * require a retry. If this fails the caller is not * allowed to deconstruct the object. */ if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD)) return false; /* * The caller can safely schedule the object for * deconstruction. Provide acquire ordering. */ smp_acquire__after_ctrl_dep(); return true; } /* * If the reference count was already in the dead zone, then this * put() operation is imbalanced. Warn, put the reference count back to * DEAD and tell the caller to not deconstruct the object. */ if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) { atomic_long_set(&ref->refcnt, FILE_REF_DEAD); return false; } /* * This is a put() operation on a saturated refcount. Restore the * mean saturation value and tell the caller to not deconstruct the * object. */ if (cnt > FILE_REF_MAXREF) atomic_long_set(&ref->refcnt, FILE_REF_SATURATED); return false; } EXPORT_SYMBOL_GPL(__file_ref_put); unsigned int sysctl_nr_open __read_mostly = 1024*1024; unsigned int sysctl_nr_open_min = BITS_PER_LONG; /* our min() is unusable in constant expressions ;-/ */ #define __const_min(x, y) ((x) < (y) ? (x) : (y)) unsigned int sysctl_nr_open_max = __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG; static void __free_fdtable(struct fdtable *fdt) { kvfree(fdt->fd); kvfree(fdt->open_fds); kfree(fdt); } static void free_fdtable_rcu(struct rcu_head *rcu) { __free_fdtable(container_of(rcu, struct fdtable, rcu)); } #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr)) #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long)) #define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds /* * Copy 'count' fd bits from the old table to the new table and clear the extra * space if any. This does not copy the file pointers. Called with the files * spinlock held for write. 
*/ static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, unsigned int copy_words) { unsigned int nwords = fdt_words(nfdt); bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds, copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec, copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits, copy_words, nwords); } /* * Copy all file descriptors from the old table to the new, expanded table and * clear the extra space. Called with the files spinlock held for write. */ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) { size_t cpy, set; BUG_ON(nfdt->max_fds < ofdt->max_fds); cpy = ofdt->max_fds * sizeof(struct file *); set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *); memcpy(nfdt->fd, ofdt->fd, cpy); memset((char *)nfdt->fd + cpy, 0, set); copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt)); } /* * Note how the fdtable bitmap allocations very much have to be a multiple of * BITS_PER_LONG. This is not only because we walk those things in chunks of * 'unsigned long' in some places, but simply because that is how the Linux * kernel bitmaps are defined to work: they are not "bits in an array of bytes", * they are very much "bits in an array of unsigned long". */ static struct fdtable *alloc_fdtable(unsigned int slots_wanted) { struct fdtable *fdt; unsigned int nr; void *data; /* * Figure out how many fds we actually want to support in this fdtable. * Allocation steps are keyed to the size of the fdarray, since it * grows far faster than any of the other dynamic data. We try to fit * the fdarray into comfortable page-tuned chunks: starting at 1024B * and growing in powers of two from there on. Since we called only * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab * already gives BITS_PER_LONG slots), the above boils down to * 1. use the smallest power of two large enough to give us that many * slots. * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is * 256 slots (i.e. 1Kb fd array). * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there * and we are never going to be asked for 64 or less. */ if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256) nr = 256; else nr = roundup_pow_of_two(slots_wanted); /* * Note that this can drive nr *below* what we had passed if sysctl_nr_open * had been set lower between the check in expand_files() and here. * * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise * bitmaps handling below becomes unpleasant, to put it mildly... */ if (unlikely(nr > sysctl_nr_open)) { nr = round_down(sysctl_nr_open, BITS_PER_LONG); if (nr < slots_wanted) return ERR_PTR(-EMFILE); } fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT); if (!fdt) goto out; fdt->max_fds = nr; data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT); if (!data) goto out_fdt; fdt->fd = data; data = kvmalloc(max_t(size_t, 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES), GFP_KERNEL_ACCOUNT); if (!data) goto out_arr; fdt->open_fds = data; data += nr / BITS_PER_BYTE; fdt->close_on_exec = data; data += nr / BITS_PER_BYTE; fdt->full_fds_bits = data; return fdt; out_arr: kvfree(fdt->fd); out_fdt: kfree(fdt); out: return ERR_PTR(-ENOMEM); } /* * Expand the file descriptor table. * This function will allocate a new fdtable and both fd array and fdset, of * the given size. * Return <0 error code on error; 0 on successful completion. 
* The files->file_lock should be held on entry, and will be held on exit. */ static int expand_fdtable(struct files_struct *files, unsigned int nr) __releases(files->file_lock) __acquires(files->file_lock) { struct fdtable *new_fdt, *cur_fdt; spin_unlock(&files->file_lock); new_fdt = alloc_fdtable(nr + 1); /* make sure all fd_install() have seen resize_in_progress * or have finished their rcu_read_lock_sched() section. */ if (atomic_read(&files->count) > 1) synchronize_rcu(); spin_lock(&files->file_lock); if (IS_ERR(new_fdt)) return PTR_ERR(new_fdt); cur_fdt = files_fdtable(files); BUG_ON(nr < cur_fdt->max_fds); copy_fdtable(new_fdt, cur_fdt); rcu_assign_pointer(files->fdt, new_fdt); if (cur_fdt != &files->fdtab) call_rcu(&cur_fdt->rcu, free_fdtable_rcu); /* coupled with smp_rmb() in fd_install() */ smp_wmb(); return 0; } /* * Expand files. * This function will expand the file structures, if the requested size exceeds * the current capacity and there is room for expansion. * Return <0 error code on error; 0 on success. * The files->file_lock should be held on entry, and will be held on exit. */ static int expand_files(struct files_struct *files, unsigned int nr) __releases(files->file_lock) __acquires(files->file_lock) { struct fdtable *fdt; int error; repeat: fdt = files_fdtable(files); /* Do we need to expand? */ if (nr < fdt->max_fds) return 0; /* Can we expand? */ if (nr >= sysctl_nr_open) return -EMFILE; if (unlikely(files->resize_in_progress)) { spin_unlock(&files->file_lock); wait_event(files->resize_wait, !files->resize_in_progress); spin_lock(&files->file_lock); goto repeat; } /* All good, so we try */ files->resize_in_progress = true; error = expand_fdtable(files, nr); files->resize_in_progress = false; wake_up_all(&files->resize_wait); return error; } static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt, bool set) { if (set) { __set_bit(fd, fdt->close_on_exec); } else { if (test_bit(fd, fdt->close_on_exec)) __clear_bit(fd, fdt->close_on_exec); } } static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set) { __set_bit(fd, fdt->open_fds); __set_close_on_exec(fd, fdt, set); fd /= BITS_PER_LONG; if (!~fdt->open_fds[fd]) __set_bit(fd, fdt->full_fds_bits); } static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) { __clear_bit(fd, fdt->open_fds); fd /= BITS_PER_LONG; if (test_bit(fd, fdt->full_fds_bits)) __clear_bit(fd, fdt->full_fds_bits); } static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt) { return test_bit(fd, fdt->open_fds); } /* * Note that a sane fdtable size always has to be a multiple of * BITS_PER_LONG, since we have bitmaps that are sized by this. * * punch_hole is optional - when close_range() is asked to unshare * and close, we don't need to copy descriptors in that range, so * a smaller cloned descriptor table might suffice if the last * currently opened descriptor falls into that range. */ static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole) { unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds); if (last == fdt->max_fds) return NR_OPEN_DEFAULT; if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) { last = find_last_bit(fdt->open_fds, punch_hole->from); if (last == punch_hole->from) return NR_OPEN_DEFAULT; } return ALIGN(last + 1, BITS_PER_LONG); } /* * Allocate a new descriptor table and copy contents from the passed in * instance. Returns a pointer to cloned table on success, ERR_PTR() * on failure. 
For 'punch_hole' see sane_fdtable_size(). */ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole) { struct files_struct *newf; struct file **old_fds, **new_fds; unsigned int open_files, i; struct fdtable *old_fdt, *new_fdt; newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); if (!newf) return ERR_PTR(-ENOMEM); atomic_set(&newf->count, 1); spin_lock_init(&newf->file_lock); newf->resize_in_progress = false; init_waitqueue_head(&newf->resize_wait); newf->next_fd = 0; new_fdt = &newf->fdtab; new_fdt->max_fds = NR_OPEN_DEFAULT; new_fdt->close_on_exec = newf->close_on_exec_init; new_fdt->open_fds = newf->open_fds_init; new_fdt->full_fds_bits = newf->full_fds_bits_init; new_fdt->fd = &newf->fd_array[0]; spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); open_files = sane_fdtable_size(old_fdt, punch_hole); /* * Check whether we need to allocate a larger fd array and fd set. */ while (unlikely(open_files > new_fdt->max_fds)) { spin_unlock(&oldf->file_lock); if (new_fdt != &newf->fdtab) __free_fdtable(new_fdt); new_fdt = alloc_fdtable(open_files); if (IS_ERR(new_fdt)) { kmem_cache_free(files_cachep, newf); return ERR_CAST(new_fdt); } /* * Reacquire the oldf lock and a pointer to its fd table * who knows it may have a new bigger fd table. We need * the latest pointer. */ spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); open_files = sane_fdtable_size(old_fdt, punch_hole); } copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG); old_fds = old_fdt->fd; new_fds = new_fdt->fd; for (i = open_files; i != 0; i--) { struct file *f = *old_fds++; if (f) { get_file(f); } else { /* * The fd may be claimed in the fd bitmap but not yet * instantiated in the files array if a sibling thread * is partway through open(). So make sure that this * fd is available to the new process. */ __clear_open_fd(open_files - i, new_fdt); } rcu_assign_pointer(*new_fds++, f); } spin_unlock(&oldf->file_lock); /* clear the remainder */ memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *)); rcu_assign_pointer(newf->fdt, new_fdt); return newf; } static struct fdtable *close_files(struct files_struct * files) { /* * It is safe to dereference the fd table without RCU or * ->file_lock because this is the last reference to the * files structure. 
*/ struct fdtable *fdt = rcu_dereference_raw(files->fdt); unsigned int i, j = 0; for (;;) { unsigned long set; i = j * BITS_PER_LONG; if (i >= fdt->max_fds) break; set = fdt->open_fds[j++]; while (set) { if (set & 1) { struct file *file = fdt->fd[i]; if (file) { filp_close(file, files); cond_resched(); } } i++; set >>= 1; } } return fdt; } void put_files_struct(struct files_struct *files) { if (atomic_dec_and_test(&files->count)) { struct fdtable *fdt = close_files(files); /* free the arrays if they are not embedded */ if (fdt != &files->fdtab) __free_fdtable(fdt); kmem_cache_free(files_cachep, files); } } void exit_files(struct task_struct *tsk) { struct files_struct * files = tsk->files; if (files) { task_lock(tsk); tsk->files = NULL; task_unlock(tsk); put_files_struct(files); } } struct files_struct init_files = { .count = ATOMIC_INIT(1), .fdt = &init_files.fdtab, .fdtab = { .max_fds = NR_OPEN_DEFAULT, .fd = &init_files.fd_array[0], .close_on_exec = init_files.close_on_exec_init, .open_fds = init_files.open_fds_init, .full_fds_bits = init_files.full_fds_bits_init, }, .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait), }; static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start) { unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */ unsigned int maxbit = maxfd / BITS_PER_LONG; unsigned int bitbit = start / BITS_PER_LONG; unsigned int bit; /* * Try to avoid looking at the second level bitmap */ bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG, start & (BITS_PER_LONG - 1)); if (bit < BITS_PER_LONG) return bit + bitbit * BITS_PER_LONG; bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG; if (bitbit >= maxfd) return maxfd; if (bitbit > start) start = bitbit; return find_next_zero_bit(fdt->open_fds, maxfd, start); } /* * allocate a file descriptor, mark it busy. */ static int alloc_fd(unsigned start, unsigned end, unsigned flags) { struct files_struct *files = current->files; unsigned int fd; int error; struct fdtable *fdt; spin_lock(&files->file_lock); repeat: fdt = files_fdtable(files); fd = start; if (fd < files->next_fd) fd = files->next_fd; if (likely(fd < fdt->max_fds)) fd = find_next_fd(fdt, fd); /* * N.B. For clone tasks sharing a files structure, this test * will limit the total number of files that can be opened. */ error = -EMFILE; if (unlikely(fd >= end)) goto out; if (unlikely(fd >= fdt->max_fds)) { error = expand_files(files, fd); if (error < 0) goto out; goto repeat; } if (start <= files->next_fd) files->next_fd = fd + 1; __set_open_fd(fd, fdt, flags & O_CLOEXEC); error = fd; out: spin_unlock(&files->file_lock); return error; } int __get_unused_fd_flags(unsigned flags, unsigned long nofile) { return alloc_fd(0, nofile, flags); } int get_unused_fd_flags(unsigned flags) { return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE)); } EXPORT_SYMBOL(get_unused_fd_flags); static void __put_unused_fd(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = files_fdtable(files); __clear_open_fd(fd, fdt); if (fd < files->next_fd) files->next_fd = fd; } void put_unused_fd(unsigned int fd) { struct files_struct *files = current->files; spin_lock(&files->file_lock); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); } EXPORT_SYMBOL(put_unused_fd); /* * Install a file pointer in the fd array. 
* * The VFS is full of places where we drop the files lock between * setting the open_fds bitmap and installing the file in the file * array. At any such point, we are vulnerable to a dup2() race * installing a file in the array before us. We need to detect this and * fput() the struct file we are about to overwrite in this case. * * It should never happen - if we allow dup2() do it, _really_ bad things * will follow. * * This consumes the "file" refcount, so callers should treat it * as if they had called fput(file). */ void fd_install(unsigned int fd, struct file *file) { struct files_struct *files = current->files; struct fdtable *fdt; if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING))) return; rcu_read_lock_sched(); if (unlikely(files->resize_in_progress)) { rcu_read_unlock_sched(); spin_lock(&files->file_lock); fdt = files_fdtable(files); WARN_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); spin_unlock(&files->file_lock); return; } /* coupled with smp_wmb() in expand_fdtable() */ smp_rmb(); fdt = rcu_dereference_sched(files->fdt); BUG_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); rcu_read_unlock_sched(); } EXPORT_SYMBOL(fd_install); /** * file_close_fd_locked - return file associated with fd * @files: file struct to retrieve file from * @fd: file descriptor to retrieve file for * * Doesn't take a separate reference count. * * Context: files_lock must be held. * * Returns: The file associated with @fd (NULL if @fd is not open) */ struct file *file_close_fd_locked(struct files_struct *files, unsigned fd) { struct fdtable *fdt = files_fdtable(files); struct file *file; lockdep_assert_held(&files->file_lock); if (fd >= fdt->max_fds) return NULL; fd = array_index_nospec(fd, fdt->max_fds); file = fdt->fd[fd]; if (file) { rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); } return file; } int close_fd(unsigned fd) { struct files_struct *files = current->files; struct file *file; spin_lock(&files->file_lock); file = file_close_fd_locked(files, fd); spin_unlock(&files->file_lock); if (!file) return -EBADF; return filp_close(file, files); } EXPORT_SYMBOL(close_fd); /** * last_fd - return last valid index into fd table * @fdt: File descriptor table. * * Context: Either rcu read lock or files_lock must be held. * * Returns: Last valid index into fdtable. */ static inline unsigned last_fd(struct fdtable *fdt) { return fdt->max_fds - 1; } static inline void __range_cloexec(struct files_struct *cur_fds, unsigned int fd, unsigned int max_fd) { struct fdtable *fdt; /* make sure we're using the correct maximum value */ spin_lock(&cur_fds->file_lock); fdt = files_fdtable(cur_fds); max_fd = min(last_fd(fdt), max_fd); if (fd <= max_fd) bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1); spin_unlock(&cur_fds->file_lock); } static inline void __range_close(struct files_struct *files, unsigned int fd, unsigned int max_fd) { struct file *file; unsigned n; spin_lock(&files->file_lock); n = last_fd(files_fdtable(files)); max_fd = min(max_fd, n); for (; fd <= max_fd; fd++) { file = file_close_fd_locked(files, fd); if (file) { spin_unlock(&files->file_lock); filp_close(file, files); cond_resched(); spin_lock(&files->file_lock); } else if (need_resched()) { spin_unlock(&files->file_lock); cond_resched(); spin_lock(&files->file_lock); } } spin_unlock(&files->file_lock); } /** * sys_close_range() - Close all file descriptors in a given range. * * @fd: starting file descriptor to close * @max_fd: last file descriptor to close * @flags: CLOSE_RANGE flags. 
* * This closes a range of file descriptors. All file descriptors * from @fd up to and including @max_fd are closed. * Currently, errors from closing a given file descriptor are ignored. */ SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd, unsigned int, flags) { struct task_struct *me = current; struct files_struct *cur_fds = me->files, *fds = NULL; if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC)) return -EINVAL; if (fd > max_fd) return -EINVAL; if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) { struct fd_range range = {fd, max_fd}, *punch_hole = &range; /* * If the caller requested all fds to be made cloexec we always * copy all of the file descriptors since they still want to * use them. */ if (flags & CLOSE_RANGE_CLOEXEC) punch_hole = NULL; fds = dup_fd(cur_fds, punch_hole); if (IS_ERR(fds)) return PTR_ERR(fds); /* * We used to share our file descriptor table, and have now * created a private one; make sure we're using it below. */ swap(cur_fds, fds); } if (flags & CLOSE_RANGE_CLOEXEC) __range_cloexec(cur_fds, fd, max_fd); else __range_close(cur_fds, fd, max_fd); if (fds) { /* * We're done closing the files we were supposed to. Time to install * the new file descriptor table and drop the old one. */ task_lock(me); me->files = cur_fds; task_unlock(me); put_files_struct(fds); } return 0; } /** * file_close_fd - return file associated with fd * @fd: file descriptor to retrieve file for * * Doesn't take a separate reference count. * * Returns: The file associated with @fd (NULL if @fd is not open) */ struct file *file_close_fd(unsigned int fd) { struct files_struct *files = current->files; struct file *file; spin_lock(&files->file_lock); file = file_close_fd_locked(files, fd); spin_unlock(&files->file_lock); return file; } void do_close_on_exec(struct files_struct *files) { unsigned i; struct fdtable *fdt; /* exec unshares first */ spin_lock(&files->file_lock); for (i = 0; ; i++) { unsigned long set; unsigned fd = i * BITS_PER_LONG; fdt = files_fdtable(files); if (fd >= fdt->max_fds) break; set = fdt->close_on_exec[i]; if (!set) continue; fdt->close_on_exec[i] = 0; for ( ; set ; fd++, set >>= 1) { struct file *file; if (!(set & 1)) continue; file = fdt->fd[fd]; if (!file) continue; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); filp_close(file, files); cond_resched(); spin_lock(&files->file_lock); } } spin_unlock(&files->file_lock); } static struct file *__get_file_rcu(struct file __rcu **f) { struct file __rcu *file; struct file __rcu *file_reloaded; struct file __rcu *file_reloaded_cmp; file = rcu_dereference_raw(*f); if (!file) return NULL; if (unlikely(!file_ref_get(&file->f_ref))) return ERR_PTR(-EAGAIN); file_reloaded = rcu_dereference_raw(*f); /* * Ensure that all accesses have a dependency on the load from * rcu_dereference_raw() above so we get correct ordering * between reuse/allocation and the pointer check below. */ file_reloaded_cmp = file_reloaded; OPTIMIZER_HIDE_VAR(file_reloaded_cmp); /* * file_ref_get() above provided a full memory barrier when we * acquired a reference. * * This is paired with the write barrier from assigning to the * __rcu protected file pointer so that if that pointer still * matches the current file, we know we have successfully * acquired a reference to the right file. * * If the pointers don't match the file has been reallocated by * SLAB_TYPESAFE_BY_RCU.
*/ if (file == file_reloaded_cmp) return file_reloaded; fput(file); return ERR_PTR(-EAGAIN); } /** * get_file_rcu - try to get a reference to a file under rcu * @f: the file to get a reference on * * This function tries to get a reference on @f carefully verifying that * @f hasn't been reused. * * This function should rarely have to be used and only by users who * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it. * * Return: Returns @f with the reference count increased or NULL. */ struct file *get_file_rcu(struct file __rcu **f) { for (;;) { struct file __rcu *file; file = __get_file_rcu(f); if (!IS_ERR(file)) return file; } } EXPORT_SYMBOL_GPL(get_file_rcu); /** * get_file_active - try to get a reference to a file * @f: the file to get a reference on * * In contrast to get_file_rcu() the pointer itself isn't part of the * reference counting. * * This function should rarely have to be used and only by users who * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it. * * Return: Returns @f with the reference count increased or NULL. */ struct file *get_file_active(struct file **f) { struct file __rcu *file; rcu_read_lock(); file = __get_file_rcu(f); rcu_read_unlock(); if (IS_ERR(file)) file = NULL; return file; } EXPORT_SYMBOL_GPL(get_file_active); static inline struct file *__fget_files_rcu(struct files_struct *files, unsigned int fd, fmode_t mask) { for (;;) { struct file *file; struct fdtable *fdt = rcu_dereference_raw(files->fdt); struct file __rcu **fdentry; unsigned long nospec_mask; /* Mask is a 0 for invalid fd's, ~0 for valid ones */ nospec_mask = array_index_mask_nospec(fd, fdt->max_fds); /* * fdentry points to the 'fd' offset, or fdt->fd[0]. * Loading from fdt->fd[0] is always safe, because the * array always exists. */ fdentry = fdt->fd + (fd & nospec_mask); /* Do the load, then mask any invalid result */ file = rcu_dereference_raw(*fdentry); file = (void *)(nospec_mask & (unsigned long)file); if (unlikely(!file)) return NULL; /* * Ok, we have a file pointer that was valid at * some point, but it might have become stale since. * * We need to confirm it by incrementing the refcount * and then check the lookup again. * * file_ref_get() gives us a full memory barrier. We * only really need an 'acquire' one to protect the * loads below, but we don't have that. */ if (unlikely(!file_ref_get(&file->f_ref))) continue; /* * Such a race can take two forms: * * (a) the file ref already went down to zero and the * file hasn't been reused yet or the file count * isn't zero but the file has already been reused. * * (b) the file table entry has changed under us. * Note that we don't need to re-check the 'fdt->fd' * pointer having changed, because it always goes * hand-in-hand with 'fdt'. * * If so, we need to put our ref and try again. */ if (unlikely(file != rcu_dereference_raw(*fdentry)) || unlikely(rcu_dereference_raw(files->fdt) != fdt)) { fput(file); continue; } /* * This isn't the file we're looking for or we're not * allowed to get a reference to it. */ if (unlikely(file->f_mode & mask)) { fput(file); return NULL; } /* * Ok, we have a ref to the file, and checked that it * still exists.
*/ return file; } } static struct file *__fget_files(struct files_struct *files, unsigned int fd, fmode_t mask) { struct file *file; rcu_read_lock(); file = __fget_files_rcu(files, fd, mask); rcu_read_unlock(); return file; } static inline struct file *__fget(unsigned int fd, fmode_t mask) { return __fget_files(current->files, fd, mask); } struct file *fget(unsigned int fd) { return __fget(fd, FMODE_PATH); } EXPORT_SYMBOL(fget); struct file *fget_raw(unsigned int fd) { return __fget(fd, 0); } EXPORT_SYMBOL(fget_raw); struct file *fget_task(struct task_struct *task, unsigned int fd) { struct file *file = NULL; task_lock(task); if (task->files) file = __fget_files(task->files, fd, 0); task_unlock(task); return file; } struct file *fget_task_next(struct task_struct *task, unsigned int *ret_fd) { /* Must be called with rcu_read_lock held */ struct files_struct *files; unsigned int fd = *ret_fd; struct file *file = NULL; task_lock(task); files = task->files; if (files) { rcu_read_lock(); for (; fd < files_fdtable(files)->max_fds; fd++) { file = __fget_files_rcu(files, fd, 0); if (file) break; } rcu_read_unlock(); } task_unlock(task); *ret_fd = fd; return file; } EXPORT_SYMBOL(fget_task_next); /* * Lightweight file lookup - no refcnt increment if fd table isn't shared. * * You can use this instead of fget if you satisfy all of the following * conditions: * 1) You must call fput_light before exiting the syscall and returning control * to userspace (i.e. you cannot remember the returned struct file * after * returning to userspace). * 2) You must not call filp_close on the returned struct file * in between * calls to fget_light and fput_light. * 3) You must not clone the current task in between the calls to fget_light * and fput_light. * * The fput_needed flag returned by fget_light should be passed to the * corresponding fput_light. * * (As an exception to rule 2, you can call filp_close between fget_light and * fput_light provided that you capture a real refcount with get_file before * the call to filp_close, and ensure that this real refcount is fput *after* * the fput_light call.) * * See also the documentation in rust/kernel/file.rs. */ static inline struct fd __fget_light(unsigned int fd, fmode_t mask) { struct files_struct *files = current->files; struct file *file; /* * If another thread is concurrently calling close_fd() followed * by put_files_struct(), we must not observe the old table * entry combined with the new refcount - otherwise we could * return a file that is concurrently being freed. * * atomic_read_acquire() pairs with atomic_dec_and_test() in * put_files_struct(). */ if (likely(atomic_read_acquire(&files->count) == 1)) { file = files_lookup_fd_raw(files, fd); if (!file || unlikely(file->f_mode & mask)) return EMPTY_FD; return BORROWED_FD(file); } else { file = __fget_files(files, fd, mask); if (!file) return EMPTY_FD; return CLONED_FD(file); } } struct fd fdget(unsigned int fd) { return __fget_light(fd, FMODE_PATH); } EXPORT_SYMBOL(fdget); struct fd fdget_raw(unsigned int fd) { return __fget_light(fd, 0); } /* * Try to avoid f_pos locking. We only need it if the * file is marked for FMODE_ATOMIC_POS, and it can be * accessed multiple ways. * * Always do it for directories, because pidfd_getfd() * can make a file accessible even if it otherwise would * not be, and for directories this is a correctness * issue, not a "POSIX requirement". 
*/ static inline bool file_needs_f_pos_lock(struct file *file) { return (file->f_mode & FMODE_ATOMIC_POS) && (file_count(file) > 1 || file->f_op->iterate_shared); } struct fd fdget_pos(unsigned int fd) { struct fd f = fdget(fd); struct file *file = fd_file(f); if (file && file_needs_f_pos_lock(file)) { f.word |= FDPUT_POS_UNLOCK; mutex_lock(&file->f_pos_lock); } return f; } void __f_unlock_pos(struct file *f) { mutex_unlock(&f->f_pos_lock); } /* * We only lock f_pos if we have threads or if the file might be * shared with another process. In both cases we'll have an elevated * file count (done either by fdget() or by fork()). */ void set_close_on_exec(unsigned int fd, int flag) { struct files_struct *files = current->files; spin_lock(&files->file_lock); __set_close_on_exec(fd, files_fdtable(files), flag); spin_unlock(&files->file_lock); } bool get_close_on_exec(unsigned int fd) { bool res; rcu_read_lock(); res = close_on_exec(fd, current->files); rcu_read_unlock(); return res; } static int do_dup2(struct files_struct *files, struct file *file, unsigned fd, unsigned flags) __releases(&files->file_lock) { struct file *tofree; struct fdtable *fdt; /* * We need to detect attempts to do dup2() over an allocated but still * not finished descriptor. NB: OpenBSD avoids that at the price of * extra work in their equivalent of fget() - they insert struct * file immediately after grabbing descriptor, mark it larval if * more work (e.g. actual opening) is needed and make sure that * fget() treats larval files as absent. Potentially interesting, * but while extra work in fget() is trivial, locking implications * and amount of surgery on open()-related paths in VFS are not. * FreeBSD fails with -EBADF in the same situation, NetBSD "solution" * deadlocks in rather amusing ways, AFAICS. All of that is out of * scope of POSIX or SUS, since neither considers shared descriptor * tables and this condition does not arise without those. */ fdt = files_fdtable(files); fd = array_index_nospec(fd, fdt->max_fds); tofree = fdt->fd[fd]; if (!tofree && fd_is_open(fd, fdt)) goto Ebusy; get_file(file); rcu_assign_pointer(fdt->fd[fd], file); __set_open_fd(fd, fdt, flags & O_CLOEXEC); spin_unlock(&files->file_lock); if (tofree) filp_close(tofree, files); return fd; Ebusy: spin_unlock(&files->file_lock); return -EBUSY; } int replace_fd(unsigned fd, struct file *file, unsigned flags) { int err; struct files_struct *files = current->files; if (!file) return close_fd(fd); if (fd >= rlimit(RLIMIT_NOFILE)) return -EBADF; spin_lock(&files->file_lock); err = expand_files(files, fd); if (unlikely(err < 0)) goto out_unlock; return do_dup2(files, file, fd, flags); out_unlock: spin_unlock(&files->file_lock); return err; } /** * receive_fd() - Install received file into file descriptor table * @file: struct file that was received from another process * @ufd: __user pointer to write new fd number to * @o_flags: the O_* flags to apply to the new fd entry * * Installs a received file into the file descriptor table, with appropriate * checks and count updates. Optionally writes the fd number to userspace, if * @ufd is non-NULL. * * This helper handles its own reference counting of the incoming * struct file. * * Returns the newly installed fd or a negative error code on failure.
*/ int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) { int new_fd; int error; error = security_file_receive(file); if (error) return error; new_fd = get_unused_fd_flags(o_flags); if (new_fd < 0) return new_fd; if (ufd) { error = put_user(new_fd, ufd); if (error) { put_unused_fd(new_fd); return error; } } fd_install(new_fd, get_file(file)); __receive_sock(file); return new_fd; } EXPORT_SYMBOL_GPL(receive_fd); int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags) { int error; error = security_file_receive(file); if (error) return error; error = replace_fd(new_fd, file, o_flags); if (error) return error; __receive_sock(file); return new_fd; } static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) { int err = -EBADF; struct file *file; struct files_struct *files = current->files; if ((flags & ~O_CLOEXEC) != 0) return -EINVAL; if (unlikely(oldfd == newfd)) return -EINVAL; if (newfd >= rlimit(RLIMIT_NOFILE)) return -EBADF; spin_lock(&files->file_lock); err = expand_files(files, newfd); file = files_lookup_fd_locked(files, oldfd); if (unlikely(!file)) goto Ebadf; if (unlikely(err < 0)) { if (err == -EMFILE) goto Ebadf; goto out_unlock; } return do_dup2(files, file, newfd, flags); Ebadf: err = -EBADF; out_unlock: spin_unlock(&files->file_lock); return err; } SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags) { return ksys_dup3(oldfd, newfd, flags); } SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) { if (unlikely(newfd == oldfd)) { /* corner case */ struct files_struct *files = current->files; struct file *f; int retval = oldfd; rcu_read_lock(); f = __fget_files_rcu(files, oldfd, 0); if (!f) retval = -EBADF; rcu_read_unlock(); if (f) fput(f); return retval; } return ksys_dup3(oldfd, newfd, 0); } SYSCALL_DEFINE1(dup, unsigned int, fildes) { int ret = -EBADF; struct file *file = fget_raw(fildes); if (file) { ret = get_unused_fd_flags(0); if (ret >= 0) fd_install(ret, file); else fput(file); } return ret; } int f_dupfd(unsigned int from, struct file *file, unsigned flags) { unsigned long nofile = rlimit(RLIMIT_NOFILE); int err; if (from >= nofile) return -EINVAL; err = alloc_fd(from, nofile, flags); if (err >= 0) { get_file(file); fd_install(err, file); } return err; } int iterate_fd(struct files_struct *files, unsigned n, int (*f)(const void *, struct file *, unsigned), const void *p) { struct fdtable *fdt; int res = 0; if (!files) return 0; spin_lock(&files->file_lock); for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { struct file *file; file = rcu_dereference_check_fdtable(files, fdt->fd[n]); if (!file) continue; res = f(p, file, n); if (res) break; } spin_unlock(&files->file_lock); return res; } EXPORT_SYMBOL(iterate_fd);
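/*
 * Illustrative sketch (not part of the original file): a minimal
 * iterate_fd() callback. iterate_fd() stops at the first non-zero return
 * from the callback, so "found at descriptor n" is encoded as n + 1 to
 * keep descriptor 0 distinguishable from "not found".
 */
static int example_match_fops(const void *p, struct file *file, unsigned n)
{
	return file->f_op == p ? n + 1 : 0;
}
/* usage sketch: n = iterate_fd(files, 0, example_match_fops, target_fops); */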
// SPDX-License-Identifier: GPL-2.0-or-later /* * IPv6 input * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * Ian P. Morris <I.P.Morris@soton.ac.uk> * * Based on linux/net/ipv4/ip_input.c */ /* Changes * * Mitsuru KANDA @USAGI and * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
*/ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/icmpv6.h> #include <linux/mroute6.h> #include <linux/slab.h> #include <linux/indirect_call_wrapper.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <net/sock.h> #include <net/snmp.h> #include <net/udp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/xfrm.h> #include <net/inet_ecn.h> #include <net/dst_metadata.h> static void ip6_rcv_finish_core(struct net *net, struct sock *sk, struct sk_buff *skb) { if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) && !skb_dst(skb) && !skb->sk) { switch (ipv6_hdr(skb)->nexthdr) { case IPPROTO_TCP: if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) tcp_v6_early_demux(skb); break; case IPPROTO_UDP: if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) udp_v6_early_demux(skb); break; } } if (!skb_valid_dst(skb)) ip6_route_input(skb); } int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { /* if ingress device is enslaved to an L3 master device pass the * skb to its handler for processing */ skb = l3mdev_ip6_rcv(skb); if (!skb) return NET_RX_SUCCESS; ip6_rcv_finish_core(net, sk, skb); return dst_input(skb); } static void ip6_sublist_rcv_finish(struct list_head *head) { struct sk_buff *skb, *next; list_for_each_entry_safe(skb, next, head, list) { skb_list_del_init(skb); dst_input(skb); } } static bool ip6_can_use_hint(const struct sk_buff *skb, const struct sk_buff *hint) { return hint && !skb_dst(skb) && ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr); } static struct sk_buff *ip6_extract_route_hint(const struct net *net, struct sk_buff *skb) { if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) || IP6CB(skb)->flags & IP6SKB_MULTIPATH) return NULL; return skb; } static void ip6_list_rcv_finish(struct net *net, struct sock *sk, struct list_head *head) { struct sk_buff *skb, *next, *hint = NULL; struct dst_entry *curr_dst = NULL; LIST_HEAD(sublist); list_for_each_entry_safe(skb, next, head, list) { struct dst_entry *dst; skb_list_del_init(skb); /* if ingress device is enslaved to an L3 master device pass the * skb to its handler for processing */ skb = l3mdev_ip6_rcv(skb); if (!skb) continue; if (ip6_can_use_hint(skb, hint)) skb_dst_copy(skb, hint); else ip6_rcv_finish_core(net, sk, skb); dst = skb_dst(skb); if (curr_dst != dst) { hint = ip6_extract_route_hint(net, skb); /* dispatch old sublist */ if (!list_empty(&sublist)) ip6_sublist_rcv_finish(&sublist); /* start new sublist */ INIT_LIST_HEAD(&sublist); curr_dst = dst; } list_add_tail(&skb->list, &sublist); } /* dispatch final sublist */ ip6_sublist_rcv_finish(&sublist); } static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, struct net *net) { enum skb_drop_reason reason; const struct ipv6hdr *hdr; u32 pkt_len; struct inet6_dev *idev; if (skb->pkt_type == PACKET_OTHERHOST) { dev_core_stats_rx_otherhost_dropped_inc(skb->dev); kfree_skb_reason(skb, SKB_DROP_REASON_OTHERHOST); return NULL; } rcu_read_lock(); idev = __in6_dev_get(skb->dev); __IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len); SKB_DR_SET(reason, NOT_SPECIFIED); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) { __IP6_INC_STATS(net, idev, 
IPSTATS_MIB_INDISCARDS); if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6))) SKB_DR_SET(reason, IPV6DISABLED); goto drop; } memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); /* * Store incoming device index. When the packet will * be queued, we cannot refer to skb->dev anymore. * * BTW, when we send a packet for our own local address on a * non-loopback interface (e.g. ethX), it is being delivered * via the loopback interface (lo) here; skb->dev = loopback_dev. * It, however, should be considered as if it is being * arrived via the sending interface (ethX), because of the * nature of scoping architecture. --yoshfuji */ IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex; if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) goto err; hdr = ipv6_hdr(skb); if (hdr->version != 6) { SKB_DR_SET(reason, UNHANDLED_PROTO); goto err; } __IP6_ADD_STATS(net, idev, IPSTATS_MIB_NOECTPKTS + (ipv6_get_dsfield(hdr) & INET_ECN_MASK), max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); /* * RFC4291 2.5.3 * The loopback address must not be used as the source address in IPv6 * packets that are sent outside of a single node. [..] * A packet received on an interface with a destination address * of loopback must be dropped. */ if ((ipv6_addr_loopback(&hdr->saddr) || ipv6_addr_loopback(&hdr->daddr)) && !(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev)) goto err; /* RFC4291 Errata ID: 3480 * Interface-Local scope spans only a single interface on a * node and is useful only for loopback transmission of * multicast. Packets with interface-local scope received * from another node must be discarded. */ if (!(skb->pkt_type == PACKET_LOOPBACK || dev->flags & IFF_LOOPBACK) && ipv6_addr_is_multicast(&hdr->daddr) && IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) goto err; /* If enabled, drop unicast packets that were encapsulated in link-layer * multicast or broadcast to protected against the so-called "hole-196" * attack in 802.11 wireless. */ if (!ipv6_addr_is_multicast(&hdr->daddr) && (skb->pkt_type == PACKET_BROADCAST || skb->pkt_type == PACKET_MULTICAST) && READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) { SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST); goto err; } /* RFC4291 2.7 * Nodes must not originate a packet to a multicast address whose scope * field contains the reserved value 0; if such a packet is received, it * must be silently dropped. */ if (ipv6_addr_is_multicast(&hdr->daddr) && IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0) goto err; /* * RFC4291 2.7 * Multicast addresses must not be used as source addresses in IPv6 * packets or appear in any Routing header. */ if (ipv6_addr_is_multicast(&hdr->saddr)) goto err; skb->transport_header = skb->network_header + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); pkt_len = ntohs(hdr->payload_len); /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INTRUNCATEDPKTS); SKB_DR_SET(reason, PKT_TOO_SMALL); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto err; hdr = ipv6_hdr(skb); } if (hdr->nexthdr == NEXTHDR_HOP) { if (ipv6_parse_hopopts(skb) < 0) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); rcu_read_unlock(); return NULL; } } rcu_read_unlock(); /* Must drop socket now because of tproxy. 
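 * Early demux may already have attached a socket to the skb; unless the
 * socket was deliberately prefetched (e.g. via the TPROXY / BPF sk_assign
 * paths that skb_sk_is_prefetched() recognises), drop it here so local
 * delivery performs its own socket lookup.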
*/ if (!skb_sk_is_prefetched(skb)) skb_orphan(skb); return skb; err: __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); SKB_DR_OR(reason, IP_INHDR); drop: rcu_read_unlock(); kfree_skb_reason(skb, reason); return NULL; } int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct net *net = dev_net(skb->dev); skb = ip6_rcv_core(skb, dev, net); if (skb == NULL) return NET_RX_DROP; return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL, skb, dev, NULL, ip6_rcv_finish); } static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev, struct net *net) { NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL, head, dev, NULL, ip6_rcv_finish); ip6_list_rcv_finish(net, NULL, head); } /* Receive a list of IPv6 packets */ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, struct net_device *orig_dev) { struct net_device *curr_dev = NULL; struct net *curr_net = NULL; struct sk_buff *skb, *next; LIST_HEAD(sublist); list_for_each_entry_safe(skb, next, head, list) { struct net_device *dev = skb->dev; struct net *net = dev_net(dev); skb_list_del_init(skb); skb = ip6_rcv_core(skb, dev, net); if (skb == NULL) continue; if (curr_dev != dev || curr_net != net) { /* dispatch old sublist */ if (!list_empty(&sublist)) ip6_sublist_rcv(&sublist, curr_dev, curr_net); /* start new sublist */ INIT_LIST_HEAD(&sublist); curr_dev = dev; curr_net = net; } list_add_tail(&skb->list, &sublist); } /* dispatch final sublist */ if (!list_empty(&sublist)) ip6_sublist_rcv(&sublist, curr_dev, curr_net); } INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *)); /* * Deliver the packet to the host */ void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr, bool have_final) { const struct inet6_protocol *ipprot; struct inet6_dev *idev; unsigned int nhoff; SKB_DR(reason); bool raw; /* * Parse extension headers */ resubmit: idev = ip6_dst_idev(skb_dst(skb)); nhoff = IP6CB(skb)->nhoff; if (!have_final) { if (!pskb_pull(skb, skb_transport_offset(skb))) goto discard; nexthdr = skb_network_header(skb)[nhoff]; } resubmit_final: raw = raw6_local_deliver(skb, nexthdr); ipprot = rcu_dereference(inet6_protos[nexthdr]); if (ipprot) { int ret; if (have_final) { if (!(ipprot->flags & INET6_PROTO_FINAL)) { /* Once we've seen a final protocol don't * allow encapsulation on any non-final * ones. This allows foo in UDP encapsulation * to work. */ goto discard; } } else if (ipprot->flags & INET6_PROTO_FINAL) { const struct ipv6hdr *hdr; int sdif = inet6_sdif(skb); struct net_device *dev; /* Only do this once for first final protocol */ have_final = true; skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); hdr = ipv6_hdr(skb); /* skb->dev passed may be master dev for vrfs. */ if (sdif) { dev = dev_get_by_index_rcu(net, sdif); if (!dev) goto discard; } else { dev = skb->dev; } if (ipv6_addr_is_multicast(&hdr->daddr) && !ipv6_chk_mcast_addr(dev, &hdr->daddr, &hdr->saddr) && !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) { SKB_DR_SET(reason, IP_INADDRERRORS); goto discard; } } if (!(ipprot->flags & INET6_PROTO_NOPOLICY)) { if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { SKB_DR_SET(reason, XFRM_POLICY); goto discard; } nf_reset_ct(skb); } ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv, skb); if (ret > 0) { if (ipprot->flags & INET6_PROTO_FINAL) { /* Not an extension header, most likely UDP * encapsulation. 
Use return value as nexthdr * protocol not nhoff (which presumably is * not set by handler). */ nexthdr = ret; goto resubmit_final; } else { goto resubmit; } } else if (ret == 0) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS); } } else { if (!raw) { if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INUNKNOWNPROTOS); icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_UNK_NEXTHDR, nhoff); SKB_DR_SET(reason, IP_NOPROTO); } else { SKB_DR_SET(reason, XFRM_POLICY); } kfree_skb_reason(skb, reason); } else { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS); consume_skb(skb); } } return; discard: __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); kfree_skb_reason(skb, reason); } static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { skb_clear_delivery_time(skb); rcu_read_lock(); ip6_protocol_deliver_rcu(net, skb, 0, false); rcu_read_unlock(); return 0; } int ip6_input(struct sk_buff *skb) { return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, dev_net(skb->dev), NULL, skb, skb->dev, NULL, ip6_input_finish); } EXPORT_SYMBOL_GPL(ip6_input); int ip6_mc_input(struct sk_buff *skb) { int sdif = inet6_sdif(skb); const struct ipv6hdr *hdr; struct net_device *dev; bool deliver; __IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev), __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST, skb->len); /* skb->dev passed may be master dev for vrfs. */ if (sdif) { rcu_read_lock(); dev = dev_get_by_index_rcu(dev_net(skb->dev), sdif); if (!dev) { rcu_read_unlock(); kfree_skb(skb); return -ENODEV; } } else { dev = skb->dev; } hdr = ipv6_hdr(skb); deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL); if (sdif) rcu_read_unlock(); #ifdef CONFIG_IPV6_MROUTE /* * IPv6 multicast router mode is now supported ;) */ if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) && !(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate * packets. */ struct sk_buff *skb2; struct inet6_skb_parm *opt = IP6CB(skb); /* Check for MLD */ if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) { /* Check if this is an MLD message */ u8 nexthdr = hdr->nexthdr; __be16 frag_off; int offset; /* Check if the value of Router Alert * is for MLD (0x0000). */ if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) { deliver = false; if (!ipv6_ext_hdr(nexthdr)) { /* BUG */ goto out; } offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off); if (offset < 0) goto out; if (ipv6_is_mld(skb, nexthdr, offset)) deliver = true; goto out; } /* unknown RA - process it normally */ } if (deliver) skb2 = skb_clone(skb, GFP_ATOMIC); else { skb2 = skb; skb = NULL; } if (skb2) { ip6_mr_input(skb2); } } out: #endif if (likely(deliver)) ip6_input(skb); else { /* discard */ kfree_skb(skb); } return 0; }
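/*
 * Illustrative sketch (not from this file): the shape of a handler as
 * dispatched by ip6_protocol_deliver_rcu() above. INET6_PROTO_FINAL marks
 * a terminal protocol, INET6_PROTO_NOPOLICY skips the xfrm policy check,
 * and "NN" stands in for a real IP protocol number.
 */
static int example_v6_rcv(struct sk_buff *skb)
{
	consume_skb(skb);	/* a real handler would deliver the payload */
	return 0;		/* 0: consumed; >0: resubmit with that nexthdr */
}

static const struct inet6_protocol example_protocol = {
	.handler = example_v6_rcv,
	.flags	 = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
/* registration sketch: inet6_add_protocol(&example_protocol, NN); */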
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_FS_IO_H #define _BCACHEFS_FS_IO_H #ifndef NO_BCACHEFS_FS #include "buckets.h" #include "fs.h" #include "io_write_types.h" #include "quota.h" #include <linux/uio.h> struct folio_vec { struct folio *fv_folio; size_t fv_offset; size_t fv_len; }; static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv) { struct folio *folio = page_folio(bv.bv_page); size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) + bv.bv_offset; size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len); return (struct folio_vec) { .fv_folio = folio, .fv_offset = offset, .fv_len = len, }; } static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio, struct bvec_iter iter) { return biovec_to_foliovec(bio_iter_iovec(bio, iter)); } #define __bio_for_each_folio(bvl, bio, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bio_iter_iovec_folio((bio), (iter))), 1); \ bio_advance_iter_single((bio), &(iter), (bvl).fv_len)) /** * bio_for_each_folio - iterate over folios within a bio * * Like other non-_all versions, this iterates over what bio->bi_iter currently * points to. This version is for drivers, where the bio may have previously * been split or cloned. */ #define bio_for_each_folio(bvl, bio, iter) \ __bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter) struct quota_res { u64 sectors; }; #ifdef CONFIG_BCACHEFS_QUOTA static inline void __bch2_quota_reservation_put(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *res) { BUG_ON(res->sectors > inode->ei_quota_reserved); bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC); inode->ei_quota_reserved -= res->sectors; res->sectors = 0; } static inline void bch2_quota_reservation_put(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *res) { if (res->sectors) { mutex_lock(&inode->ei_quota_lock); __bch2_quota_reservation_put(c, inode, res); mutex_unlock(&inode->ei_quota_lock); } } static inline int bch2_quota_reservation_add(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *res, u64 sectors, bool check_enospc) { int ret; if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags)) return 0; mutex_lock(&inode->ei_quota_lock); ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, check_enospc ?
KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK); if (likely(!ret)) { inode->ei_quota_reserved += sectors; res->sectors += sectors; } mutex_unlock(&inode->ei_quota_lock); return ret; } #else static inline void __bch2_quota_reservation_put(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *res) {} static inline void bch2_quota_reservation_put(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *res) {} static inline int bch2_quota_reservation_add(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *res, unsigned sectors, bool check_enospc) { return 0; } #endif void __bch2_i_sectors_acct(struct bch_fs *, struct bch_inode_info *, struct quota_res *, s64); static inline void bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *quota_res, s64 sectors) { if (sectors) { mutex_lock(&inode->ei_quota_lock); __bch2_i_sectors_acct(c, inode, quota_res, sectors); mutex_unlock(&inode->ei_quota_lock); } } static inline struct address_space *faults_disabled_mapping(void) { return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL); } static inline void set_fdm_dropped_locks(void) { current->faults_disabled_mapping = (void *) (((unsigned long) current->faults_disabled_mapping)|1); } static inline bool fdm_dropped_locks(void) { return ((unsigned long) current->faults_disabled_mapping) & 1; } void bch2_inode_flush_nocow_writes_async(struct bch_fs *, struct bch_inode_info *, struct closure *); int __must_check bch2_write_inode_size(struct bch_fs *, struct bch_inode_info *, loff_t, unsigned); int bch2_fsync(struct file *, loff_t, loff_t, int); int bchfs_truncate(struct mnt_idmap *, struct bch_inode_info *, struct iattr *); long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t); loff_t bch2_remap_file_range(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned); loff_t bch2_llseek(struct file *, loff_t, int); void bch2_fs_fsio_exit(struct bch_fs *); int bch2_fs_fsio_init(struct bch_fs *); #else static inline void bch2_fs_fsio_exit(struct bch_fs *c) {} static inline int bch2_fs_fsio_init(struct bch_fs *c) { return 0; } #endif #endif /* _BCACHEFS_FS_IO_H */
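/*
 * Illustrative sketch (not part of this header): the reserve/account/release
 * pattern the quota helpers above are designed for, roughly as a write path
 * would use it. The function name is hypothetical.
 */
static inline int example_quota_cycle(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      s64 sectors)
{
	struct quota_res res = { 0 };
	int ret;

	/* fail up front if the allocation would exceed quota */
	ret = bch2_quota_reservation_add(c, inode, &res, sectors, true);
	if (ret)
		return ret;

	/* convert the reservation into accounted i_sectors */
	bch2_i_sectors_acct(c, inode, &res, sectors);

	/* drop whatever part of the reservation was not consumed */
	bch2_quota_reservation_put(c, inode, &res);
	return 0;
}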
// SPDX-License-Identifier: GPL-2.0-only /* * slip.c This module implements the SLIP protocol for kernel-based * devices like TTY. It interfaces between a raw TTY, and the * kernel's INET protocol layers. * * Version: @(#)slip.c 0.8.3 12/24/94 * * Authors: Laurence Culhane, <loz@holmes.demon.co.uk> * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> * * Fixes: * Alan Cox : Sanity checks and avoid tx overruns. * Has a new sl->mtu field. * Alan Cox : Found cause of overrun. ifconfig sl0 * mtu upwards. Driver now spots this * and grows/shrinks its buffers(hack!). * Memory leak if you run out of memory * setting up a slip driver fixed. * Matt Dillon : Printable slip (borrowed from NET2E) * Pauline Middelink : Slip driver fixes.
* Alan Cox : Honours the old SL_COMPRESSED flag * Alan Cox : KISS AX.25 and AXUI IP support * Michael Riepe : Automatic CSLIP recognition added * Charles Hedrick : CSLIP header length problem fix. * Alan Cox : Corrected non-IP cases of the above. * Alan Cox : Now uses hardware type as per FvK. * Alan Cox : Default to 192.168.0.0 (RFC 1597) * A.N.Kuznetsov : dev_tint() recursion fix. * Dmitry Gorodchanin : SLIP memory leaks * Dmitry Gorodchanin : Code cleanup. Reduce tty driver * buffering from 4096 to 256 bytes. * Improving SLIP response time. * CONFIG_SLIP_MODE_SLIP6. * ifconfig sl? up & down now works * correctly. * Modularization. * Alan Cox : Oops - fix AX.25 buffer lengths * Dmitry Gorodchanin : Even more cleanups. Preserve CSLIP * statistics. Include CSLIP code only * if it really needed. * Alan Cox : Free slhc buffers in the right place. * Alan Cox : Allow for digipeated IP over AX.25 * Matti Aarnio : Dynamic SLIP devices, with ideas taken * from Jim Freeman's <jfree@caldera.com> * dynamic PPP devices. We do NOT kfree() * device entries, just reg./unreg. them * as they are needed. We kfree() them * at module cleanup. * With MODULE-loading ``insmod'', user * can issue parameter: slip_maxdev=1024 * (Or how much he/she wants.. Default * is 256) * Stanislav Voronyi : Slip line checking, with ideas taken * from multislip BSDI driver which was * written by Igor Chechik, RELCOM Corp. * Only algorithms have been ported to * Linux SLIP driver. * Vitaly E. Lavrov : Sane behaviour on tty hangup. * Alexey Kuznetsov : Cleanup interfaces to tty & netdevice * modules. */ #define SL_CHECK_TRANSMIT #include <linux/compat.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/sched/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/if_arp.h> #include <linux/if_slip.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "slip.h" #ifdef CONFIG_INET #include <linux/ip.h> #include <linux/tcp.h> #include <net/slhc_vj.h> #endif #define SLIP_VERSION "0.8.4-NET3.019-NEWTTY" static struct net_device **slip_devs; static int slip_maxdev = SL_NRUNIT; module_param(slip_maxdev, int, 0); MODULE_PARM_DESC(slip_maxdev, "Maximum number of slip devices"); static int slip_esc(unsigned char *p, unsigned char *d, int len); static void slip_unesc(struct slip *sl, unsigned char c); #ifdef CONFIG_SLIP_MODE_SLIP6 static int slip_esc6(unsigned char *p, unsigned char *d, int len); static void slip_unesc6(struct slip *sl, unsigned char c); #endif #ifdef CONFIG_SLIP_SMART static void sl_keepalive(struct timer_list *t); static void sl_outfill(struct timer_list *t); static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd); #endif /******************************** * Buffer administration routines: * sl_alloc_bufs() * sl_free_bufs() * sl_realloc_bufs() * * NOTE: sl_realloc_bufs != sl_free_bufs + sl_alloc_bufs, because * sl_realloc_bufs provides strong atomicity and reallocation * on actively running device. *********************************/ /* Allocate channel buffers. 
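 *
 * Buffers are sized at twice the MTU, with a floor of 2 * 576 bytes; the
 * slack is meant to absorb peers that send more than the agreed MTU and,
 * on the transmit side, worst-case SLIP escaping, where each byte can
 * expand into a two-byte ESC sequence.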
*/ static int sl_alloc_bufs(struct slip *sl, int mtu) { int err = -ENOBUFS; unsigned long len; char *rbuff = NULL; char *xbuff = NULL; #ifdef SL_INCLUDE_CSLIP char *cbuff = NULL; struct slcompress *slcomp = NULL; #endif /* * Allocate the SLIP frame buffers: * * rbuff Receive buffer. * xbuff Transmit buffer. * cbuff Temporary compression buffer. */ len = mtu * 2; /* * allow for arrival of larger UDP packets, even if we say not to * also fixes a bug in which SunOS sends 512-byte packets even with * an MSS of 128 */ if (len < 576 * 2) len = 576 * 2; rbuff = kmalloc(len + 4, GFP_KERNEL); if (rbuff == NULL) goto err_exit; xbuff = kmalloc(len + 4, GFP_KERNEL); if (xbuff == NULL) goto err_exit; #ifdef SL_INCLUDE_CSLIP cbuff = kmalloc(len + 4, GFP_KERNEL); if (cbuff == NULL) goto err_exit; slcomp = slhc_init(16, 16); if (IS_ERR(slcomp)) goto err_exit; #endif spin_lock_bh(&sl->lock); if (sl->tty == NULL) { spin_unlock_bh(&sl->lock); err = -ENODEV; goto err_exit; } sl->mtu = mtu; sl->buffsize = len; sl->rcount = 0; sl->xleft = 0; rbuff = xchg(&sl->rbuff, rbuff); xbuff = xchg(&sl->xbuff, xbuff); #ifdef SL_INCLUDE_CSLIP cbuff = xchg(&sl->cbuff, cbuff); slcomp = xchg(&sl->slcomp, slcomp); #endif #ifdef CONFIG_SLIP_MODE_SLIP6 sl->xdata = 0; sl->xbits = 0; #endif spin_unlock_bh(&sl->lock); err = 0; /* Cleanup */ err_exit: #ifdef SL_INCLUDE_CSLIP kfree(cbuff); slhc_free(slcomp); #endif kfree(xbuff); kfree(rbuff); return err; } /* Free a SLIP channel buffers. */ static void sl_free_bufs(struct slip *sl) { /* Free all SLIP frame buffers. */ kfree(xchg(&sl->rbuff, NULL)); kfree(xchg(&sl->xbuff, NULL)); #ifdef SL_INCLUDE_CSLIP kfree(xchg(&sl->cbuff, NULL)); slhc_free(xchg(&sl->slcomp, NULL)); #endif } /* Reallocate slip channel buffers. */ static int sl_realloc_bufs(struct slip *sl, int mtu) { int err = 0; struct net_device *dev = sl->dev; unsigned char *xbuff, *rbuff; #ifdef SL_INCLUDE_CSLIP unsigned char *cbuff; #endif int len = mtu * 2; /* * allow for arrival of larger UDP packets, even if we say not to * also fixes a bug in which SunOS sends 512-byte packets even with * an MSS of 128 */ if (len < 576 * 2) len = 576 * 2; xbuff = kmalloc(len + 4, GFP_ATOMIC); rbuff = kmalloc(len + 4, GFP_ATOMIC); #ifdef SL_INCLUDE_CSLIP cbuff = kmalloc(len + 4, GFP_ATOMIC); #endif #ifdef SL_INCLUDE_CSLIP if (xbuff == NULL || rbuff == NULL || cbuff == NULL) { #else if (xbuff == NULL || rbuff == NULL) { #endif if (mtu > sl->mtu) { printk(KERN_WARNING "%s: unable to grow slip buffers, MTU change cancelled.\n", dev->name); err = -ENOBUFS; } goto done; } spin_lock_bh(&sl->lock); err = -ENODEV; if (sl->tty == NULL) goto done_on_bh; xbuff = xchg(&sl->xbuff, xbuff); rbuff = xchg(&sl->rbuff, rbuff); #ifdef SL_INCLUDE_CSLIP cbuff = xchg(&sl->cbuff, cbuff); #endif if (sl->xleft) { if (sl->xleft <= len) { memcpy(sl->xbuff, sl->xhead, sl->xleft); } else { sl->xleft = 0; dev->stats.tx_dropped++; } } sl->xhead = sl->xbuff; if (sl->rcount) { if (sl->rcount <= len) { memcpy(sl->rbuff, rbuff, sl->rcount); } else { sl->rcount = 0; dev->stats.rx_over_errors++; set_bit(SLF_ERROR, &sl->flags); } } sl->mtu = mtu; WRITE_ONCE(dev->mtu, mtu); sl->buffsize = len; err = 0; done_on_bh: spin_unlock_bh(&sl->lock); done: kfree(xbuff); kfree(rbuff); #ifdef SL_INCLUDE_CSLIP kfree(cbuff); #endif return err; } /* Set the "sending" flag. This must be atomic hence the set_bit. */ static inline void sl_lock(struct slip *sl) { netif_stop_queue(sl->dev); } /* Clear the "sending" flag. This must be atomic, hence the ASM. 
*/ static inline void sl_unlock(struct slip *sl) { netif_wake_queue(sl->dev); } /* Send one completely decapsulated IP datagram to the IP layer. */ static void sl_bump(struct slip *sl) { struct net_device *dev = sl->dev; struct sk_buff *skb; int count; count = sl->rcount; #ifdef SL_INCLUDE_CSLIP if (sl->mode & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) { unsigned char c = sl->rbuff[0]; if (c & SL_TYPE_COMPRESSED_TCP) { /* ignore compressed packets when CSLIP is off */ if (!(sl->mode & SL_MODE_CSLIP)) { printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name); return; } /* make sure we've reserved enough space for uncompress to use */ if (count + 80 > sl->buffsize) { dev->stats.rx_over_errors++; return; } count = slhc_uncompress(sl->slcomp, sl->rbuff, count); if (count <= 0) return; } else if (c >= SL_TYPE_UNCOMPRESSED_TCP) { if (!(sl->mode & SL_MODE_CSLIP)) { /* turn on header compression */ sl->mode |= SL_MODE_CSLIP; sl->mode &= ~SL_MODE_ADAPTIVE; printk(KERN_INFO "%s: header compression turned on\n", dev->name); } sl->rbuff[0] &= 0x4f; if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0) return; } } #endif /* SL_INCLUDE_CSLIP */ dev->stats.rx_bytes += count; skb = dev_alloc_skb(count); if (skb == NULL) { printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; return; } skb->dev = dev; skb_put_data(skb, sl->rbuff, count); skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IP); netif_rx(skb); dev->stats.rx_packets++; } /* Encapsulate one IP datagram and stuff into a TTY queue. */ static void sl_encaps(struct slip *sl, unsigned char *icp, int len) { unsigned char *p; int actual, count; if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */ printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name); sl->dev->stats.tx_dropped++; sl_unlock(sl); return; } p = icp; #ifdef SL_INCLUDE_CSLIP if (sl->mode & SL_MODE_CSLIP) len = slhc_compress(sl->slcomp, p, len, sl->cbuff, &p, 1); #endif #ifdef CONFIG_SLIP_MODE_SLIP6 if (sl->mode & SL_MODE_SLIP6) count = slip_esc6(p, sl->xbuff, len); else #endif count = slip_esc(p, sl->xbuff, len); /* Order of next two lines is *very* important. * When we are sending a little amount of data, * the transfer may be completed inside the ops->write() * routine, because it's running with interrupts enabled. * In this case we *never* got WRITE_WAKEUP event, * if we did not request it before write operation. * 14 Oct 1994 Dmitry Gorodchanin. */ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); actual = sl->tty->ops->write(sl->tty, sl->xbuff, count); #ifdef SL_CHECK_TRANSMIT netif_trans_update(sl->dev); #endif sl->xleft = count - actual; sl->xhead = sl->xbuff + actual; #ifdef CONFIG_SLIP_SMART /* VSV */ clear_bit(SLF_OUTWAIT, &sl->flags); /* reset outfill flag */ #endif } /* Write out any remaining transmit buffer. Scheduled when tty is writable */ static void slip_transmit(struct work_struct *work) { struct slip *sl = container_of(work, struct slip, tx_work); int actual; spin_lock_bh(&sl->lock); /* First make sure we're connected. 
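 * The line discipline may be torn down concurrently: slip_close() clears
 * sl->tty under sl->lock, which is held here, so the check below is
 * race-free.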
*/ if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) { spin_unlock_bh(&sl->lock); return; } if (sl->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sl->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); spin_unlock_bh(&sl->lock); sl_unlock(sl); return; } actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); sl->xleft -= actual; sl->xhead += actual; spin_unlock_bh(&sl->lock); } /* * Called by the driver when there's room for more data. * Schedule the transmit. */ static void slip_write_wakeup(struct tty_struct *tty) { struct slip *sl; rcu_read_lock(); sl = rcu_dereference(tty->disc_data); if (sl) schedule_work(&sl->tx_work); rcu_read_unlock(); } static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct slip *sl = netdev_priv(dev); spin_lock(&sl->lock); if (netif_queue_stopped(dev)) { if (!netif_running(dev) || !sl->tty) goto out; /* May be we must check transmitter timeout here ? * 14 Oct 1994 Dmitry Gorodchanin. */ #ifdef SL_CHECK_TRANSMIT if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) { /* 20 sec timeout not reached */ goto out; } printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, (tty_chars_in_buffer(sl->tty) || sl->xleft) ? "bad line quality" : "driver error"); sl->xleft = 0; clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); sl_unlock(sl); #endif } out: spin_unlock(&sl->lock); } /* Encapsulate an IP datagram and kick it into a TTY queue. */ static netdev_tx_t sl_xmit(struct sk_buff *skb, struct net_device *dev) { struct slip *sl = netdev_priv(dev); spin_lock(&sl->lock); if (!netif_running(dev)) { spin_unlock(&sl->lock); printk(KERN_WARNING "%s: xmit call when iface is down\n", dev->name); dev_kfree_skb(skb); return NETDEV_TX_OK; } if (sl->tty == NULL) { spin_unlock(&sl->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } sl_lock(sl); dev->stats.tx_bytes += skb->len; sl_encaps(sl, skb->data, skb->len); spin_unlock(&sl->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } /****************************************** * Routines looking at netdevice side. ******************************************/ /* Netdevice UP -> DOWN routine */ static int sl_close(struct net_device *dev) { struct slip *sl = netdev_priv(dev); spin_lock_bh(&sl->lock); if (sl->tty) /* TTY discipline is running. 
*/ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); netif_stop_queue(dev); sl->rcount = 0; sl->xleft = 0; spin_unlock_bh(&sl->lock); return 0; } /* Netdevice DOWN -> UP routine */ static int sl_open(struct net_device *dev) { struct slip *sl = netdev_priv(dev); if (sl->tty == NULL) return -ENODEV; sl->flags &= (1 << SLF_INUSE); netif_start_queue(dev); return 0; } /* Netdevice change MTU request */ static int sl_change_mtu(struct net_device *dev, int new_mtu) { struct slip *sl = netdev_priv(dev); return sl_realloc_bufs(sl, new_mtu); } /* Netdevice get statistics request */ static void sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct net_device_stats *devstats = &dev->stats; #ifdef SL_INCLUDE_CSLIP struct slip *sl = netdev_priv(dev); struct slcompress *comp = sl->slcomp; #endif stats->rx_packets = devstats->rx_packets; stats->tx_packets = devstats->tx_packets; stats->rx_bytes = devstats->rx_bytes; stats->tx_bytes = devstats->tx_bytes; stats->rx_dropped = devstats->rx_dropped; stats->tx_dropped = devstats->tx_dropped; stats->tx_errors = devstats->tx_errors; stats->rx_errors = devstats->rx_errors; stats->rx_over_errors = devstats->rx_over_errors; #ifdef SL_INCLUDE_CSLIP if (comp) { /* Generic compressed statistics */ stats->rx_compressed = comp->sls_i_compressed; stats->tx_compressed = comp->sls_o_compressed; /* Are we really still needs this? */ stats->rx_fifo_errors += comp->sls_i_compressed; stats->rx_dropped += comp->sls_i_tossed; stats->tx_fifo_errors += comp->sls_o_compressed; stats->collisions += comp->sls_o_misses; } #endif } /* Netdevice register callback */ static int sl_init(struct net_device *dev) { struct slip *sl = netdev_priv(dev); /* * Finish setting up the DEVICE info. */ dev->mtu = sl->mtu; dev->type = ARPHRD_SLIP + sl->mode; #ifdef SL_CHECK_TRANSMIT dev->watchdog_timeo = 20*HZ; #endif return 0; } static void sl_uninit(struct net_device *dev) { struct slip *sl = netdev_priv(dev); sl_free_bufs(sl); } /* Hook the destructor so we can free slip devices at the right point in time */ static void sl_free_netdev(struct net_device *dev) { int i = dev->base_addr; slip_devs[i] = NULL; } static const struct net_device_ops sl_netdev_ops = { .ndo_init = sl_init, .ndo_uninit = sl_uninit, .ndo_open = sl_open, .ndo_stop = sl_close, .ndo_start_xmit = sl_xmit, .ndo_get_stats64 = sl_get_stats64, .ndo_change_mtu = sl_change_mtu, .ndo_tx_timeout = sl_tx_timeout, #ifdef CONFIG_SLIP_SMART .ndo_siocdevprivate = sl_siocdevprivate, #endif }; static void sl_setup(struct net_device *dev) { dev->netdev_ops = &sl_netdev_ops; dev->needs_free_netdev = true; dev->priv_destructor = sl_free_netdev; dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 10; /* MTU range: 68 - 65534 */ dev->min_mtu = 68; dev->max_mtu = 65534; /* New-style flags. */ dev->flags = IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST; } /****************************************** Routines looking at TTY side. ******************************************/ /* * Handle the 'receiver data ready' interrupt. * This function is called by the 'tty_io' module in the kernel when * a block of SLIP data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. 
This will not * be re-entered while running but other ldisc functions may be called * in parallel */ static void slip_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct slip *sl = tty->disc_data; if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) return; /* Read the characters out of the buffer */ while (count--) { if (fp && *fp++) { if (!test_and_set_bit(SLF_ERROR, &sl->flags)) sl->dev->stats.rx_errors++; cp++; continue; } #ifdef CONFIG_SLIP_MODE_SLIP6 if (sl->mode & SL_MODE_SLIP6) slip_unesc6(sl, *cp++); else #endif slip_unesc(sl, *cp++); } } /************************************ * slip_open helper routines. ************************************/ /* Collect hanged up channels */ static void sl_sync(void) { int i; struct net_device *dev; struct slip *sl; for (i = 0; i < slip_maxdev; i++) { dev = slip_devs[i]; if (dev == NULL) break; sl = netdev_priv(dev); if (sl->tty || sl->leased) continue; if (dev->flags & IFF_UP) dev_close(dev); } } /* Find a free SLIP channel, and link in this `tty' line. */ static struct slip *sl_alloc(void) { int i; char name[IFNAMSIZ]; struct net_device *dev = NULL; struct slip *sl; for (i = 0; i < slip_maxdev; i++) { dev = slip_devs[i]; if (dev == NULL) break; } /* Sorry, too many, all slots in use */ if (i >= slip_maxdev) return NULL; sprintf(name, "sl%d", i); dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, sl_setup); if (!dev) return NULL; dev->base_addr = i; sl = netdev_priv(dev); /* Initialize channel control data */ sl->magic = SLIP_MAGIC; sl->dev = dev; spin_lock_init(&sl->lock); INIT_WORK(&sl->tx_work, slip_transmit); sl->mode = SL_MODE_DEFAULT; #ifdef CONFIG_SLIP_SMART /* initialize timer_list struct */ timer_setup(&sl->keepalive_timer, sl_keepalive, 0); timer_setup(&sl->outfill_timer, sl_outfill, 0); #endif slip_devs[i] = dev; return sl; } /* * Open the high-level part of the SLIP channel. * This function is called by the TTY module when the * SLIP line discipline is called for. Because we are * sure the tty line exists, we only have to link it to * a free SLIP channel... * * Called in process context serialized from other ldisc calls. */ static int slip_open(struct tty_struct *tty) { struct slip *sl; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (tty->ops->write == NULL) return -EOPNOTSUPP; /* RTnetlink lock is misused here to serialize concurrent opens of slip channels. There are better ways, but it is the simplest one. */ rtnl_lock(); /* Collect hanged up channels. */ sl_sync(); sl = tty->disc_data; err = -EEXIST; /* First make sure we're not already connected. */ if (sl && sl->magic == SLIP_MAGIC) goto err_exit; /* OK. Find a free SLIP channel to use. */ err = -ENFILE; sl = sl_alloc(); if (sl == NULL) goto err_exit; sl->tty = tty; tty->disc_data = sl; sl->pid = current->pid; if (!test_bit(SLF_INUSE, &sl->flags)) { /* Perform the low-level SLIP initialization. */ err = sl_alloc_bufs(sl, SL_MTU); if (err) goto err_free_chan; set_bit(SLF_INUSE, &sl->flags); err = register_netdevice(sl->dev); if (err) goto err_free_bufs; } #ifdef CONFIG_SLIP_SMART if (sl->keepalive) { sl->keepalive_timer.expires = jiffies + sl->keepalive * HZ; add_timer(&sl->keepalive_timer); } if (sl->outfill) { sl->outfill_timer.expires = jiffies + sl->outfill * HZ; add_timer(&sl->outfill_timer); } #endif /* Done. We have linked the TTY line to a channel. 
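 *
 * For context (illustrative, user-space side): this path runs when a
 * program such as slattach pushes the SLIP discipline onto an open tty:
 *
 *	int ldisc = N_SLIP;
 *	ioctl(tty_fd, TIOCSETD, &ldisc);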
*/ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ /* TTY layer expects 0 on success */ return 0; err_free_bufs: sl_free_bufs(sl); err_free_chan: sl->tty = NULL; tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); sl_free_netdev(sl->dev); /* do not call free_netdev before rtnl_unlock */ rtnl_unlock(); free_netdev(sl->dev); return err; err_exit: rtnl_unlock(); /* Count references from TTY module */ return err; } /* * Close down a SLIP channel. * This means flushing out any pending queues, and then returning. This * call is serialized against other ldisc functions. * * We also use this method fo a hangup event */ static void slip_close(struct tty_struct *tty) { struct slip *sl = tty->disc_data; /* First make sure we're connected. */ if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty) return; spin_lock_bh(&sl->lock); rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); synchronize_rcu(); flush_work(&sl->tx_work); /* VSV = very important to remove timers */ #ifdef CONFIG_SLIP_SMART del_timer_sync(&sl->keepalive_timer); del_timer_sync(&sl->outfill_timer); #endif /* Flush network side */ unregister_netdev(sl->dev); /* This will complete via sl_free_netdev */ } static void slip_hangup(struct tty_struct *tty) { slip_close(tty); } /************************************************************************ * STANDARD SLIP ENCAPSULATION * ************************************************************************/ static int slip_esc(unsigned char *s, unsigned char *d, int len) { unsigned char *ptr = d; unsigned char c; /* * Send an initial END character to flush out any * data that may have accumulated in the receiver * due to line noise. */ *ptr++ = END; /* * For each byte in the packet, send the appropriate * character sequence, according to the SLIP protocol. */ while (len-- > 0) { switch (c = *s++) { case END: *ptr++ = ESC; *ptr++ = ESC_END; break; case ESC: *ptr++ = ESC; *ptr++ = ESC_ESC; break; default: *ptr++ = c; break; } } *ptr++ = END; return ptr - d; } static void slip_unesc(struct slip *sl, unsigned char s) { switch (s) { case END: #ifdef CONFIG_SLIP_SMART /* drop keeptest bit = VSV */ if (test_bit(SLF_KEEPTEST, &sl->flags)) clear_bit(SLF_KEEPTEST, &sl->flags); #endif if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2)) sl_bump(sl); clear_bit(SLF_ESCAPE, &sl->flags); sl->rcount = 0; return; case ESC: set_bit(SLF_ESCAPE, &sl->flags); return; case ESC_ESC: if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) s = ESC; break; case ESC_END: if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) s = END; break; } if (!test_bit(SLF_ERROR, &sl->flags)) { if (sl->rcount < sl->buffsize) { sl->rbuff[sl->rcount++] = s; return; } sl->dev->stats.rx_over_errors++; set_bit(SLF_ERROR, &sl->flags); } } #ifdef CONFIG_SLIP_MODE_SLIP6 /************************************************************************ * 6 BIT SLIP ENCAPSULATION * ************************************************************************/ static int slip_esc6(unsigned char *s, unsigned char *d, int len) { unsigned char *ptr = d; unsigned char c; int i; unsigned short v = 0; short bits = 0; /* * Send an initial END character to flush out any * data that may have accumulated in the receiver * due to line noise. 
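 *
 * Worked example of the 6-bit packing below (illustrative): bytes
 * 0xAB 0xCD accumulate into v as 1010101111001101; the 6-bit groups
 * 101010, 111100 and 1101 (padded to 110100) each emit 0x30 + group,
 * so the output stays within 0x30..0x6F and 0x70 remains free as the
 * frame marker.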
*/ *ptr++ = 0x70; /* * Encode the packet into printable ascii characters */ for (i = 0; i < len; ++i) { v = (v << 8) | s[i]; bits += 8; while (bits >= 6) { bits -= 6; c = 0x30 + ((v >> bits) & 0x3F); *ptr++ = c; } } if (bits) { c = 0x30 + ((v << (6 - bits)) & 0x3F); *ptr++ = c; } *ptr++ = 0x70; return ptr - d; } static void slip_unesc6(struct slip *sl, unsigned char s) { unsigned char c; if (s == 0x70) { #ifdef CONFIG_SLIP_SMART /* drop keeptest bit = VSV */ if (test_bit(SLF_KEEPTEST, &sl->flags)) clear_bit(SLF_KEEPTEST, &sl->flags); #endif if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2)) sl_bump(sl); sl->rcount = 0; sl->xbits = 0; sl->xdata = 0; } else if (s >= 0x30 && s < 0x70) { sl->xdata = (sl->xdata << 6) | ((s - 0x30) & 0x3F); sl->xbits += 6; if (sl->xbits >= 8) { sl->xbits -= 8; c = (unsigned char)(sl->xdata >> sl->xbits); if (!test_bit(SLF_ERROR, &sl->flags)) { if (sl->rcount < sl->buffsize) { sl->rbuff[sl->rcount++] = c; return; } sl->dev->stats.rx_over_errors++; set_bit(SLF_ERROR, &sl->flags); } } } } #endif /* CONFIG_SLIP_MODE_SLIP6 */ /* Perform I/O control on an active SLIP channel. */ static int slip_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct slip *sl = tty->disc_data; unsigned int tmp; int __user *p = (int __user *)arg; /* First make sure we're connected. */ if (!sl || sl->magic != SLIP_MAGIC) return -EINVAL; switch (cmd) { case SIOCGIFNAME: tmp = strlen(sl->dev->name) + 1; if (copy_to_user((void __user *)arg, sl->dev->name, tmp)) return -EFAULT; return 0; case SIOCGIFENCAP: if (put_user(sl->mode, p)) return -EFAULT; return 0; case SIOCSIFENCAP: if (get_user(tmp, p)) return -EFAULT; #ifndef SL_INCLUDE_CSLIP if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE)) return -EINVAL; #else if ((tmp & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) == (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) /* return -EINVAL; */ tmp &= ~SL_MODE_ADAPTIVE; #endif #ifndef CONFIG_SLIP_MODE_SLIP6 if (tmp & SL_MODE_SLIP6) return -EINVAL; #endif sl->mode = tmp; sl->dev->type = ARPHRD_SLIP + sl->mode; return 0; case SIOCSIFHWADDR: return -EINVAL; #ifdef CONFIG_SLIP_SMART /* VSV changes start here */ case SIOCSKEEPALIVE: if (get_user(tmp, p)) return -EFAULT; if (tmp > 255) /* max for unchar */ return -EINVAL; spin_lock_bh(&sl->lock); if (!sl->tty) { spin_unlock_bh(&sl->lock); return -ENODEV; } sl->keepalive = (u8)tmp; if (sl->keepalive != 0) { mod_timer(&sl->keepalive_timer, jiffies + sl->keepalive * HZ); set_bit(SLF_KEEPTEST, &sl->flags); } else del_timer(&sl->keepalive_timer); spin_unlock_bh(&sl->lock); return 0; case SIOCGKEEPALIVE: if (put_user(sl->keepalive, p)) return -EFAULT; return 0; case SIOCSOUTFILL: if (get_user(tmp, p)) return -EFAULT; if (tmp > 255) /* max for unchar */ return -EINVAL; spin_lock_bh(&sl->lock); if (!sl->tty) { spin_unlock_bh(&sl->lock); return -ENODEV; } sl->outfill = (u8)tmp; if (sl->outfill != 0) { mod_timer(&sl->outfill_timer, jiffies + sl->outfill * HZ); set_bit(SLF_OUTWAIT, &sl->flags); } else del_timer(&sl->outfill_timer); spin_unlock_bh(&sl->lock); return 0; case SIOCGOUTFILL: if (put_user(sl->outfill, p)) return -EFAULT; return 0; /* VSV changes end */ #endif default: return tty_mode_ioctl(tty, cmd, arg); } } /* VSV changes start here */ #ifdef CONFIG_SLIP_SMART /* function sl_siocdevprivate called from net/core/dev.c to allow get/set outfill/keepalive parameter by ifconfig */ static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) { struct slip *sl = netdev_priv(dev); unsigned long *p = (unsigned long 
*)&rq->ifr_ifru; if (sl == NULL) /* Allocation failed ?? */ return -ENODEV; if (in_compat_syscall()) return -EOPNOTSUPP; spin_lock_bh(&sl->lock); if (!sl->tty) { spin_unlock_bh(&sl->lock); return -ENODEV; } switch (cmd) { case SIOCSKEEPALIVE: /* max for unsigned char */ if ((unsigned)*p > 255) { spin_unlock_bh(&sl->lock); return -EINVAL; } sl->keepalive = (u8)*p; if (sl->keepalive != 0) { mod_timer(&sl->keepalive_timer, jiffies + sl->keepalive * HZ); set_bit(SLF_KEEPTEST, &sl->flags); } else del_timer(&sl->keepalive_timer); break; case SIOCGKEEPALIVE: *p = sl->keepalive; break; case SIOCSOUTFILL: if ((unsigned)*p > 255) { /* max for unsigned char */ spin_unlock_bh(&sl->lock); return -EINVAL; } sl->outfill = (u8)*p; if (sl->outfill != 0) { mod_timer(&sl->outfill_timer, jiffies + sl->outfill * HZ); set_bit(SLF_OUTWAIT, &sl->flags); } else del_timer(&sl->outfill_timer); break; case SIOCGOUTFILL: *p = sl->outfill; break; case SIOCSLEASE: /* Resolve a race: the device may have been hung up and * reopened by another process while we were ioctl'ing. */ if (sl->tty != current->signal->tty && sl->pid != current->pid) { spin_unlock_bh(&sl->lock); return -EPERM; } sl->leased = 0; if (*p) sl->leased = 1; break; case SIOCGLEASE: *p = sl->leased; } spin_unlock_bh(&sl->lock); return 0; } #endif /* VSV changes end */ static struct tty_ldisc_ops sl_ldisc = { .owner = THIS_MODULE, .num = N_SLIP, .name = "slip", .open = slip_open, .close = slip_close, .hangup = slip_hangup, .ioctl = slip_ioctl, .receive_buf = slip_receive_buf, .write_wakeup = slip_write_wakeup, }; static int __init slip_init(void) { int status; if (slip_maxdev < 4) slip_maxdev = 4; /* Sanity */ printk(KERN_INFO "SLIP: version %s (dynamic channels, max=%d)" #ifdef CONFIG_SLIP_MODE_SLIP6 " (6 bit encapsulation enabled)" #endif ".\n", SLIP_VERSION, slip_maxdev); #if defined(SL_INCLUDE_CSLIP) printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California.\n"); #endif #ifdef CONFIG_SLIP_SMART printk(KERN_INFO "SLIP linefill/keepalive option.\n"); #endif slip_devs = kcalloc(slip_maxdev, sizeof(struct net_device *), GFP_KERNEL); if (!slip_devs) return -ENOMEM; /* Fill in our line protocol discipline, and register it */ status = tty_register_ldisc(&sl_ldisc); if (status != 0) { printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status); kfree(slip_devs); } return status; } static void __exit slip_exit(void) { int i; struct net_device *dev; struct slip *sl; unsigned long timeout = jiffies + HZ; int busy = 0; if (slip_devs == NULL) return; /* First of all: check for active disciplines and hang them up. */ do { if (busy) msleep_interruptible(100); busy = 0; for (i = 0; i < slip_maxdev; i++) { dev = slip_devs[i]; if (!dev) continue; sl = netdev_priv(dev); spin_lock_bh(&sl->lock); if (sl->tty) { busy++; tty_hangup(sl->tty); } spin_unlock_bh(&sl->lock); } } while (busy && time_before(jiffies, timeout)); /* FIXME: hangup is async so we should wait when doing this second phase */ for (i = 0; i < slip_maxdev; i++) { dev = slip_devs[i]; if (!dev) continue; slip_devs[i] = NULL; sl = netdev_priv(dev); if (sl->tty) { printk(KERN_ERR "%s: tty discipline still running\n", dev->name); } unregister_netdev(dev); } kfree(slip_devs); slip_devs = NULL; tty_unregister_ldisc(&sl_ldisc); } module_init(slip_init); module_exit(slip_exit); #ifdef CONFIG_SLIP_SMART /* * This is the start of the code for multislip-style line checking, * added by Stanislav Voronyi.
All earlier changes are marked VSV. */ static void sl_outfill(struct timer_list *t) { struct slip *sl = from_timer(sl, t, outfill_timer); spin_lock(&sl->lock); if (sl->tty == NULL) goto out; if (sl->outfill) { if (test_bit(SLF_OUTWAIT, &sl->flags)) { /* no packets were transmitted, do outfill */ #ifdef CONFIG_SLIP_MODE_SLIP6 unsigned char s = (sl->mode & SL_MODE_SLIP6) ? 0x70 : END; #else unsigned char s = END; #endif /* put an END into the tty queue. Is this right? */ if (!netif_queue_stopped(sl->dev)) { /* if the device is busy, skip the outfill */ sl->tty->ops->write(sl->tty, &s, 1); } } else set_bit(SLF_OUTWAIT, &sl->flags); mod_timer(&sl->outfill_timer, jiffies + sl->outfill * HZ); } out: spin_unlock(&sl->lock); } static void sl_keepalive(struct timer_list *t) { struct slip *sl = from_timer(sl, t, keepalive_timer); spin_lock(&sl->lock); if (sl->tty == NULL) goto out; if (sl->keepalive) { if (test_bit(SLF_KEEPTEST, &sl->flags)) { /* keepalive test bit is still set: nothing was received, we must hang up */ if (sl->outfill) /* outfill timer must be deleted too */ (void)del_timer(&sl->outfill_timer); printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name); /* this must hang up the tty and close the SLIP channel */ tty_hangup(sl->tty); /* nothing else should be needed */ goto out; } else set_bit(SLF_KEEPTEST, &sl->flags); mod_timer(&sl->keepalive_timer, jiffies + sl->keepalive * HZ); } out: spin_unlock(&sl->lock); } #endif MODULE_DESCRIPTION("SLIP (serial line) protocol module"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_SLIP);
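/* * Worked example for slip_esc() above, assuming the RFC 1055 byte values * defined in slip.h (END = 0xC0, ESC = 0xDB, ESC_END = 0xDC, ESC_ESC = 0xDD): * the three-byte packet { 0xC0, 0x01, 0xDB } is framed as * C0 DB DC 01 DB DD C0 - a leading flush END, each reserved byte replaced by * its two-byte escape sequence, the literal byte copied through, and a * terminating END. In the worst case (a packet made entirely of END/ESC * bytes) the output grows to 2 * len + 2 bytes, so the transmit buffer must * be sized for that. In the 6-bit mode, slip_esc6() instead brackets the * packet with 0x70 markers and emits one character in the range 0x30..0x6F * per 6 bits of payload, i.e. four characters for every three octets. */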
// SPDX-License-Identifier: GPL-2.0+ /* * HID driver for UC-Logic devices not fully compliant with HID standard * * Copyright (c) 2010-2014 Nikolai Kondrashov * Copyright (c) 2013 Martin Rusko */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/timer.h> #include "usbhid/usbhid.h" #include "hid-uclogic-params.h" #include "hid-ids.h" /** * uclogic_inrange_timeout - handle pen in-range state timeout. * Emulate input events normally generated when pen goes out of range for * tablets which don't report that. * * @t: The timer the timeout handler is attached to, stored in a struct * uclogic_drvdata.
*/ static void uclogic_inrange_timeout(struct timer_list *t) { struct uclogic_drvdata *drvdata = from_timer(drvdata, t, inrange_timer); struct input_dev *input = drvdata->pen_input; if (input == NULL) return; input_report_abs(input, ABS_PRESSURE, 0); /* If BTN_TOUCH state is changing */ if (test_bit(BTN_TOUCH, input->key)) { input_event(input, EV_MSC, MSC_SCAN, /* Digitizer Tip Switch usage */ 0xd0042); input_report_key(input, BTN_TOUCH, 0); } input_report_key(input, BTN_TOOL_PEN, 0); input_sync(input); } static const __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev); if (drvdata->desc_ptr != NULL) { *rsize = drvdata->desc_size; return drvdata->desc_ptr; } return rdesc; } static int uclogic_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev); struct uclogic_params *params = &drvdata->params; /* Discard invalid pen usages */ if (params->pen.usage_invalid && (field->application == HID_DG_PEN)) return -1; /* Let hid-core decide what to do */ return 0; } static int uclogic_input_configured(struct hid_device *hdev, struct hid_input *hi) { struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev); struct uclogic_params *params = &drvdata->params; const char *suffix = NULL; struct hid_field *field; size_t i; const struct uclogic_params_frame *frame; /* no report associated (HID_QUIRK_MULTI_INPUT not set) */ if (!hi->report) return 0; /* * Check if this is the input corresponding to the pen report * in need of tweaking. */ if (hi->report->id == params->pen.id) { /* Remember the input device so we can simulate events */ drvdata->pen_input = hi->input; } /* If it's one of the frame devices */ for (i = 0; i < ARRAY_SIZE(params->frame_list); i++) { frame = &params->frame_list[i]; if (hi->report->id == frame->id) { /* Assign custom suffix, if any */ suffix = frame->suffix; /* * Disable EV_MSC reports for touch ring interfaces to * make the Wacom driver pick up the touch ring extents */ if (frame->touch_byte > 0) __clear_bit(EV_MSC, hi->input->evbit); } } if (!suffix) { field = hi->report->field[0]; switch (field->application) { case HID_GD_KEYBOARD: suffix = "Keyboard"; break; case HID_GD_MOUSE: suffix = "Mouse"; break; case HID_GD_KEYPAD: suffix = "Pad"; break; case HID_DG_PEN: case HID_DG_DIGITIZER: suffix = "Pen"; break; case HID_CP_CONSUMER_CONTROL: suffix = "Consumer Control"; break; case HID_GD_SYSTEM_CONTROL: suffix = "System Control"; break; } } if (suffix) hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, suffix); return 0; } static int uclogic_probe(struct hid_device *hdev, const struct hid_device_id *id) { int rc; struct uclogic_drvdata *drvdata = NULL; bool params_initialized = false; if (!hid_is_usb(hdev)) return -EINVAL; /* * libinput requires the pad interface to be on a different node * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
*/ hdev->quirks |= HID_QUIRK_MULTI_INPUT; hdev->quirks |= HID_QUIRK_HIDINPUT_FORCE; /* Allocate and assign driver data */ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL); if (drvdata == NULL) { rc = -ENOMEM; goto failure; } timer_setup(&drvdata->inrange_timer, uclogic_inrange_timeout, 0); drvdata->re_state = U8_MAX; drvdata->quirks = id->driver_data; hid_set_drvdata(hdev, drvdata); /* Initialize the device and retrieve interface parameters */ rc = uclogic_params_init(&drvdata->params, hdev); if (rc != 0) { hid_err(hdev, "failed probing parameters: %d\n", rc); goto failure; } params_initialized = true; hid_dbg(hdev, "parameters:\n"); uclogic_params_hid_dbg(hdev, &drvdata->params); if (drvdata->params.invalid) { hid_info(hdev, "interface is invalid, ignoring\n"); rc = -ENODEV; goto failure; } /* Generate replacement report descriptor */ rc = uclogic_params_get_desc(&drvdata->params, &drvdata->desc_ptr, &drvdata->desc_size); if (rc) { hid_err(hdev, "failed generating replacement report descriptor: %d\n", rc); goto failure; } rc = hid_parse(hdev); if (rc) { hid_err(hdev, "parse failed\n"); goto failure; } rc = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (rc) { hid_err(hdev, "hw start failed\n"); goto failure; } return 0; failure: /* Assume "remove" might not be called if "probe" failed */ if (params_initialized) uclogic_params_cleanup(&drvdata->params); return rc; } #ifdef CONFIG_PM static int uclogic_resume(struct hid_device *hdev) { int rc; struct uclogic_params params; /* Re-initialize the device, but discard parameters */ rc = uclogic_params_init(&params, hdev); if (rc != 0) hid_err(hdev, "failed to re-initialize the device\n"); else uclogic_params_cleanup(&params); return rc; } #endif /** * uclogic_exec_event_hook - schedule the associated work if the received * event is hooked. * * @p: Tablet interface report parameters. * @event: Raw event. * @size: The size of the event. * * Returns: * Whether the event was hooked or not. */ static bool uclogic_exec_event_hook(struct uclogic_params *p, u8 *event, int size) { struct uclogic_raw_event_hook *curr; if (!p->event_hooks) return false; list_for_each_entry(curr, &p->event_hooks->list, list) { if (curr->size == size && memcmp(curr->event, event, size) == 0) { schedule_work(&curr->work); return true; } } return false; } /** * uclogic_raw_event_pen - handle raw pen events (pen HID reports). * * @drvdata: Driver data. * @data: Report data buffer, can be modified. * @size: Report data size, bytes. * * Returns: * Negative value on error (stops event delivery), zero for success.
*/ static int uclogic_raw_event_pen(struct uclogic_drvdata *drvdata, u8 *data, int size) { struct uclogic_params_pen *pen = &drvdata->params.pen; WARN_ON(drvdata == NULL); WARN_ON(data == NULL && size != 0); /* If in-range reports are inverted */ if (pen->inrange == UCLOGIC_PARAMS_PEN_INRANGE_INVERTED) { /* Invert the in-range bit */ data[1] ^= 0x40; } /* * If report contains fragmented high-resolution pen * coordinates */ if (size >= 10 && pen->fragmented_hires) { u8 pressure_low_byte; u8 pressure_high_byte; /* Lift pressure bytes */ pressure_low_byte = data[6]; pressure_high_byte = data[7]; /* * Move Y coord to make space for high-order X * coord byte */ data[6] = data[5]; data[5] = data[4]; /* Move high-order X coord byte */ data[4] = data[8]; /* Move high-order Y coord byte */ data[7] = data[9]; /* Place pressure bytes */ data[8] = pressure_low_byte; data[9] = pressure_high_byte; } /* If we need to emulate in-range detection */ if (pen->inrange == UCLOGIC_PARAMS_PEN_INRANGE_NONE) { /* Set in-range bit */ data[1] |= 0x40; /* (Re-)start in-range timeout */ mod_timer(&drvdata->inrange_timer, jiffies + msecs_to_jiffies(100)); } /* If we report tilt and Y direction is flipped */ if (size >= 12 && pen->tilt_y_flipped) data[11] = -data[11]; return 0; } /** * uclogic_raw_event_frame - handle raw frame events (frame HID reports). * * @drvdata: Driver data. * @frame: The parameters of the frame controls to handle. * @data: Report data buffer, can be modified. * @size: Report data size, bytes. * * Returns: * Negative value on error (stops event delivery), zero for success. */ static int uclogic_raw_event_frame( struct uclogic_drvdata *drvdata, const struct uclogic_params_frame *frame, u8 *data, int size) { WARN_ON(drvdata == NULL); WARN_ON(data == NULL && size != 0); /* If needed, and possible, set the pad device ID for Wacom drivers */ if (frame->dev_id_byte > 0 && frame->dev_id_byte < size) { /* If we also have a touch ring and the finger left it */ if (frame->touch_byte > 0 && frame->touch_byte < size && data[frame->touch_byte] == 0) { data[frame->dev_id_byte] = 0; } else { data[frame->dev_id_byte] = 0xf; } } /* If needed, and possible, read the rotary encoder state change */ if (frame->re_lsb > 0 && frame->re_lsb / 8 < size) { unsigned int byte = frame->re_lsb / 8; unsigned int bit = frame->re_lsb % 8; u8 change; u8 prev_state = drvdata->re_state; /* Read Gray-coded state */ u8 state = (data[byte] >> bit) & 0x3; /* Encode state change into 2-bit signed integer */ if ((prev_state == 1 && state == 0) || (prev_state == 2 && state == 3)) { change = 1; } else if ((prev_state == 2 && state == 0) || (prev_state == 1 && state == 3)) { change = 3; } else { change = 0; } /* Write change */ data[byte] = (data[byte] & ~((u8)3 << bit)) | (change << bit); /* Remember state */ drvdata->re_state = state; } /* If needed, and possible, transform the touch ring reports */ if (frame->touch_byte > 0 && frame->touch_byte < size) { __s8 value = data[frame->touch_byte]; if (value != 0) { if (frame->touch_flip_at != 0) { value = frame->touch_flip_at - value; if (value <= 0) value = frame->touch_max + value; } data[frame->touch_byte] = value - 1; } } /* If needed, and possible, transform the bitmap dial reports */ if (frame->bitmap_dial_byte > 0 && frame->bitmap_dial_byte < size) { if (data[frame->bitmap_dial_byte] == 2) data[frame->bitmap_dial_byte] = -1; } return 0; } static int uclogic_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { unsigned int report_id = report->id; struct uclogic_drvdata *drvdata =
hid_get_drvdata(hdev); struct uclogic_params *params = &drvdata->params; struct uclogic_params_pen_subreport *subreport; struct uclogic_params_pen_subreport *subreport_list_end; size_t i; /* Do not handle anything but input reports */ if (report->type != HID_INPUT_REPORT) return 0; if (uclogic_exec_event_hook(params, data, size)) return 0; while (true) { /* Tweak pen reports, if necessary */ if ((report_id == params->pen.id) && (size >= 2)) { subreport_list_end = params->pen.subreport_list + ARRAY_SIZE(params->pen.subreport_list); /* Try to match a subreport */ for (subreport = params->pen.subreport_list; subreport < subreport_list_end; subreport++) { if (subreport->value != 0 && subreport->value == data[1]) { break; } } /* If a subreport matched */ if (subreport < subreport_list_end) { /* Change to subreport ID, and restart */ report_id = data[0] = subreport->id; continue; } else { return uclogic_raw_event_pen(drvdata, data, size); } } /* Tweak frame control reports, if necessary */ for (i = 0; i < ARRAY_SIZE(params->frame_list); i++) { if (report_id == params->frame_list[i].id) { return uclogic_raw_event_frame( drvdata, &params->frame_list[i], data, size); } } break; } return 0; } static void uclogic_remove(struct hid_device *hdev) { struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev); del_timer_sync(&drvdata->inrange_timer); hid_hw_stop(hdev); kfree(drvdata->desc_ptr); uclogic_params_cleanup(&drvdata->params); } static const struct hid_device_id uclogic_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET2) }, { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_UGEE_TABLET_81) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_UGEE_TABLET_45) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_UGEE_TABLET_47) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GT5040) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_PARBLO_A610_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_G5) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_RAINBOW_CV720) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_G640) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2) }, {
HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW), .driver_data = UCLOGIC_MOUSE_FRAME_QUIRK | UCLOGIC_BATTERY_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW), .driver_data = UCLOGIC_MOUSE_FRAME_QUIRK | UCLOGIC_BATTERY_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06) }, { } }; MODULE_DEVICE_TABLE(hid, uclogic_devices); static struct hid_driver uclogic_driver = { .name = "uclogic", .id_table = uclogic_devices, .probe = uclogic_probe, .remove = uclogic_remove, .report_fixup = uclogic_report_fixup, .raw_event = uclogic_raw_event, .input_mapping = uclogic_input_mapping, .input_configured = uclogic_input_configured, #ifdef CONFIG_PM .resume = uclogic_resume, .reset_resume = uclogic_resume, #endif }; module_hid_driver(uclogic_driver); MODULE_AUTHOR("Martin Rusko"); MODULE_AUTHOR("Nikolai Kondrashov"); MODULE_DESCRIPTION("HID driver for UC-Logic devices not fully compliant with HID standard"); MODULE_LICENSE("GPL"); #ifdef CONFIG_HID_KUNIT_TEST #include "hid-uclogic-core-test.c" #endif
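/* * A worked example for the rotary encoder handling in * uclogic_raw_event_frame() above: the hardware reports a 2-bit Gray-coded * state, and the driver rewrites it in place as a 2-bit signed step. With * drvdata->re_state == 1, an incoming state of 0 is rewritten as 01b * (change = 1, one step in one direction), while an incoming state of 3 is * rewritten as 11b (change = 3, i.e. -1 in two's complement, one step the * other way). Any other transition - including the one from the U8_MAX * initial state set in uclogic_probe() - yields 00b (no movement), and the * raw state is saved for matching against the next report. */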
// SPDX-License-Identifier: GPL-2.0 /* * Parts of this file are * Copyright (C) 2022-2023 Intel Corporation */ #include <linux/ieee80211.h> #include <linux/export.h> #include <net/cfg80211.h> #include "nl80211.h" #include "core.h" #include "rdev-ops.h" static int ___cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, unsigned int link_id, bool notify) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; lockdep_assert_wiphy(wdev->wiphy); if (!rdev->ops->stop_ap) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (!wdev->links[link_id].ap.beacon_interval) return -ENOENT; err = rdev_stop_ap(rdev, dev, link_id); if (!err) { wdev->conn_owner_nlportid = 0; wdev->links[link_id].ap.beacon_interval = 0; memset(&wdev->links[link_id].ap.chandef, 0, sizeof(wdev->links[link_id].ap.chandef)); wdev->u.ap.ssid_len = 0; rdev_set_qos_map(rdev, dev, NULL); if (notify) nl80211_send_ap_stopped(wdev, link_id); /* Should we apply the grace period during beaconing interface * shutdown also? */ cfg80211_sched_dfs_chan_update(rdev); } schedule_work(&cfg80211_disconnect_work); return err; } int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, int link_id, bool notify) { unsigned int link; int ret = 0; if (link_id >= 0) return ___cfg80211_stop_ap(rdev, dev, link_id, notify); for_each_valid_link(dev->ieee80211_ptr, link) { int ret1 = ___cfg80211_stop_ap(rdev, dev, link, notify); if (ret1) ret = ret1; /* try the next one also if one errored */ } return ret; }
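/* * Note on the wrapper above: a caller passing link_id >= 0 stops the AP on * that single link only, while a negative link_id iterates every valid link * of the wireless device (the MLO case), attempting each link even if an * earlier one returned an error; the last non-zero return value wins. For * example, cfg80211_stop_ap(rdev, dev, -1, true) tears down all links and * sends an nl80211 "AP stopped" notification for each link that was * actually beaconing (links that were not beaconing return -ENOENT and are * skipped silently). */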
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * acpi.h - ACPI Interface * * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> */ #ifndef _LINUX_ACPI_H #define _LINUX_ACPI_H #include <linux/errno.h> #include <linux/ioport.h> /* for struct resource */ #include
<linux/resource_ext.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/property.h> #include <linux/uuid.h> #include <linux/node.h> struct irq_domain; struct irq_domain_ops; #ifndef _LINUX #define _LINUX #endif #include <acpi/acpi.h> #include <acpi/acpi_numa.h> #ifdef CONFIG_ACPI #include <linux/list.h> #include <linux/dynamic_debug.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/fw_table.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_io.h> #include <asm/acpi.h> #ifdef CONFIG_ACPI_TABLE_LIB #define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "ACPI") #define __init_or_acpilib #define __initdata_or_acpilib #else #define EXPORT_SYMBOL_ACPI_LIB(x) #define __init_or_acpilib __init #define __initdata_or_acpilib __initdata #endif static inline acpi_handle acpi_device_handle(struct acpi_device *adev) { return adev ? adev->handle : NULL; } #define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) #define ACPI_HANDLE_FWNODE(fwnode) \ acpi_device_handle(to_acpi_device_node(fwnode)) static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) { struct fwnode_handle *fwnode; fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL); if (!fwnode) return NULL; fwnode_init(fwnode, &acpi_static_fwnode_ops); return fwnode; } static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode) { if (WARN_ON(!is_acpi_static_node(fwnode))) return; kfree(fwnode); } static inline bool has_acpi_companion(struct device *dev) { return is_acpi_device_node(dev->fwnode); } static inline void acpi_preset_companion(struct device *dev, struct acpi_device *parent, u64 addr) { ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false)); } static inline const char *acpi_dev_name(struct acpi_device *adev) { return dev_name(&adev->dev); } struct device *acpi_get_first_physical_node(struct acpi_device *adev); enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, ACPI_IRQ_MODEL_IOAPIC, ACPI_IRQ_MODEL_IOSAPIC, ACPI_IRQ_MODEL_PLATFORM, ACPI_IRQ_MODEL_GIC, ACPI_IRQ_MODEL_LPIC, ACPI_IRQ_MODEL_RINTC, ACPI_IRQ_MODEL_COUNT }; extern enum acpi_irq_model_id acpi_irq_model; enum acpi_interrupt_id { ACPI_INTERRUPT_PMI = 1, ACPI_INTERRUPT_INIT, ACPI_INTERRUPT_CPEI, ACPI_INTERRUPT_COUNT }; #define ACPI_SPACE_MEM 0 enum acpi_address_range_id { ACPI_ADDRESS_RANGE_MEMORY = 1, ACPI_ADDRESS_RANGE_RESERVED = 2, ACPI_ADDRESS_RANGE_ACPI = 3, ACPI_ADDRESS_RANGE_NVS = 4, ACPI_ADDRESS_RANGE_COUNT }; /* Table Handlers */ typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); /* Debugger support */ struct acpi_debugger_ops { int (*create_thread)(acpi_osd_exec_callback function, void *context); ssize_t (*write_log)(const char *msg); ssize_t (*read_cmd)(char *buffer, size_t length); int (*wait_command_ready)(bool single_step, char *buffer, size_t length); int (*notify_command_complete)(void); }; struct acpi_debugger { const struct acpi_debugger_ops *ops; struct module *owner; struct mutex lock; }; #ifdef CONFIG_ACPI_DEBUGGER int __init acpi_debugger_init(void); int acpi_register_debugger(struct module *owner, const struct acpi_debugger_ops *ops); void acpi_unregister_debugger(const struct acpi_debugger_ops *ops); int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context); ssize_t acpi_debugger_write_log(const char *msg); ssize_t 
acpi_debugger_read_cmd(char *buffer, size_t buffer_length); int acpi_debugger_wait_command_ready(void); int acpi_debugger_notify_command_complete(void); #else static inline int acpi_debugger_init(void) { return -ENODEV; } static inline int acpi_register_debugger(struct module *owner, const struct acpi_debugger_ops *ops) { return -ENODEV; } static inline void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) { } static inline int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context) { return -ENODEV; } static inline int acpi_debugger_write_log(const char *msg) { return -ENODEV; } static inline int acpi_debugger_read_cmd(char *buffer, u32 buffer_length) { return -ENODEV; } static inline int acpi_debugger_wait_command_ready(void) { return -ENODEV; } static inline int acpi_debugger_notify_command_complete(void) { return -ENODEV; } #endif #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) void __iomem *__acpi_map_table(unsigned long phys, unsigned long size); void __acpi_unmap_table(void __iomem *map, unsigned long size); int early_acpi_boot_init(void); int acpi_boot_init (void); void acpi_boot_table_prepare (void); void acpi_boot_table_init (void); int acpi_mps_check (void); int acpi_numa_init (void); int acpi_locate_initial_tables (void); void acpi_reserve_initial_tables (void); void acpi_table_init_complete (void); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init_or_acpilib acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, unsigned int max_entries); int __init_or_acpilib acpi_table_parse_entries_array(char *id, unsigned long table_size, struct acpi_subtable_proc *proc, int proc_num, unsigned int max_entries); int acpi_table_parse_madt(enum acpi_madt_type id, acpi_tbl_entry_handler handler, unsigned int max_entries); int __init_or_acpilib acpi_table_parse_cedt(enum acpi_cedt_type id, acpi_tbl_entry_handler_arg handler_arg, void *arg); int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } #endif void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); #if defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) void acpi_arch_dma_setup(struct device *dev); #else static inline void acpi_arch_dma_setup(struct device *dev) { } #endif #ifdef CONFIG_ARM64 void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); #else static inline void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } #endif #ifdef CONFIG_RISCV void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa); #else static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { } #endif #ifndef PHYS_CPUID_INVALID typedef u32 phys_cpuid_t; #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) #endif static inline bool invalid_logical_cpuid(u32 cpuid) { return (int)cpuid < 0; } static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) { return phys_id == PHYS_CPUID_INVALID; } int __init acpi_get_madt_revision(void); /* Validate the processor object's proc_id */ bool acpi_duplicate_processor_id(int proc_id); /* 
Processor _CST control */ struct acpi_processor_power; #ifdef CONFIG_ACPI_PROCESSOR_CSTATE bool acpi_processor_claim_cst_control(void); int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, struct acpi_processor_power *info); #else static inline bool acpi_processor_claim_cst_control(void) { return false; } static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, struct acpi_processor_power *info) { return -ENODEV; } #endif #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu); int acpi_unmap_cpu(int cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ acpi_handle acpi_get_processor_handle(int cpu); #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); #endif int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); void acpi_irq_stats_init(void); extern u32 acpi_irq_handled; extern u32 acpi_irq_not_handled; extern unsigned int acpi_sci_irq; extern bool acpi_no_s5; #define INVALID_ACPI_IRQ ((unsigned)-1) static inline bool acpi_sci_irq_valid(void) { return acpi_sci_irq != INVALID_ACPI_IRQ; } extern int sbf_port; extern unsigned long acpi_realmode_flags; int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); void acpi_set_irq_model(enum acpi_irq_model_id model, struct fwnode_handle *(*)(u32)); void acpi_set_gsi_to_irq_fallback(u32 (*)(u32)); struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, unsigned int size, struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data); #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) { return -1; } #endif /* * This function undoes the effect of one call to acpi_register_gsi(). * If this matches the last registration, any IRQ resources for gsi * are freed.
*/ void acpi_unregister_gsi (u32 gsi); struct pci_dev; struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin); int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); bool acpi_isa_irq_available(int irq); #ifdef CONFIG_PCI void acpi_penalize_sci_irq(int irq, int trigger, int polarity); #else static inline void acpi_penalize_sci_irq(int irq, int trigger, int polarity) { } #endif void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); extern int ec_write(u8 addr, u8 val); extern int ec_transaction(u8 command, const u8 *wdata, unsigned wdata_len, u8 *rdata, unsigned rdata_len); extern acpi_handle ec_get_handle(void); extern bool acpi_is_pnp_device(struct acpi_device *); #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) typedef void (*wmi_notify_handler) (union acpi_object *data, void *context); int wmi_instance_count(const char *guid); extern acpi_status wmi_evaluate_method(const char *guid, u8 instance, u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out); extern acpi_status wmi_query_block(const char *guid, u8 instance, struct acpi_buffer *out); extern acpi_status wmi_set_block(const char *guid, u8 instance, const struct acpi_buffer *in); extern acpi_status wmi_install_notify_handler(const char *guid, wmi_notify_handler handler, void *data); extern acpi_status wmi_remove_notify_handler(const char *guid); extern bool wmi_has_guid(const char *guid); extern char *wmi_get_acpi_device_uid(const char *guid); #endif /* CONFIG_ACPI_WMI */ #define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001 #define ACPI_VIDEO_DEVICE_POSTING 0x0002 #define ACPI_VIDEO_ROM_AVAILABLE 0x0004 #define ACPI_VIDEO_BACKLIGHT 0x0008 #define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010 #define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020 #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040 #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080 #define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100 #define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 extern char acpi_video_backlight_string[]; extern long acpi_is_video_device(acpi_handle handle); extern void acpi_osi_setup(char *str); extern bool acpi_osi_is_win8(void); #ifdef CONFIG_ACPI_THERMAL_LIB int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp); int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp); int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp); int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp); #endif #ifdef CONFIG_ACPI_HMAT int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord); #else static inline int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord) { return -EOPNOTSUPP; } #endif #ifdef CONFIG_ACPI_NUMA int acpi_map_pxm_to_node(int pxm); int acpi_get_node(acpi_handle handle); /** * pxm_to_online_node - Map proximity ID to online node * @pxm: ACPI proximity ID * * This is similar to pxm_to_node(), but always returns an online * node. When the mapped node from a given proximity ID is offline, it * looks up the node distance table and returns the nearest online node. * * ACPI device drivers, which are called after the NUMA initialization has * completed in the kernel, can call this interface to obtain their device * NUMA topology from ACPI tables. Such drivers do not have to deal with * offline nodes. 
A node may be offline when the SRAT memory entry does not exist, * or when NUMA is disabled, e.g. "numa=off" on x86. */ static inline int pxm_to_online_node(int pxm) { int node = pxm_to_node(pxm); return numa_map_to_online_node(node); } #else static inline int pxm_to_online_node(int pxm) { return 0; } static inline int acpi_map_pxm_to_node(int pxm) { return 0; } static inline int acpi_get_node(acpi_handle handle) { return 0; } #endif extern int pnpacpi_disabled; #define PXM_INVAL (-1) bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); bool acpi_dev_resource_address_space(struct acpi_resource *ares, struct resource_win *win); bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, struct resource_win *win); unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable); unsigned int acpi_dev_get_irq_type(int triggering, int polarity); bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, struct resource *res); void acpi_dev_free_resource_list(struct list_head *list); int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, int (*preproc)(struct acpi_resource *, void *), void *preproc_data); int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list); int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list); int acpi_dev_filter_resource_type(struct acpi_resource *ares, unsigned long types); static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares, void *arg) { return acpi_dev_filter_resource_type(ares, (unsigned long)arg); } struct acpi_device *acpi_resource_consumer(struct resource *res); int acpi_check_resource_conflict(const struct resource *res); int acpi_check_region(resource_size_t start, resource_size_t n, const char *name); int acpi_resources_are_enforced(void); #ifdef CONFIG_HIBERNATION extern int acpi_check_s4_hw_signature; #endif #ifdef CONFIG_PM_SLEEP void __init acpi_old_suspend_ordering(void); void __init acpi_nvs_nosave(void); void __init acpi_nvs_nosave_s3(void); void __init acpi_sleep_no_blacklist(void); #endif /* CONFIG_PM_SLEEP */ int acpi_register_wakeup_handler( int wake_irq, bool (*wakeup)(void *context), void *context); void acpi_unregister_wakeup_handler( bool (*wakeup)(void *context), void *context); struct acpi_osc_context { char *uuid_str; /* UUID string */ int rev; struct acpi_buffer cap; /* list of DWORD capabilities */ struct acpi_buffer ret; /* free by caller if success */ }; acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); /* Number of _OSC capability DWORDS depends on bridge type */ #define OSC_PCI_CAPABILITY_DWORDS 3 #define OSC_CXL_CAPABILITY_DWORDS 5 /* Indexes into _OSC Capabilities Buffer (DWORDs 2 to 5 are device-specific) */ #define OSC_QUERY_DWORD 0 /* DWORD 1 */ #define OSC_SUPPORT_DWORD 1 /* DWORD 2 */ #define OSC_CONTROL_DWORD 2 /* DWORD 3 */ #define OSC_EXT_SUPPORT_DWORD 3 /* DWORD 4 */ #define OSC_EXT_CONTROL_DWORD 4 /* DWORD 5 */ /* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */ #define OSC_QUERY_ENABLE 0x00000001 /* input */ #define OSC_REQUEST_ERROR 0x00000002 /* return */ #define OSC_INVALID_UUID_ERROR 0x00000004 /* return */ #define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */ #define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */ /* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */ #define OSC_SB_PAD_SUPPORT
0x00000001 #define OSC_SB_PPC_OST_SUPPORT 0x00000002 #define OSC_SB_PR3_SUPPORT 0x00000004 #define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 #define OSC_SB_APEI_SUPPORT 0x00000010 #define OSC_SB_CPC_SUPPORT 0x00000020 #define OSC_SB_CPCV2_SUPPORT 0x00000040 #define OSC_SB_PCLPI_SUPPORT 0x00000080 #define OSC_SB_OSLPI_SUPPORT 0x00000100 #define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200 #define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400 #define OSC_SB_GED_SUPPORT 0x00000800 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT 0x00002000 #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000 #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 #define OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT 0x00080000 #define OSC_SB_PRM_SUPPORT 0x00200000 #define OSC_SB_FFH_OPR_SUPPORT 0x00400000 extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; extern bool osc_sb_native_usb4_support_confirmed; extern bool osc_sb_cppc2_support_acked; extern bool osc_cpc_flexible_adr_space_confirmed; /* USB4 Capabilities */ #define OSC_USB_USB3_TUNNELING 0x00000001 #define OSC_USB_DP_TUNNELING 0x00000002 #define OSC_USB_PCIE_TUNNELING 0x00000004 #define OSC_USB_XDOMAIN 0x00000008 extern u32 osc_sb_native_usb4_control; /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 #define OSC_PCI_ASPM_SUPPORT 0x00000002 #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 #define OSC_PCI_MSI_SUPPORT 0x00000010 #define OSC_PCI_EDR_SUPPORT 0x00000080 #define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100 /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 #define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002 #define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 #define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 #define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020 #define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080 /* CXL _OSC: Capabilities DWORD 4: Support Field */ #define OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT 0x00000001 #define OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT 0x00000002 #define OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT 0x00000004 #define OSC_CXL_NATIVE_HP_SUPPORT 0x00000008 /* CXL _OSC: Capabilities DWORD 5: Control Field */ #define OSC_CXL_ERROR_REPORTING_CONTROL 0x00000001 static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context) { u32 *ret = context->ret.pointer; return ret[OSC_CONTROL_DWORD]; } static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context) { u32 *ret = context->ret.pointer; return ret[OSC_EXT_CONTROL_DWORD]; } #define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 #define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 #define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006 #define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008 #define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A #define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B #define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C #define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D #define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E #define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F /* Enable _OST when all relevant hotplug operations are enabled */ #if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ defined(CONFIG_ACPI_CONTAINER) #define ACPI_HOTPLUG_OST #endif /* _OST Source Event Code (OSPM Action) */ #define 
ACPI_OST_EC_OSPM_SHUTDOWN 0x100 #define ACPI_OST_EC_OSPM_EJECT 0x103 #define ACPI_OST_EC_OSPM_INSERTION 0x200 /* _OST General Processing Status Code */ #define ACPI_OST_SC_SUCCESS 0x0 #define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1 #define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2 /* _OST OS Shutdown Processing (0x100) Status Code */ #define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80 #define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81 #define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82 #define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83 /* _OST Ejection Request (0x3, 0x103) Status Code */ #define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80 #define ACPI_OST_SC_DEVICE_IN_USE 0x81 #define ACPI_OST_SC_DEVICE_BUSY 0x82 #define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83 #define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84 /* _OST Insertion Request (0x200) Status Code */ #define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80 #define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81 #define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 enum acpi_predicate { all_versions, less_than_or_equal, equal, greater_than_or_equal, }; /* Table must be terminated by a NULL entry */ struct acpi_platform_list { char oem_id[ACPI_OEM_ID_SIZE+1]; char oem_table_id[ACPI_OEM_TABLE_ID_SIZE+1]; u32 oem_revision; char *table; enum acpi_predicate pred; char *reason; u32 data; }; int acpi_match_platform_list(const struct acpi_platform_list *plat); extern void acpi_early_init(void); extern void acpi_subsystem_init(void); extern int acpi_nvs_register(__u64 start, __u64 size); extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), void *data); const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids, const struct acpi_device *adev); const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct device *dev); const void *acpi_device_get_match_data(const struct device *dev); extern bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv); int acpi_device_uevent_modalias(const struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); struct platform_device *acpi_create_platform_device(struct acpi_device *, const struct property_entry *); #define ACPI_PTR(_ptr) (_ptr) static inline void acpi_device_set_enumerated(struct acpi_device *adev) { adev->flags.visited = true; } static inline void acpi_device_clear_enumerated(struct acpi_device *adev) { adev->flags.visited = false; } enum acpi_reconfig_event { ACPI_RECONFIG_DEVICE_ADD = 0, ACPI_RECONFIG_DEVICE_REMOVE, }; int acpi_reconfig_notifier_register(struct notifier_block *nb); int acpi_reconfig_notifier_unregister(struct notifier_block *nb); #ifdef CONFIG_ACPI_GTDT int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count); int acpi_gtdt_map_ppi(int type); bool acpi_gtdt_c3stop(int type); int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); #endif #ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER static inline void acpi_arch_set_root_pointer(u64 addr) { } #endif #ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER static inline u64 acpi_arch_get_root_pointer(void) { return 0; } #endif int acpi_get_local_u64_address(acpi_handle handle, u64 *addr); int acpi_get_local_address(acpi_handle handle, u32 *addr); const char *acpi_get_subsystem_id(acpi_handle handle); #else /* !CONFIG_ACPI */ #define acpi_disabled 1 #define ACPI_COMPANION(dev) (NULL) #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) #define ACPI_HANDLE_FWNODE(fwnode) (NULL) /* Get rid of the
-Wunused-variable for adev */ #define acpi_dev_uid_match(adev, uid2) (adev && false) #define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false) struct fwnode_handle; static inline bool acpi_dev_found(const char *hid) { return false; } static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) { return false; } struct acpi_device; static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer) { return -ENODEV; } static inline struct acpi_device * acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) { return NULL; } static inline bool acpi_reduced_hardware(void) { return false; } static inline void acpi_dev_put(struct acpi_device *adev) {} static inline bool is_acpi_node(const struct fwnode_handle *fwnode) { return false; } static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode) { return false; } static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode) { return NULL; } static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode) { return false; } static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode) { return NULL; } static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode, const char *name) { return false; } static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) { return NULL; } static inline bool has_acpi_companion(struct device *dev) { return false; } static inline void acpi_preset_companion(struct device *dev, struct acpi_device *parent, u64 addr) { } static inline const char *acpi_dev_name(struct acpi_device *adev) { return NULL; } static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev) { return NULL; } static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { } static inline int early_acpi_boot_init(void) { return 0; } static inline int acpi_boot_init(void) { return 0; } static inline void acpi_boot_table_prepare(void) { } static inline void acpi_boot_table_init(void) { } static inline int acpi_mps_check(void) { return 0; } static inline int acpi_check_resource_conflict(struct resource *res) { return 0; } static inline int acpi_check_region(resource_size_t start, resource_size_t n, const char *name) { return 0; } struct acpi_table_header; static inline int acpi_table_parse(char *id, int (*handler)(struct acpi_table_header *)) { return -ENODEV; } static inline int acpi_nvs_register(__u64 start, __u64 size) { return 0; } static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), void *data) { return 0; } struct acpi_device_id; static inline const struct acpi_device_id *acpi_match_acpi_device( const struct acpi_device_id *ids, const struct acpi_device *adev) { return NULL; } static inline const struct acpi_device_id *acpi_match_device( const struct acpi_device_id *ids, const struct device *dev) { return NULL; } static inline const void *acpi_device_get_match_data(const struct device *dev) { return NULL; } static inline bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv) { return false; } static inline bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs) { return false; } static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4) { return NULL; } static inline union acpi_object *acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, union 
acpi_object *argv4, acpi_object_type type) { return NULL; } static inline int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) { return -ENODEV; } static inline int acpi_device_modalias(struct device *dev, char *buf, int size) { return -ENODEV; } static inline struct platform_device * acpi_create_platform_device(struct acpi_device *adev, const struct property_entry *properties) { return NULL; } static inline bool acpi_dma_supported(const struct acpi_device *adev) { return false; } static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) { return DEV_DMA_NOT_SUPPORTED; } static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) { return -ENODEV; } static inline int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) { return 0; } static inline int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, const u32 *input_id) { return 0; } #define ACPI_PTR(_ptr) (NULL) static inline void acpi_device_set_enumerated(struct acpi_device *adev) { } static inline void acpi_device_clear_enumerated(struct acpi_device *adev) { } static inline int acpi_reconfig_notifier_register(struct notifier_block *nb) { return -EINVAL; } static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) { return -EINVAL; } static inline struct acpi_device *acpi_resource_consumer(struct resource *res) { return NULL; } static inline int acpi_get_local_address(acpi_handle handle, u32 *addr) { return -ENODEV; } static inline const char *acpi_get_subsystem_id(acpi_handle handle) { return ERR_PTR(-ENODEV); } static inline int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context), void *context) { return -ENXIO; } static inline void acpi_unregister_wakeup_handler( bool (*wakeup)(void *context), void *context) { } struct acpi_osc_context; static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context) { return 0; } static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context) { return 0; } static inline bool acpi_sleep_state_supported(u8 sleep_state) { return false; } static inline acpi_handle acpi_get_processor_handle(int cpu) { return NULL; } #endif /* !CONFIG_ACPI */ extern void arch_post_acpi_subsys_init(void); #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_ioapic_add(acpi_handle root); #else static inline int acpi_ioapic_add(acpi_handle root) { return 0; } #endif #ifdef CONFIG_ACPI void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)); acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control); void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, u32 val_a, u32 val_b)); acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); #if defined(CONFIG_SUSPEND) && defined(CONFIG_X86) struct acpi_s2idle_dev_ops { struct list_head list_node; void (*prepare)(void); void (*check)(void); void (*restore)(void); }; int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg); void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg); int acpi_get_lps0_constraint(struct acpi_device *adev); #else /* CONFIG_SUSPEND && CONFIG_X86 */ static inline int acpi_get_lps0_constraint(struct acpi_device *adev) { return ACPI_STATE_UNKNOWN; } #endif /* CONFIG_SUSPEND && CONFIG_X86 */ void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) #endif #if defined(CONFIG_ACPI) && 
defined(CONFIG_PM) int acpi_dev_suspend(struct device *dev, bool wakeup); int acpi_dev_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); bool acpi_storage_d3(struct device *dev); bool acpi_dev_state_d0(struct device *dev); #else static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { return 0; } static inline bool acpi_storage_d3(struct device *dev) { return false; } static inline bool acpi_dev_state_d0(struct device *dev) { return true; } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) int acpi_subsys_prepare(struct device *dev); void acpi_subsys_complete(struct device *dev); int acpi_subsys_suspend_late(struct device *dev); int acpi_subsys_suspend_noirq(struct device *dev); int acpi_subsys_suspend(struct device *dev); int acpi_subsys_freeze(struct device *dev); int acpi_subsys_poweroff(struct device *dev); int acpi_subsys_restore_early(struct device *dev); #else static inline int acpi_subsys_prepare(struct device *dev) { return 0; } static inline void acpi_subsys_complete(struct device *dev) {} static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; } static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } static inline int acpi_subsys_restore_early(struct device *dev) { return 0; } #endif #if defined(CONFIG_ACPI_EC) && defined(CONFIG_PM_SLEEP) void acpi_ec_mark_gpe_for_wake(void); void acpi_ec_set_gpe_wake_mask(u8 action); #else static inline void acpi_ec_mark_gpe_for_wake(void) {} static inline void acpi_ec_set_gpe_wake_mask(u8 action) {} #endif #ifdef CONFIG_ACPI char *acpi_handle_path(acpi_handle handle); __printf(3, 4) void acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...); void acpi_evaluation_failure_warn(acpi_handle handle, const char *name, acpi_status status); #else /* !CONFIG_ACPI */ static inline __printf(3, 4) void acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} static inline void acpi_evaluation_failure_warn(acpi_handle handle, const char *name, acpi_status status) {} #endif /* !CONFIG_ACPI */ #if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) __printf(3, 4) void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...); #endif /* * acpi_handle_<level>: Print message with ACPI prefix and object path * * These interfaces acquire the global namespace mutex to obtain an object * path. In interrupt context, it shows the object path as <n/a>. */ #define acpi_handle_emerg(handle, fmt, ...) \ acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__) #define acpi_handle_alert(handle, fmt, ...) \ acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__) #define acpi_handle_crit(handle, fmt, ...) \ acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__) #define acpi_handle_err(handle, fmt, ...) \ acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__) #define acpi_handle_warn(handle, fmt, ...) \ acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__) #define acpi_handle_notice(handle, fmt, ...) 
\ acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__) #define acpi_handle_info(handle, fmt, ...) \ acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) #if defined(DEBUG) #define acpi_handle_debug(handle, fmt, ...) \ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) #else #if defined(CONFIG_DYNAMIC_DEBUG) #define acpi_handle_debug(handle, fmt, ...) \ _dynamic_func_call(fmt, __acpi_handle_debug, \ handle, pr_fmt(fmt), ##__VA_ARGS__) #else #define acpi_handle_debug(handle, fmt, ...) \ ({ \ if (0) \ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ 0; \ }) #endif #endif #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); bool acpi_gpio_get_io_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable); #else static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) { return false; } static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) { return false; } static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable) { return -ENXIO; } #endif static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index, bool *wake_capable) { return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable); } static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *con_id, int index) { return acpi_dev_gpio_irq_wake_get_by(adev, con_id, index, NULL); } static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) { return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, NULL); } /* Device properties */ #ifdef CONFIG_ACPI int acpi_dev_get_property(const struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj); int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *name, size_t index, size_t num_args, struct fwnode_reference_args *args); static inline int acpi_node_get_property_reference( const struct fwnode_handle *fwnode, const char *name, size_t index, struct fwnode_reference_args *args) { return __acpi_node_get_property_reference(fwnode, name, index, NR_FWNODE_REFERENCE_ARGS, args); } static inline bool acpi_dev_has_props(const struct acpi_device *adev) { return !list_empty(&adev->data.properties); } struct acpi_device_properties * acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, union acpi_object *properties); int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr); struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child); struct acpi_probe_entry; typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *); #define ACPI_TABLE_ID_LEN 5 /** * struct acpi_probe_entry - boot-time probing entry * @id: ACPI table name * @type: Optional subtable type to match * (if @id contains subtables) * @subtable_valid: Optional callback to check the validity of * the subtable * @probe_table: Callback to the driver being probed when table * match is successful * @probe_subtbl: Callback to the driver being probed when table and * subtable match (and optional callback is successful) * @driver_data: Sideband data provided back 
to the driver */ struct acpi_probe_entry { __u8 id[ACPI_TABLE_ID_LEN]; __u8 type; acpi_probe_entry_validate_subtbl subtable_valid; union { acpi_tbl_table_handler probe_table; acpi_tbl_entry_handler probe_subtbl; }; kernel_ulong_t driver_data; }; void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr); #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ __used __section("__" #table "_acpi_probe_table") = { \ .id = table_id, \ .type = subtable, \ .subtable_valid = valid, \ .probe_table = fn, \ .driver_data = data, \ } #define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \ subtable, valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ __used __section("__" #table "_acpi_probe_table") = { \ .id = table_id, \ .type = subtable, \ .subtable_valid = valid, \ .probe_subtbl = fn, \ .driver_data = data, \ } #define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table #define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); #define acpi_probe_device_table(t) \ ({ \ extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \ ACPI_PROBE_TABLE_END(t); \ __acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \ (&ACPI_PROBE_TABLE_END(t) - \ &ACPI_PROBE_TABLE(t))); \ }) #else static inline int acpi_dev_get_property(struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj) { return -ENXIO; } static inline int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *name, size_t index, size_t num_args, struct fwnode_reference_args *args) { return -ENXIO; } static inline int acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *name, size_t index, struct fwnode_reference_args *args) { return -ENXIO; } static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr) { return -ENXIO; } static inline struct fwnode_handle * acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { return NULL; } static inline struct fwnode_handle * acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { return ERR_PTR(-ENXIO); } static inline int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle **remote, struct fwnode_handle **port, struct fwnode_handle **endpoint) { return -ENXIO; } #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ static const void * __acpi_table_##name[] \ __attribute__((unused)) \ = { (void *) table_id, \ (void *) subtable, \ (void *) valid, \ (void *) fn, \ (void *) data } #define acpi_probe_device_table(t) ({ int __r = 0; __r;}) #endif #ifdef CONFIG_ACPI_TABLE_UPGRADE void acpi_table_upgrade(void); #else static inline void acpi_table_upgrade(void) { } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG) extern bool acpi_has_watchdog(void); #else static inline bool acpi_has_watchdog(void) { return false; } #endif #ifdef CONFIG_ACPI_SPCR_TABLE extern bool qdf2400_e44_present; int acpi_parse_spcr(bool enable_earlycon, bool enable_console); #else static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console) { return 0; } #endif #if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI) int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res); #else static inline int acpi_irq_get(acpi_handle handle, 
unsigned int index, struct resource *res) { return -EINVAL; } #endif #ifdef CONFIG_ACPI_LPIT int lpit_read_residency_count_address(u64 *address); #else static inline int lpit_read_residency_count_address(u64 *address) { return -EINVAL; } #endif #ifdef CONFIG_ACPI_PROCESSOR_IDLE #ifndef arch_get_idle_state_flags static inline unsigned int arch_get_idle_state_flags(u32 arch_flags) { return 0; } #endif #endif /* CONFIG_ACPI_PROCESSOR_IDLE */ #ifdef CONFIG_ACPI_PPTT int acpi_pptt_cpu_is_thread(unsigned int cpu); int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { return -EINVAL; } static inline int find_acpi_cpu_topology(unsigned int cpu, int level) { return -EINVAL; } static inline int find_acpi_cpu_topology_cluster(unsigned int cpu) { return -EINVAL; } static inline int find_acpi_cpu_topology_package(unsigned int cpu) { return -EINVAL; } static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return -EINVAL; } #endif void acpi_arch_init(void); #ifdef CONFIG_ACPI_PCC void acpi_init_pcc(void); #else static inline void acpi_init_pcc(void) { } #endif #ifdef CONFIG_ACPI_FFH void acpi_init_ffh(void); extern int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt); extern int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context); #else static inline void acpi_init_ffh(void) { } #endif #ifdef CONFIG_ACPI extern void acpi_device_notify(struct device *dev); extern void acpi_device_notify_remove(struct device *dev); #else static inline void acpi_device_notify(struct device *dev) { } static inline void acpi_device_notify_remove(struct device *dev) { } #endif static inline void acpi_use_parent_companion(struct device *dev) { ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent)); } #ifdef CONFIG_ACPI_HMAT int hmat_update_target_coordinates(int nid, struct access_coordinate *coord, enum access_coordinate_class access); #else static inline int hmat_update_target_coordinates(int nid, struct access_coordinate *coord, enum access_coordinate_class access) { return -EOPNOTSUPP; } #endif #ifdef CONFIG_ACPI_NUMA bool acpi_node_backed_by_real_pxm(int nid); #else static inline bool acpi_node_backed_by_real_pxm(int nid) { return false; } #endif #endif /*_LINUX_ACPI_H*/
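/*
 * Illustrative sketch, not part of the header above: the CONFIG_ACPI stub
 * pattern lets driver code call these helpers unconditionally, because the
 * !CONFIG_ACPI static inlines return an error and the dead branch falls away
 * at compile time. The function and the "vendor,example-delay" property
 * below are hypothetical, as they might appear in a driver .c file.
 */
#include <linux/acpi.h>

static int example_read_delay(struct acpi_device *adev, u64 *delay)
{
	const union acpi_object *obj;
	int ret;

	/*
	 * With CONFIG_ACPI=n this resolves to the inline stub and returns
	 * -ENXIO without touching any firmware tables.
	 */
	ret = acpi_dev_get_property(adev, "vendor,example-delay",
				    ACPI_TYPE_INTEGER, &obj);
	if (ret)
		return ret;

	*delay = obj->integer.value;
	return 0;
}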
// SPDX-License-Identifier: GPL-2.0 /* * NFS exporting and validation. * * We maintain a list of clients, each of which has a list of * exports. To export an fs to a given client, you first have * to create the client entry with NFSCTL_ADDCLIENT, which * creates a client control block and adds it to the hash * table. Then, you call NFSCTL_EXPORT for each fs. 
* * * Copyright (C) 1995, 1996 Olaf Kirch, <okir@monad.swb.de> */ #include <linux/slab.h> #include <linux/namei.h> #include <linux/module.h> #include <linux/exportfs.h> #include <linux/sunrpc/svc_xprt.h> #include "nfsd.h" #include "nfsfh.h" #include "netns.h" #include "pnfs.h" #include "filecache.h" #include "trace.h" #define NFSDDBG_FACILITY NFSDDBG_EXPORT /* * We have two caches. * One maps client+vfsmnt+dentry to export options - the export map * The other maps client+filehandle-fragment to export options. - the expkey map * * The export options are actually stored in the first map, and the * second map contains a reference to the entry in the first map. */ #define EXPKEY_HASHBITS 8 #define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS) #define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1) static void expkey_put(struct kref *ref) { struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); if (test_bit(CACHE_VALID, &key->h.flags) && !test_bit(CACHE_NEGATIVE, &key->h.flags)) path_put(&key->ek_path); auth_domain_put(key->ek_client); kfree_rcu(key, ek_rcu); } static int expkey_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall(cd, h); } static void expkey_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) { /* client fsidtype \xfsid */ struct svc_expkey *ek = container_of(h, struct svc_expkey, h); char type[5]; qword_add(bpp, blen, ek->ek_client->name); snprintf(type, 5, "%d", ek->ek_fsidtype); qword_add(bpp, blen, type); qword_addhex(bpp, blen, (char*)ek->ek_fsid, key_len(ek->ek_fsidtype)); (*bpp)[-1] = '\n'; } static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new, struct svc_expkey *old); static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *); static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) { /* client fsidtype fsid expiry [path] */ char *buf; int len; struct auth_domain *dom = NULL; int err; int fsidtype; char *ep; struct svc_expkey key; struct svc_expkey *ek = NULL; if (mesg[mlen - 1] != '\n') return -EINVAL; mesg[mlen-1] = 0; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); err = -ENOMEM; if (!buf) goto out; err = -EINVAL; if (qword_get(&mesg, buf, PAGE_SIZE) <= 0) goto out; err = -ENOENT; dom = auth_domain_find(buf); if (!dom) goto out; dprintk("found domain %s\n", buf); err = -EINVAL; if (qword_get(&mesg, buf, PAGE_SIZE) <= 0) goto out; fsidtype = simple_strtoul(buf, &ep, 10); if (*ep) goto out; dprintk("found fsidtype %d\n", fsidtype); if (key_len(fsidtype)==0) /* invalid type */ goto out; if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0) goto out; dprintk("found fsid length %d\n", len); if (len != key_len(fsidtype)) goto out; /* OK, we seem to have a valid key */ key.h.flags = 0; err = get_expiry(&mesg, &key.h.expiry_time); if (err) goto out; key.ek_client = dom; key.ek_fsidtype = fsidtype; memcpy(key.ek_fsid, buf, len); ek = svc_expkey_lookup(cd, &key); err = -ENOMEM; if (!ek) goto out; /* now we want a pathname, or empty meaning NEGATIVE */ err = -EINVAL; len = qword_get(&mesg, buf, PAGE_SIZE); if (len < 0) goto out; dprintk("Path seems to be <%s>\n", buf); err = 0; if (len == 0) { set_bit(CACHE_NEGATIVE, &key.h.flags); ek = svc_expkey_update(cd, &key, ek); if (ek) trace_nfsd_expkey_update(ek, NULL); else err = -ENOMEM; } else { err = kern_path(buf, 0, &key.ek_path); if (err) goto out; dprintk("Found the path %s\n", buf); ek = svc_expkey_update(cd, &key, ek); if (ek) trace_nfsd_expkey_update(ek, buf); else err = -ENOMEM; 
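/* Drop the kern_path() reference held by the temporary key; a
 * successful svc_expkey_update() took its own reference via
 * expkey_update()'s path_get().
 */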
path_put(&key.ek_path); } cache_flush(); out: if (ek) cache_put(&ek->h, cd); if (dom) auth_domain_put(dom); kfree(buf); return err; } static int expkey_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct svc_expkey *ek ; int i; if (h ==NULL) { seq_puts(m, "#domain fsidtype fsid [path]\n"); return 0; } ek = container_of(h, struct svc_expkey, h); seq_printf(m, "%s %d 0x", ek->ek_client->name, ek->ek_fsidtype); for (i=0; i < key_len(ek->ek_fsidtype)/4; i++) seq_printf(m, "%08x", ek->ek_fsid[i]); if (test_bit(CACHE_VALID, &h->flags) && !test_bit(CACHE_NEGATIVE, &h->flags)) { seq_printf(m, " "); seq_path(m, &ek->ek_path, "\\ \t\n"); } seq_printf(m, "\n"); return 0; } static inline int expkey_match (struct cache_head *a, struct cache_head *b) { struct svc_expkey *orig = container_of(a, struct svc_expkey, h); struct svc_expkey *new = container_of(b, struct svc_expkey, h); if (orig->ek_fsidtype != new->ek_fsidtype || orig->ek_client != new->ek_client || memcmp(orig->ek_fsid, new->ek_fsid, key_len(orig->ek_fsidtype)) != 0) return 0; return 1; } static inline void expkey_init(struct cache_head *cnew, struct cache_head *citem) { struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); struct svc_expkey *item = container_of(citem, struct svc_expkey, h); kref_get(&item->ek_client->ref); new->ek_client = item->ek_client; new->ek_fsidtype = item->ek_fsidtype; memcpy(new->ek_fsid, item->ek_fsid, sizeof(new->ek_fsid)); } static inline void expkey_update(struct cache_head *cnew, struct cache_head *citem) { struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); struct svc_expkey *item = container_of(citem, struct svc_expkey, h); new->ek_path = item->ek_path; path_get(&item->ek_path); } static struct cache_head *expkey_alloc(void) { struct svc_expkey *i = kmalloc(sizeof(*i), GFP_KERNEL); if (i) return &i->h; else return NULL; } static void expkey_flush(void) { /* * Take the nfsd_mutex here to ensure that the file cache is not * destroyed while we're in the middle of flushing. 
*/ mutex_lock(&nfsd_mutex); nfsd_file_cache_purge(current->nsproxy->net_ns); mutex_unlock(&nfsd_mutex); } static const struct cache_detail svc_expkey_cache_template = { .owner = THIS_MODULE, .hash_size = EXPKEY_HASHMAX, .name = "nfsd.fh", .cache_put = expkey_put, .cache_upcall = expkey_upcall, .cache_request = expkey_request, .cache_parse = expkey_parse, .cache_show = expkey_show, .match = expkey_match, .init = expkey_init, .update = expkey_update, .alloc = expkey_alloc, .flush = expkey_flush, }; static int svc_expkey_hash(struct svc_expkey *item) { int hash = item->ek_fsidtype; char * cp = (char*)item->ek_fsid; int len = key_len(item->ek_fsidtype); hash ^= hash_mem(cp, len, EXPKEY_HASHBITS); hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS); hash &= EXPKEY_HASHMASK; return hash; } static struct svc_expkey * svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item) { struct cache_head *ch; int hash = svc_expkey_hash(item); ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash); if (ch) return container_of(ch, struct svc_expkey, h); else return NULL; } static struct svc_expkey * svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new, struct svc_expkey *old) { struct cache_head *ch; int hash = svc_expkey_hash(new); ch = sunrpc_cache_update(cd, &new->h, &old->h, hash); if (ch) return container_of(ch, struct svc_expkey, h); else return NULL; } #define EXPORT_HASHBITS 8 #define EXPORT_HASHMAX (1<< EXPORT_HASHBITS) static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc) { struct nfsd4_fs_location *locations = fsloc->locations; int i; if (!locations) return; for (i = 0; i < fsloc->locations_count; i++) { kfree(locations[i].path); kfree(locations[i].hosts); } kfree(locations); fsloc->locations = NULL; } static int export_stats_init(struct export_stats *stats) { stats->start_time = ktime_get_seconds(); return percpu_counter_init_many(stats->counter, 0, GFP_KERNEL, EXP_STATS_COUNTERS_NUM); } static void export_stats_reset(struct export_stats *stats) { if (stats) { int i; for (i = 0; i < EXP_STATS_COUNTERS_NUM; i++) percpu_counter_set(&stats->counter[i], 0); } } static void export_stats_destroy(struct export_stats *stats) { if (stats) percpu_counter_destroy_many(stats->counter, EXP_STATS_COUNTERS_NUM); } static void svc_export_put(struct kref *ref) { struct svc_export *exp = container_of(ref, struct svc_export, h.ref); path_put(&exp->ex_path); auth_domain_put(exp->ex_client); nfsd4_fslocs_free(&exp->ex_fslocs); export_stats_destroy(exp->ex_stats); kfree(exp->ex_stats); kfree(exp->ex_uuid); kfree_rcu(exp, ex_rcu); } static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall(cd, h); } static void svc_export_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) { /* client path */ struct svc_export *exp = container_of(h, struct svc_export, h); char *pth; qword_add(bpp, blen, exp->ex_client->name); pth = d_path(&exp->ex_path, *bpp, *blen); if (IS_ERR(pth)) { /* is this correct? */ (*bpp)[0] = '\n'; return; } qword_add(bpp, blen, pth); (*bpp)[-1] = '\n'; } static struct svc_export *svc_export_update(struct svc_export *new, struct svc_export *old); static struct svc_export *svc_export_lookup(struct svc_export *); static int check_export(struct path *path, int *flags, unsigned char *uuid) { struct inode *inode = d_inode(path->dentry); /* * We currently export only dirs, regular files, and (for v4 * pseudoroot) symlinks. 
*/ if (!S_ISDIR(inode->i_mode) && !S_ISLNK(inode->i_mode) && !S_ISREG(inode->i_mode)) return -ENOTDIR; /* * Mountd should never pass down a writeable V4ROOT export, but, * just to make sure: */ if (*flags & NFSEXP_V4ROOT) *flags |= NFSEXP_READONLY; /* There are three requirements on a filesystem to be exportable. * 1: We must be able to identify the filesystem from a number. * either a device number (so FS_REQUIRES_DEV needed) * or an FSID number (so NFSEXP_FSID or ->uuid is needed). * 2: We must be able to find an inode from a filehandle. * This means that s_export_op must be set. * 3: We must not currently be on an idmapped mount. */ if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) && !(*flags & NFSEXP_FSID) && uuid == NULL) { dprintk("exp_export: export of non-dev fs without fsid\n"); return -EINVAL; } if (!exportfs_can_decode_fh(inode->i_sb->s_export_op)) { dprintk("exp_export: export of invalid fs type.\n"); return -EINVAL; } if (is_idmapped_mnt(path->mnt)) { dprintk("exp_export: export of idmapped mounts not yet supported.\n"); return -EINVAL; } if (inode->i_sb->s_export_op->flags & EXPORT_OP_NOSUBTREECHK && !(*flags & NFSEXP_NOSUBTREECHECK)) { dprintk("%s: %s does not support subtree checking!\n", __func__, inode->i_sb->s_type->name); return -EINVAL; } return 0; } #ifdef CONFIG_NFSD_V4 static int fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc) { int len; int migrated, i, err; /* more than one fsloc */ if (fsloc->locations) return -EINVAL; /* listsize */ err = get_uint(mesg, &fsloc->locations_count); if (err) return err; if (fsloc->locations_count > MAX_FS_LOCATIONS) return -EINVAL; if (fsloc->locations_count == 0) return 0; fsloc->locations = kcalloc(fsloc->locations_count, sizeof(struct nfsd4_fs_location), GFP_KERNEL); if (!fsloc->locations) return -ENOMEM; for (i=0; i < fsloc->locations_count; i++) { /* colon separated host list */ err = -EINVAL; len = qword_get(mesg, buf, PAGE_SIZE); if (len <= 0) goto out_free_all; err = -ENOMEM; fsloc->locations[i].hosts = kstrdup(buf, GFP_KERNEL); if (!fsloc->locations[i].hosts) goto out_free_all; err = -EINVAL; /* slash separated path component list */ len = qword_get(mesg, buf, PAGE_SIZE); if (len <= 0) goto out_free_all; err = -ENOMEM; fsloc->locations[i].path = kstrdup(buf, GFP_KERNEL); if (!fsloc->locations[i].path) goto out_free_all; } /* migrated */ err = get_int(mesg, &migrated); if (err) goto out_free_all; err = -EINVAL; if (migrated < 0 || migrated > 1) goto out_free_all; fsloc->migrated = migrated; return 0; out_free_all: nfsd4_fslocs_free(fsloc); return err; } static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { struct exp_flavor_info *f; u32 listsize; int err; /* more than one secinfo */ if (exp->ex_nflavors) return -EINVAL; err = get_uint(mesg, &listsize); if (err) return err; if (listsize > MAX_SECINFO_LIST) return -EINVAL; for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) { err = get_uint(mesg, &f->pseudoflavor); if (err) return err; /* * XXX: It would be nice to also check whether this * pseudoflavor is supported, so we can discover the * problem at export time instead of when a client fails * to authenticate. 
*/ err = get_uint(mesg, &f->flags); if (err) return err; /* Only some flags are allowed to differ between flavors: */ if (~NFSEXP_SECINFO_FLAGS & (f->flags ^ exp->ex_flags)) return -EINVAL; } exp->ex_nflavors = listsize; return 0; } #else /* CONFIG_NFSD_V4 */ static inline int fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc){return 0;} static inline int secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; } #endif static int xprtsec_parse(char **mesg, char *buf, struct svc_export *exp) { unsigned int i, mode, listsize; int err; err = get_uint(mesg, &listsize); if (err) return err; if (listsize > NFSEXP_XPRTSEC_NUM) return -EINVAL; exp->ex_xprtsec_modes = 0; for (i = 0; i < listsize; i++) { err = get_uint(mesg, &mode); if (err) return err; if (mode > NFSEXP_XPRTSEC_MTLS) return -EINVAL; exp->ex_xprtsec_modes |= mode; } return 0; } static inline int nfsd_uuid_parse(char **mesg, char *buf, unsigned char **puuid) { int len; /* more than one uuid */ if (*puuid) return -EINVAL; /* expect a 16 byte uuid encoded as \xXXXX... */ len = qword_get(mesg, buf, PAGE_SIZE); if (len != EX_UUID_LEN) return -EINVAL; *puuid = kmemdup(buf, EX_UUID_LEN, GFP_KERNEL); if (*puuid == NULL) return -ENOMEM; return 0; } static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) { /* client path expiry [flags anonuid anongid fsid] */ char *buf; int err; struct auth_domain *dom = NULL; struct svc_export exp = {}, *expp; int an_int; if (mesg[mlen-1] != '\n') return -EINVAL; mesg[mlen-1] = 0; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; /* client */ err = -EINVAL; if (qword_get(&mesg, buf, PAGE_SIZE) <= 0) goto out; err = -ENOENT; dom = auth_domain_find(buf); if (!dom) goto out; /* path */ err = -EINVAL; if (qword_get(&mesg, buf, PAGE_SIZE) <= 0) goto out1; err = kern_path(buf, 0, &exp.ex_path); if (err) goto out1; exp.ex_client = dom; exp.cd = cd; exp.ex_devid_map = NULL; exp.ex_xprtsec_modes = NFSEXP_XPRTSEC_ALL; /* expiry */ err = get_expiry(&mesg, &exp.h.expiry_time); if (err) goto out3; /* flags */ err = get_int(&mesg, &an_int); if (err == -ENOENT) { err = 0; set_bit(CACHE_NEGATIVE, &exp.h.flags); } else { if (err || an_int < 0) goto out3; exp.ex_flags= an_int; /* anon uid */ err = get_int(&mesg, &an_int); if (err) goto out3; exp.ex_anon_uid= make_kuid(current_user_ns(), an_int); /* anon gid */ err = get_int(&mesg, &an_int); if (err) goto out3; exp.ex_anon_gid= make_kgid(current_user_ns(), an_int); /* fsid */ err = get_int(&mesg, &an_int); if (err) goto out3; exp.ex_fsid = an_int; while (qword_get(&mesg, buf, PAGE_SIZE) > 0) { if (strcmp(buf, "fsloc") == 0) err = fsloc_parse(&mesg, buf, &exp.ex_fslocs); else if (strcmp(buf, "uuid") == 0) err = nfsd_uuid_parse(&mesg, buf, &exp.ex_uuid); else if (strcmp(buf, "secinfo") == 0) err = secinfo_parse(&mesg, buf, &exp); else if (strcmp(buf, "xprtsec") == 0) err = xprtsec_parse(&mesg, buf, &exp); else /* quietly ignore unknown words and anything * following. Newer user-space can try to set * new values, then see what the result was. */ break; if (err) goto out4; } err = check_export(&exp.ex_path, &exp.ex_flags, exp.ex_uuid); if (err) goto out4; /* * No point caching this if it would immediately expire. * Also, this protects exportfs's dummy export from the * anon_uid/anon_gid checks: */ if (exp.h.expiry_time < seconds_since_boot()) goto out4; /* * For some reason exportfs has been passing down an * invalid (-1) uid & gid on the "dummy" export which it * uses to test export support. 
To make sure exportfs * sees errors from check_export we therefore need to * delay these checks till after check_export: */ err = -EINVAL; if (!uid_valid(exp.ex_anon_uid)) goto out4; if (!gid_valid(exp.ex_anon_gid)) goto out4; err = 0; nfsd4_setup_layout_type(&exp); } expp = svc_export_lookup(&exp); if (!expp) { err = -ENOMEM; goto out4; } expp = svc_export_update(&exp, expp); if (expp) { trace_nfsd_export_update(expp); cache_flush(); exp_put(expp); } else err = -ENOMEM; out4: nfsd4_fslocs_free(&exp.ex_fslocs); kfree(exp.ex_uuid); out3: path_put(&exp.ex_path); out1: auth_domain_put(dom); out: kfree(buf); return err; } static void exp_flags(struct seq_file *m, int flag, int fsid, kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fslocs); static void show_secinfo(struct seq_file *m, struct svc_export *exp); static int is_export_stats_file(struct seq_file *m) { /* * The export_stats file uses the same ops as the exports file. * We use the file's name to determine the reported info per export. * There is no rename in nfsdfs, so d_name.name is stable. */ return !strcmp(m->file->f_path.dentry->d_name.name, "export_stats"); } static int svc_export_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct svc_export *exp; bool export_stats = is_export_stats_file(m); if (h == NULL) { if (export_stats) seq_puts(m, "#path domain start-time\n#\tstats\n"); else seq_puts(m, "#path domain(flags)\n"); return 0; } exp = container_of(h, struct svc_export, h); seq_path(m, &exp->ex_path, " \t\n\\"); seq_putc(m, '\t'); seq_escape(m, exp->ex_client->name, " \t\n\\"); if (export_stats) { struct percpu_counter *counter = exp->ex_stats->counter; seq_printf(m, "\t%lld\n", exp->ex_stats->start_time); seq_printf(m, "\tfh_stale: %lld\n", percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE])); seq_printf(m, "\tio_read: %lld\n", percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ])); seq_printf(m, "\tio_write: %lld\n", percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE])); seq_putc(m, '\n'); return 0; } seq_putc(m, '('); if (test_bit(CACHE_VALID, &h->flags) && !test_bit(CACHE_NEGATIVE, &h->flags)) { exp_flags(m, exp->ex_flags, exp->ex_fsid, exp->ex_anon_uid, exp->ex_anon_gid, &exp->ex_fslocs); if (exp->ex_uuid) { int i; seq_puts(m, ",uuid="); for (i = 0; i < EX_UUID_LEN; i++) { if ((i&3) == 0 && i) seq_putc(m, ':'); seq_printf(m, "%02x", exp->ex_uuid[i]); } } show_secinfo(m, exp); } seq_puts(m, ")\n"); return 0; } static int svc_export_match(struct cache_head *a, struct cache_head *b) { struct svc_export *orig = container_of(a, struct svc_export, h); struct svc_export *new = container_of(b, struct svc_export, h); return orig->ex_client == new->ex_client && path_equal(&orig->ex_path, &new->ex_path); } static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) { struct svc_export *new = container_of(cnew, struct svc_export, h); struct svc_export *item = container_of(citem, struct svc_export, h); kref_get(&item->ex_client->ref); new->ex_client = item->ex_client; new->ex_path = item->ex_path; path_get(&item->ex_path); new->ex_fslocs.locations = NULL; new->ex_fslocs.locations_count = 0; new->ex_fslocs.migrated = 0; new->ex_layout_types = 0; new->ex_uuid = NULL; new->cd = item->cd; export_stats_reset(new->ex_stats); } static void export_update(struct cache_head *cnew, struct cache_head *citem) { struct svc_export *new = container_of(cnew, struct svc_export, h); struct svc_export *item = container_of(citem, struct svc_export, h); int i; new->ex_flags = item->ex_flags; 
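/* Plain values are copied; ex_devid_map, ex_uuid and ex_fslocs are
 * transferred from item and cleared there, so svc_export_parse()'s
 * cleanup labels do not free what the cache entry now owns.
 */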
new->ex_anon_uid = item->ex_anon_uid; new->ex_anon_gid = item->ex_anon_gid; new->ex_fsid = item->ex_fsid; new->ex_devid_map = item->ex_devid_map; item->ex_devid_map = NULL; new->ex_uuid = item->ex_uuid; item->ex_uuid = NULL; new->ex_fslocs.locations = item->ex_fslocs.locations; item->ex_fslocs.locations = NULL; new->ex_fslocs.locations_count = item->ex_fslocs.locations_count; item->ex_fslocs.locations_count = 0; new->ex_fslocs.migrated = item->ex_fslocs.migrated; item->ex_fslocs.migrated = 0; new->ex_layout_types = item->ex_layout_types; new->ex_nflavors = item->ex_nflavors; for (i = 0; i < MAX_SECINFO_LIST; i++) { new->ex_flavors[i] = item->ex_flavors[i]; } new->ex_xprtsec_modes = item->ex_xprtsec_modes; } static struct cache_head *svc_export_alloc(void) { struct svc_export *i = kmalloc(sizeof(*i), GFP_KERNEL); if (!i) return NULL; i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL); if (!i->ex_stats) { kfree(i); return NULL; } if (export_stats_init(i->ex_stats)) { kfree(i->ex_stats); kfree(i); return NULL; } return &i->h; } static const struct cache_detail svc_export_cache_template = { .owner = THIS_MODULE, .hash_size = EXPORT_HASHMAX, .name = "nfsd.export", .cache_put = svc_export_put, .cache_upcall = svc_export_upcall, .cache_request = svc_export_request, .cache_parse = svc_export_parse, .cache_show = svc_export_show, .match = svc_export_match, .init = svc_export_init, .update = export_update, .alloc = svc_export_alloc, }; static int svc_export_hash(struct svc_export *exp) { int hash; hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS); hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS); hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS); return hash; } static struct svc_export * svc_export_lookup(struct svc_export *exp) { struct cache_head *ch; int hash = svc_export_hash(exp); ch = sunrpc_cache_lookup_rcu(exp->cd, &exp->h, hash); if (ch) return container_of(ch, struct svc_export, h); else return NULL; } static struct svc_export * svc_export_update(struct svc_export *new, struct svc_export *old) { struct cache_head *ch; int hash = svc_export_hash(old); ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash); if (ch) return container_of(ch, struct svc_export, h); else return NULL; } static struct svc_expkey * exp_find_key(struct cache_detail *cd, struct auth_domain *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp) { struct svc_expkey key, *ek; int err; if (!clp) return ERR_PTR(-ENOENT); key.ek_client = clp; key.ek_fsidtype = fsid_type; memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); ek = svc_expkey_lookup(cd, &key); if (ek == NULL) return ERR_PTR(-ENOMEM); err = cache_check(cd, &ek->h, reqp); if (err) { trace_nfsd_exp_find_key(&key, err); return ERR_PTR(err); } return ek; } static struct svc_export * exp_get_by_name(struct cache_detail *cd, struct auth_domain *clp, const struct path *path, struct cache_req *reqp) { struct svc_export *exp, key; int err; if (!clp) return ERR_PTR(-ENOENT); key.ex_client = clp; key.ex_path = *path; key.cd = cd; exp = svc_export_lookup(&key); if (exp == NULL) return ERR_PTR(-ENOMEM); err = cache_check(cd, &exp->h, reqp); if (err) { trace_nfsd_exp_get_by_name(&key, err); return ERR_PTR(err); } return exp; } /* * Find the export entry for a given dentry. 
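 * (It walks toward the root with dget_parent() until some ancestor
 * matches an export, restoring the original dentry before returning.)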
*/ static struct svc_export * exp_parent(struct cache_detail *cd, struct auth_domain *clp, struct path *path) { struct dentry *saved = dget(path->dentry); struct svc_export *exp = exp_get_by_name(cd, clp, path, NULL); while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) { struct dentry *parent = dget_parent(path->dentry); dput(path->dentry); path->dentry = parent; exp = exp_get_by_name(cd, clp, path, NULL); } dput(path->dentry); path->dentry = saved; return exp; } /* * Obtain the root fh on behalf of a client. * This could be done in user space, but I feel that it adds some safety * since it's harder to fool a kernel module than a user space program. */ int exp_rootfh(struct net *net, struct auth_domain *clp, char *name, struct knfsd_fh *f, int maxsize) { struct svc_export *exp; struct path path; struct inode *inode; struct svc_fh fh; int err; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct cache_detail *cd = nn->svc_export_cache; err = -EPERM; /* NB: we probably ought to check that it's NUL-terminated */ if (kern_path(name, 0, &path)) { printk("nfsd: exp_rootfh path not found %s\n", name); return err; } inode = d_inode(path.dentry); dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n", name, path.dentry, clp->name, inode->i_sb->s_id, inode->i_ino); exp = exp_parent(cd, clp, &path); if (IS_ERR(exp)) { err = PTR_ERR(exp); goto out; } /* * fh must be initialized before calling fh_compose */ fh_init(&fh, maxsize); if (fh_compose(&fh, exp, path.dentry, NULL)) err = -EINVAL; else err = 0; memcpy(f, &fh.fh_handle, sizeof(struct knfsd_fh)); fh_put(&fh); exp_put(exp); out: path_put(&path); return err; } static struct svc_export *exp_find(struct cache_detail *cd, struct auth_domain *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp) { struct svc_export *exp; struct nfsd_net *nn = net_generic(cd->net, nfsd_net_id); struct svc_expkey *ek = exp_find_key(nn->svc_expkey_cache, clp, fsid_type, fsidv, reqp); if (IS_ERR(ek)) return ERR_CAST(ek); exp = exp_get_by_name(cd, clp, &ek->ek_path, reqp); cache_put(&ek->h, nn->svc_expkey_cache); if (IS_ERR(exp)) return ERR_CAST(exp); return exp; } /** * check_nfsd_access - check if access to export is allowed. * @exp: svc_export that is being accessed. * @rqstp: svc_rqst attempting to access @exp (will be NULL for LOCALIO). * @may_bypass_gss: reduce strictness of authorization check * * Return values: * %nfs_ok if access is granted, or * %nfserr_wrongsec if access is denied */ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp, bool may_bypass_gss) { struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors; struct svc_xprt *xprt; /* * If rqstp is NULL, this is a LOCALIO request which will only * ever use a filehandle/credential pair for which access has * been affirmed (by ACCESS or OPEN NFS requests) over the * wire. So there is no need for further checks here. 
*/ if (!rqstp) return nfs_ok; xprt = rqstp->rq_xprt; if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) { if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags)) goto ok; } if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) { if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) && !test_bit(XPT_PEER_AUTH, &xprt->xpt_flags)) goto ok; } if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) { if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) && test_bit(XPT_PEER_AUTH, &xprt->xpt_flags)) goto ok; } goto denied; ok: /* legacy gss-only clients are always OK: */ if (exp->ex_client == rqstp->rq_gssclient) return nfs_ok; /* ip-address based client; check sec= export option: */ for (f = exp->ex_flavors; f < end; f++) { if (f->pseudoflavor == rqstp->rq_cred.cr_flavor) return nfs_ok; } /* defaults in absence of sec= options: */ if (exp->ex_nflavors == 0) { if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL || rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX) return nfs_ok; } /* If the compound op contains a spo_must_allowed op, * it will be sent with integrity/protection which * will have to be expressly allowed on mounts that * don't support it */ if (nfsd4_spo_must_allow(rqstp)) return nfs_ok; /* Some calls may be processed without authentication * on GSS exports. For example NFS2/3 calls on root * directory, see section 2.3.2 of rfc 2623. * For "may_bypass_gss" check that export has really * enabled some flavor with authentication (GSS or any * other) and also check that the used auth flavor is * without authentication (none or sys). */ if (may_bypass_gss && ( rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL || rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)) { for (f = exp->ex_flavors; f < end; f++) { if (f->pseudoflavor >= RPC_AUTH_DES) return 0; } } denied: return nfserr_wrongsec; } /* * Uses rq_client and rq_gssclient to find an export; uses rq_client (an * auth_unix client) if it's available and has secinfo information; * otherwise, will try to use rq_gssclient. * * Called from functions that handle requests; functions that do work on * behalf of mountd are passed a single client name to use, and should * use exp_get_by_name() or exp_find(). */ struct svc_export * rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path) { struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT); struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct cache_detail *cd = nn->svc_export_cache; if (rqstp->rq_client == NULL) goto gss; /* First try the auth_unix client: */ exp = exp_get_by_name(cd, rqstp->rq_client, path, &rqstp->rq_chandle); if (PTR_ERR(exp) == -ENOENT) goto gss; if (IS_ERR(exp)) return exp; /* If it has secinfo, assume there are no gss/... clients */ if (exp->ex_nflavors > 0) return exp; gss: /* Otherwise, try falling back on gss client */ if (rqstp->rq_gssclient == NULL) return exp; gssexp = exp_get_by_name(cd, rqstp->rq_gssclient, path, &rqstp->rq_chandle); if (PTR_ERR(gssexp) == -ENOENT) return exp; if (!IS_ERR(exp)) exp_put(exp); return gssexp; } /** * rqst_exp_find - Find an svc_export in the context of a rqst or similar * @reqp: The handle to be used to suspend the request if a cache-upcall is needed * If NULL, missing in-cache information will result in failure. * @net: The network namespace in which the request exists * @cl: default auth_domain to use for looking up the export * @gsscl: an alternate auth_domain defined using deprecated gss/krb5 format. * @fsid_type: The type of fsid to look for * @fsidv: The actual fsid to look up in the context of either client. 
* * Perform a lookup for @cl/@fsidv in the given @net for an export. If * none found and @gsscl specified, repeat the lookup. * * Returns an export, or an error pointer. */ struct svc_export * rqst_exp_find(struct cache_req *reqp, struct net *net, struct auth_domain *cl, struct auth_domain *gsscl, int fsid_type, u32 *fsidv) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT); struct cache_detail *cd = nn->svc_export_cache; if (!cl) goto gss; /* First try the auth_unix client: */ exp = exp_find(cd, cl, fsid_type, fsidv, reqp); if (PTR_ERR(exp) == -ENOENT) goto gss; if (IS_ERR(exp)) return exp; /* If it has secinfo, assume there are no gss/... clients */ if (exp->ex_nflavors > 0) return exp; gss: /* Otherwise, try falling back on gss client */ if (!gsscl) return exp; gssexp = exp_find(cd, gsscl, fsid_type, fsidv, reqp); if (PTR_ERR(gssexp) == -ENOENT) return exp; if (!IS_ERR(exp)) exp_put(exp); return gssexp; } struct svc_export * rqst_exp_parent(struct svc_rqst *rqstp, struct path *path) { struct dentry *saved = dget(path->dentry); struct svc_export *exp = rqst_exp_get_by_name(rqstp, path); while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) { struct dentry *parent = dget_parent(path->dentry); dput(path->dentry); path->dentry = parent; exp = rqst_exp_get_by_name(rqstp, path); } dput(path->dentry); path->dentry = saved; return exp; } struct svc_export *rqst_find_fsidzero_export(struct svc_rqst *rqstp) { u32 fsidv[2]; mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); return rqst_exp_find(&rqstp->rq_chandle, SVC_NET(rqstp), rqstp->rq_client, rqstp->rq_gssclient, FSID_NUM, fsidv); } /* * Called when we need the filehandle for the root of the pseudofs, * for a given NFSv4 client. The root is defined to be the * export point with fsid==0 */ __be32 exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp) { struct svc_export *exp; __be32 rv; exp = rqst_find_fsidzero_export(rqstp); if (IS_ERR(exp)) return nfserrno(PTR_ERR(exp)); rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL); exp_put(exp); return rv; } static struct flags { int flag; char *name[2]; } expflags[] = { { NFSEXP_READONLY, {"ro", "rw"}}, { NFSEXP_INSECURE_PORT, {"insecure", ""}}, { NFSEXP_ROOTSQUASH, {"root_squash", "no_root_squash"}}, { NFSEXP_ALLSQUASH, {"all_squash", ""}}, { NFSEXP_ASYNC, {"async", "sync"}}, { NFSEXP_GATHERED_WRITES, {"wdelay", "no_wdelay"}}, { NFSEXP_NOREADDIRPLUS, {"nordirplus", ""}}, { NFSEXP_NOHIDE, {"nohide", ""}}, { NFSEXP_CROSSMOUNT, {"crossmnt", ""}}, { NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}}, { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}}, { NFSEXP_V4ROOT, {"v4root", ""}}, { NFSEXP_PNFS, {"pnfs", ""}}, { NFSEXP_SECURITY_LABEL, {"security_label", ""}}, { 0, {"", ""}} }; static void show_expflags(struct seq_file *m, int flags, int mask) { struct flags *flg; int state, first = 0; for (flg = expflags; flg->flag; flg++) { if (flg->flag & ~mask) continue; state = (flg->flag & flags) ? 
0 : 1; if (*flg->name[state]) seq_printf(m, "%s%s", first++?",":"", flg->name[state]); } } static void show_secinfo_flags(struct seq_file *m, int flags) { seq_printf(m, ","); show_expflags(m, flags, NFSEXP_SECINFO_FLAGS); } static bool secinfo_flags_equal(int f, int g) { f &= NFSEXP_SECINFO_FLAGS; g &= NFSEXP_SECINFO_FLAGS; return f == g; } static int show_secinfo_run(struct seq_file *m, struct exp_flavor_info **fp, struct exp_flavor_info *end) { int flags; flags = (*fp)->flags; seq_printf(m, ",sec=%d", (*fp)->pseudoflavor); (*fp)++; while (*fp != end && secinfo_flags_equal(flags, (*fp)->flags)) { seq_printf(m, ":%d", (*fp)->pseudoflavor); (*fp)++; } return flags; } static void show_secinfo(struct seq_file *m, struct svc_export *exp) { struct exp_flavor_info *f; struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors; int flags; if (exp->ex_nflavors == 0) return; f = exp->ex_flavors; flags = show_secinfo_run(m, &f, end); if (!secinfo_flags_equal(flags, exp->ex_flags)) show_secinfo_flags(m, flags); while (f != end) { flags = show_secinfo_run(m, &f, end); show_secinfo_flags(m, flags); } } static void exp_flags(struct seq_file *m, int flag, int fsid, kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fsloc) { struct user_namespace *userns = m->file->f_cred->user_ns; show_expflags(m, flag, NFSEXP_ALLFLAGS); if (flag & NFSEXP_FSID) seq_printf(m, ",fsid=%d", fsid); if (!uid_eq(anonu, make_kuid(userns, (uid_t)-2)) && !uid_eq(anonu, make_kuid(userns, 0x10000-2))) seq_printf(m, ",anonuid=%u", from_kuid_munged(userns, anonu)); if (!gid_eq(anong, make_kgid(userns, (gid_t)-2)) && !gid_eq(anong, make_kgid(userns, 0x10000-2))) seq_printf(m, ",anongid=%u", from_kgid_munged(userns, anong)); if (fsloc && fsloc->locations_count > 0) { char *loctype = (fsloc->migrated) ? "refer" : "replicas"; int i; seq_printf(m, ",%s=", loctype); seq_escape(m, fsloc->locations[0].path, ",;@ \t\n\\"); seq_putc(m, '@'); seq_escape(m, fsloc->locations[0].hosts, ",;@ \t\n\\"); for (i = 1; i < fsloc->locations_count; i++) { seq_putc(m, ';'); seq_escape(m, fsloc->locations[i].path, ",;@ \t\n\\"); seq_putc(m, '@'); seq_escape(m, fsloc->locations[i].hosts, ",;@ \t\n\\"); } } } static int e_show(struct seq_file *m, void *p) { struct cache_head *cp = p; struct svc_export *exp = container_of(cp, struct svc_export, h); struct cache_detail *cd = m->private; bool export_stats = is_export_stats_file(m); if (p == SEQ_START_TOKEN) { seq_puts(m, "# Version 1.1\n"); if (export_stats) seq_puts(m, "# Path Client Start-time\n#\tStats\n"); else seq_puts(m, "# Path Client(Flags) # IPs\n"); return 0; } if (!cache_get_rcu(&exp->h)) return 0; if (cache_check(cd, &exp->h, NULL)) return 0; exp_put(exp); return svc_export_show(m, cd, cp); } const struct seq_operations nfs_exports_op = { .start = cache_seq_start_rcu, .next = cache_seq_next_rcu, .stop = cache_seq_stop_rcu, .show = e_show, }; /* * Initialize the exports module. 
*/ int nfsd_export_init(struct net *net) { int rv; struct nfsd_net *nn = net_generic(net, nfsd_net_id); dprintk("nfsd: initializing export module (net: %x).\n", net->ns.inum); nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net); if (IS_ERR(nn->svc_export_cache)) return PTR_ERR(nn->svc_export_cache); rv = cache_register_net(nn->svc_export_cache, net); if (rv) goto destroy_export_cache; nn->svc_expkey_cache = cache_create_net(&svc_expkey_cache_template, net); if (IS_ERR(nn->svc_expkey_cache)) { rv = PTR_ERR(nn->svc_expkey_cache); goto unregister_export_cache; } rv = cache_register_net(nn->svc_expkey_cache, net); if (rv) goto destroy_expkey_cache; return 0; destroy_expkey_cache: cache_destroy_net(nn->svc_expkey_cache, net); unregister_export_cache: cache_unregister_net(nn->svc_export_cache, net); destroy_export_cache: cache_destroy_net(nn->svc_export_cache, net); return rv; } /* * Flush exports table - called when last nfsd thread is killed */ void nfsd_export_flush(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); cache_purge(nn->svc_expkey_cache); cache_purge(nn->svc_export_cache); } /* * Shutdown the exports module. */ void nfsd_export_shutdown(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); dprintk("nfsd: shutting down export module (net: %x).\n", net->ns.inum); cache_unregister_net(nn->svc_expkey_cache, net); cache_unregister_net(nn->svc_export_cache, net); cache_destroy_net(nn->svc_expkey_cache, net); cache_destroy_net(nn->svc_export_cache, net); svcauth_unix_purge(net); dprintk("nfsd: export shutdown complete (net: %x).\n", net->ns.inum); }
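/*
 * Editor's sketch (not from the original file): nfsd_export_init() above uses
 * the kernel's standard goto-based error unwinding -- each setup step that
 * succeeds gains a matching cleanup label, and a failure jumps to the label
 * that undoes exactly the steps completed so far, in reverse order. A minimal
 * user-space illustration of the same idiom (demo_init() is an invented name):
 */
#include <stdlib.h>

static int demo_init(void)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		return -1;		/* nothing to unwind yet */

	b = malloc(16);
	if (!b)
		goto free_a;		/* undo only the first step */

	/* ... both resources are live here; use or hand them off ... */
	free(b);
	free(a);
	return 0;

free_a:
	free(a);
	return -1;
}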
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion * * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma <dipankar@in.ibm.com> * * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - * http://lse.sourceforge.net/locking/rcupdate.html * */ #ifndef __LINUX_RCUPDATE_H #define __LINUX_RCUPDATE_H #include <linux/types.h> #include <linux/compiler.h> #include <linux/atomic.h> #include <linux/irqflags.h> #include <linux/preempt.h> #include <linux/bottom_half.h> #include <linux/lockdep.h> #include <linux/cleanup.h> #include <asm/processor.h> #include <linux/context_tracking_irq.h> #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) #define RCU_SEQ_CTR_SHIFT 2 #define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1) /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); void synchronize_rcu(void); struct rcu_gp_oldstate; unsigned long get_completed_synchronize_rcu(void); void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp); // Maximum number of unsigned long values corresponding to // not-yet-completed RCU grace periods. #define NUM_ACTIVE_RCU_POLL_OLDSTATE 2 /** * same_state_synchronize_rcu - Are two old-state values identical? * @oldstate1: First old-state value. * @oldstate2: Second old-state value. * * The two old-state values must have been obtained from either * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or * get_completed_synchronize_rcu(). Returns @true if the two values are * identical and @false otherwise. This allows structures whose lifetimes * are tracked by old-state values to push these values to a list header, * allowing those structures to be slightly smaller.
*/ static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2) { return oldstate1 == oldstate2; } #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); void __rcu_read_unlock(void); /* * Defined as a macro as it is a very low level header included from * areas that don't even know about current. This gives the rcu_read_lock() * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting) #else /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TINY_RCU #define rcu_read_unlock_strict() do { } while (0) #else void rcu_read_unlock_strict(void); #endif static inline void __rcu_read_lock(void) { preempt_disable(); } static inline void __rcu_read_unlock(void) { preempt_enable(); if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) rcu_read_unlock_strict(); } static inline int rcu_preempt_depth(void) { return 0; } #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_RCU_LAZY void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func); #else static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) { call_rcu(head, func); } #endif /* Internal to kernel */ void rcu_init(void); extern int rcu_scheduler_active; void rcu_sched_clock_irq(int user); #ifdef CONFIG_TASKS_RCU_GENERIC void rcu_init_tasks_generic(void); #else static inline void rcu_init_tasks_generic(void) { } #endif #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); void rcu_sysrq_end(void); #else /* #ifdef CONFIG_RCU_STALL_COMMON */ static inline void rcu_sysrq_start(void) { } static inline void rcu_sysrq_end(void) { } #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) void rcu_irq_work_resched(void); #else static inline void rcu_irq_work_resched(void) { } #endif #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); int rcu_nocb_cpu_offload(int cpu); int rcu_nocb_cpu_deoffload(int cpu); void rcu_nocb_flush_deferred_wakeup(void); #define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s) #else /* #ifdef CONFIG_RCU_NOCB_CPU */ static inline void rcu_init_nohz(void) { } static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; } static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } static inline void rcu_nocb_flush_deferred_wakeup(void) { } #define RCU_NOCB_LOCKDEP_WARN(c, s) #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ /* * Note a quasi-voluntary context switch for RCU-tasks's benefit. * This is a macro rather than an inline function to avoid #include hell. */ #ifdef CONFIG_TASKS_RCU_GENERIC # ifdef CONFIG_TASKS_RCU # define rcu_tasks_classic_qs(t, preempt) \ do { \ if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \ WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); void rcu_tasks_torture_stats_print(char *tt, char *tf); # else # define rcu_tasks_classic_qs(t, preempt) do { } while (0) # define call_rcu_tasks call_rcu # define synchronize_rcu_tasks synchronize_rcu # endif # ifdef CONFIG_TASKS_TRACE_RCU // Bits for ->trc_reader_special.b.need_qs field. #define TRC_NEED_QS 0x1 // Task needs a quiescent state. #define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state. 
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new); void rcu_tasks_trace_qs_blkd(struct task_struct *t); # define rcu_tasks_trace_qs(t) \ do { \ int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \ \ if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \ likely(!___rttq_nesting)) { \ rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \ } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \ !READ_ONCE((t)->trc_reader_special.b.blocked)) { \ rcu_tasks_trace_qs_blkd(t); \ } \ } while (0) void rcu_tasks_trace_torture_stats_print(char *tt, char *tf); # else # define rcu_tasks_trace_qs(t) do { } while (0) # endif #define rcu_tasks_qs(t, preempt) \ do { \ rcu_tasks_classic_qs((t), (preempt)); \ rcu_tasks_trace_qs(t); \ } while (0) # ifdef CONFIG_TASKS_RUDE_RCU void synchronize_rcu_tasks_rude(void); void rcu_tasks_rude_torture_stats_print(char *tt, char *tf); # endif #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) void exit_tasks_rcu_start(void); void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ #define rcu_tasks_classic_qs(t, preempt) do { } while (0) #define rcu_tasks_qs(t, preempt) do { } while (0) #define rcu_note_voluntary_context_switch(t) do { } while (0) #define call_rcu_tasks call_rcu #define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ /** * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period? * * As an accident of implementation, an RCU Tasks Trace grace period also * acts as an RCU grace period. However, this could change at any time. * Code relying on this accident must call this function to verify that * this accident is still happening. * * You have been warned! */ static inline bool rcu_trace_implies_rcu_gp(void) { return true; } /** * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU * * This macro resembles cond_resched(), except that it is defined to * report potential quiescent states to RCU-tasks even if the cond_resched() * machinery were to be shut off, as some advocate for PREEMPTION kernels. */ #define cond_resched_tasks_rcu_qs() \ do { \ rcu_tasks_qs(current, false); \ cond_resched(); \ } while (0) /** * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states * @old_ts: jiffies at start of processing. * * This helper is for long-running softirq handlers, such as NAPI threads in * networking. The caller should initialize the variable passed in as @old_ts * at the beginning of the softirq handler. When invoked frequently, this macro * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will * provide both RCU and RCU-Tasks quiescent states. Note that this macro * modifies its old_ts argument. * * Because regions of code that have disabled softirq act as RCU read-side * critical sections, this macro should be invoked with softirq (and * preemption) enabled. * * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels have * more opportunities to invoke schedule() and thus provide the necessary * quiescent states. By contrast, calling cond_resched() alone won't achieve * the same effect because cond_resched() does not provide RCU-Tasks quiescent * states.
*/ #define rcu_softirq_qs_periodic(old_ts) \ do { \ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \ time_after(jiffies, (old_ts) + HZ / 10)) { \ preempt_disable(); \ rcu_softirq_qs(); \ preempt_enable(); \ (old_ts) = jiffies; \ } \ } while (0) /* * Infrastructure to implement the synchronize_() primitives in * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. */ #if defined(CONFIG_TREE_RCU) #include <linux/rcutree.h> #elif defined(CONFIG_TINY_RCU) #include <linux/rcutiny.h> #else #error "Unknown RCU implementation specified to kernel configuration" #endif /* * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls * are needed for dynamic initialization and destruction of rcu_head * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for * dynamic initialization and destruction of statically allocated rcu_head * structures. However, rcu_head structures allocated dynamically in the * heap don't need any initialization. */ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD void init_rcu_head(struct rcu_head *head); void destroy_rcu_head(struct rcu_head *head); void init_rcu_head_on_stack(struct rcu_head *head); void destroy_rcu_head_on_stack(struct rcu_head *head); #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ static inline void init_rcu_head(struct rcu_head *head) { } static inline void destroy_rcu_head(struct rcu_head *head) { } static inline void init_rcu_head_on_stack(struct rcu_head *head) { } static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { } #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) bool rcu_lockdep_current_cpu_online(void); #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ static inline bool rcu_lockdep_current_cpu_online(void) { return true; } #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ extern struct lockdep_map rcu_lock_map; extern struct lockdep_map rcu_bh_lock_map; extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void rcu_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); } static inline void rcu_try_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_); } static inline void rcu_lock_release(struct lockdep_map *map) { lock_release(map, _THIS_IP_); } int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); int rcu_read_lock_sched_held(void); int rcu_read_lock_any_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ # define rcu_lock_acquire(a) do { } while (0) # define rcu_try_lock_acquire(a) do { } while (0) # define rcu_lock_release(a) do { } while (0) static inline int rcu_read_lock_held(void) { return 1; } static inline int rcu_read_lock_bh_held(void) { return 1; } static inline int rcu_read_lock_sched_held(void) { return !preemptible(); } static inline int rcu_read_lock_any_held(void) { return !preemptible(); } static inline int debug_lockdep_rcu_enabled(void) { return 0; } #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #ifdef CONFIG_PROVE_RCU /** * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met * @c: condition to check * @s: informative message * * This checks debug_lockdep_rcu_enabled() before checking (c) to * prevent early boot splats due to lockdep not yet being initialized, * and rechecks it after checking (c) to prevent false-positive splats * due to races with lockdep being disabled. 
See commit 3066820034b5dd * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail. */ #define RCU_LOCKDEP_WARN(c, s) \ do { \ static bool __section(".data..unlikely") __warned; \ if (debug_lockdep_rcu_enabled() && (c) && \ debug_lockdep_rcu_enabled() && !__warned) { \ __warned = true; \ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ } \ } while (0) #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map), "Illegal context switch in RCU read-side critical section"); } #else // #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { } #endif // #else // #ifndef CONFIG_PREEMPT_RCU #define rcu_sleep_check() \ do { \ rcu_preempt_sleep_check(); \ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ "Illegal context switch in RCU-bh read-side critical section"); \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ "Illegal context switch in RCU-sched read-side critical section"); \ } while (0) // See RCU_LOCKDEP_WARN() for an explanation of the double call to // debug_lockdep_rcu_enabled(). static inline bool lockdep_assert_rcu_helper(bool c) { return debug_lockdep_rcu_enabled() && (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) && debug_lockdep_rcu_enabled(); } /** * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock() * * Splats if lockdep is enabled and there is no rcu_read_lock() in effect. */ #define lockdep_assert_in_rcu_read_lock() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map))) /** * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh() * * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect. * Note that local_bh_disable() and friends do not suffice here, instead an * actual rcu_read_lock_bh() is required. */ #define lockdep_assert_in_rcu_read_lock_bh() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map))) /** * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched() * * Splats if lockdep is enabled and there is no rcu_read_lock_sched() * in effect. Note that preempt_disable() and friends do not suffice here, * instead an actual rcu_read_lock_sched() is required. */ #define lockdep_assert_in_rcu_read_lock_sched() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map))) /** * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader * * Splats if lockdep is enabled and there is no RCU reader of any * type in effect. Note that regions of code protected by things like * preempt_disable, local_bh_disable(), and local_irq_disable() all qualify * as RCU readers. * * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY * kernels that are not also built with PREEMPT_COUNT. But if you have * lockdep enabled, you might as well also enable PREEMPT_COUNT. 
*/ #define lockdep_assert_in_rcu_reader() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \ !lock_is_held(&rcu_bh_lock_map) && \ !lock_is_held(&rcu_sched_lock_map) && \ preemptible())) #else /* #ifdef CONFIG_PROVE_RCU */ #define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c)) #define rcu_sleep_check() do { } while (0) #define lockdep_assert_in_rcu_read_lock() do { } while (0) #define lockdep_assert_in_rcu_read_lock_bh() do { } while (0) #define lockdep_assert_in_rcu_read_lock_sched() do { } while (0) #define lockdep_assert_in_rcu_reader() do { } while (0) #endif /* #else #ifdef CONFIG_PROVE_RCU */ /* * Helper functions for rcu_dereference_check(), rcu_dereference_protected() * and rcu_assign_pointer(). Some of these could be folded into their * callers, but they are left separate in order to ease introduction of * multiple pointers markings to match different RCU implementations * (e.g., __srcu), should this make sense in the future. */ #ifdef __CHECKER__ #define rcu_check_sparse(p, space) \ ((void)(((typeof(*p) space *)p) == p)) #else /* #ifdef __CHECKER__ */ #define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ #define __unrcu_pointer(p, local) \ ({ \ typeof(*p) *local = (typeof(*p) *__force)(p); \ rcu_check_sparse(p, __rcu); \ ((typeof(*p) __force __kernel *)(local)); \ }) /** * unrcu_pointer - mark a pointer as not being RCU protected * @p: pointer needing to lose its __rcu property * * Converts @p from an __rcu pointer to a __kernel pointer. * This allows an __rcu pointer to be used with xchg() and friends. */ #define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu)) #define __rcu_access_pointer(p, local, space) \ ({ \ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define __rcu_dereference_check(p, local, c, space) \ ({ \ /* Dependency order vs. p above. */ \ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define __rcu_dereference_protected(p, local, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) #define __rcu_dereference_raw(p, local) \ ({ \ /* Dependency order vs. p above. */ \ typeof(p) local = READ_ONCE(p); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu)) /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable * @v: The value to statically initialize with. */ #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) /** * rcu_assign_pointer() - assign to RCU-protected pointer * @p: pointer to assign to * @v: value to assign (publish) * * Assigns the specified value to the specified RCU-protected * pointer, ensuring that any concurrent RCU readers will see * any prior initialization. * * Inserts memory barriers on architectures that require them * (which is most of them), and also prevents the compiler from * reordering the code that initializes the structure after the pointer * assignment. More importantly, this call documents which pointers * will be dereferenced by RCU read-side code. * * In some special cases, you may use RCU_INIT_POINTER() instead * of rcu_assign_pointer(). 
RCU_INIT_POINTER() is a bit faster due * to the fact that it does not constrain either the CPU or the compiler. * That said, using RCU_INIT_POINTER() when you should have used * rcu_assign_pointer() is a very bad thing that results in * impossible-to-diagnose memory corruption. So please be careful. * See the RCU_INIT_POINTER() comment header for details. * * Note that rcu_assign_pointer() evaluates each of its arguments only * once, appearances notwithstanding. One of the "extra" evaluations * is in typeof() and the other visible only to sparse (__CHECKER__), * neither of which actually execute the argument. As with most cpp * macros, this execute-arguments-only-once property is important, so * please be careful when making changes to rcu_assign_pointer() and the * other macros that it invokes. */ #define rcu_assign_pointer(p, v) \ do { \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ else \ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ } while (0) /** * rcu_replace_pointer() - replace an RCU pointer, returning its old value * @rcu_ptr: RCU pointer, whose old value is returned * @ptr: regular pointer * @c: the lockdep conditions under which the dereference will take place * * Perform a replacement, where @rcu_ptr is an RCU-annotated * pointer and @c is the lockdep argument that is passed to the * rcu_dereference_protected() call used to read that pointer. The old * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr. */ #define rcu_replace_pointer(rcu_ptr, ptr, c) \ ({ \ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ rcu_assign_pointer((rcu_ptr), (ptr)); \ __tmp; \ }) /** * rcu_access_pointer() - fetch RCU pointer with no dereferencing * @p: The pointer to read * * Return the value of the specified RCU-protected pointer, but omit the * lockdep checks for being in an RCU read-side critical section. This is * useful when the value of this pointer is accessed, but the pointer is * not dereferenced, for example, when testing an RCU-protected pointer * against NULL. Although rcu_access_pointer() may also be used in cases * where update-side locks prevent the value of the pointer from changing, * you should instead use rcu_dereference_protected() for this use case. * Within an RCU read-side critical section, there is little reason to * use rcu_access_pointer(). * * It is usually best to test the rcu_access_pointer() return value * directly in order to avoid accidental dereferences being introduced * by later inattentive changes. In other words, assigning the * rcu_access_pointer() return value to a local variable results in an * accident waiting to happen. * * It is also permissible to use rcu_access_pointer() when read-side * access to the pointer was removed at least one grace period ago, as is * the case in the context of the RCU callback that is freeing up the data, * or after a synchronize_rcu() returns. This can be useful when tearing * down multi-linked structures after a grace period has elapsed. However, * rcu_dereference_protected() is normally preferred for this use case. 
*/ #define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu) /** * rcu_dereference_check() - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Do an rcu_dereference(), but check that the conditions under which the * dereference will take place are correct. Typically the conditions * indicate the various locking conditions that should be held at that * point. The check should return true if the conditions are satisfied. * An implicit check for being in an RCU read-side critical section * (rcu_read_lock()) is included. * * For example: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); * * could be used to indicate to lockdep that foo->bar may only be dereferenced * if either rcu_read_lock() is held, or that the lock required to replace * the bar struct at foo->bar is held. * * Note that the list of conditions may also include indications of when a lock * need not be held, for example during initialisation or destruction of the * target struct: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || * atomic_read(&foo->usage) == 0); * * Inserts memory barriers on architectures that require them * (currently only the Alpha), prevents the compiler from refetching * (and from merging fetches), and, more importantly, documents exactly * which pointers are protected by RCU and checks that the pointer is * annotated as __rcu. */ #define rcu_dereference_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_held(), __rcu) /** * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * This is the RCU-bh counterpart to rcu_dereference_check(). However, * please note that starting in v5.0 kernels, vanilla RCU grace periods * wait for local_bh_disable() regions of code in addition to regions of * code demarked by rcu_read_lock() and rcu_read_unlock(). This means * that synchronize_rcu(), call_rcu, and friends all take not only * rcu_read_lock() but also rcu_read_lock_bh() into account. */ #define rcu_dereference_bh_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_bh_held(), __rcu) /** * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * This is the RCU-sched counterpart to rcu_dereference_check(). * However, please note that starting in v5.0 kernels, vanilla RCU grace * periods wait for preempt_disable() regions of code in addition to * regions of code demarked by rcu_read_lock() and rcu_read_unlock(). * This means that synchronize_rcu(), call_rcu, and friends all take not * only rcu_read_lock() but also rcu_read_lock_sched() into account. */ #define rcu_dereference_sched_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_sched_held(), \ __rcu) /* * The tracing infrastructure traces RCU (we want that), but unfortunately * some of the RCU checks causes tracing to lock up the system. * * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). 
*/ #define rcu_dereference_raw_check(p) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu) /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Return the value of the specified RCU-protected pointer, but omit * the READ_ONCE(). This is useful in cases where update-side locks * prevent the value of the pointer from changing. Please note that this * primitive does *not* prevent the compiler from repeating this reference * or combining it with other references, so it should not be used without * protection of appropriate locks. * * This function is only for update-side use. Using this function * when protected only by rcu_read_lock() will result in infrequent * but very ugly failures. */ #define rcu_dereference_protected(p, c) \ __rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu) /** * rcu_dereference() - fetch RCU-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * This is a simple wrapper around rcu_dereference_check(). */ #define rcu_dereference(p) rcu_dereference_check(p, 0) /** * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * Makes rcu_dereference_check() do the dirty work. */ #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) /** * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * Makes rcu_dereference_check() do the dirty work. */ #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) /** * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism * @p: The pointer to hand off * * This is simply an identity function, but it documents where a pointer * is handed off from RCU to some other synchronization mechanism, for * example, reference counting or locking. In C11, it would map to * kill_dependency(). It could be used as follows:: * * rcu_read_lock(); * p = rcu_dereference(gp); * long_lived = is_long_lived(p); * if (long_lived) { * if (!atomic_inc_not_zero(p->refcnt)) * long_lived = false; * else * p = rcu_pointer_handoff(p); * } * rcu_read_unlock(); */ #define rcu_pointer_handoff(p) (p) /** * rcu_read_lock() - mark the beginning of an RCU read-side critical section * * When synchronize_rcu() is invoked on one CPU while other CPUs * are within RCU read-side critical sections, then the * synchronize_rcu() is guaranteed to block until after all the other * CPUs exit their critical sections. Similarly, if call_rcu() is invoked * on one CPU while other CPUs are within RCU read-side critical * sections, invocation of the corresponding RCU callback is deferred * until after all the other CPUs exit their critical sections. * * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also * wait for regions of code with preemption disabled, including regions of * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which * define synchronize_sched(), only code enclosed within rcu_read_lock() * and rcu_read_unlock() is guaranteed to be waited for. * * Note, however, that RCU callbacks are permitted to run concurrently * with new RCU read-side critical sections.
One way that this can happen * is via the following sequence of events: (1) CPU 0 enters an RCU * read-side critical section, (2) CPU 1 invokes call_rcu() to register * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU * callback is invoked. This is legal, because the RCU read-side critical * section that was running concurrently with the call_rcu() (and which * therefore might be referencing something that the corresponding RCU * callback would free up) has completed before the corresponding * RCU callback is invoked. * * RCU read-side critical sections may be nested. Any deferred actions * will be deferred until the outermost RCU read-side critical section * completes. * * You can avoid reading and understanding the next paragraph by * following this rule: don't put anything in an rcu_read_lock() RCU * read-side critical section that would block in a !PREEMPTION kernel. * But if you want the full story, read on! * * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU), * it is illegal to block while in an RCU read-side critical section. * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION * kernel builds, RCU read-side critical sections may be preempted, * but explicit blocking is illegal. Finally, in preemptible RCU * implementations in real-time (with -rt patchset) kernel builds, RCU * read-side critical sections may be preempted and they may also block, but * only when acquiring spinlocks that are subject to priority inheritance. */ static __always_inline void rcu_read_lock(void) { __rcu_read_lock(); __acquire(RCU); rcu_lock_acquire(&rcu_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock() used illegally while idle"); } /* * So where is rcu_write_lock()? It does not exist, as there is no * way for writers to lock out RCU readers. This is a feature, not * a bug -- this property is what provides RCU's performance benefits. * Of course, writers must coordinate with each other. The normal * spinlock primitives work well for this, but any other technique may be * used as well. RCU does not care how the writers keep out of each * others' way, as long as they do so. */ /** * rcu_read_unlock() - marks the end of an RCU read-side critical section. * * In almost all situations, rcu_read_unlock() is immune from deadlock. * In recent kernels that have consolidated synchronize_sched() and * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity * also extends to the scheduler's runqueue and priority-inheritance * spinlocks, courtesy of the quiescent-state deferral that is carried * out when rcu_read_unlock() is invoked with interrupts disabled. * * See rcu_read_lock() for more information. */ static inline void rcu_read_unlock(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock() used illegally while idle"); rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ __release(RCU); __rcu_read_unlock(); } /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * * This is equivalent to rcu_read_lock(), but also disables softirqs. * Note that anything else that disables softirqs can also serve as an RCU * read-side critical section. However, please note that this equivalence * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and * rcu_read_lock_bh() were unrelated. 
* * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() * was invoked from some other task. */ static inline void rcu_read_lock_bh(void) { local_bh_disable(); __acquire(RCU_BH); rcu_lock_acquire(&rcu_bh_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_bh() used illegally while idle"); } /** * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ static inline void rcu_read_unlock_bh(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_bh() used illegally while idle"); rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); local_bh_enable(); } /** * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section * * This is equivalent to rcu_read_lock(), but also disables preemption. * Read-side critical sections can also be introduced by anything else that * disables preemption, including local_irq_disable() and friends. However, * please note that the equivalence to rcu_read_lock() applies only to * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched() * were unrelated. * * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_sched() from process context if the matching * rcu_read_lock_sched() was invoked from an NMI handler. */ static inline void rcu_read_lock_sched(void) { preempt_disable(); __acquire(RCU_SCHED); rcu_lock_acquire(&rcu_sched_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_sched() used illegally while idle"); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_lock_sched_notrace(void) { preempt_disable_notrace(); __acquire(RCU_SCHED); } /** * rcu_read_unlock_sched() - marks the end of an RCU-classic critical section * * See rcu_read_lock_sched() for more information. */ static inline void rcu_read_unlock_sched(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_sched() used illegally while idle"); rcu_lock_release(&rcu_sched_lock_map); __release(RCU_SCHED); preempt_enable(); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_unlock_sched_notrace(void) { __release(RCU_SCHED); preempt_enable_notrace(); } /** * RCU_INIT_POINTER() - initialize an RCU-protected pointer * @p: The pointer to be initialized. * @v: The value to initialize the pointer to. * * Initialize an RCU-protected pointer in special cases where readers * do not need ordering constraints on the CPU or the compiler. These * special cases are: * * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or* * 2. The caller has taken whatever steps are required to prevent * RCU readers from concurrently accessing this pointer *or* * 3. The referenced data structure has already been exposed to * readers either at compile time or via rcu_assign_pointer() *and* * * a. You have not made *any* reader-visible changes to * this structure since then *or* * b. It is OK for readers accessing this structure from its * new location to see the old state of the structure. (For * example, the changes were to statistical counters or to * other state where exact synchronization is not required.)
* * Failure to follow these rules governing use of RCU_INIT_POINTER() will * result in impossible-to-diagnose memory corruption. That is, the structures * will look OK in crash dumps, but any concurrent RCU readers might * see pre-initialized values of the referenced data structure. So * please be very careful how you use RCU_INIT_POINTER()!!! * * If you are creating an RCU-protected linked structure that is accessed * by a single external-to-structure RCU-protected pointer, then you may * use RCU_INIT_POINTER() to initialize the internal RCU-protected * pointers, but you must use rcu_assign_pointer() to initialize the * external-to-structure pointer *after* you have completely initialized * the reader-accessible portions of the linked structure. * * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no * ordering guarantees for either the CPU or the compiler. */ #define RCU_INIT_POINTER(p, v) \ do { \ rcu_check_sparse(p, __rcu); \ WRITE_ONCE(p, RCU_INITIALIZER(v)); \ } while (0) /** * RCU_POINTER_INITIALIZER() - statically initialize an RCU-protected pointer * @p: The pointer to be initialized. * @v: The value to initialize the pointer to. * * GCC-style initialization for an RCU-protected pointer in a structure field. */ #define RCU_POINTER_INITIALIZER(p, v) \ .p = RCU_INITIALIZER(v) /* * Does the specified offset indicate that the corresponding rcu_head * structure can be handled by kvfree_rcu()? */ #define __is_kvfree_rcu_offset(offset) ((offset) < 4096) /** * kfree_rcu() - kfree an object after a grace period. * @ptr: pointer to kfree for double-argument invocations. * @rhf: the name of the struct rcu_head within the type of @ptr. * * Many RCU callback functions just call kfree() on the base structure. * These functions are trivial, but their size adds up, and furthermore * when they are used in a kernel module, that module must invoke the * high-latency rcu_barrier() function at module-unload time. * * The kfree_rcu() function handles this issue. Rather than encoding a * function address in the embedded rcu_head structure, kfree_rcu() instead * encodes the offset of the rcu_head structure within the base structure. * Because the functions are not allowed in the low-order 4096 bytes of * kernel virtual memory, offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * * The object to be freed can be allocated either by kmalloc() or * kmem_cache_alloc(). * * Note that the allowable offset might decrease in the future. * * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. */ #define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf) #define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf) /** * kfree_rcu_mightsleep() - kfree an object after a grace period. * @ptr: pointer to kfree for single-argument invocations. * * When it comes to the head-less variant, only one argument * is passed and that is just a pointer which has to be * freed after a grace period. Therefore the semantics are * * kfree_rcu_mightsleep(ptr); * * where @ptr is the pointer to be freed by kvfree(). * * Please note, the head-less way of freeing may only be used from a * context that is allowed to sleep, as implied by the might_sleep() * annotation.
Otherwise, please switch and embed the * rcu_head structure within the type of @ptr. */ #define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) #define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) #define kvfree_rcu_arg_2(ptr, rhf) \ do { \ typeof(ptr) ___p = (ptr); \ \ if (___p) { \ BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \ kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \ } \ } while (0) #define kvfree_rcu_arg_1(ptr) \ do { \ typeof(ptr) ___p = (ptr); \ \ if (___p) \ kvfree_call_rcu(NULL, (void *) (___p)); \ } while (0) /* * Place this after a lock-acquisition primitive to guarantee that * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies * if the UNLOCK and LOCK are executed by the same CPU or if the * UNLOCK and LOCK operate on the same lock variable. */ #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE #define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ #else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ #define smp_mb__after_unlock_lock() do { } while (0) #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ /* Has the specified rcu_head structure been handed to call_rcu()? */ /** * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() * @rhp: The rcu_head structure to initialize. * * If you intend to invoke rcu_head_after_call_rcu() to test whether a * given rcu_head structure has already been passed to call_rcu(), then * you must also invoke this rcu_head_init() function on it just after * allocating that structure. Calls to this function must not race with * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation. */ static inline void rcu_head_init(struct rcu_head *rhp) { rhp->func = (rcu_callback_t)~0L; } /** * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()? * @rhp: The rcu_head structure to test. * @f: The function passed to call_rcu() along with @rhp. * * Returns @true if the @rhp has been passed to call_rcu() with @f, * and @false otherwise. Emits a warning in any other case, including * the case where @rhp has already been invoked after a grace period. * Calls to this function must not race with callback invocation. One way * to avoid such races is to enclose the call to rcu_head_after_call_rcu() * in an RCU read-side critical section that includes a read-side fetch * of the pointer to the structure containing @rhp. */ static inline bool rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) { rcu_callback_t func = READ_ONCE(rhp->func); if (func == f) return true; WARN_ON_ONCE(func != (rcu_callback_t)~0L); return false; } /* kernel/ksysfs.c definitions */ extern int rcu_expedited; extern int rcu_normal; DEFINE_LOCK_GUARD_0(rcu, do { rcu_read_lock(); /* * sparse doesn't call the cleanup function, * so just release immediately and don't track * the context. We don't need to anyway, since * the whole point of the guard is to not need * the explicit unlock. */ __release(RCU); } while (0), rcu_read_unlock()) #endif /* __LINUX_RCUPDATE_H */
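/*
 * Editor's sketch (not part of the original header): the canonical
 * publish/subscribe pattern that the primitives above implement. The names
 * struct foo, gp, and demo_*() are invented for illustration, and a real
 * caller would also include <linux/slab.h> and <linux/spinlock.h>.
 */
struct foo {
	int a;
	struct rcu_head rh;
};

static struct foo __rcu *gp;
static DEFINE_SPINLOCK(gp_lock);

/* Reader: dereference only inside an RCU read-side critical section. */
static int demo_read_a(void)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		ret = p->a;
	rcu_read_unlock();
	return ret;
}

/* Updater: publish a new version, then reclaim the old one after a GP. */
static int demo_update_a(int a)
{
	struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	struct foo *oldp;

	if (!newp)
		return -ENOMEM;
	newp->a = a;

	spin_lock(&gp_lock);
	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, newp);	/* orders initialization before publish */
	spin_unlock(&gp_lock);

	if (oldp)
		kfree_rcu(oldp, rh);	/* freed once pre-existing readers finish */
	return 0;
}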
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * NET Generic infrastructure for INET connection oriented protocols. * * Definitions for inet_connection_sock * * Authors: Many people, see the TCP sources * * From code originally in TCP */ #ifndef _INET_CONNECTION_SOCK_H #define _INET_CONNECTION_SOCK_H #include <linux/compiler.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/poll.h> #include <linux/kernel.h> #include <linux/sockptr.h> #include <net/inet_sock.h> #include <net/request_sock.h> /* Cancel timers, when they are not required. */ #undef INET_CSK_CLEAR_TIMERS struct inet_bind_bucket; struct inet_bind2_bucket; struct tcp_congestion_ops; /* * Pointers to address related TCP functions * (i.e.
things that depend on the address family) */ struct inet_connection_sock_af_ops { int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl); void (*send_check)(struct sock *sk, struct sk_buff *skb); int (*rebuild_header)(struct sock *sk); void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); int (*conn_request)(struct sock *sk, struct sk_buff *skb); struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req); u16 net_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); void (*mtu_reduced)(struct sock *sk); }; /** inet_connection_sock - INET connection oriented sock * * @icsk_accept_queue: FIFO of established children * @icsk_bind_hash: Bind node * @icsk_bind2_hash: Bind node in the bhash2 table * @icsk_timeout: Timeout * @icsk_retransmit_timer: Resend (no ack) * @icsk_rto: Retransmit timeout * @icsk_pmtu_cookie: Last pmtu seen by socket * @icsk_ca_ops: Pluggable congestion control hook * @icsk_af_ops: Operations which are AF_INET{4,6} specific * @icsk_ulp_ops: Pluggable ULP control hook * @icsk_ulp_data: ULP private data * @icsk_clean_acked: Clean acked data hook * @icsk_ca_state: Congestion control state * @icsk_retransmits: Number of unrecovered [RTO] timeouts * @icsk_pending: Scheduled timer event * @icsk_backoff: Backoff * @icsk_syn_retries: Number of allowed SYN (or equivalent) retries * @icsk_probes_out: unanswered 0 window probes * @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options) * @icsk_ack: Delayed ACK control data * @icsk_mtup: MTU probing control data * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack) * @icsk_user_timeout: TCP_USER_TIMEOUT value */ struct inet_connection_sock { /* inet_sock has to be the first member!
*/ struct inet_sock icsk_inet; struct request_sock_queue icsk_accept_queue; struct inet_bind_bucket *icsk_bind_hash; struct inet_bind2_bucket *icsk_bind2_hash; unsigned long icsk_timeout; struct timer_list icsk_retransmit_timer; struct timer_list icsk_delack_timer; __u32 icsk_rto; __u32 icsk_rto_min; __u32 icsk_delack_max; __u32 icsk_pmtu_cookie; const struct tcp_congestion_ops *icsk_ca_ops; const struct inet_connection_sock_af_ops *icsk_af_ops; const struct tcp_ulp_ops *icsk_ulp_ops; void __rcu *icsk_ulp_data; void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq); unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); __u8 icsk_ca_state:5, icsk_ca_initialized:1, icsk_ca_setsockopt:1, icsk_ca_dst_locked:1; __u8 icsk_retransmits; __u8 icsk_pending; __u8 icsk_backoff; __u8 icsk_syn_retries; __u8 icsk_probes_out; __u16 icsk_ext_hdr_len; struct { __u8 pending; /* ACK is pending */ __u8 quick; /* Scheduled number of quick acks */ __u8 pingpong; /* The session is interactive */ __u8 retry; /* Number of attempts */ #define ATO_BITS 8 __u32 ato:ATO_BITS, /* Predicted tick of soft clock */ lrcv_flowlabel:20, /* last received ipv6 flowlabel */ unused:4; unsigned long timeout; /* Currently scheduled timeout */ __u32 lrcvtime; /* timestamp of last received data packet */ __u16 last_seg_size; /* Size of last incoming segment */ __u16 rcv_mss; /* MSS used for delayed ACK decisions */ } icsk_ack; struct { /* Range of MTUs to search */ int search_high; int search_low; /* Information on the current probe. */ u32 probe_size:31, /* Is the MTUP feature enabled for this connection? */ enabled:1; u32 probe_timestamp; } icsk_mtup; u32 icsk_probes_tstamp; u32 icsk_user_timeout; u64 icsk_ca_priv[104 / sizeof(u64)]; #define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv) }; #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ #define ICSK_TIME_DACK 2 /* Delayed ack timer */ #define ICSK_TIME_PROBE0 3 /* Zero window probe timer */ #define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */ #define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */ #define inet_csk(ptr) container_of_const(ptr, struct inet_connection_sock, icsk_inet.sk) static inline void *inet_csk_ca(const struct sock *sk) { return (void *)inet_csk(sk)->icsk_ca_priv; } struct sock *inet_csk_clone_lock(const struct sock *sk, const struct request_sock *req, const gfp_t priority); enum inet_csk_ack_state_t { ICSK_ACK_SCHED = 1, ICSK_ACK_TIMER = 2, ICSK_ACK_PUSHED = 4, ICSK_ACK_PUSHED2 = 8, ICSK_ACK_NOW = 16, /* Send the next ACK immediately (once) */ ICSK_ACK_NOMEM = 32, }; void inet_csk_init_xmit_timers(struct sock *sk, void (*retransmit_handler)(struct timer_list *), void (*delack_handler)(struct timer_list *), void (*keepalive_handler)(struct timer_list *)); void inet_csk_clear_xmit_timers(struct sock *sk); void inet_csk_clear_xmit_timers_sync(struct sock *sk); static inline void inet_csk_schedule_ack(struct sock *sk) { inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; } static inline int inet_csk_ack_scheduled(const struct sock *sk) { return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; } static inline void inet_csk_delack_init(struct sock *sk) { memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); } void inet_csk_delete_keepalive_timer(struct sock *sk); void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout); static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) { struct inet_connection_sock *icsk = inet_csk(sk); if (what == ICSK_TIME_RETRANS || what == 
ICSK_TIME_PROBE0) { smp_store_release(&icsk->icsk_pending, 0); #ifdef INET_CSK_CLEAR_TIMERS sk_stop_timer(sk, &icsk->icsk_retransmit_timer); #endif } else if (what == ICSK_TIME_DACK) { smp_store_release(&icsk->icsk_ack.pending, 0); icsk->icsk_ack.retry = 0; #ifdef INET_CSK_CLEAR_TIMERS sk_stop_timer(sk, &icsk->icsk_delack_timer); #endif } else { pr_debug("inet_csk BUG: unknown timer value\n"); } } /* * Reset the retransmission timer */ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, unsigned long when, const unsigned long max_when) { struct inet_connection_sock *icsk = inet_csk(sk); if (when > max_when) { pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, (void *)_THIS_IP_); when = max_when; } if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) { smp_store_release(&icsk->icsk_pending, what); icsk->icsk_timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); } else if (what == ICSK_TIME_DACK) { smp_store_release(&icsk->icsk_ack.pending, icsk->icsk_ack.pending | ICSK_ACK_TIMER); icsk->icsk_ack.timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); } else { pr_debug("inet_csk BUG: unknown timer value\n"); } } static inline unsigned long inet_csk_rto_backoff(const struct inet_connection_sock *icsk, unsigned long max_when) { u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff; return (unsigned long)min_t(u64, when, max_when); } struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg); int inet_csk_get_port(struct sock *sk, unsigned short snum); struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4, const struct request_sock *req); struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, struct sock *newsk, const struct request_sock *req); struct sock *inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, struct sock *child); bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, unsigned long timeout); struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, struct request_sock *req, bool own_req); static inline void inet_csk_reqsk_queue_added(struct sock *sk) { reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); } static inline int inet_csk_reqsk_queue_len(const struct sock *sk) { return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); } static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) { return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog); } bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req); static inline unsigned long reqsk_timeout(struct request_sock *req, unsigned long max_timeout) { u64 timeout = (u64)req->timeout << req->num_timeout; return (unsigned long)min_t(u64, timeout, max_timeout); } static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk) { /* The below has to be done to allow calling inet_csk_destroy_sock */ sock_set_flag(sk, SOCK_DEAD); this_cpu_inc(*sk->sk_prot->orphan_count); } void inet_csk_destroy_sock(struct sock *sk); void inet_csk_prepare_forced_close(struct sock *sk); /* * LISTEN is a special case for poll.. */ static inline __poll_t inet_csk_listen_poll(const struct sock *sk) { return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? 
(EPOLLIN | EPOLLRDNORM) : 0; } int inet_csk_listen_start(struct sock *sk); void inet_csk_listen_stop(struct sock *sk); void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); /* update the fast reuse flag when adding a socket */ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, struct sock *sk); struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); static inline void inet_csk_enter_pingpong_mode(struct sock *sk) { inet_csk(sk)->icsk_ack.pingpong = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh); } static inline void inet_csk_exit_pingpong_mode(struct sock *sk) { inet_csk(sk)->icsk_ack.pingpong = 0; } static inline bool inet_csk_in_pingpong_mode(struct sock *sk) { return inet_csk(sk)->icsk_ack.pingpong >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh); } static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ack.pingpong < U8_MAX) icsk->icsk_ack.pingpong++; } static inline bool inet_csk_has_ulp(const struct sock *sk) { return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops; } static inline void inet_init_csk_locks(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); spin_lock_init(&icsk->icsk_accept_queue.rskq_lock); spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock); } #endif /* _INET_CONNECTION_SOCK_H */
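/*
 * Editor's illustrative sketch, not part of the original header: how a
 * connection-oriented protocol would typically arm the retransmit timer
 * with exponential backoff using the helpers declared above.  TCP_RTO_MAX
 * and the example_* name are assumptions taken from common TCP usage, not
 * from this file.
 */
static inline void example_rearm_retransmit(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* delay = icsk_rto << icsk_backoff, clamped to the given maximum */
	unsigned long delay = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

	/* stores ICSK_TIME_RETRANS in icsk_pending and (re)arms the timer */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX);
}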
// SPDX-License-Identifier: GPL-2.0 #ifndef _LINUX_MM_SLOT_H #define _LINUX_MM_SLOT_H #include <linux/hashtable.h> #include <linux/slab.h> /* * struct mm_slot - hash lookup from mm to mm_slot * @hash: link to the mm_slots hash list * @mm_node: link into the mm_slots list * @mm: the mm that this information is valid for */ struct mm_slot { struct hlist_node hash; struct list_head mm_node; struct mm_struct *mm; }; #define mm_slot_entry(ptr, type, member) \ container_of(ptr, type, member) static inline void *mm_slot_alloc(struct kmem_cache *cache) { if (!cache) /* initialization failed */ return NULL; return kmem_cache_zalloc(cache, GFP_KERNEL); } static inline void mm_slot_free(struct kmem_cache *cache, void *objp) { kmem_cache_free(cache, objp); } #define mm_slot_lookup(_hashtable, _mm) \ ({ \ struct mm_slot *tmp_slot, *mm_slot = NULL; \ \ hash_for_each_possible(_hashtable, tmp_slot, hash, (unsigned long)_mm) \ if (_mm == tmp_slot->mm) { \ mm_slot = tmp_slot; \ break; \ } \ \ mm_slot; \ }) #define mm_slot_insert(_hashtable, _mm, _mm_slot) \ ({ \ _mm_slot->mm = _mm; \ hash_add(_hashtable, &_mm_slot->hash, (unsigned long)_mm); \ }) #endif /* _LINUX_MM_SLOT_H */
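/*
 * Editor's illustrative sketch, not part of the original header: typical
 * use of the mm_slot helpers above by an mm scanner (ksm and khugepaged
 * follow this pattern).  The example_* names and the table size are
 * assumptions for illustration; creating example_slot_cache (e.g. with
 * KMEM_CACHE() at init time) is assumed to happen elsewhere.  Real users
 * embed struct mm_slot in a larger per-mm structure and recover it with
 * mm_slot_entry().
 */
static DEFINE_HASHTABLE(example_slots_hash, 10);
static struct kmem_cache *example_slot_cache;

static int example_track_mm(struct mm_struct *mm)
{
	struct mm_slot *slot;

	/* walks one hash bucket, comparing ->mm pointers */
	slot = mm_slot_lookup(example_slots_hash, mm);
	if (slot)
		return 0;	/* already tracked */

	slot = mm_slot_alloc(example_slot_cache);
	if (!slot)
		return -ENOMEM;

	/* keys the entry by the mm pointer value */
	mm_slot_insert(example_slots_hash, mm, slot);
	return 0;
}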
// SPDX-License-Identifier: GPL-2.0+ // // Empiatech em28x1 audio extension // // Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com> // // Copyright (C) 2007-2016 Mauro Carvalho Chehab // - Port to work with the in-kernel driver // - Cleanups, fixes, alsa-controls, etc. // // This driver is based on my previous au600 usb pstn audio driver // and inherits all the copyrights #include "em28xx.h" #include <linux/kernel.h> #include <linux/usb.h> #include <linux/init.h> #include <linux/sound.h> #include <linux/spinlock.h> #include <linux/soundcard.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/initval.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/ac97_codec.h> #include <media/v4l2-common.h> static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "activates debug info"); #define EM28XX_MAX_AUDIO_BUFS 5 #define EM28XX_MIN_AUDIO_PACKETS 64 #define dprintk(fmt, arg...) do { \ if (debug) \ dev_printk(KERN_DEBUG, &dev->intf->dev, \ "video: %s: " fmt, __func__, ## arg); \ } while (0) static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static int em28xx_deinit_isoc_audio(struct em28xx *dev) { int i; dprintk("Stopping isoc\n"); for (i = 0; i < dev->adev.num_urb; i++) { struct urb *urb = dev->adev.urb[i]; if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); } return 0; } static void em28xx_audio_isocirq(struct urb *urb) { struct em28xx *dev = urb->context; int i; unsigned int oldptr; int period_elapsed = 0; int status; unsigned char *cp; unsigned int stride; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; if (dev->disconnected) { dprintk("device disconnected while streaming.
URB status=%d.\n", urb->status); atomic_set(&dev->adev.stream_started, 0); return; } switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dprintk("urb completion error %d.\n", urb->status); break; } if (atomic_read(&dev->adev.stream_started) == 0) return; if (dev->adev.capture_pcm_substream) { substream = dev->adev.capture_pcm_substream; runtime = substream->runtime; stride = runtime->frame_bits >> 3; for (i = 0; i < urb->number_of_packets; i++) { unsigned long flags; int length = urb->iso_frame_desc[i].actual_length / stride; cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (!length) continue; oldptr = dev->adev.hwptr_done_capture; if (oldptr + length >= runtime->buffer_size) { unsigned int cnt = runtime->buffer_size - oldptr; memcpy(runtime->dma_area + oldptr * stride, cp, cnt * stride); memcpy(runtime->dma_area, cp + cnt * stride, length * stride - cnt * stride); } else { memcpy(runtime->dma_area + oldptr * stride, cp, length * stride); } snd_pcm_stream_lock_irqsave(substream, flags); dev->adev.hwptr_done_capture += length; if (dev->adev.hwptr_done_capture >= runtime->buffer_size) dev->adev.hwptr_done_capture -= runtime->buffer_size; dev->adev.capture_transfer_done += length; if (dev->adev.capture_transfer_done >= runtime->period_size) { dev->adev.capture_transfer_done -= runtime->period_size; period_elapsed = 1; } snd_pcm_stream_unlock_irqrestore(substream, flags); } if (period_elapsed) snd_pcm_period_elapsed(substream); } urb->status = 0; status = usb_submit_urb(urb, GFP_ATOMIC); if (status < 0) dev_err(&dev->intf->dev, "resubmit of audio urb failed (error=%i)\n", status); } static int em28xx_init_audio_isoc(struct em28xx *dev) { int i, err; dprintk("Starting isoc transfers\n"); /* Start streaming */ for (i = 0; i < dev->adev.num_urb; i++) { memset(dev->adev.transfer_buffer[i], 0x80, dev->adev.urb[i]->transfer_buffer_length); err = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC); if (err) { dev_err(&dev->intf->dev, "submit of audio urb failed (error=%i)\n", err); em28xx_deinit_isoc_audio(dev); atomic_set(&dev->adev.stream_started, 0); return err; } } return 0; } static const struct snd_pcm_hardware snd_em28xx_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */ /* * The nominal period is 12288 bytes. Allow 10% of variation around * this value, in order to avoid overruns/underruns due to some clock * drift. * * FIXME: This period assumes 64 packets, and a 48000 PCM rate. * Calculate it dynamically. */ .period_bytes_min = 11059, .period_bytes_max = 13516, .periods_min = 2, .periods_max = 98, /* 12544, */ }; static int snd_em28xx_capture_open(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int nonblock, ret = 0; if (!dev) { pr_err("em28xx-audio: BUG: em28xx can't find device struct.
Can't proceed with open\n"); return -ENODEV; } if (dev->disconnected) return -ENODEV; dprintk("opening device and trying to acquire exclusive lock\n"); nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else { mutex_lock(&dev->lock); } runtime->hw = snd_em28xx_hw_capture; if (dev->adev.users == 0) { if (!dev->alt || dev->is_audio_only) { struct usb_device *udev; udev = interface_to_usbdev(dev->intf); if (dev->is_audio_only) /* audio is on a separate interface */ dev->alt = 1; else /* audio is on the same interface as video */ dev->alt = 7; /* * FIXME: The intention seems to be to select * the alt setting with the largest * wMaxPacketSize for the video endpoint. * At least dev->alt should be used instead, but * we should probably not touch it at all if it * is already >0, because wMaxPacketSize of the * audio endpoints seems to be the same for all. */ dprintk("changing alternate number on interface %d to %d\n", dev->ifnum, dev->alt); usb_set_interface(udev, dev->ifnum, dev->alt); } /* Sets volume, mute, etc */ dev->mute = 0; ret = em28xx_audio_analog_set(dev); if (ret < 0) goto err; } kref_get(&dev->ref); dev->adev.users++; mutex_unlock(&dev->lock); /* Dynamically adjust the period size */ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, dev->adev.period * 95 / 100, dev->adev.period * 105 / 100); dev->adev.capture_pcm_substream = substream; return 0; err: mutex_unlock(&dev->lock); dev_err(&dev->intf->dev, "Error while configuring em28xx mixer\n"); return ret; } static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); dprintk("closing device\n"); dev->mute = 1; mutex_lock(&dev->lock); dev->adev.users--; if (atomic_read(&dev->adev.stream_started) > 0) { atomic_set(&dev->adev.stream_started, 0); schedule_work(&dev->adev.wq_trigger); } em28xx_audio_analog_set(dev); mutex_unlock(&dev->lock); kref_put(&dev->ref, em28xx_free_device); return 0; } static int snd_em28xx_prepare(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); if (dev->disconnected) return -ENODEV; dev->adev.hwptr_done_capture = 0; dev->adev.capture_transfer_done = 0; return 0; } static void audio_trigger(struct work_struct *work) { struct em28xx_audio *adev = container_of(work, struct em28xx_audio, wq_trigger); struct em28xx *dev = container_of(adev, struct em28xx, adev); if (atomic_read(&adev->stream_started)) { dprintk("starting capture"); em28xx_init_audio_isoc(dev); } else { dprintk("stopping capture"); em28xx_deinit_isoc_audio(dev); } } static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct em28xx *dev = snd_pcm_substream_chip(substream); int retval = 0; if (dev->disconnected) return -ENODEV; switch (cmd) { case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_START: atomic_set(&dev->adev.stream_started, 1); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_STOP: atomic_set(&dev->adev.stream_started, 0); break; default: retval = -EINVAL; } schedule_work(&dev->adev.wq_trigger); return retval; } static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream *substream) { unsigned long flags; struct em28xx *dev; snd_pcm_uframes_t hwptr_done; dev = snd_pcm_substream_chip(substream); if (dev->disconnected) return SNDRV_PCM_POS_XRUN; 
spin_lock_irqsave(&dev->adev.slock, flags); hwptr_done = dev->adev.hwptr_done_capture; spin_unlock_irqrestore(&dev->adev.slock, flags); return hwptr_done; } /* * AC97 volume control support */ static int em28xx_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); if (dev->disconnected) return -ENODEV; info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0; info->value.integer.max = 0x1f; return 0; } static int em28xx_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) | (0x1f - (value->value.integer.value[1] & 0x1f)) << 8; int nonblock = 0; int rc; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else { mutex_lock(&dev->lock); } rc = em28xx_read_ac97(dev, kcontrol->private_value); if (rc < 0) goto err; val |= rc & 0x8000; /* Preserve the mute flag */ rc = em28xx_write_ac97(dev, kcontrol->private_value, val); if (rc < 0) goto err; dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); err: mutex_unlock(&dev->lock); return rc; } static int em28xx_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; int nonblock = 0; int val; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else { mutex_lock(&dev->lock); } val = em28xx_read_ac97(dev, kcontrol->private_value); mutex_unlock(&dev->lock); if (val < 0) return val; dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); value->value.integer.value[0] = 0x1f - (val & 0x1f); value->value.integer.value[1] = 0x1f - ((val >> 8) & 0x1f); return 0; } static int em28xx_vol_put_mute(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); u16 val = value->value.integer.value[0]; struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; int nonblock = 0; int rc; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else { mutex_lock(&dev->lock); } rc = em28xx_read_ac97(dev, kcontrol->private_value); if (rc < 0) goto err; if (val) rc &= 0x1f1f; else rc |= 0x8000; rc = em28xx_write_ac97(dev, kcontrol->private_value, rc); if (rc < 0) goto err; dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n", (val & 0x8000) ? 
"muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); err: mutex_unlock(&dev->lock); return rc; } static int em28xx_vol_get_mute(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; int nonblock = 0; int val; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else { mutex_lock(&dev->lock); } val = em28xx_read_ac97(dev, kcontrol->private_value); mutex_unlock(&dev->lock); if (val < 0) return val; if (val & 0x8000) value->value.integer.value[0] = 0; else value->value.integer.value[0] = 1; dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); return 0; } static const DECLARE_TLV_DB_SCALE(em28xx_db_scale, -3450, 150, 0); static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev, char *name, int id) { int err; char ctl_name[44]; struct snd_kcontrol *kctl; struct snd_kcontrol_new tmp; memset(&tmp, 0, sizeof(tmp)); tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER; tmp.private_value = id; tmp.name = ctl_name; /* Add Mute Control */ sprintf(ctl_name, "%s Switch", name); tmp.get = em28xx_vol_get_mute; tmp.put = em28xx_vol_put_mute; tmp.info = snd_ctl_boolean_mono_info; kctl = snd_ctl_new1(&tmp, dev); err = snd_ctl_add(card, kctl); if (err < 0) return err; dprintk("Added control %s for ac97 volume control 0x%04x\n", ctl_name, id); memset(&tmp, 0, sizeof(tmp)); tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER; tmp.private_value = id; tmp.name = ctl_name; /* Add Volume Control */ sprintf(ctl_name, "%s Volume", name); tmp.get = em28xx_vol_get; tmp.put = em28xx_vol_put; tmp.info = em28xx_vol_info; tmp.tlv.p = em28xx_db_scale; kctl = snd_ctl_new1(&tmp, dev); err = snd_ctl_add(card, kctl); if (err < 0) return err; dprintk("Added control %s for ac97 volume control 0x%04x\n", ctl_name, id); return 0; } /* * register/unregister code and data */ static const struct snd_pcm_ops snd_em28xx_pcm_capture = { .open = snd_em28xx_capture_open, .close = snd_em28xx_pcm_close, .prepare = snd_em28xx_prepare, .trigger = snd_em28xx_capture_trigger, .pointer = snd_em28xx_capture_pointer, }; static void em28xx_audio_free_urb(struct em28xx *dev) { struct usb_device *udev = interface_to_usbdev(dev->intf); int i; for (i = 0; i < dev->adev.num_urb; i++) { struct urb *urb = dev->adev.urb[i]; if (!urb) continue; usb_free_coherent(udev, urb->transfer_buffer_length, dev->adev.transfer_buffer[i], urb->transfer_dma); usb_free_urb(urb); } kfree(dev->adev.urb); kfree(dev->adev.transfer_buffer); dev->adev.num_urb = 0; } /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ static int em28xx_audio_ep_packet_size(struct usb_device *udev, struct usb_endpoint_descriptor *e) { int size = le16_to_cpu(e->wMaxPacketSize); if (udev->speed == USB_SPEED_HIGH) return (size & 0x7ff) * (1 + (((size) >> 11) & 0x03)); return size & 0x7ff; } static int em28xx_audio_urb_init(struct em28xx *dev) { struct usb_interface *intf; struct usb_endpoint_descriptor *e, *ep = NULL; struct usb_device *udev = interface_to_usbdev(dev->intf); int i, ep_size, interval, num_urb, npackets; int urb_size, bytes_per_transfer; u8 alt; if (dev->ifnum) alt = 1; else alt = 7; intf = usb_ifnum_to_if(udev, dev->ifnum); if 
(intf->num_altsetting <= alt) { dev_err(&dev->intf->dev, "alt %d doesn't exist on interface %d\n", alt, dev->ifnum); return -ENODEV; } for (i = 0; i < intf->altsetting[alt].desc.bNumEndpoints; i++) { e = &intf->altsetting[alt].endpoint[i].desc; if (!usb_endpoint_dir_in(e)) continue; if (e->bEndpointAddress == EM28XX_EP_AUDIO) { ep = e; break; } } if (!ep) { dev_err(&dev->intf->dev, "Couldn't find an audio endpoint"); return -ENODEV; } ep_size = em28xx_audio_ep_packet_size(udev, ep); interval = 1 << (ep->bInterval - 1); dev_info(&dev->intf->dev, "Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n", EM28XX_EP_AUDIO, usb_speed_string(udev->speed), dev->ifnum, alt, interval, ep_size); /* Calculate the number and size of URBs to better fit the audio samples */ /* * Estimate the number of bytes per DMA transfer. * * This is given by the sample rate (for now, only 48000 Hz) multiplied * by 2 channels and 2 bytes/sample divided by the number of microframe * intervals and by the microframe rate (125 us) */ bytes_per_transfer = DIV_ROUND_UP(48000 * 2 * 2, 125 * interval); /* * Estimate the number of transfer URBs. Don't let it go past the * maximum number of URBs that is known to be supported by the device. */ num_urb = DIV_ROUND_UP(bytes_per_transfer, ep_size); if (num_urb > EM28XX_MAX_AUDIO_BUFS) num_urb = EM28XX_MAX_AUDIO_BUFS; /* * Now that we know the number of bytes per transfer and the number of * URBs, estimate the typical size of an URB, in order to adjust the * minimal number of packets. */ urb_size = bytes_per_transfer / num_urb; /* * Now, calculate the number of audio packets to be filled on each * URB. In order to preserve the old behaviour, use a minimal * threshold for this value. */ npackets = EM28XX_MIN_AUDIO_PACKETS; if (urb_size > ep_size * npackets) npackets = DIV_ROUND_UP(urb_size, ep_size); dev_info(&dev->intf->dev, "Number of URBs: %d, with %d packets and %d size\n", num_urb, npackets, urb_size); /* Estimate the bytes per period */ dev->adev.period = urb_size * npackets; /* Allocate space to store the number of URBs to be used */ dev->adev.transfer_buffer = kcalloc(num_urb, sizeof(*dev->adev.transfer_buffer), GFP_KERNEL); if (!dev->adev.transfer_buffer) return -ENOMEM; dev->adev.urb = kcalloc(num_urb, sizeof(*dev->adev.urb), GFP_KERNEL); if (!dev->adev.urb) { kfree(dev->adev.transfer_buffer); return -ENOMEM; } /* Alloc memory for each URB and for each transfer buffer */ dev->adev.num_urb = num_urb; for (i = 0; i < num_urb; i++) { struct urb *urb; int j, k; void *buf; urb = usb_alloc_urb(npackets, GFP_KERNEL); if (!urb) { em28xx_audio_free_urb(dev); return -ENOMEM; } dev->adev.urb[i] = urb; buf = usb_alloc_coherent(udev, npackets * ep_size, GFP_KERNEL, &urb->transfer_dma); if (!buf) { dev_err(&dev->intf->dev, "usb_alloc_coherent failed!\n"); em28xx_audio_free_urb(dev); return -ENOMEM; } dev->adev.transfer_buffer[i] = buf; urb->dev = udev; urb->context = dev; urb->pipe = usb_rcvisocpipe(udev, EM28XX_EP_AUDIO); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->transfer_buffer = buf; urb->interval = interval; urb->complete = em28xx_audio_isocirq; urb->number_of_packets = npackets; urb->transfer_buffer_length = ep_size * npackets; for (j = k = 0; j < npackets; j++, k += ep_size) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = ep_size; } } return 0; } static int em28xx_audio_init(struct em28xx *dev) { struct em28xx_audio *adev = &dev->adev; struct usb_device *udev = interface_to_usbdev(dev->intf); struct snd_pcm *pcm; struct snd_card
*card; static int devnr; int err; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) { /* * This device does not support the extension (in this case * the device is expecting the snd-usb-audio module or * doesn't have analog audio support at all) */ return 0; } dev_info(&dev->intf->dev, "Binding audio extension\n"); kref_get(&dev->ref); dev_info(&dev->intf->dev, "em28xx-audio.c: Copyright (C) 2006 Markus Rechberger\n"); dev_info(&dev->intf->dev, "em28xx-audio.c: Copyright (C) 2007-2016 Mauro Carvalho Chehab\n"); err = snd_card_new(&dev->intf->dev, index[devnr], "Em28xx Audio", THIS_MODULE, 0, &card); if (err < 0) return err; spin_lock_init(&adev->slock); adev->sndcard = card; adev->udev = udev; err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm); if (err < 0) goto card_free; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture); snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0); pcm->info_flags = 0; pcm->private_data = dev; strscpy(pcm->name, "Empia 28xx Capture", sizeof(pcm->name)); strscpy(card->driver, "Em28xx-Audio", sizeof(card->driver)); strscpy(card->shortname, "Em28xx Audio", sizeof(card->shortname)); strscpy(card->longname, "Empia Em28xx Audio", sizeof(card->longname)); INIT_WORK(&adev->wq_trigger, audio_trigger); if (dev->audio_mode.ac97 != EM28XX_NO_AC97) { em28xx_cvol_new(card, dev, "Video", AC97_VIDEO); em28xx_cvol_new(card, dev, "Line In", AC97_LINE); em28xx_cvol_new(card, dev, "Phone", AC97_PHONE); em28xx_cvol_new(card, dev, "Microphone", AC97_MIC); em28xx_cvol_new(card, dev, "CD", AC97_CD); em28xx_cvol_new(card, dev, "AUX", AC97_AUX); em28xx_cvol_new(card, dev, "PCM", AC97_PCM); em28xx_cvol_new(card, dev, "Master", AC97_MASTER); em28xx_cvol_new(card, dev, "Line", AC97_HEADPHONE); em28xx_cvol_new(card, dev, "Mono", AC97_MASTER_MONO); em28xx_cvol_new(card, dev, "LFE", AC97_CENTER_LFE_MASTER); em28xx_cvol_new(card, dev, "Surround", AC97_SURROUND_MASTER); } err = em28xx_audio_urb_init(dev); if (err) goto card_free; err = snd_card_register(card); if (err < 0) goto urb_free; dev_info(&dev->intf->dev, "Audio extension successfully initialized\n"); return 0; urb_free: em28xx_audio_free_urb(dev); card_free: snd_card_free(card); adev->sndcard = NULL; return err; } static int em28xx_audio_fini(struct em28xx *dev) { if (!dev) return 0; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) { /* * This device does not support the extension (in this case * the device is expecting the snd-usb-audio module or * doesn't have analog audio support at all) */ return 0; } dev_info(&dev->intf->dev, "Closing audio extension\n"); if (dev->adev.sndcard) { snd_card_disconnect(dev->adev.sndcard); flush_work(&dev->adev.wq_trigger); em28xx_audio_free_urb(dev); snd_card_free(dev->adev.sndcard); dev->adev.sndcard = NULL; } kref_put(&dev->ref, em28xx_free_device); return 0; } static int em28xx_audio_suspend(struct em28xx *dev) { if (!dev) return 0; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) return 0; dev_info(&dev->intf->dev, "Suspending audio extension\n"); em28xx_deinit_isoc_audio(dev); atomic_set(&dev->adev.stream_started, 0); return 0; } static int em28xx_audio_resume(struct em28xx *dev) { if (!dev) return 0; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) return 0; dev_info(&dev->intf->dev, "Resuming audio extension\n"); /* Nothing to do other than schedule_work() ?? 
*/ schedule_work(&dev->adev.wq_trigger); return 0; } static struct em28xx_ops audio_ops = { .id = EM28XX_AUDIO, .name = "Em28xx Audio Extension", .init = em28xx_audio_init, .fini = em28xx_audio_fini, .suspend = em28xx_audio_suspend, .resume = em28xx_audio_resume, }; static int __init em28xx_alsa_register(void) { return em28xx_register_extension(&audio_ops); } static void __exit em28xx_alsa_unregister(void) { em28xx_unregister_extension(&audio_ops); } MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_DESCRIPTION(DRIVER_DESC " - audio interface"); MODULE_VERSION(EM28XX_VERSION); module_init(em28xx_alsa_register); module_exit(em28xx_alsa_unregister);
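/*
 * Editor's illustrative sketch, not part of the original driver: the URB
 * sizing arithmetic from em28xx_audio_urb_init() above, worked through
 * with concrete numbers.  The endpoint size of 196 bytes and interval of 1
 * are assumed example values for a high-speed device; only the 48000 Hz
 * S16_LE stereo format supported by the driver is considered.
 */
static int example_urb_sizing(void)
{
	int interval = 1;	/* assumed: polled every 125 us microframe */
	int ep_size = 196;	/* assumed wMaxPacketSize after the hb multiplier */
	int bytes_per_transfer, num_urb, urb_size, npackets;

	/* 48000 Hz * 2 channels * 2 bytes/sample over 125 us slots */
	bytes_per_transfer = DIV_ROUND_UP(48000 * 2 * 2, 125 * interval); /* 1536 */

	num_urb = DIV_ROUND_UP(bytes_per_transfer, ep_size);		/* 8 */
	if (num_urb > EM28XX_MAX_AUDIO_BUFS)
		num_urb = EM28XX_MAX_AUDIO_BUFS;			/* clamped to 5 */

	urb_size = bytes_per_transfer / num_urb;			/* 307 */

	npackets = EM28XX_MIN_AUDIO_PACKETS;				/* 64 */
	if (urb_size > ep_size * npackets)				/* 307 < 12544 */
		npackets = DIV_ROUND_UP(urb_size, ep_size);

	/* dev->adev.period would be set to this estimate */
	return urb_size * npackets;					/* 19648 bytes */
}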
// SPDX-License-Identifier: GPL-2.0-only /* * AppArmor security module * * This file contains AppArmor policy manipulation functions * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. * * AppArmor policy is based around profiles, which contain the rules a * task is confined by. Every task in the system has a profile attached * to it determined either by matching "unconfined" tasks against the * visible set of profiles or by following a profile's attachment rules. * * Each profile exists in a profile namespace which is a container of * visible profiles. Each namespace contains a special "unconfined" profile, * which doesn't enforce any confinement on a task beyond DAC. * * Namespace and profile names can be written together in either * of two syntaxes.
* :namespace:profile - used by kernel interfaces for easy detection * namespace://profile - used by policy * * Profile names cannot start with : or @ or ^ and may not contain \0 * * Reserved profile names * unconfined - special automatically generated unconfined profile * inherit - special name to indicate profile inheritance * null-XXXX-YYYY - special automatically generated learning profiles * * Namespace names may not start with / or @ and may not contain \0 or : * Reserved namespace names * user-XXXX - user defined profiles * * a // in a profile or namespace name indicates a hierarchical name with the * name before the // being the parent and the name after it being the child. * * Profile and namespace hierarchies serve two different but similar purposes. * The namespace contains the set of visible profiles that are considered * for attachment. The hierarchy of namespaces allows for virtualizing * the namespace so that for example a chroot can have its own set of profiles * which may define some local user namespaces. * The profile hierarchy serves two distinct purposes: * - it allows for sub profiles or hats, which allows an application to run * subprograms under their own profile with different restrictions than the * application itself, and not have them use the system profile. * eg. if a mail program starts an editor, the policy might make the * restrictions on the editor tighter than on the mail program, * and definitely different from general editor restrictions * - it allows for a binary hierarchy of profiles, so that execution history * is preserved. This feature isn't exploited by AppArmor reference policy * but is allowed. NOTE: this is currently suboptimal because profile * aliasing is not currently implemented so that a profile for each * level must be defined. * eg. /bin/bash///bin/ls as a name would indicate /bin/ls was started * from /bin/bash * * A profile or namespace name that can contain one or more // separators * is referred to as an hname (hierarchical). * eg. /bin/bash//bin/ls * * An fqname is a name that may contain both namespace and profile hnames. * eg. :ns:/bin/bash//bin/ls * * NOTES: * - locking of profile lists is currently fairly coarse. All profile * lists within a namespace use the namespace lock.
* FIXME: move profile lists to using rcu_lists */ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/cred.h> #include <linux/rculist.h> #include <linux/user_namespace.h> #include "include/apparmor.h" #include "include/capability.h" #include "include/cred.h" #include "include/file.h" #include "include/ipc.h" #include "include/match.h" #include "include/path.h" #include "include/policy.h" #include "include/policy_ns.h" #include "include/policy_unpack.h" #include "include/resource.h" int unprivileged_userns_apparmor_policy = 1; int aa_unprivileged_unconfined_restricted; const char *const aa_profile_mode_names[] = { "enforce", "complain", "kill", "unconfined", "user", }; static void aa_free_pdb(struct aa_policydb *pdb) { if (pdb) { aa_put_dfa(pdb->dfa); kvfree(pdb->perms); aa_free_str_table(&pdb->trans); kfree(pdb); } } /** * aa_pdb_free_kref - free aa_policydb by kref (called by aa_put_pdb) * @kref: kref callback for freeing of a policydb (NOT NULL) */ void aa_pdb_free_kref(struct kref *kref) { struct aa_policydb *pdb = container_of(kref, struct aa_policydb, count); aa_free_pdb(pdb); } struct aa_policydb *aa_alloc_pdb(gfp_t gfp) { struct aa_policydb *pdb = kzalloc(sizeof(struct aa_policydb), gfp); if (!pdb) return NULL; kref_init(&pdb->count); return pdb; } /** * __add_profile - add a profile to the list and label tree * @list: list to add it to (NOT NULL) * @profile: the profile to add (NOT NULL) * * refcount @profile, should be put by __list_remove_profile * * Requires: namespace lock be held, or list not be shared */ static void __add_profile(struct list_head *list, struct aa_profile *profile) { struct aa_label *l; AA_BUG(!list); AA_BUG(!profile); AA_BUG(!profile->ns); AA_BUG(!mutex_is_locked(&profile->ns->lock)); list_add_rcu(&profile->base.list, list); /* get list reference */ aa_get_profile(profile); l = aa_label_insert(&profile->ns->labels, &profile->label); AA_BUG(l != &profile->label); aa_put_label(l); } /** * __list_remove_profile - remove a profile from the list it is on * @profile: the profile to remove (NOT NULL) * * remove a profile from the list, warning: generally removal should * be done with __replace_profile as most profile removals are * replacements to the unconfined profile.
* * put @profile list refcount * * Requires: namespace lock be held, or list not have been live */ static void __list_remove_profile(struct aa_profile *profile) { AA_BUG(!profile); AA_BUG(!profile->ns); AA_BUG(!mutex_is_locked(&profile->ns->lock)); list_del_rcu(&profile->base.list); aa_put_profile(profile); } /** * __remove_profile - remove old profile, and children * @profile: profile to be replaced (NOT NULL) * * Requires: namespace list lock be held, or list not be shared */ static void __remove_profile(struct aa_profile *profile) { AA_BUG(!profile); AA_BUG(!profile->ns); AA_BUG(!mutex_is_locked(&profile->ns->lock)); /* release any children lists first */ __aa_profile_list_release(&profile->base.profiles); /* released by free_profile */ aa_label_remove(&profile->label); __aafs_profile_rmdir(profile); __list_remove_profile(profile); } /** * __aa_profile_list_release - remove all profiles on the list and put refs * @head: list of profiles (NOT NULL) * * Requires: namespace lock be held */ void __aa_profile_list_release(struct list_head *head) { struct aa_profile *profile, *tmp; list_for_each_entry_safe(profile, tmp, head, base.list) __remove_profile(profile); } /** * aa_free_data - free a data blob * @ptr: data to free * @arg: unused */ static void aa_free_data(void *ptr, void *arg) { struct aa_data *data = ptr; kvfree_sensitive(data->data, data->size); kfree_sensitive(data->key); kfree_sensitive(data); } static void free_attachment(struct aa_attachment *attach) { int i; for (i = 0; i < attach->xattr_count; i++) kfree_sensitive(attach->xattrs[i]); kfree_sensitive(attach->xattrs); aa_put_pdb(attach->xmatch); } static void free_ruleset(struct aa_ruleset *rules) { int i; aa_put_pdb(rules->file); aa_put_pdb(rules->policy); aa_free_cap_rules(&rules->caps); aa_free_rlimit_rules(&rules->rlimits); for (i = 0; i < rules->secmark_count; i++) kfree_sensitive(rules->secmark[i].label); kfree_sensitive(rules->secmark); kfree_sensitive(rules); } struct aa_ruleset *aa_alloc_ruleset(gfp_t gfp) { struct aa_ruleset *rules; rules = kzalloc(sizeof(*rules), gfp); if (rules) INIT_LIST_HEAD(&rules->list); return rules; } /** * aa_free_profile - free a profile * @profile: the profile to free (MAYBE NULL) * * Free a profile, its hats and null_profile. All references to the profile, * its hats and null_profile must have been put. * * If the profile was referenced from a task context, free_profile() will * be called from an rcu callback routine, so we must not sleep here. 
*/ void aa_free_profile(struct aa_profile *profile) { struct aa_ruleset *rule, *tmp; struct rhashtable *rht; AA_DEBUG("%s(%p)\n", __func__, profile); if (!profile) return; /* free children profiles */ aa_policy_destroy(&profile->base); aa_put_profile(rcu_access_pointer(profile->parent)); aa_put_ns(profile->ns); kfree_sensitive(profile->rename); kfree_sensitive(profile->disconnected); free_attachment(&profile->attach); /* * at this point there are no tasks that can have a reference * to rules */ list_for_each_entry_safe(rule, tmp, &profile->rules, list) { list_del_init(&rule->list); free_ruleset(rule); } kfree_sensitive(profile->dirname); if (profile->data) { rht = profile->data; profile->data = NULL; rhashtable_free_and_destroy(rht, aa_free_data, NULL); kfree_sensitive(rht); } kfree_sensitive(profile->hash); aa_put_loaddata(profile->rawdata); aa_label_destroy(&profile->label); kfree_sensitive(profile); } /** * aa_alloc_profile - allocate, initialize and return a new profile * @hname: name of the profile (NOT NULL) * @proxy: proxy to use or NULL to allocate a new one * @gfp: allocation type * * Returns: refcounted profile or NULL on failure */ struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy, gfp_t gfp) { struct aa_profile *profile; struct aa_ruleset *rules; /* freed by free_profile - usually through aa_put_profile */ profile = kzalloc(struct_size(profile, label.vec, 2), gfp); if (!profile) return NULL; if (!aa_policy_init(&profile->base, NULL, hname, gfp)) goto fail; if (!aa_label_init(&profile->label, 1, gfp)) goto fail; INIT_LIST_HEAD(&profile->rules); /* allocate the first ruleset, but leave it empty */ rules = aa_alloc_ruleset(gfp); if (!rules) goto fail; list_add(&rules->list, &profile->rules); /* update being set needed by fs interface */ if (!proxy) { proxy = aa_alloc_proxy(&profile->label, gfp); if (!proxy) goto fail; } else aa_get_proxy(proxy); profile->label.proxy = proxy; profile->label.hname = profile->base.hname; profile->label.flags |= FLAG_PROFILE; profile->label.vec[0] = profile; /* refcount released by caller */ return profile; fail: aa_free_profile(profile); return NULL; } /* TODO: profile accounting - setup in remove */ /** * __strn_find_child - find a profile on @head list using substring of @name * @head: list to search (NOT NULL) * @name: name of profile (NOT NULL) * @len: length of @name substring to match * * Requires: rcu_read_lock be held * * Returns: unrefcounted profile ptr, or NULL if not found */ static struct aa_profile *__strn_find_child(struct list_head *head, const char *name, int len) { return (struct aa_profile *)__policy_strn_find(head, name, len); } /** * __find_child - find a profile on @head list with a name matching @name * @head: list to search (NOT NULL) * @name: name of profile (NOT NULL) * * Requires: rcu_read_lock be held * * Returns: unrefcounted profile ptr, or NULL if not found */ static struct aa_profile *__find_child(struct list_head *head, const char *name) { return __strn_find_child(head, name, strlen(name)); } /** * aa_find_child - find a profile by @name in @parent * @parent: profile to search (NOT NULL) * @name: profile name to search for (NOT NULL) * * Returns: a refcounted profile or NULL if not found */ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name) { struct aa_profile *profile; rcu_read_lock(); do { profile = __find_child(&parent->base.profiles, name); } while (profile && !aa_get_profile_not0(profile)); rcu_read_unlock(); /* refcount released by caller */ return profile;
} /** * __lookup_parent - lookup the parent of a profile of name @hname * @ns: namespace to lookup profile in (NOT NULL) * @hname: hierarchical profile name to find parent of (NOT NULL) * * Looks up the parent of a fully qualified profile name; the profile * that matches hname does not need to exist, in general this * is used to load a new profile. * * Requires: rcu_read_lock be held * * Returns: unrefcounted policy or NULL if not found */ static struct aa_policy *__lookup_parent(struct aa_ns *ns, const char *hname) { struct aa_policy *policy; struct aa_profile *profile = NULL; char *split; policy = &ns->base; for (split = strstr(hname, "//"); split;) { profile = __strn_find_child(&policy->profiles, hname, split - hname); if (!profile) return NULL; policy = &profile->base; hname = split + 2; split = strstr(hname, "//"); } if (!profile) return &ns->base; return &profile->base; } /** * __create_missing_ancestors - create placeholders for missing ancestors * @ns: namespace to lookup profile in (NOT NULL) * @hname: hierarchical profile name to find parent of (NOT NULL) * @gfp: type of allocation. * * Requires: ns mutex lock held * * Return: unrefcounted parent policy on success or %NULL if error creating * placeholder profiles. */ static struct aa_policy *__create_missing_ancestors(struct aa_ns *ns, const char *hname, gfp_t gfp) { struct aa_policy *policy; struct aa_profile *parent, *profile = NULL; char *split; AA_BUG(!ns); AA_BUG(!hname); policy = &ns->base; for (split = strstr(hname, "//"); split;) { parent = profile; profile = __strn_find_child(&policy->profiles, hname, split - hname); if (!profile) { const char *name = kstrndup(hname, split - hname, gfp); if (!name) return NULL; profile = aa_alloc_null(parent, name, gfp); kfree(name); if (!profile) return NULL; if (!parent) profile->ns = aa_get_ns(ns); } policy = &profile->base; hname = split + 2; split = strstr(hname, "//"); } if (!profile) return &ns->base; return &profile->base; } /** * __lookupn_profile - lookup the profile matching @hname * @base: base list to start looking up profile name from (NOT NULL) * @hname: hierarchical profile name (NOT NULL) * @n: length of @hname * * Requires: rcu_read_lock be held * * Returns: unrefcounted profile pointer or NULL if not found * * Do a relative name lookup, recursing through profile tree. */ static struct aa_profile *__lookupn_profile(struct aa_policy *base, const char *hname, size_t n) { struct aa_profile *profile = NULL; const char *split; for (split = strnstr(hname, "//", n); split; split = strnstr(hname, "//", n)) { profile = __strn_find_child(&base->profiles, hname, split - hname); if (!profile) return NULL; base = &profile->base; n -= split + 2 - hname; hname = split + 2; } if (n) return __strn_find_child(&base->profiles, hname, n); return NULL; } static struct aa_profile *__lookup_profile(struct aa_policy *base, const char *hname) { return __lookupn_profile(base, hname, strlen(hname)); } /** * aa_lookupn_profile - find a profile by its full or partial name * @ns: the namespace to start from (NOT NULL) * @hname: name to do lookup on.
Does not contain namespace prefix (NOT NULL) * @n: size of @hname * * Returns: refcounted profile or NULL if not found */ struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname, size_t n) { struct aa_profile *profile; rcu_read_lock(); do { profile = __lookupn_profile(&ns->base, hname, n); } while (profile && !aa_get_profile_not0(profile)); rcu_read_unlock(); /* the unconfined profile is not in the regular profile list */ if (!profile && strncmp(hname, "unconfined", n) == 0) profile = aa_get_newest_profile(ns->unconfined); /* refcount released by caller */ return profile; } struct aa_profile *aa_fqlookupn_profile(struct aa_label *base, const char *fqname, size_t n) { struct aa_profile *profile; struct aa_ns *ns; const char *name, *ns_name; size_t ns_len; name = aa_splitn_fqname(fqname, n, &ns_name, &ns_len); if (ns_name) { ns = aa_lookupn_ns(labels_ns(base), ns_name, ns_len); if (!ns) return NULL; } else ns = aa_get_ns(labels_ns(base)); if (name) profile = aa_lookupn_profile(ns, name, n - (name - fqname)); else if (ns) /* default profile for ns, currently unconfined */ profile = aa_get_newest_profile(ns->unconfined); else profile = NULL; aa_put_ns(ns); return profile; } struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name, gfp_t gfp) { struct aa_profile *profile; struct aa_ruleset *rules; profile = aa_alloc_profile(name, NULL, gfp); if (!profile) return NULL; /* TODO: ideally we should inherit abi from parent */ profile->label.flags |= FLAG_NULL; profile->attach.xmatch = aa_get_pdb(nullpdb); rules = list_first_entry(&profile->rules, typeof(*rules), list); rules->file = aa_get_pdb(nullpdb); rules->policy = aa_get_pdb(nullpdb); if (parent) { profile->path_flags = parent->path_flags; /* released on free_profile */ rcu_assign_pointer(profile->parent, aa_get_profile(parent)); profile->ns = aa_get_ns(parent->ns); } return profile; } /** * aa_new_learning_profile - create or find a null-X learning profile * @parent: profile that caused this profile to be created (NOT NULL) * @hat: true if the null- learning profile is a hat * @base: name to base the null profile off of * @gfp: type of allocation * * Find/Create a null- complain mode profile used in learning mode. The * name of the profile is unique and follows the format of parent//null-XXX, * where XXX is based on @base or, if that fails or @base is not supplied, * a unique number * * null profiles are added to the profile list but the list does not * hold a count on them so that they are automatically released when * not in use.
* * Returns: new refcounted profile else NULL on failure */ struct aa_profile *aa_new_learning_profile(struct aa_profile *parent, bool hat, const char *base, gfp_t gfp) { struct aa_profile *p, *profile; const char *bname; char *name = NULL; AA_BUG(!parent); if (base) { name = kmalloc(strlen(parent->base.hname) + 8 + strlen(base), gfp); if (name) { sprintf(name, "%s//null-%s", parent->base.hname, base); goto name; } /* fall through to try shorter uniq */ } name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, gfp); if (!name) return NULL; sprintf(name, "%s//null-%x", parent->base.hname, atomic_inc_return(&parent->ns->uniq_null)); name: /* lookup to see if this is a dup creation */ bname = basename(name); profile = aa_find_child(parent, bname); if (profile) goto out; profile = aa_alloc_null(parent, name, gfp); if (!profile) goto fail; profile->mode = APPARMOR_COMPLAIN; if (hat) profile->label.flags |= FLAG_HAT; mutex_lock_nested(&profile->ns->lock, profile->ns->level); p = __find_child(&parent->base.profiles, bname); if (p) { aa_free_profile(profile); profile = aa_get_profile(p); } else { __add_profile(&parent->base.profiles, profile); } mutex_unlock(&profile->ns->lock); /* refcount released by caller */ out: kfree(name); return profile; fail: kfree(name); aa_free_profile(profile); return NULL; } /** * replacement_allowed - test to see if replacement is allowed * @profile: profile to test if it can be replaced (MAYBE NULL) * @noreplace: true if replacement shouldn't be allowed but addition is okay * @info: Returns - info about why replacement failed (NOT NULL) * * Returns: %0 if replacement allowed else error code */ static int replacement_allowed(struct aa_profile *profile, int noreplace, const char **info) { if (profile) { if (profile->label.flags & FLAG_IMMUTIBLE) { *info = "cannot replace immutable profile"; return -EPERM; } else if (noreplace) { *info = "profile already exists"; return -EEXIST; } } return 0; } /* audit callback for policy specific fields */ static void audit_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; struct apparmor_audit_data *ad = aad(sa); if (ad->iface.ns) { audit_log_format(ab, " ns="); audit_log_untrustedstring(ab, ad->iface.ns); } } /** * audit_policy - Do auditing of policy changes * @subj_label: label to check if it can manage policy * @op: policy operation being performed * @ns_name: name of namespace being manipulated * @name: name of profile being manipulated (NOT NULL) * @info: any extra information to be audited (MAYBE NULL) * @error: error code * * Returns: the error to be returned after audit is done */ static int audit_policy(struct aa_label *subj_label, const char *op, const char *ns_name, const char *name, const char *info, int error) { DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op); ad.iface.ns = ns_name; ad.name = name; ad.info = info; ad.error = error; ad.subj_label = subj_label; aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, audit_cb); return error; } /* don't call out to other LSMs in the stack for apparmor policy admin * permissions */ static int policy_ns_capable(const struct cred *subj_cred, struct aa_label *label, struct user_namespace *userns, int cap) { int err; /* check for MAC_ADMIN cap in cred */ err = cap_capable(subj_cred, userns, cap, CAP_OPT_NONE); if (!err) err = aa_capable(subj_cred, label, cap, CAP_OPT_NONE); return err; } /** * aa_policy_view_capable - check if viewing policy at @ns is allowed * @subj_cred: cred of subject * @label: label that is trying to view policy in ns * @ns:
namespace being viewed by @label (may be NULL if @label's ns) * * Returns: true if viewing policy is allowed * * If @ns is NULL then the namespace being viewed is assumed to be the * task's current namespace. */ bool aa_policy_view_capable(const struct cred *subj_cred, struct aa_label *label, struct aa_ns *ns) { struct user_namespace *user_ns = subj_cred->user_ns; struct aa_ns *view_ns = labels_view(label); bool root_in_user_ns = uid_eq(current_euid(), make_kuid(user_ns, 0)) || in_egroup_p(make_kgid(user_ns, 0)); bool response = false; if (!ns) ns = view_ns; if (root_in_user_ns && aa_ns_visible(view_ns, ns, true) && (user_ns == &init_user_ns || (unprivileged_userns_apparmor_policy != 0 && user_ns->level == view_ns->level))) response = true; return response; } bool aa_policy_admin_capable(const struct cred *subj_cred, struct aa_label *label, struct aa_ns *ns) { struct user_namespace *user_ns = subj_cred->user_ns; bool capable = policy_ns_capable(subj_cred, label, user_ns, CAP_MAC_ADMIN) == 0; AA_DEBUG("cap_mac_admin? %d\n", capable); AA_DEBUG("policy locked? %d\n", aa_g_lock_policy); return aa_policy_view_capable(subj_cred, label, ns) && capable && !aa_g_lock_policy; } bool aa_current_policy_view_capable(struct aa_ns *ns) { struct aa_label *label; bool res; label = __begin_current_label_crit_section(); res = aa_policy_view_capable(current_cred(), label, ns); __end_current_label_crit_section(label); return res; } bool aa_current_policy_admin_capable(struct aa_ns *ns) { struct aa_label *label; bool res; label = __begin_current_label_crit_section(); res = aa_policy_admin_capable(current_cred(), label, ns); __end_current_label_crit_section(label); return res; } /** * aa_may_manage_policy - can the current task manage policy * @subj_cred: subject's cred * @label: label to check if it can manage policy * @ns: namespace being managed by @label (may be NULL if @label's ns) * @mask: contains the policy manipulation operation being done * * Returns: 0 if the task is allowed to manipulate policy else error */ int aa_may_manage_policy(const struct cred *subj_cred, struct aa_label *label, struct aa_ns *ns, u32 mask) { const char *op; if (mask & AA_MAY_REMOVE_POLICY) op = OP_PROF_RM; else if (mask & AA_MAY_REPLACE_POLICY) op = OP_PROF_REPL; else op = OP_PROF_LOAD; /* check if loading policy is locked out */ if (aa_g_lock_policy) return audit_policy(label, op, NULL, NULL, "policy_locked", -EACCES); if (!aa_policy_admin_capable(subj_cred, label, ns)) return audit_policy(label, op, NULL, NULL, "not policy admin", -EACCES); /* TODO: add fine grained mediation of policy loads */ return 0; } static struct aa_profile *__list_lookup_parent(struct list_head *lh, struct aa_profile *profile) { const char *base = basename(profile->base.hname); long len = base - profile->base.hname; struct aa_load_ent *ent; /* parent won't have trailing // so remove from len */ if (len <= 2) return NULL; len -= 2; list_for_each_entry(ent, lh, list) { if (ent->new == profile) continue; if (strncmp(ent->new->base.hname, profile->base.hname, len) == 0 && ent->new->base.hname[len] == 0) return ent->new; } return NULL; } /** * __replace_profile - replace @old with @new on a list * @old: profile to be replaced (NOT NULL) * @new: profile to replace @old with (NOT NULL) * * Will duplicate and refcount elements that @new inherits from @old * and will inherit @old children.
* * refcount @new for list, put @old list refcount * * Requires: namespace list lock be held, or list not be shared */ static void __replace_profile(struct aa_profile *old, struct aa_profile *new) { struct aa_profile *child, *tmp; if (!list_empty(&old->base.profiles)) { LIST_HEAD(lh); list_splice_init_rcu(&old->base.profiles, &lh, synchronize_rcu); list_for_each_entry_safe(child, tmp, &lh, base.list) { struct aa_profile *p; list_del_init(&child->base.list); p = __find_child(&new->base.profiles, child->base.name); if (p) { /* @p replaces @child */ __replace_profile(child, p); continue; } /* inherit @child and its children */ /* TODO: update hname of inherited children */ /* list refcount transferred to @new */ p = aa_deref_parent(child); rcu_assign_pointer(child->parent, aa_get_profile(new)); list_add_rcu(&child->base.list, &new->base.profiles); aa_put_profile(p); } } if (!rcu_access_pointer(new->parent)) { struct aa_profile *parent = aa_deref_parent(old); rcu_assign_pointer(new->parent, aa_get_profile(parent)); } aa_label_replace(&old->label, &new->label); /* migrate dents must come after label replacement b/c update */ __aafs_profile_migrate_dents(old, new); if (list_empty(&new->base.list)) { /* new is not on a list already */ list_replace_rcu(&old->base.list, &new->base.list); aa_get_profile(new); aa_put_profile(old); } else __list_remove_profile(old); } /** * __lookup_replace - lookup replacement information for a profile * @ns: namespace the lookup occurs in * @hname: name of profile to lookup * @noreplace: true if not replacing an existing profile * @p: Returns - profile to be replaced * @info: Returns - info string on why lookup failed * * Returns: %0 on success, with *@p set to the profile to be replaced * (refcounted) or NULL; else error code */ static int __lookup_replace(struct aa_ns *ns, const char *hname, bool noreplace, struct aa_profile **p, const char **info) { *p = aa_get_profile(__lookup_profile(&ns->base, hname)); if (*p) { int error = replacement_allowed(*p, noreplace, info); if (error) { *info = "profile can not be replaced"; return error; } } return 0; } static void share_name(struct aa_profile *old, struct aa_profile *new) { aa_put_str(new->base.hname); aa_get_str(old->base.hname); new->base.hname = old->base.hname; new->base.name = old->base.name; new->label.hname = old->label.hname; } /* Update to newest version of parent after previous replacements * Returns: unrefcounted newest version of parent */ static struct aa_profile *update_to_newest_parent(struct aa_profile *new) { struct aa_profile *parent, *newest; parent = rcu_dereference_protected(new->parent, mutex_is_locked(&new->ns->lock)); newest = aa_get_newest_profile(parent); /* parent replaced in this atomic set? */ if (newest != parent) { aa_put_profile(parent); rcu_assign_pointer(new->parent, newest); } else aa_put_profile(newest); return newest; } /** * aa_replace_profiles - replace profile(s) on the profile list * @policy_ns: namespace load is occurring on * @label: label that is attempting to load/replace policy * @mask: permission mask * @udata: serialized data stream (NOT NULL) * * Unpack and replace a profile on the profile list, invalidating the old * version of the profile; task creds using the old version will notice and * update to the new one. If the profile does not exist on the profile list * it is added. * * Returns: size of data consumed else error code on failure.
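 *
 * The load is handled as one atomic set (see the fail path below): if any
 * entry fails lookup, verification or dir creation, none of the profiles
 * carried in @udata are installed. (An illustrative userspace sketch of
 * driving this interface appears after aa_remove_profiles() below.)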
*/ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label, u32 mask, struct aa_loaddata *udata) { const char *ns_name = NULL, *info = NULL; struct aa_ns *ns = NULL; struct aa_load_ent *ent, *tmp; struct aa_loaddata *rawdata_ent; const char *op; ssize_t count, error; LIST_HEAD(lh); op = mask & AA_MAY_REPLACE_POLICY ? OP_PROF_REPL : OP_PROF_LOAD; aa_get_loaddata(udata); /* released below */ error = aa_unpack(udata, &lh, &ns_name); if (error) goto out; /* ensure that profiles are all for the same ns * TODO: update locking to remove this constraint. All profiles in * the load set must succeed as a set or the load will * fail. Sort ent list and take ns locks in hierarchy order */ count = 0; list_for_each_entry(ent, &lh, list) { if (ns_name) { if (ent->ns_name && strcmp(ent->ns_name, ns_name) != 0) { info = "policy load has mixed namespaces"; error = -EACCES; goto fail; } } else if (ent->ns_name) { if (count) { info = "policy load has mixed namespaces"; error = -EACCES; goto fail; } ns_name = ent->ns_name; } else count++; } if (ns_name) { ns = aa_prepare_ns(policy_ns ? policy_ns : labels_ns(label), ns_name); if (IS_ERR(ns)) { op = OP_PROF_LOAD; info = "failed to prepare namespace"; error = PTR_ERR(ns); ns = NULL; ent = NULL; goto fail; } } else ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(label)); mutex_lock_nested(&ns->lock, ns->level); /* check for duplicate rawdata blobs: space and file dedup */ if (!list_empty(&ns->rawdata_list)) { list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) { if (aa_rawdata_eq(rawdata_ent, udata)) { struct aa_loaddata *tmp; tmp = __aa_get_loaddata(rawdata_ent); /* check we didn't fail the race */ if (tmp) { aa_put_loaddata(udata); udata = tmp; break; } } } } /* setup parent and ns info */ list_for_each_entry(ent, &lh, list) { struct aa_policy *policy; struct aa_profile *p; if (aa_g_export_binary) ent->new->rawdata = aa_get_loaddata(udata); error = __lookup_replace(ns, ent->new->base.hname, !(mask & AA_MAY_REPLACE_POLICY), &ent->old, &info); if (error) goto fail_lock; if (ent->new->rename) { error = __lookup_replace(ns, ent->new->rename, !(mask & AA_MAY_REPLACE_POLICY), &ent->rename, &info); if (error) goto fail_lock; } /* released when @new is freed */ ent->new->ns = aa_get_ns(ns); if (ent->old || ent->rename) continue; /* no ref on policy; only use inside lock */ p = NULL; policy = __lookup_parent(ns, ent->new->base.hname); if (!policy) { /* first check for parent in the load set */ p = __list_lookup_parent(&lh, ent->new); if (!p) { /* * fill in missing parent with null * profile that doesn't have * permissions. This allows for * individual profile loading where * the child is loaded before the * parent, and outside of the current * atomic set. This unfortunately can * happen with some userspaces. The * null profile will be replaced once * the parent is loaded.
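 * For example (hypothetical names): loading the child
 * "bin.foo//helper" before its parent "bin.foo" creates a
 * permissionless null "bin.foo" placeholder so the child can
 * attach; the placeholder is then replaced when the real
 * parent profile arrives.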
*/ policy = __create_missing_ancestors(ns, ent->new->base.hname, GFP_KERNEL); if (!policy) { error = -ENOENT; info = "parent does not exist"; goto fail_lock; } } } if (!p && policy != &ns->base) /* released on profile replacement or free_profile */ p = (struct aa_profile *) policy; rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); } /* create new fs entries for introspection if needed */ if (!udata->dents[AAFS_LOADDATA_DIR] && aa_g_export_binary) { error = __aa_fs_create_rawdata(ns, udata); if (error) { info = "failed to create raw_data dir and files"; ent = NULL; goto fail_lock; } } list_for_each_entry(ent, &lh, list) { if (!ent->old) { struct dentry *parent; if (rcu_access_pointer(ent->new->parent)) { struct aa_profile *p; p = aa_deref_parent(ent->new); parent = prof_child_dir(p); } else parent = ns_subprofs_dir(ent->new->ns); error = __aafs_profile_mkdir(ent->new, parent); } if (error) { info = "failed to create"; goto fail_lock; } } /* Done with checks that may fail - do actual replacement */ __aa_bump_ns_revision(ns); if (aa_g_export_binary) __aa_loaddata_update(udata, ns->revision); list_for_each_entry_safe(ent, tmp, &lh, list) { list_del_init(&ent->list); op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL; if (ent->old && ent->old->rawdata == ent->new->rawdata && ent->new->rawdata) { /* dedup actual profile replacement */ audit_policy(label, op, ns_name, ent->new->base.hname, "same as current profile, skipping", error); /* break refcount cycle with proxy. */ aa_put_proxy(ent->new->label.proxy); ent->new->label.proxy = NULL; goto skip; } /* * TODO: finer dedup based on profile range in data. Load set * can differ but profile may remain unchanged */ audit_policy(label, op, ns_name, ent->new->base.hname, NULL, error); if (ent->old) { share_name(ent->old, ent->new); __replace_profile(ent->old, ent->new); } else { struct list_head *lh; if (rcu_access_pointer(ent->new->parent)) { struct aa_profile *parent; parent = update_to_newest_parent(ent->new); lh = &parent->base.profiles; } else lh = &ns->base.profiles; __add_profile(lh, ent->new); } skip: aa_load_ent_free(ent); } __aa_labelset_update_subtree(ns); mutex_unlock(&ns->lock); out: aa_put_ns(ns); aa_put_loaddata(udata); kfree(ns_name); if (error) return error; return udata->size; fail_lock: mutex_unlock(&ns->lock); /* audit cause of failure */ op = (ent && !ent->old) ? OP_PROF_LOAD : OP_PROF_REPL; fail: audit_policy(label, op, ns_name, ent ? ent->new->base.hname : NULL, info, error); /* audit status that rest of profiles in the atomic set failed too */ info = "valid profile in failed atomic policy load"; list_for_each_entry(tmp, &lh, list) { if (tmp == ent) { info = "unchecked profile in failed atomic policy load"; /* skip entry that caused failure */ continue; } op = (!tmp->old) ? 
OP_PROF_LOAD : OP_PROF_REPL; audit_policy(label, op, ns_name, tmp->new->base.hname, info, error); } list_for_each_entry_safe(ent, tmp, &lh, list) { list_del_init(&ent->list); aa_load_ent_free(ent); } goto out; } /** * aa_remove_profiles - remove profile(s) from the system * @policy_ns: namespace the remove is being done from * @subj: label attempting to remove policy * @fqname: name of the profile or namespace to remove (NOT NULL) * @size: size of the name * * Remove a profile or sub namespace from the current namespace, so that * they can no longer be found, and mark them as replaced by unconfined * * NOTE: removing confinement does not restore rlimits to preconfinement values * * Returns: size of data consumed else error code on failure */ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, char *fqname, size_t size) { struct aa_ns *ns = NULL; struct aa_profile *profile = NULL; const char *name = fqname, *info = NULL; const char *ns_name = NULL; ssize_t error = 0; if (*fqname == 0) { info = "no profile specified"; error = -ENOENT; goto fail; } if (fqname[0] == ':') { size_t ns_len; name = aa_splitn_fqname(fqname, size, &ns_name, &ns_len); /* released below */ ns = aa_lookupn_ns(policy_ns ? policy_ns : labels_ns(subj), ns_name, ns_len); if (!ns) { info = "namespace does not exist"; error = -ENOENT; goto fail; } } else /* released below */ ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(subj)); if (!name) { /* remove namespace - can only happen if fqname[0] == ':' */ mutex_lock_nested(&ns->parent->lock, ns->parent->level); __aa_bump_ns_revision(ns); __aa_remove_ns(ns); mutex_unlock(&ns->parent->lock); } else { /* remove profile */ mutex_lock_nested(&ns->lock, ns->level); profile = aa_get_profile(__lookup_profile(&ns->base, name)); if (!profile) { error = -ENOENT; info = "profile does not exist"; goto fail_ns_lock; } name = profile->base.hname; __aa_bump_ns_revision(ns); __remove_profile(profile); __aa_labelset_update_subtree(ns); mutex_unlock(&ns->lock); } /* don't fail removal if audit fails */ (void) audit_policy(subj, OP_PROF_RM, ns_name, name, info, error); aa_put_ns(ns); aa_put_profile(profile); return size; fail_ns_lock: mutex_unlock(&ns->lock); aa_put_ns(ns); fail: (void) audit_policy(subj, OP_PROF_RM, ns_name, name, info, error); return error; }
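/*
 * Illustrative userspace sketch (an assumption-laden example, not part of
 * this file and not the authoritative apparmor userspace): the
 * aa_replace_profiles() path above is normally driven by writing a
 * serialized profile blob to apparmorfs.
 */
#if 0	/* example only, never compiled */
#include <fcntl.h>
#include <unistd.h>

static int load_profile_blob(const char *blob, size_t size)
{
	/* .replace permits both adding new and replacing existing profiles */
	int fd = open("/sys/kernel/security/apparmor/.replace", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* on success the kernel reports the size of data consumed */
	n = write(fd, blob, size);
	close(fd);
	return n == (ssize_t)size ? 0 : -1;
}
#endif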
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> * Patrick Schaaf <bof@bof.de> * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ /* Kernel module for IP set management */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/rculist.h> #include <net/netlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/netfilter.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/ipset/ip_set.h> static LIST_HEAD(ip_set_type_list); /* all registered set types */ static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */ static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */ struct ip_set_net { struct ip_set * __rcu *ip_set_list; /* all individual sets */ ip_set_id_t ip_set_max; /* max number of sets */ bool is_deleted; /* deleted by ip_set_net_exit */ bool is_destroyed; /* all sets are destroyed */ }; static unsigned int ip_set_net_id __read_mostly; static struct ip_set_net *ip_set_pernet(struct net *net) { return net_generic(net, ip_set_net_id); } #define IP_SET_INC 64 #define STRNCMP(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0) static unsigned int max_sets; module_param(max_sets, int, 0600); MODULE_PARM_DESC(max_sets, "maximal number of sets"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>"); MODULE_DESCRIPTION("core IP set support"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET); /* When the nfnl mutex or ip_set_ref_lock is held: */ #define ip_set_dereference(inst) \ rcu_dereference_protected((inst)->ip_set_list, \ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \ lockdep_is_held(&ip_set_ref_lock) || \ (inst)->is_deleted) #define ip_set(inst, id) \ ip_set_dereference(inst)[id] #define ip_set_ref_netlink(inst,id) \
rcu_dereference_raw((inst)->ip_set_list)[id] #define ip_set_dereference_nfnl(p) \ rcu_dereference_check(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET)) /* The set types are implemented in modules and registered set types * can be found in ip_set_type_list. Adding/deleting types is * serialized by ip_set_type_mutex. */ static void ip_set_type_lock(void) { mutex_lock(&ip_set_type_mutex); } static void ip_set_type_unlock(void) { mutex_unlock(&ip_set_type_mutex); } /* Register and deregister set types */ static struct ip_set_type * find_set_type(const char *name, u8 family, u8 revision) { struct ip_set_type *type; list_for_each_entry_rcu(type, &ip_set_type_list, list, lockdep_is_held(&ip_set_type_mutex)) if (STRNCMP(type->name, name) && (type->family == family || type->family == NFPROTO_UNSPEC) && revision >= type->revision_min && revision <= type->revision_max) return type; return NULL; } /* Unlock, try to load a set type module and lock again */ static bool load_settype(const char *name) { if (!try_module_get(THIS_MODULE)) return false; nfnl_unlock(NFNL_SUBSYS_IPSET); pr_debug("try to load ip_set_%s\n", name); if (request_module("ip_set_%s", name) < 0) { pr_warn("Can't find ip_set type %s\n", name); nfnl_lock(NFNL_SUBSYS_IPSET); module_put(THIS_MODULE); return false; } nfnl_lock(NFNL_SUBSYS_IPSET); module_put(THIS_MODULE); return true; } /* Find a set type and reference it */ #define find_set_type_get(name, family, revision, found) \ __find_set_type_get(name, family, revision, found, false) static int __find_set_type_get(const char *name, u8 family, u8 revision, struct ip_set_type **found, bool retry) { struct ip_set_type *type; int err; if (retry && !load_settype(name)) return -IPSET_ERR_FIND_TYPE; rcu_read_lock(); *found = find_set_type(name, family, revision); if (*found) { err = !try_module_get((*found)->me) ? -EFAULT : 0; goto unlock; } /* Make sure the type is already loaded * but we don't support the revision */ list_for_each_entry_rcu(type, &ip_set_type_list, list) if (STRNCMP(type->name, name)) { err = -IPSET_ERR_FIND_TYPE; goto unlock; } rcu_read_unlock(); return retry ? -IPSET_ERR_FIND_TYPE : __find_set_type_get(name, family, revision, found, true); unlock: rcu_read_unlock(); return err; } /* Find a given set type by name and family. * On success, the supported minimum and maximum revisions are * filled out. */ #define find_set_type_minmax(name, family, min, max) \ __find_set_type_minmax(name, family, min, max, false) static int __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max, bool retry) { struct ip_set_type *type; bool found = false; if (retry && !load_settype(name)) return -IPSET_ERR_FIND_TYPE; *min = 255; *max = 0; rcu_read_lock(); list_for_each_entry_rcu(type, &ip_set_type_list, list) if (STRNCMP(type->name, name) && (type->family == family || type->family == NFPROTO_UNSPEC)) { found = true; if (type->revision_min < *min) *min = type->revision_min; if (type->revision_max > *max) *max = type->revision_max; } rcu_read_unlock(); if (found) return 0; return retry ? -IPSET_ERR_FIND_TYPE : __find_set_type_minmax(name, family, min, max, true); } #define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \ (f) == NFPROTO_IPV6 ? "inet6" : "any") /* Register a set type structure. The type is identified by * the unique triple of name, family and revision.
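 *
 * A minimal registration from a type module might look like the sketch
 * below (field values are illustrative, not the authoritative hash:ip
 * definition):
 *
 *	static struct ip_set_type hash_ip_type __read_mostly = {
 *		.name		= "hash:ip",
 *		.protocol	= IPSET_PROTOCOL,
 *		.family		= NFPROTO_UNSPEC,
 *		.revision_min	= 0,
 *		.revision_max	= 5,
 *		.me		= THIS_MODULE,
 *	};
 *
 *	return ip_set_type_register(&hash_ip_type);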
*/ int ip_set_type_register(struct ip_set_type *type) { int ret = 0; if (type->protocol != IPSET_PROTOCOL) { pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n", type->name, family_name(type->family), type->revision_min, type->revision_max, type->protocol, IPSET_PROTOCOL); return -EINVAL; } ip_set_type_lock(); if (find_set_type(type->name, type->family, type->revision_min)) { /* Duplicate! */ pr_warn("ip_set type %s, family %s with revision min %u already registered!\n", type->name, family_name(type->family), type->revision_min); ip_set_type_unlock(); return -EINVAL; } list_add_rcu(&type->list, &ip_set_type_list); pr_debug("type %s, family %s, revision %u:%u registered.\n", type->name, family_name(type->family), type->revision_min, type->revision_max); ip_set_type_unlock(); return ret; } EXPORT_SYMBOL_GPL(ip_set_type_register); /* Unregister a set type. There's a small race with ip_set_create */ void ip_set_type_unregister(struct ip_set_type *type) { ip_set_type_lock(); if (!find_set_type(type->name, type->family, type->revision_min)) { pr_warn("ip_set type %s, family %s with revision min %u not registered\n", type->name, family_name(type->family), type->revision_min); ip_set_type_unlock(); return; } list_del_rcu(&type->list); pr_debug("type %s, family %s with revision min %u unregistered.\n", type->name, family_name(type->family), type->revision_min); ip_set_type_unlock(); synchronize_rcu(); } EXPORT_SYMBOL_GPL(ip_set_type_unregister); /* Utility functions */ void * ip_set_alloc(size_t size) { return kvzalloc(size, GFP_KERNEL_ACCOUNT); } EXPORT_SYMBOL_GPL(ip_set_alloc); void ip_set_free(void *members) { pr_debug("%p: free with %s\n", members, is_vmalloc_addr(members) ? "vfree" : "kfree"); kvfree(members); } EXPORT_SYMBOL_GPL(ip_set_free); static bool flag_nested(const struct nlattr *nla) { return nla->nla_type & NLA_F_NESTED; } static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = { [IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 }, [IPSET_ATTR_IPADDR_IPV6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), }; int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr) { struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1]; if (unlikely(!flag_nested(nla))) return -IPSET_ERR_PROTOCOL; if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL)) return -IPSET_ERR_PROTOCOL; if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4))) return -IPSET_ERR_PROTOCOL; *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]); return 0; } EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4); int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr) { struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1]; if (unlikely(!flag_nested(nla))) return -IPSET_ERR_PROTOCOL; if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL)) return -IPSET_ERR_PROTOCOL; if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6))) return -IPSET_ERR_PROTOCOL; memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]), sizeof(struct in6_addr)); return 0; } EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6); static u32 ip_set_timeout_get(const unsigned long *timeout) { u32 t; if (*timeout == IPSET_ELEM_PERMANENT) return 0; t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC; /* Zero value in userspace means no timeout */ return t == 0 ? 1 : t; } static char * ip_set_comment_uget(struct nlattr *tb) { return nla_data(tb); } /* Called from uadd only, protected by the set spinlock. * The kadt functions don't use the comment extensions in any way. 
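 *
 * A sketch of the resulting lifetime: the old comment string (if any) is
 * freed via kfree_rcu() and the new one is published with
 * rcu_assign_pointer(), so concurrent dumpers under rcu_read_lock()
 * observe either the old or the new string, never a torn update.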
*/ void ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, const struct ip_set_ext *ext) { struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); size_t len = ext->comment ? strlen(ext->comment) : 0; if (unlikely(c)) { set->ext_size -= sizeof(*c) + strlen(c->str) + 1; kfree_rcu(c, rcu); rcu_assign_pointer(comment->c, NULL); } if (!len) return; if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) len = IPSET_MAX_COMMENT_SIZE; c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); if (unlikely(!c)) return; strscpy(c->str, ext->comment, len + 1); set->ext_size += sizeof(*c) + strlen(c->str) + 1; rcu_assign_pointer(comment->c, c); } EXPORT_SYMBOL_GPL(ip_set_init_comment); /* Used only when dumping a set, protected by rcu_read_lock() */ static int ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) { struct ip_set_comment_rcu *c = rcu_dereference(comment->c); if (!c) return 0; return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); } /* Called from uadd/udel, flush or the garbage collectors protected * by the set spinlock. * Called when the set is destroyed and when there can't be any user * of the set data anymore. */ static void ip_set_comment_free(struct ip_set *set, void *ptr) { struct ip_set_comment *comment = ptr; struct ip_set_comment_rcu *c; c = rcu_dereference_protected(comment->c, 1); if (unlikely(!c)) return; set->ext_size -= sizeof(*c) + strlen(c->str) + 1; kfree_rcu(c, rcu); rcu_assign_pointer(comment->c, NULL); } typedef void (*destroyer)(struct ip_set *, void *); /* ipset data extension types, in size order */ const struct ip_set_ext_type ip_set_extensions[] = { [IPSET_EXT_ID_COUNTER] = { .type = IPSET_EXT_COUNTER, .flag = IPSET_FLAG_WITH_COUNTERS, .len = sizeof(struct ip_set_counter), .align = __alignof__(struct ip_set_counter), }, [IPSET_EXT_ID_TIMEOUT] = { .type = IPSET_EXT_TIMEOUT, .len = sizeof(unsigned long), .align = __alignof__(unsigned long), }, [IPSET_EXT_ID_SKBINFO] = { .type = IPSET_EXT_SKBINFO, .flag = IPSET_FLAG_WITH_SKBINFO, .len = sizeof(struct ip_set_skbinfo), .align = __alignof__(struct ip_set_skbinfo), }, [IPSET_EXT_ID_COMMENT] = { .type = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY, .flag = IPSET_FLAG_WITH_COMMENT, .len = sizeof(struct ip_set_comment), .align = __alignof__(struct ip_set_comment), .destroy = ip_set_comment_free, }, }; EXPORT_SYMBOL_GPL(ip_set_extensions); static bool add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[]) { return ip_set_extensions[id].flag ? 
(flags & ip_set_extensions[id].flag) : !!tb[IPSET_ATTR_TIMEOUT]; } size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, size_t align) { enum ip_set_ext_id id; u32 cadt_flags = 0; if (tb[IPSET_ATTR_CADT_FLAGS]) cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_WITH_FORCEADD) set->flags |= IPSET_CREATE_FLAG_FORCEADD; if (!align) align = 1; for (id = 0; id < IPSET_EXT_ID_MAX; id++) { if (!add_extension(id, cadt_flags, tb)) continue; if (align < ip_set_extensions[id].align) align = ip_set_extensions[id].align; len = ALIGN(len, ip_set_extensions[id].align); set->offset[id] = len; set->extensions |= ip_set_extensions[id].type; len += ip_set_extensions[id].len; } return ALIGN(len, align); } EXPORT_SYMBOL_GPL(ip_set_elem_len); int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], struct ip_set_ext *ext) { u64 fullmark; if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_TIMEOUT]) { if (!SET_WITH_TIMEOUT(set)) return -IPSET_ERR_TIMEOUT; ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) { if (!SET_WITH_COUNTER(set)) return -IPSET_ERR_COUNTER; if (tb[IPSET_ATTR_BYTES]) ext->bytes = be64_to_cpu(nla_get_be64( tb[IPSET_ATTR_BYTES])); if (tb[IPSET_ATTR_PACKETS]) ext->packets = be64_to_cpu(nla_get_be64( tb[IPSET_ATTR_PACKETS])); } if (tb[IPSET_ATTR_COMMENT]) { if (!SET_WITH_COMMENT(set)) return -IPSET_ERR_COMMENT; ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]); } if (tb[IPSET_ATTR_SKBMARK]) { if (!SET_WITH_SKBINFO(set)) return -IPSET_ERR_SKBINFO; fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK])); ext->skbinfo.skbmark = fullmark >> 32; ext->skbinfo.skbmarkmask = fullmark & 0xffffffff; } if (tb[IPSET_ATTR_SKBPRIO]) { if (!SET_WITH_SKBINFO(set)) return -IPSET_ERR_SKBINFO; ext->skbinfo.skbprio = be32_to_cpu(nla_get_be32(tb[IPSET_ATTR_SKBPRIO])); } if (tb[IPSET_ATTR_SKBQUEUE]) { if (!SET_WITH_SKBINFO(set)) return -IPSET_ERR_SKBINFO; ext->skbinfo.skbqueue = be16_to_cpu(nla_get_be16(tb[IPSET_ATTR_SKBQUEUE])); } return 0; } EXPORT_SYMBOL_GPL(ip_set_get_extensions); static u64 ip_set_get_bytes(const struct ip_set_counter *counter) { return (u64)atomic64_read(&(counter)->bytes); } static u64 ip_set_get_packets(const struct ip_set_counter *counter) { return (u64)atomic64_read(&(counter)->packets); } static bool ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) { return nla_put_net64(skb, IPSET_ATTR_BYTES, cpu_to_be64(ip_set_get_bytes(counter)), IPSET_ATTR_PAD) || nla_put_net64(skb, IPSET_ATTR_PACKETS, cpu_to_be64(ip_set_get_packets(counter)), IPSET_ATTR_PAD); } static bool ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) { /* Send nonzero parameters only */ return ((skbinfo->skbmark || skbinfo->skbmarkmask) && nla_put_net64(skb, IPSET_ATTR_SKBMARK, cpu_to_be64((u64)skbinfo->skbmark << 32 | skbinfo->skbmarkmask), IPSET_ATTR_PAD)) || (skbinfo->skbprio && nla_put_net32(skb, IPSET_ATTR_SKBPRIO, cpu_to_be32(skbinfo->skbprio))) || (skbinfo->skbqueue && nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, cpu_to_be16(skbinfo->skbqueue))); } int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, 
const void *e, bool active) { if (SET_WITH_TIMEOUT(set)) { unsigned long *timeout = ext_timeout(e, set); if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(active ? ip_set_timeout_get(timeout) : *timeout))) return -EMSGSIZE; } if (SET_WITH_COUNTER(set) && ip_set_put_counter(skb, ext_counter(e, set))) return -EMSGSIZE; if (SET_WITH_COMMENT(set) && ip_set_put_comment(skb, ext_comment(e, set))) return -EMSGSIZE; if (SET_WITH_SKBINFO(set) && ip_set_put_skbinfo(skb, ext_skbinfo(e, set))) return -EMSGSIZE; return 0; } EXPORT_SYMBOL_GPL(ip_set_put_extensions); static bool ip_set_match_counter(u64 counter, u64 match, u8 op) { switch (op) { case IPSET_COUNTER_NONE: return true; case IPSET_COUNTER_EQ: return counter == match; case IPSET_COUNTER_NE: return counter != match; case IPSET_COUNTER_LT: return counter < match; case IPSET_COUNTER_GT: return counter > match; } return false; } static void ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) { atomic64_add((long long)bytes, &(counter)->bytes); } static void ip_set_add_packets(u64 packets, struct ip_set_counter *counter) { atomic64_add((long long)packets, &(counter)->packets); } static void ip_set_update_counter(struct ip_set_counter *counter, const struct ip_set_ext *ext, u32 flags) { if (ext->packets != ULLONG_MAX && !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { ip_set_add_bytes(ext->bytes, counter); ip_set_add_packets(ext->packets, counter); } } static void ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, const struct ip_set_ext *ext, struct ip_set_ext *mext, u32 flags) { mext->skbinfo = *skbinfo; } bool ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext, struct ip_set_ext *mext, u32 flags, void *data) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(data, set))) return false; if (SET_WITH_COUNTER(set)) { struct ip_set_counter *counter = ext_counter(data, set); ip_set_update_counter(counter, ext, flags); if (flags & IPSET_FLAG_MATCH_COUNTERS && !(ip_set_match_counter(ip_set_get_packets(counter), mext->packets, mext->packets_op) && ip_set_match_counter(ip_set_get_bytes(counter), mext->bytes, mext->bytes_op))) return false; } if (SET_WITH_SKBINFO(set)) ip_set_get_skbinfo(ext_skbinfo(data, set), ext, mext, flags); return true; } EXPORT_SYMBOL_GPL(ip_set_match_extensions); /* Creating/destroying/renaming/swapping affect the existence and * the properties of a set. All of these can be executed from userspace * only and serialized by the nfnl mutex indirectly from nfnetlink. * * Sets are identified by their index in ip_set_list and the index * is used by the external references (set/SET netfilter modules). * * The set behind an index may change by swapping only, from userspace. */ static void __ip_set_get(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); set->ref++; write_unlock_bh(&ip_set_ref_lock); } static void __ip_set_put(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); BUG_ON(set->ref == 0); set->ref--; write_unlock_bh(&ip_set_ref_lock); } /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need * a separate reference counter */ static void __ip_set_get_netlink(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); set->ref_netlink++; write_unlock_bh(&ip_set_ref_lock); } static void __ip_set_put_netlink(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); BUG_ON(set->ref_netlink == 0); set->ref_netlink--; write_unlock_bh(&ip_set_ref_lock); } /* Add, del and test set entries from kernel. 
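 * (i.e. from the set/SET netfilter match and target modules, via
 * ip_set_test(), ip_set_add() and ip_set_del() below).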
* * The set behind the index must exist and must be referenced * so it can't be destroyed (or changed) under our feet. */ static struct ip_set * ip_set_rcu_get(struct net *net, ip_set_id_t index) { struct ip_set_net *inst = ip_set_pernet(net); /* ip_set_list and the set pointer need to be protected */ return ip_set_dereference_nfnl(inst->ip_set_list)[index]; } static inline void ip_set_lock(struct ip_set *set) { if (!set->variant->region_lock) spin_lock_bh(&set->lock); } static inline void ip_set_unlock(struct ip_set *set) { if (!set->variant->region_lock) spin_unlock_bh(&set->lock); } int ip_set_test(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt) { struct ip_set *set = ip_set_rcu_get(xt_net(par), index); int ret = 0; BUG_ON(!set); pr_debug("set %s, index %u\n", set->name, index); if (opt->dim < set->type->dimension || !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) return 0; ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt); if (ret == -EAGAIN) { /* Type requests element to be completed */ pr_debug("element must be completed, ADD is triggered\n"); ip_set_lock(set); set->variant->kadt(set, skb, par, IPSET_ADD, opt); ip_set_unlock(set); ret = 1; } else { /* --return-nomatch: invert matched element */ if ((opt->cmdflags & IPSET_FLAG_RETURN_NOMATCH) && (set->type->features & IPSET_TYPE_NOMATCH) && (ret > 0 || ret == -ENOTEMPTY)) ret = -ret; } /* Convert error codes to nomatch */ return (ret < 0 ? 0 : ret); } EXPORT_SYMBOL_GPL(ip_set_test); int ip_set_add(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt) { struct ip_set *set = ip_set_rcu_get(xt_net(par), index); int ret; BUG_ON(!set); pr_debug("set %s, index %u\n", set->name, index); if (opt->dim < set->type->dimension || !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) return -IPSET_ERR_TYPE_MISMATCH; ip_set_lock(set); ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt); ip_set_unlock(set); return ret; } EXPORT_SYMBOL_GPL(ip_set_add); int ip_set_del(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt) { struct ip_set *set = ip_set_rcu_get(xt_net(par), index); int ret = 0; BUG_ON(!set); pr_debug("set %s, index %u\n", set->name, index); if (opt->dim < set->type->dimension || !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) return -IPSET_ERR_TYPE_MISMATCH; ip_set_lock(set); ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt); ip_set_unlock(set); return ret; } EXPORT_SYMBOL_GPL(ip_set_del); /* Find set by name, reference it once. The reference makes sure the * thing pointed to does not go away under our feet. * */ ip_set_id_t ip_set_get_byname(struct net *net, const char *name, struct ip_set **set) { ip_set_id_t i, index = IPSET_INVALID_ID; struct ip_set *s; struct ip_set_net *inst = ip_set_pernet(net); rcu_read_lock(); for (i = 0; i < inst->ip_set_max; i++) { s = rcu_dereference(inst->ip_set_list)[i]; if (s && STRNCMP(s->name, name)) { __ip_set_get(s); index = i; *set = s; break; } } rcu_read_unlock(); return index; } EXPORT_SYMBOL_GPL(ip_set_get_byname); /* If the given set pointer points to a valid set, decrement * reference count by 1. The caller shall not assume the index * to be valid after calling this function.
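 *
 * A typical (illustrative) pairing in a caller:
 *
 *	index = ip_set_get_byname(net, "myset", &set);
 *	if (index != IPSET_INVALID_ID) {
 *		... use set under the acquired reference ...
 *		ip_set_put_byindex(net, index);
 *	}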
* */ static void __ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index) { struct ip_set *set; rcu_read_lock(); set = rcu_dereference(inst->ip_set_list)[index]; if (set) __ip_set_put(set); rcu_read_unlock(); } void ip_set_put_byindex(struct net *net, ip_set_id_t index) { struct ip_set_net *inst = ip_set_pernet(net); __ip_set_put_byindex(inst, index); } EXPORT_SYMBOL_GPL(ip_set_put_byindex); /* Get the name of a set behind a set index. * Set itself is protected by RCU, but its name isn't: to protect against * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the * name. */ void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name) { struct ip_set *set = ip_set_rcu_get(net, index); BUG_ON(!set); read_lock_bh(&ip_set_ref_lock); strscpy_pad(name, set->name, IPSET_MAXNAMELEN); read_unlock_bh(&ip_set_ref_lock); } EXPORT_SYMBOL_GPL(ip_set_name_byindex); /* Routines to be called by external subsystems, which do not * call nfnl_lock for us. */ /* Find set by index, reference it once. The reference makes sure the * thing pointed to does not go away under our feet. * * The nfnl mutex is used in the function. */ ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index) { struct ip_set *set; struct ip_set_net *inst = ip_set_pernet(net); if (index >= inst->ip_set_max) return IPSET_INVALID_ID; nfnl_lock(NFNL_SUBSYS_IPSET); set = ip_set(inst, index); if (set) __ip_set_get(set); else index = IPSET_INVALID_ID; nfnl_unlock(NFNL_SUBSYS_IPSET); return index; } EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex); /* If the given set pointer points to a valid set, decrement * reference count by 1. The caller shall not assume the index * to be valid after calling this function. * * The nfnl mutex is used in the function. */ void ip_set_nfnl_put(struct net *net, ip_set_id_t index) { struct ip_set *set; struct ip_set_net *inst = ip_set_pernet(net); nfnl_lock(NFNL_SUBSYS_IPSET); if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */ set = ip_set(inst, index); if (set) __ip_set_put(set); } nfnl_unlock(NFNL_SUBSYS_IPSET); } EXPORT_SYMBOL_GPL(ip_set_nfnl_put); /* Communication protocol with userspace over netlink. * * The commands are serialized by the nfnl mutex. */ static inline u8 protocol(const struct nlattr * const tb[]) { return nla_get_u8(tb[IPSET_ATTR_PROTOCOL]); } static inline bool protocol_failed(const struct nlattr * const tb[]) { return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) != IPSET_PROTOCOL; } static inline bool protocol_min_failed(const struct nlattr * const tb[]) { return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) < IPSET_PROTOCOL_MIN; } static inline u32 flag_exist(const struct nlmsghdr *nlh) { return nlh->nlmsg_flags & NLM_F_EXCL ?
0 : IPSET_FLAG_EXIST; } static struct nlmsghdr * start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags, enum ipset_cmd cmd) { return nfnl_msg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd), flags, NFPROTO_IPV4, NFNETLINK_V0, 0); } /* Create a set */ static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1}, [IPSET_ATTR_REVISION] = { .type = NLA_U8 }, [IPSET_ATTR_FAMILY] = { .type = NLA_U8 }, [IPSET_ATTR_DATA] = { .type = NLA_NESTED }, }; static struct ip_set * find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id) { struct ip_set *set = NULL; ip_set_id_t i; *id = IPSET_INVALID_ID; for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set && STRNCMP(set->name, name)) { *id = i; break; } } return (*id == IPSET_INVALID_ID ? NULL : set); } static inline struct ip_set * find_set(struct ip_set_net *inst, const char *name) { ip_set_id_t id; return find_set_and_id(inst, name, &id); } static int find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index, struct ip_set **set) { struct ip_set *s; ip_set_id_t i; *index = IPSET_INVALID_ID; for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (!s) { if (*index == IPSET_INVALID_ID) *index = i; } else if (STRNCMP(name, s->name)) { /* Name clash */ *set = s; return -EEXIST; } } if (*index == IPSET_INVALID_ID) /* No free slot remained */ return -IPSET_ERR_MAX_SETS; return 0; } static int ip_set_none(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { return -EOPNOTSUPP; } static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *set, *clash = NULL; ip_set_id_t index = IPSET_INVALID_ID; struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {}; const char *name, *typename; u8 family, revision; u32 flags = flag_exist(info->nlh); int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_TYPENAME] || !attr[IPSET_ATTR_REVISION] || !attr[IPSET_ATTR_FAMILY] || (attr[IPSET_ATTR_DATA] && !flag_nested(attr[IPSET_ATTR_DATA])))) return -IPSET_ERR_PROTOCOL; name = nla_data(attr[IPSET_ATTR_SETNAME]); typename = nla_data(attr[IPSET_ATTR_TYPENAME]); family = nla_get_u8(attr[IPSET_ATTR_FAMILY]); revision = nla_get_u8(attr[IPSET_ATTR_REVISION]); pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n", name, typename, family_name(family), revision); /* First, and without any locks, allocate and initialize * a normal base set structure. */ set = kzalloc(sizeof(*set), GFP_KERNEL); if (!set) return -ENOMEM; spin_lock_init(&set->lock); strscpy(set->name, name, IPSET_MAXNAMELEN); set->family = family; set->revision = revision; /* Next, check that we know the type, and take * a reference on the type, to make sure it stays available * while constructing our new set. * * After referencing the type, we try to create the type * specific part of the set without holding any locks. */ ret = find_set_type_get(typename, family, revision, &set->type); if (ret) goto out; /* Without holding any locks, create private part. 
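 * (i.e. parse the type-specific attributes against the type's
 * create_policy and call its create() callback).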
*/ if (attr[IPSET_ATTR_DATA] && nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA], set->type->create_policy, NULL)) { ret = -IPSET_ERR_PROTOCOL; goto put_out; } /* Set create flags depending on the type revision */ set->flags |= set->type->create_flags[revision]; ret = set->type->create(info->net, set, tb, flags); if (ret != 0) goto put_out; /* BTW, ret==0 here. */ /* Here, we have a valid, constructed set and we are protected * by the nfnl mutex. Find the first free index in ip_set_list * and check clashing. */ ret = find_free_id(inst, set->name, &index, &clash); if (ret == -EEXIST) { /* If this is the same set and requested, ignore error */ if ((flags & IPSET_FLAG_EXIST) && STRNCMP(set->type->name, clash->type->name) && set->type->family == clash->type->family && set->type->revision_min == clash->type->revision_min && set->type->revision_max == clash->type->revision_max && set->variant->same_set(set, clash)) ret = 0; goto cleanup; } else if (ret == -IPSET_ERR_MAX_SETS) { struct ip_set **list, **tmp; ip_set_id_t i = inst->ip_set_max + IP_SET_INC; if (i < inst->ip_set_max || i == IPSET_INVALID_ID) /* Wraparound */ goto cleanup; list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL); if (!list) goto cleanup; /* nfnl mutex is held, both lists are valid */ tmp = ip_set_dereference(inst); memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max); rcu_assign_pointer(inst->ip_set_list, list); /* Make sure all current packets have passed through */ synchronize_net(); /* Use new list */ index = inst->ip_set_max; inst->ip_set_max = i; kvfree(tmp); ret = 0; } else if (ret) { goto cleanup; } /* Finally! Add our shiny new set to the list, and be done. */ pr_debug("create: '%s' created with index %u!\n", set->name, index); ip_set(inst, index) = set; return ret; cleanup: set->variant->cancel_gc(set); set->variant->destroy(set); put_out: module_put(set->type->me); out: kfree(set); return ret; } /* Destroy sets */ static const struct nla_policy ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, }; /* In order to return quickly when destroying a single set, it is split * into two stages: * - Cancel garbage collector * - Destroy the set itself via call_rcu() */ static void ip_set_destroy_set_rcu(struct rcu_head *head) { struct ip_set *set = container_of(head, struct ip_set, rcu); set->variant->destroy(set); module_put(set->type->me); kfree(set); } static void _destroy_all_sets(struct ip_set_net *inst) { struct ip_set *set; ip_set_id_t i; bool need_wait = false; /* First cancel gc's: set:list sets are flushed as well */ for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set) { set->variant->cancel_gc(set); if (set->type->features & IPSET_TYPE_NAME) need_wait = true; } } /* Must wait for flush to be really finished */ if (need_wait) rcu_barrier(); for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set) { ip_set(inst, i) = NULL; set->variant->destroy(set); module_put(set->type->me); kfree(set); } } } static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *s; ip_set_id_t i; int ret = 0; if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; /* Commands are serialized and references are * protected by the ip_set_ref_lock. * External systems (i.e. 
xt_set) must call * ip_set_nfnl_get_* functions, that way we * can safely check references here. * * list:set timer can only decrement the reference * counter, so if it's already zero, we can proceed * without holding the lock. */ if (!attr[IPSET_ATTR_SETNAME]) { read_lock_bh(&ip_set_ref_lock); for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (s && (s->ref || s->ref_netlink)) { ret = -IPSET_ERR_BUSY; goto out; } } inst->is_destroyed = true; read_unlock_bh(&ip_set_ref_lock); _destroy_all_sets(inst); /* Modified by ip_set_destroy() only, which is serialized */ inst->is_destroyed = false; } else { u32 flags = flag_exist(info->nlh); u16 features = 0; read_lock_bh(&ip_set_ref_lock); s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &i); if (!s) { if (!(flags & IPSET_FLAG_EXIST)) ret = -ENOENT; goto out; } else if (s->ref || s->ref_netlink) { ret = -IPSET_ERR_BUSY; goto out; } features = s->type->features; ip_set(inst, i) = NULL; read_unlock_bh(&ip_set_ref_lock); /* Must cancel garbage collectors */ s->variant->cancel_gc(s); if (features & IPSET_TYPE_NAME) { /* Must wait for flush to be really finished */ rcu_barrier(); } call_rcu(&s->rcu, ip_set_destroy_set_rcu); } return 0; out: read_unlock_bh(&ip_set_ref_lock); return ret; } /* Flush sets */ static void ip_set_flush_set(struct ip_set *set) { pr_debug("set: %s\n", set->name); ip_set_lock(set); set->variant->flush(set); ip_set_unlock(set); } static int ip_set_flush(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *s; ip_set_id_t i; if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; if (!attr[IPSET_ATTR_SETNAME]) { for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (s) ip_set_flush_set(s); } } else { s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!s) return -ENOENT; ip_set_flush_set(s); } return 0; } /* Rename a set */ static const struct nla_policy ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_SETNAME2] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, }; static int ip_set_rename(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *set, *s; const char *name2; ip_set_id_t i; int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_SETNAME2])) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; write_lock_bh(&ip_set_ref_lock); if (set->ref != 0 || set->ref_netlink != 0) { ret = -IPSET_ERR_REFERENCED; goto out; } name2 = nla_data(attr[IPSET_ATTR_SETNAME2]); for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (s && STRNCMP(s->name, name2)) { ret = -IPSET_ERR_EXIST_SETNAME2; goto out; } } strscpy_pad(set->name, name2, IPSET_MAXNAMELEN); out: write_unlock_bh(&ip_set_ref_lock); return ret; } /* Swap two sets so that name/index points to the other. * References and set names are also swapped. * * The commands are serialized by the nfnl mutex and references are * protected by the ip_set_ref_lock. The kernel interfaces * do not hold the mutex but the pointer settings are atomic * so the ip_set_list always contains valid pointers to the sets. 
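 *
 * Example (illustrative): after swapping "foo" and "bar", kernel-side
 * references that resolved to foo's index now match against bar's
 * contents and vice versa, while lookups already in flight keep seeing
 * a valid set.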
*/ static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *from, *to; ip_set_id_t from_id, to_id; char from_name[IPSET_MAXNAMELEN]; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_SETNAME2])) return -IPSET_ERR_PROTOCOL; from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &from_id); if (!from) return -ENOENT; to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]), &to_id); if (!to) return -IPSET_ERR_EXIST_SETNAME2; /* Features must not change. * Not an artificial restriction anymore, as we must prevent * possible loops created by swapping in setlist type of sets. */ if (!(from->type->features == to->type->features && from->family == to->family)) return -IPSET_ERR_TYPE_MISMATCH; write_lock_bh(&ip_set_ref_lock); if (from->ref_netlink || to->ref_netlink) { write_unlock_bh(&ip_set_ref_lock); return -EBUSY; } strscpy_pad(from_name, from->name, IPSET_MAXNAMELEN); strscpy_pad(from->name, to->name, IPSET_MAXNAMELEN); strscpy_pad(to->name, from_name, IPSET_MAXNAMELEN); swap(from->ref, to->ref); ip_set(inst, from_id) = to; ip_set(inst, to_id) = from; write_unlock_bh(&ip_set_ref_lock); return 0; } /* List/save set data */ #define DUMP_INIT 0 #define DUMP_ALL 1 #define DUMP_ONE 2 #define DUMP_LAST 3 #define DUMP_TYPE(arg) (((u32)(arg)) & 0x0000FFFF) #define DUMP_FLAGS(arg) (((u32)(arg)) >> 16) int ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) { u32 cadt_flags = 0; if (SET_WITH_TIMEOUT(set)) if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(set->timeout)))) return -EMSGSIZE; if (SET_WITH_COUNTER(set)) cadt_flags |= IPSET_FLAG_WITH_COUNTERS; if (SET_WITH_COMMENT(set)) cadt_flags |= IPSET_FLAG_WITH_COMMENT; if (SET_WITH_SKBINFO(set)) cadt_flags |= IPSET_FLAG_WITH_SKBINFO; if (SET_WITH_FORCEADD(set)) cadt_flags |= IPSET_FLAG_WITH_FORCEADD; if (!cadt_flags) return 0; return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags)); } EXPORT_SYMBOL_GPL(ip_set_put_flags); static int ip_set_dump_done(struct netlink_callback *cb) { if (cb->args[IPSET_CB_ARG0]) { struct ip_set_net *inst = (struct ip_set_net *)cb->args[IPSET_CB_NET]; ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX]; struct ip_set *set = ip_set_ref_netlink(inst, index); if (set->variant->uref) set->variant->uref(set, cb, false); pr_debug("release set %s\n", set->name); __ip_set_put_netlink(set); } return 0; } static inline void dump_attrs(struct nlmsghdr *nlh) { const struct nlattr *attr; int rem; pr_debug("dump nlmsg\n"); nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) { pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len); } } static const struct nla_policy ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_FLAGS] = { .type = NLA_U32 }, }; static int ip_set_dump_start(struct netlink_callback *cb) { struct nlmsghdr *nlh = nlmsg_hdr(cb->skb); int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1]; struct nlattr *attr = (void *)nlh + min_len; struct sk_buff *skb = cb->skb; struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk)); u32 dump_type; int ret; ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr, nlh->nlmsg_len - min_len, ip_set_dump_policy, NULL); if (ret) goto error; cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]); if
(cda[IPSET_ATTR_SETNAME]) { ip_set_id_t index; struct ip_set *set; set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]), &index); if (!set) { ret = -ENOENT; goto error; } dump_type = DUMP_ONE; cb->args[IPSET_CB_INDEX] = index; } else { dump_type = DUMP_ALL; } if (cda[IPSET_ATTR_FLAGS]) { u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]); dump_type |= (f << 16); } cb->args[IPSET_CB_NET] = (unsigned long)inst; cb->args[IPSET_CB_DUMP] = dump_type; return 0; error: /* We have to create and send the error message manually :-( */ if (nlh->nlmsg_flags & NLM_F_ACK) { netlink_ack(cb->skb, nlh, ret, NULL); } return ret; } static int ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb) { ip_set_id_t index = IPSET_INVALID_ID, max; struct ip_set *set = NULL; struct nlmsghdr *nlh = NULL; unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0; struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk)); u32 dump_type, dump_flags; bool is_destroyed; int ret = 0; if (!cb->args[IPSET_CB_DUMP]) return -EINVAL; if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max) goto out; dump_type = DUMP_TYPE(cb->args[IPSET_CB_DUMP]); dump_flags = DUMP_FLAGS(cb->args[IPSET_CB_DUMP]); max = dump_type == DUMP_ONE ? cb->args[IPSET_CB_INDEX] + 1 : inst->ip_set_max; dump_last: pr_debug("dump type, flag: %u %u index: %ld\n", dump_type, dump_flags, cb->args[IPSET_CB_INDEX]); for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) { index = (ip_set_id_t)cb->args[IPSET_CB_INDEX]; write_lock_bh(&ip_set_ref_lock); set = ip_set(inst, index); is_destroyed = inst->is_destroyed; if (!set || is_destroyed) { write_unlock_bh(&ip_set_ref_lock); if (dump_type == DUMP_ONE) { ret = -ENOENT; goto out; } if (is_destroyed) { /* All sets are just being destroyed */ ret = 0; goto out; } continue; } /* When dumping all sets, we must dump "sorted" * so that lists (unions of sets) are dumped last. 
*/ if (dump_type != DUMP_ONE && ((dump_type == DUMP_ALL) == !!(set->type->features & IPSET_DUMP_LAST))) { write_unlock_bh(&ip_set_ref_lock); continue; } pr_debug("List set: %s\n", set->name); if (!cb->args[IPSET_CB_ARG0]) { /* Start listing: make sure set won't be destroyed */ pr_debug("reference set\n"); set->ref_netlink++; } write_unlock_bh(&ip_set_ref_lock); nlh = start_msg(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags, IPSET_CMD_LIST); if (!nlh) { ret = -EMSGSIZE; goto release_refcount; } if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, cb->args[IPSET_CB_PROTO]) || nla_put_string(skb, IPSET_ATTR_SETNAME, set->name)) goto nla_put_failure; if (dump_flags & IPSET_FLAG_LIST_SETNAME) goto next_set; switch (cb->args[IPSET_CB_ARG0]) { case 0: /* Core header data */ if (nla_put_string(skb, IPSET_ATTR_TYPENAME, set->type->name) || nla_put_u8(skb, IPSET_ATTR_FAMILY, set->family) || nla_put_u8(skb, IPSET_ATTR_REVISION, set->revision)) goto nla_put_failure; if (cb->args[IPSET_CB_PROTO] > IPSET_PROTOCOL_MIN && nla_put_net16(skb, IPSET_ATTR_INDEX, htons(index))) goto nla_put_failure; ret = set->variant->head(set, skb); if (ret < 0) goto release_refcount; if (dump_flags & IPSET_FLAG_LIST_HEADER) goto next_set; if (set->variant->uref) set->variant->uref(set, cb, true); fallthrough; default: ret = set->variant->list(set, skb, cb); if (!cb->args[IPSET_CB_ARG0]) /* Set is done, proceed with next one */ goto next_set; goto release_refcount; } } /* If we dump all sets, continue with dumping last ones */ if (dump_type == DUMP_ALL) { dump_type = DUMP_LAST; cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16); cb->args[IPSET_CB_INDEX] = 0; if (set && set->variant->uref) set->variant->uref(set, cb, false); goto dump_last; } goto out; nla_put_failure: ret = -EFAULT; next_set: if (dump_type == DUMP_ONE) cb->args[IPSET_CB_INDEX] = IPSET_INVALID_ID; else cb->args[IPSET_CB_INDEX]++; release_refcount: /* If there was an error or set is done, release set */ if (ret || !cb->args[IPSET_CB_ARG0]) { set = ip_set_ref_netlink(inst, index); if (set->variant->uref) set->variant->uref(set, cb, false); pr_debug("release set %s\n", set->name); __ip_set_put_netlink(set); cb->args[IPSET_CB_ARG0] = 0; } out: if (nlh) { nlmsg_end(skb, nlh); pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len); dump_attrs(nlh); } return ret < 0 ? 
ret : skb->len; } static int ip_set_dump(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; { struct netlink_dump_control c = { .start = ip_set_dump_start, .dump = ip_set_dump_do, .done = ip_set_dump_done, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } } /* Add, del and test */ static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_DATA] = { .type = NLA_NESTED }, [IPSET_ATTR_ADT] = { .type = NLA_NESTED }, }; static int call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb, struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 flags, bool use_lineno) { int ret; u32 lineno = 0; bool eexist = flags & IPSET_FLAG_EXIST, retried = false; do { if (retried) { __ip_set_get_netlink(set); nfnl_unlock(NFNL_SUBSYS_IPSET); cond_resched(); nfnl_lock(NFNL_SUBSYS_IPSET); __ip_set_put_netlink(set); } ip_set_lock(set); ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); ip_set_unlock(set); retried = true; } while (ret == -ERANGE || (ret == -EAGAIN && set->variant->resize && (ret = set->variant->resize(set, retried)) == 0)); if (!ret || (ret == -IPSET_ERR_EXIST && eexist)) return 0; if (lineno && use_lineno) { /* Error in restore/batch mode: send back lineno */ struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb); struct sk_buff *skb2; struct nlmsgerr *errmsg; size_t payload = min(SIZE_MAX, sizeof(*errmsg) + nlmsg_len(nlh)); int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1]; struct nlattr *cmdattr; u32 *errline; skb2 = nlmsg_new(payload, GFP_KERNEL); if (!skb2) return -ENOMEM; rep = nlmsg_put(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NLMSG_ERROR, payload, 0); errmsg = nlmsg_data(rep); errmsg->error = ret; unsafe_memcpy(&errmsg->msg, nlh, nlh->nlmsg_len, /* Bounds checked by the skb layer. */); cmdattr = (void *)&errmsg->msg + min_len; ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr, nlh->nlmsg_len - min_len, ip_set_adt_policy, NULL); if (ret) { nlmsg_free(skb2); return ret; } errline = nla_data(cda[IPSET_ATTR_LINENO]); *errline = lineno; nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); /* Signal netlink not to send its ACK/errmsg. 
*/ return -EINTR; } return ret; } static int ip_set_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb, enum ipset_adt adt, const struct nlmsghdr *nlh, const struct nlattr * const attr[], struct netlink_ext_ack *extack) { struct ip_set_net *inst = ip_set_pernet(net); struct ip_set *set; struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; const struct nlattr *nla; u32 flags = flag_exist(nlh); bool use_lineno; int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !((attr[IPSET_ATTR_DATA] != NULL) ^ (attr[IPSET_ATTR_ADT] != NULL)) || (attr[IPSET_ATTR_DATA] && !flag_nested(attr[IPSET_ATTR_DATA])) || (attr[IPSET_ATTR_ADT] && (!flag_nested(attr[IPSET_ATTR_ADT]) || !attr[IPSET_ATTR_LINENO])))) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; use_lineno = !!attr[IPSET_ATTR_LINENO]; if (attr[IPSET_ATTR_DATA]) { if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(net, ctnl, skb, set, tb, adt, flags, use_lineno); } else { int nla_rem; nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) { if (nla_type(nla) != IPSET_ATTR_DATA || !flag_nested(nla) || nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla, set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(net, ctnl, skb, set, tb, adt, flags, use_lineno); if (ret < 0) return ret; } } return ret; } static int ip_set_uadd(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { return ip_set_ad(info->net, info->sk, skb, IPSET_ADD, info->nlh, attr, info->extack); } static int ip_set_udel(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { return ip_set_ad(info->net, info->sk, skb, IPSET_DEL, info->nlh, attr, info->extack); } static int ip_set_utest(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *set; struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; int ret = 0; u32 lineno; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_DATA] || !flag_nested(attr[IPSET_ATTR_DATA]))) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; rcu_read_lock_bh(); ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0); rcu_read_unlock_bh(); /* Userspace can't trigger element to be re-added */ if (ret == -EAGAIN) ret = 1; return ret > 0 ? 
0 : -IPSET_ERR_EXIST; } /* Get header data of a set */ static int ip_set_header(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); const struct ip_set *set; struct sk_buff *skb2; struct nlmsghdr *nlh2; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME])) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_HEADER); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) || nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) || nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) || nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision)) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } /* Get type data */ static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_FAMILY] = { .type = NLA_U8 }, }; static int ip_set_type(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct sk_buff *skb2; struct nlmsghdr *nlh2; u8 family, min, max; const char *typename; int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_TYPENAME] || !attr[IPSET_ATTR_FAMILY])) return -IPSET_ERR_PROTOCOL; family = nla_get_u8(attr[IPSET_ATTR_FAMILY]); typename = nla_data(attr[IPSET_ATTR_TYPENAME]); ret = find_set_type_minmax(typename, family, &min, &max); if (ret) return ret; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_TYPE); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) || nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) || nla_put_u8(skb2, IPSET_ATTR_REVISION, max) || nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min)) goto nla_put_failure; nlmsg_end(skb2, nlh2); pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } /* Get protocol version */ static const struct nla_policy ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, }; static int ip_set_protocol(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct sk_buff *skb2; struct nlmsghdr *nlh2; if (unlikely(!attr[IPSET_ATTR_PROTOCOL])) return -IPSET_ERR_PROTOCOL; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_PROTOCOL); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL)) goto nla_put_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL_MIN, IPSET_PROTOCOL_MIN)) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2);
nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } /* Get set by name or index, from userspace */ static int ip_set_byname(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct sk_buff *skb2; struct nlmsghdr *nlh2; ip_set_id_t id = IPSET_INVALID_ID; const struct ip_set *set; if (unlikely(protocol_failed(attr) || !attr[IPSET_ATTR_SETNAME])) return -IPSET_ERR_PROTOCOL; set = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &id); if (id == IPSET_INVALID_ID) return -ENOENT; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_GET_BYNAME); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) || nla_put_net16(skb2, IPSET_ATTR_INDEX, htons(id))) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } static const struct nla_policy ip_set_index_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_INDEX] = { .type = NLA_U16 }, }; static int ip_set_byindex(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct sk_buff *skb2; struct nlmsghdr *nlh2; ip_set_id_t id = IPSET_INVALID_ID; const struct ip_set *set; if (unlikely(protocol_failed(attr) || !attr[IPSET_ATTR_INDEX])) return -IPSET_ERR_PROTOCOL; id = ip_set_get_h16(attr[IPSET_ATTR_INDEX]); if (id >= inst->ip_set_max) return -ENOENT; set = ip_set(inst, id); if (set == NULL) return -ENOENT; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_GET_BYINDEX); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name)) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = { [IPSET_CMD_NONE] = { .call = ip_set_none, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, }, [IPSET_CMD_CREATE] = { .call = ip_set_create, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_create_policy, }, [IPSET_CMD_DESTROY] = { .call = ip_set_destroy, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_FLUSH] = { .call = ip_set_flush, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_RENAME] = { .call = ip_set_rename, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname2_policy, }, [IPSET_CMD_SWAP] = { .call = ip_set_swap, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname2_policy, }, [IPSET_CMD_LIST] = { .call = ip_set_dump, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_dump_policy, }, [IPSET_CMD_SAVE] = { .call = ip_set_dump, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_ADD] = { .call = ip_set_uadd, .type = 
NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_adt_policy, }, [IPSET_CMD_DEL] = { .call = ip_set_udel, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_adt_policy, }, [IPSET_CMD_TEST] = { .call = ip_set_utest, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_adt_policy, }, [IPSET_CMD_HEADER] = { .call = ip_set_header, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_TYPE] = { .call = ip_set_type, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_type_policy, }, [IPSET_CMD_PROTOCOL] = { .call = ip_set_protocol, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_protocol_policy, }, [IPSET_CMD_GET_BYNAME] = { .call = ip_set_byname, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_GET_BYINDEX] = { .call = ip_set_byindex, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_index_policy, }, }; static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = { .name = "ip_set", .subsys_id = NFNL_SUBSYS_IPSET, .cb_count = IPSET_MSG_MAX, .cb = ip_set_netlink_subsys_cb, }; /* Interface to iptables/ip6tables */ static int ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) { unsigned int *op; void *data; int copylen = *len, ret = 0; struct net *net = sock_net(sk); struct ip_set_net *inst = ip_set_pernet(net); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; if (optval != SO_IP_SET) return -EBADF; if (*len < sizeof(unsigned int)) return -EINVAL; data = vmalloc(*len); if (!data) return -ENOMEM; if (copy_from_user(data, user, *len) != 0) { ret = -EFAULT; goto done; } op = data; if (*op < IP_SET_OP_VERSION) { /* Check the version at the beginning of operations */ struct ip_set_req_version *req_version = data; if (*len < sizeof(struct ip_set_req_version)) { ret = -EINVAL; goto done; } if (req_version->version < IPSET_PROTOCOL_MIN) { ret = -EPROTO; goto done; } } switch (*op) { case IP_SET_OP_VERSION: { struct ip_set_req_version *req_version = data; if (*len != sizeof(struct ip_set_req_version)) { ret = -EINVAL; goto done; } req_version->version = IPSET_PROTOCOL; if (copy_to_user(user, req_version, sizeof(struct ip_set_req_version))) ret = -EFAULT; goto done; } case IP_SET_OP_GET_BYNAME: { struct ip_set_req_get_set *req_get = data; ip_set_id_t id; if (*len != sizeof(struct ip_set_req_get_set)) { ret = -EINVAL; goto done; } req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; nfnl_lock(NFNL_SUBSYS_IPSET); find_set_and_id(inst, req_get->set.name, &id); req_get->set.index = id; nfnl_unlock(NFNL_SUBSYS_IPSET); goto copy; } case IP_SET_OP_GET_FNAME: { struct ip_set_req_get_set_family *req_get = data; ip_set_id_t id; if (*len != sizeof(struct ip_set_req_get_set_family)) { ret = -EINVAL; goto done; } req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; nfnl_lock(NFNL_SUBSYS_IPSET); find_set_and_id(inst, req_get->set.name, &id); req_get->set.index = id; if (id != IPSET_INVALID_ID) req_get->family = ip_set(inst, id)->family; nfnl_unlock(NFNL_SUBSYS_IPSET); goto copy; } case IP_SET_OP_GET_BYINDEX: { struct ip_set_req_get_set *req_get = data; struct ip_set *set; if (*len != sizeof(struct ip_set_req_get_set) || req_get->set.index >= inst->ip_set_max) { ret = -EINVAL; goto done; } nfnl_lock(NFNL_SUBSYS_IPSET); set = ip_set(inst, req_get->set.index); ret = strscpy(req_get->set.name, set ? 
set->name : "", IPSET_MAXNAMELEN); nfnl_unlock(NFNL_SUBSYS_IPSET); if (ret < 0) goto done; goto copy; } default: ret = -EBADMSG; goto done; } /* end of switch(op) */ copy: if (copy_to_user(user, data, copylen)) ret = -EFAULT; done: vfree(data); if (ret > 0) ret = 0; return ret; } static struct nf_sockopt_ops so_set __read_mostly = { .pf = PF_INET, .get_optmin = SO_IP_SET, .get_optmax = SO_IP_SET + 1, .get = ip_set_sockfn_get, .owner = THIS_MODULE, }; static int __net_init ip_set_net_init(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); struct ip_set **list; inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX; if (inst->ip_set_max >= IPSET_INVALID_ID) inst->ip_set_max = IPSET_INVALID_ID - 1; list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL); if (!list) return -ENOMEM; inst->is_deleted = false; inst->is_destroyed = false; rcu_assign_pointer(inst->ip_set_list, list); return 0; } static void __net_exit ip_set_net_pre_exit(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); inst->is_deleted = true; /* flag for ip_set_nfnl_put */ } static void __net_exit ip_set_net_exit(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); _destroy_all_sets(inst); kvfree(rcu_dereference_protected(inst->ip_set_list, 1)); } static struct pernet_operations ip_set_net_ops = { .init = ip_set_net_init, .pre_exit = ip_set_net_pre_exit, .exit = ip_set_net_exit, .id = &ip_set_net_id, .size = sizeof(struct ip_set_net), }; static int __init ip_set_init(void) { int ret = register_pernet_subsys(&ip_set_net_ops); if (ret) { pr_err("ip_set: cannot register pernet_subsys.\n"); return ret; } ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); if (ret != 0) { pr_err("ip_set: cannot register with nfnetlink.\n"); unregister_pernet_subsys(&ip_set_net_ops); return ret; } ret = nf_register_sockopt(&so_set); if (ret != 0) { pr_err("SO_SET registry failed: %d\n", ret); nfnetlink_subsys_unregister(&ip_set_netlink_subsys); unregister_pernet_subsys(&ip_set_net_ops); return ret; } return 0; } static void __exit ip_set_fini(void) { nf_unregister_sockopt(&so_set); nfnetlink_subsys_unregister(&ip_set_netlink_subsys); unregister_pernet_subsys(&ip_set_net_ops); /* Wait for call_rcu() in destroy */ rcu_barrier(); pr_debug("these are the famous last words\n"); } module_init(ip_set_init); module_exit(ip_set_fini); MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL)); |
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_PORT_H #define _LINUX_TTY_PORT_H #include <linux/kfifo.h> #include <linux/kref.h> #include <linux/mutex.h> #include <linux/tty_buffer.h> #include <linux/wait.h> struct attribute_group; struct tty_driver; struct tty_port; struct tty_struct; /** * struct tty_port_operations -- operations on tty_port * @carrier_raised: return true if the carrier is raised on @port * @dtr_rts: raise the DTR line if @active is true, otherwise lower DTR * @shutdown: called when the last close completes or a hangup finishes IFF the * port was initialized. Do not use to free resources. Turn off the device * only. Called under the port mutex to serialize against @activate and * @shutdown. * @activate: called under the port mutex from tty_port_open(), serialized using * the port mutex. Supposed to turn on the device. * * FIXME: long term getting the tty argument *out* of this would be good * for consoles. * * @destruct: called on the final put of a port. Free resources, possibly incl. * the port itself. */ struct tty_port_operations { bool (*carrier_raised)(struct tty_port *port); void (*dtr_rts)(struct tty_port *port, bool active); void (*shutdown)(struct tty_port *port); int (*activate)(struct tty_port *port, struct tty_struct *tty); void (*destruct)(struct tty_port *port); }; struct tty_port_client_operations { size_t (*receive_buf)(struct tty_port *port, const u8 *cp, const u8 *fp, size_t count); void (*lookahead_buf)(struct tty_port *port, const u8 *cp, const u8 *fp, size_t count); void (*write_wakeup)(struct tty_port *port); }; extern const struct tty_port_client_operations tty_port_default_client_ops; /** * struct tty_port -- port level information * * @buf: buffer for this port, locked internally * @tty: back pointer to &struct tty_struct, valid only if the tty is open. Use * tty_port_tty_get() to obtain it (and tty_kref_put() to release). * @itty: internal back pointer to &struct tty_struct. Avoid this. It should be * eliminated in the long term. * @ops: tty port operations (like activate, shutdown), see &struct * tty_port_operations * @client_ops: tty port client operations (like receive_buf, write_wakeup). * By default, tty_port_default_client_ops is used. * @lock: lock protecting @tty * @blocked_open: # of procs waiting for open in tty_port_block_til_ready() * @count: usage count * @open_wait: open waiters queue (waiting e.g.
for a carrier) * @delta_msr_wait: modem status change queue (waiting for MSR changes) * @flags: user TTY flags (%ASYNC_) * @iflags: internal flags (%TTY_PORT_) * @console: when set, the port is a console * @mutex: locking, for open, shutdown and other port operations * @buf_mutex: @xmit_buf alloc lock * @xmit_buf: optional xmit buffer used by some drivers * @xmit_fifo: optional xmit buffer used by some drivers * @close_delay: delay in jiffies to wait when closing the port * @closing_wait: delay in jiffies for output to be sent before closing * @drain_delay: set to zero if no pure time based drain is needed else set to * size of fifo * @kref: references counter. Reaching zero calls @ops->destruct() if non-%NULL * or frees the port otherwise. * @client_data: pointer to private data, for @client_ops * * Each device keeps its own port level information. &struct tty_port was * introduced as a common structure for such information. As every TTY device * shall have a backing tty_port structure, every driver can use these members. * * The tty port has a different lifetime to the tty so must be kept apart. * In addition be careful as tty -> port mappings are valid for the life * of the tty object but in many cases port -> tty mappings are valid only * until a hangup so don't use the wrong path. * * Tty port shall be initialized by tty_port_init() and shut down either by * tty_port_destroy() (refcounting not used), or tty_port_put() (refcounting). * * There are a lot of helpers around &struct tty_port too. To name the most * significant ones: tty_port_open(), tty_port_close() (or * tty_port_close_start() and tty_port_close_end() separately if need be), and * tty_port_hangup(). These call @ops->activate() and @ops->shutdown() as * needed. */ struct tty_port { struct tty_bufhead buf; struct tty_struct *tty; struct tty_struct *itty; const struct tty_port_operations *ops; const struct tty_port_client_operations *client_ops; spinlock_t lock; int blocked_open; int count; wait_queue_head_t open_wait; wait_queue_head_t delta_msr_wait; unsigned long flags; unsigned long iflags; unsigned char console:1; struct mutex mutex; struct mutex buf_mutex; u8 *xmit_buf; DECLARE_KFIFO_PTR(xmit_fifo, u8); unsigned int close_delay; unsigned int closing_wait; int drain_delay; struct kref kref; void *client_data; }; /* tty_port::iflags bits -- use atomic bit ops */ #define TTY_PORT_INITIALIZED 0 /* device is initialized */ #define TTY_PORT_SUSPENDED 1 /* device is suspended */ #define TTY_PORT_ACTIVE 2 /* device is open */ /* * uart drivers: use the uart_port::status field and the UPSTAT_* defines * for s/w-based flow control steering and carrier detection status */ #define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ #define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ #define TTY_PORT_KOPENED 5 /* device exclusively opened by kernel */ void tty_port_init(struct tty_port *port); void tty_port_link_device(struct tty_port *port, struct tty_driver *driver, unsigned index); struct device *tty_port_register_device(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *device); struct device *tty_port_register_device_attr(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *device, void *drvdata, const struct attribute_group **attr_grp); struct device *tty_port_register_device_serdev(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *host, struct device *parent); struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index, struct device *host, struct device *parent, void *drvdata, const struct attribute_group **attr_grp); void tty_port_unregister_device(struct tty_port *port, struct tty_driver *driver, unsigned index); int tty_port_alloc_xmit_buf(struct tty_port *port); void tty_port_free_xmit_buf(struct tty_port *port); void tty_port_destroy(struct tty_port *port); void tty_port_put(struct tty_port *port); static inline struct tty_port *tty_port_get(struct tty_port *port) { if (port && kref_get_unless_zero(&port->kref)) return port; return NULL; } /* If the cts flow control is enabled, return true. */ static inline bool tty_port_cts_enabled(const struct tty_port *port) { return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); } static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) { assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val); } static inline bool tty_port_active(const struct tty_port *port) { return test_bit(TTY_PORT_ACTIVE, &port->iflags); } static inline void tty_port_set_active(struct tty_port *port, bool val) { assign_bit(TTY_PORT_ACTIVE, &port->iflags, val); } static inline bool tty_port_check_carrier(const struct tty_port *port) { return test_bit(TTY_PORT_CHECK_CD, &port->iflags); } static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) { assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val); } static inline bool tty_port_suspended(const struct tty_port *port) { return test_bit(TTY_PORT_SUSPENDED, &port->iflags); } static inline void tty_port_set_suspended(struct tty_port *port, bool val) { assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val); } static inline bool tty_port_initialized(const struct tty_port *port) { return test_bit(TTY_PORT_INITIALIZED, &port->iflags); } static inline void tty_port_set_initialized(struct tty_port *port, bool val) { assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val); } static inline bool tty_port_kopened(const struct tty_port *port) { return test_bit(TTY_PORT_KOPENED, &port->iflags); } static inline void tty_port_set_kopened(struct tty_port *port, bool val) { assign_bit(TTY_PORT_KOPENED, &port->iflags, val); } struct tty_struct *tty_port_tty_get(struct tty_port *port); void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); bool tty_port_carrier_raised(struct tty_port *port); void tty_port_raise_dtr_rts(struct tty_port *port); void tty_port_lower_dtr_rts(struct tty_port *port); void tty_port_hangup(struct tty_port *port); void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); void tty_port_tty_wakeup(struct tty_port *port); int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty, struct file *filp); int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct file *filp); void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); void tty_port_close(struct tty_port *port, struct tty_struct *tty, struct file *filp); int tty_port_install(struct tty_port *port, struct tty_driver *driver, struct tty_struct *tty); int tty_port_open(struct tty_port *port, struct tty_struct *tty, struct file *filp); static inline int tty_port_users(struct tty_port *port) { return port->count + port->blocked_open; } #endif
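/*
 * Illustrative sketch, not part of the original header: the refcounted
 * lifetime described in the struct tty_port kerneldoc above, embedded in a
 * hypothetical driver structure (all foo_* names are made up). The port is
 * initialized with tty_port_init() and released only via tty_port_put(),
 * which invokes @destruct once the last reference is gone.
 */
#include <linux/slab.h>
#include <linux/tty_port.h>

struct foo_serial {
	struct tty_port port;
	/* ... device specific state ... */
};

static void foo_port_destruct(struct tty_port *port)
{
	/* Final put: free the embedding structure, as @destruct allows. */
	kfree(container_of(port, struct foo_serial, port));
}

static const struct tty_port_operations foo_port_ops = {
	.destruct = foo_port_destruct,
};

static struct foo_serial *foo_serial_alloc(void)
{
	struct foo_serial *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	tty_port_init(&foo->port);	/* takes the initial kref */
	foo->port.ops = &foo_port_ops;
	return foo;
}
/* Matching release: tty_port_put(&foo->port); */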
// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. */ #include <crypto/internal/blake2s.h> #include <linux/types.h> #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/sizes.h> #include <asm/cpufeature.h> #include <asm/fpu/api.h> #include <asm/processor.h> #include <asm/simd.h> asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state, const u8 *block, const size_t nblocks, const u32 inc); asmlinkage void blake2s_compress_avx512(struct blake2s_state *state, const u8 *block, const size_t nblocks, const u32 inc); static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3); static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512); void blake2s_compress(struct blake2s_state *state, const u8 *block, size_t nblocks, const u32 inc) { /* SIMD disables preemption, so relax after processing each page. */ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8); if (!static_branch_likely(&blake2s_use_ssse3) || !may_use_simd()) { blake2s_compress_generic(state, block, nblocks, inc); return; } do { const size_t blocks = min_t(size_t, nblocks, SZ_4K / BLAKE2S_BLOCK_SIZE); kernel_fpu_begin(); if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&blake2s_use_avx512)) blake2s_compress_avx512(state, block, blocks, inc); else blake2s_compress_ssse3(state, block, blocks, inc); kernel_fpu_end(); nblocks -= blocks; block += blocks * BLAKE2S_BLOCK_SIZE; } while (nblocks); } EXPORT_SYMBOL(blake2s_compress); static int __init blake2s_mod_init(void) { if (boot_cpu_has(X86_FEATURE_SSSE3)) static_branch_enable(&blake2s_use_ssse3); if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) && boot_cpu_has(X86_FEATURE_AVX512VL) && cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL)) static_branch_enable(&blake2s_use_avx512); return 0; } subsys_initcall(blake2s_mod_init);
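/*
 * Illustrative sketch, not part of the original file: callers do not invoke
 * blake2s_compress() directly; they go through the library interface in
 * <crypto/blake2s.h>, whose one-shot helper ends up in the compress routine
 * above. The buffer names here are hypothetical.
 */
#include <crypto/blake2s.h>

static void example_blake2s_digest(const u8 *data, size_t len,
				   u8 hash[BLAKE2S_HASH_SIZE])
{
	/* Unkeyed, full-size (32 byte) digest: key == NULL, keylen == 0. */
	blake2s(hash, data, NULL, BLAKE2S_HASH_SIZE, len, 0);
}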
/* * Cryptographic API. * * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions * * Copyright (C) 2013 Intel Corporation * Author: Tim Chen <tim.c.chen@linux.intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/types.h> #include <linux/module.h> #include <linux/crc-t10dif.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <linux/init.h> #include <linux/string.h> #include <linux/kernel.h> #include <asm/cpufeatures.h> #include <asm/cpu_device_id.h> #include <asm/simd.h> asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len); struct chksum_desc_ctx { __u16 crc; }; static int chksum_init(struct shash_desc *desc) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); ctx->crc = 0; return 0; } static int chksum_update(struct shash_desc *desc, const u8 *data, unsigned int length) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); if (length >= 16 && crypto_simd_usable()) { kernel_fpu_begin(); ctx->crc = crc_t10dif_pcl(ctx->crc, data, length); kernel_fpu_end(); } else ctx->crc = crc_t10dif_generic(ctx->crc, data, length); return 0; } static int chksum_final(struct shash_desc *desc, u8 *out) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); *(__u16 *)out = ctx->crc; return 0; } static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out) { if (len >= 16 && crypto_simd_usable()) { kernel_fpu_begin(); *(__u16 *)out = crc_t10dif_pcl(crc, data, len); kernel_fpu_end(); } else *(__u16 *)out = crc_t10dif_generic(crc, data, len); return 0; } static int chksum_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); return __chksum_finup(ctx->crc, data, len, out); } static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { return __chksum_finup(0, data, length, out); } static struct shash_alg alg = { .digestsize = CRC_T10DIF_DIGEST_SIZE, .init = chksum_init, .update = chksum_update, .final = chksum_final, .finup = chksum_finup, .digest = chksum_digest, .descsize = sizeof(struct chksum_desc_ctx), .base = { .cra_name = "crct10dif", .cra_driver_name = "crct10dif-pclmul", .cra_priority = 200, .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static const struct x86_cpu_id crct10dif_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id); static int __init crct10dif_intel_mod_init(void) { if (!x86_match_cpu(crct10dif_cpu_id)) return -ENODEV; return crypto_register_shash(&alg); } static void __exit crct10dif_intel_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(crct10dif_intel_mod_init); module_exit(crct10dif_intel_mod_fini); MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ."); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("crct10dif"); MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
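/*
 * Illustrative sketch, not part of the original file: computing a T10 DIF
 * CRC through the generic shash API. The crypto core selects the highest
 * priority "crct10dif" implementation, which is this PCLMULQDQ transform
 * (cra_priority 200) when the module is loaded. The function name is
 * hypothetical and error handling is minimal.
 */
#include <crypto/hash.h>

static int example_crct10dif(const u8 *data, unsigned int len, __u16 *crc)
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("crct10dif", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* init + update + final in one call; writes 2 bytes to *crc */
		ret = crypto_shash_digest(desc, data, len, (u8 *)crc);
	}

	crypto_free_shash(tfm);
	return ret;
}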
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/string.h> #include <linux/skbuff.h> #include <linux/export.h> #include <net/caif/cfpkt.h> #define PKT_PREFIX 48 #define PKT_POSTFIX 2 #define PKT_LEN_WHEN_EXTENDING 128 #define PKT_ERROR(pkt, errmsg) \ do { \ cfpkt_priv(pkt)->erronous = true; \ skb_reset_tail_pointer(&pkt->skb); \ pr_warn(errmsg); \ } while (0) /* * net/caif/ is generic and does not * understand SKB, so we do this typecast */ struct cfpkt { struct sk_buff skb; }; /* Private data inside SKB */ struct cfpkt_priv_data { struct dev_info dev_info; bool erronous; }; static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) { return (struct cfpkt_priv_data *) pkt->skb.cb; } static inline bool is_erronous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) { return &pkt->skb; } static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) { return (struct cfpkt *) skb; } struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt) { struct cfpkt *pkt = skb_to_pkt(nativepkt); cfpkt_priv(pkt)->erronous = false; return pkt; } EXPORT_SYMBOL(cfpkt_fromnative); void *cfpkt_tonative(struct cfpkt *pkt) { return (void *) pkt; } EXPORT_SYMBOL(cfpkt_tonative); static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) { struct sk_buff *skb; skb = alloc_skb(len + pfx, GFP_ATOMIC); if (unlikely(skb == NULL)) return NULL; skb_reserve(skb, pfx); return skb_to_pkt(skb); } inline struct cfpkt *cfpkt_create(u16 len) { return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); } void cfpkt_destroy(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); kfree_skb(skb); } inline bool cfpkt_more(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len > 0; } int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); if (skb_headlen(skb) >= len) { memcpy(data, skb->data, len); return 0; } return !cfpkt_extr_head(pkt, data, len) && !cfpkt_add_head(pkt, data, len); }
int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); u8 *from; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(len > skb->len)) { PKT_ERROR(pkt, "read beyond end of packet\n"); return -EPROTO; } if (unlikely(len > skb_headlen(skb))) { if (unlikely(skb_linearize(skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } } from = skb_pull(skb, len); from -= len; if (data) memcpy(data, from, len); return 0; } EXPORT_SYMBOL(cfpkt_extr_head); int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); u8 *data = dta; u8 *from; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_linearize(skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } if (unlikely(skb->data + len > skb_tail_pointer(skb))) { PKT_ERROR(pkt, "read beyond end of packet\n"); return -EPROTO; } from = skb_tail_pointer(skb) - len; skb_trim(skb, skb->len - len); memcpy(data, from, len); return 0; } int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) { return cfpkt_add_body(pkt, NULL, len); } int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); struct sk_buff *lastskb; u8 *to; u16 addlen = 0; if (unlikely(is_erronous(pkt))) return -EPROTO; lastskb = skb; /* Check whether we need to add space at the tail */ if (unlikely(skb_tailroom(skb) < len)) { if (likely(len < PKT_LEN_WHEN_EXTENDING)) addlen = PKT_LEN_WHEN_EXTENDING; else addlen = len; } /* Check whether we need to change the SKB before writing to the tail */ if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) { /* Make sure data is writable */ if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) { PKT_ERROR(pkt, "cow failed\n"); return -EPROTO; } } /* All set to put the last SKB and optionally write data there. */ to = pskb_put(skb, lastskb, len); if (likely(data)) memcpy(to, data, len); return 0; } inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data) { return cfpkt_add_body(pkt, &data, 1); } int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); struct sk_buff *lastskb; u8 *to; const u8 *data = data2; int ret; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_headroom(skb) < len)) { PKT_ERROR(pkt, "no headroom\n"); return -EPROTO; } /* Make sure data is writable */ ret = skb_cow_data(skb, 0, &lastskb); if (unlikely(ret < 0)) { PKT_ERROR(pkt, "cow failed\n"); return ret; } to = skb_push(skb, len); memcpy(to, data, len); return 0; } EXPORT_SYMBOL(cfpkt_add_head); inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) { return cfpkt_add_body(pkt, data, len); } inline u16 cfpkt_getlen(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len; } int cfpkt_iterate(struct cfpkt *pkt, u16 (*iter_func)(u16, void *, u16), u16 data) { /* * Don't care about the performance hit of linearizing, * Checksum should not be used on high-speed interfaces anyway. 
*/ if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_linearize(&pkt->skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); } int cfpkt_setlen(struct cfpkt *pkt, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); if (unlikely(is_erronous(pkt))) return -EPROTO; if (likely(len <= skb->len)) { if (unlikely(skb->data_len)) ___pskb_trim(skb, len); else skb_trim(skb, len); return cfpkt_getlen(pkt); } /* Need to expand SKB */ if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len))) PKT_ERROR(pkt, "skb_pad_trail failed\n"); return cfpkt_getlen(pkt); } struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt, u16 expectlen) { struct sk_buff *dst = pkt_to_skb(dstpkt); struct sk_buff *add = pkt_to_skb(addpkt); u16 addlen = skb_headlen(add); u16 neededtailspace; struct sk_buff *tmp; u16 dstlen; u16 createlen; if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) { return dstpkt; } neededtailspace = max(expectlen, addlen); if (dst->tail + neededtailspace > dst->end) { /* Create a duplicate of 'dst' with more tail space */ struct cfpkt *tmppkt; dstlen = skb_headlen(dst); createlen = dstlen + neededtailspace; tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX); if (tmppkt == NULL) return NULL; tmp = pkt_to_skb(tmppkt); skb_put_data(tmp, dst->data, dstlen); cfpkt_destroy(dstpkt); dst = tmp; } skb_put_data(dst, add->data, skb_headlen(add)); cfpkt_destroy(addpkt); return skb_to_pkt(dst); } struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) { struct sk_buff *skb2; struct sk_buff *skb = pkt_to_skb(pkt); struct cfpkt *tmppkt; u8 *split = skb->data + pos; u16 len2nd = skb_tail_pointer(skb) - split; if (unlikely(is_erronous(pkt))) return NULL; if (skb->data + pos > skb_tail_pointer(skb)) { PKT_ERROR(pkt, "trying to split beyond end of packet\n"); return NULL; } /* Create a new packet for the second part of the data */ tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, PKT_PREFIX); if (tmppkt == NULL) return NULL; skb2 = pkt_to_skb(tmppkt); if (skb2 == NULL) return NULL; skb_put_data(skb2, split, len2nd); /* Reduce the length of the original packet */ skb_trim(skb, pos); skb2->priority = skb->priority; return skb_to_pkt(skb2); } bool cfpkt_erroneous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) { return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; } EXPORT_SYMBOL(cfpkt_info); void cfpkt_set_prio(struct cfpkt *pkt, int prio) { pkt_to_skb(pkt)->priority = prio; } EXPORT_SYMBOL(cfpkt_set_prio);
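/*
 * Illustrative sketch, not part of the original file: a typical round trip
 * through the cfpkt API above, as a CAIF protocol layer would do it --
 * prepend a one-byte header with cfpkt_add_head() and strip it again with
 * cfpkt_extr_head(). The values and the function name are hypothetical.
 */
static int example_cfpkt_roundtrip(void)
{
	u8 hdr = 0x42, readback = 0;
	struct cfpkt *pkt = cfpkt_create(64);	/* 64 bytes of payload room */

	if (!pkt)
		return -ENOMEM;
	if (cfpkt_add_head(pkt, &hdr, 1) < 0 ||
	    cfpkt_extr_head(pkt, &readback, 1) < 0) {
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	cfpkt_destroy(pkt);
	return readback == hdr ? 0 : -EINVAL;
}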
// SPDX-License-Identifier: GPL-2.0 /* * Helpers for IOMMU drivers implementing SVA */ #include <linux/mmu_context.h> #include <linux/mutex.h> #include <linux/sched/mm.h> #include <linux/iommu.h> #include "iommu-priv.h" static DEFINE_MUTEX(iommu_sva_lock); static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm); /* Allocate a PASID for the mm within range (inclusive) */ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev) { struct iommu_mm_data *iommu_mm; ioasid_t pasid; lockdep_assert_held(&iommu_sva_lock); if (!arch_pgtable_dma_compat(mm)) return ERR_PTR(-EBUSY); iommu_mm = mm->iommu_mm; /* Is a PASID already associated with this mm? */ if (iommu_mm) { if (iommu_mm->pasid >= dev->iommu->max_pasids) return ERR_PTR(-EOVERFLOW); return iommu_mm; } iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL); if (!iommu_mm) return ERR_PTR(-ENOMEM); pasid = iommu_alloc_global_pasid(dev); if (pasid == IOMMU_PASID_INVALID) { kfree(iommu_mm); return ERR_PTR(-ENOSPC); } iommu_mm->pasid = pasid; INIT_LIST_HEAD(&iommu_mm->sva_domains); /* * Make sure the write to mm->iommu_mm is not reordered in front of * initialization to iommu_mm fields. If it does, readers may see a * valid iommu_mm with uninitialized values. */ smp_store_release(&mm->iommu_mm, iommu_mm); return iommu_mm; } /** * iommu_sva_bind_device() - Bind a process address space to a device * @dev: the device * @mm: the mm to bind, caller must hold a reference to mm_users * * Create a bond between device and address space, allowing the device to * access the mm using the PASID returned by iommu_sva_get_pasid(). If a * bond already exists between @device and @mm, an additional internal * reference is taken. Caller must call iommu_sva_unbind_device() * to release each reference. * * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to * initialize the required SVA features. * * On error, returns an ERR_PTR value. */ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) { struct iommu_group *group = dev->iommu_group; struct iommu_attach_handle *attach_handle; struct iommu_mm_data *iommu_mm; struct iommu_domain *domain; struct iommu_sva *handle; int ret; if (!group) return ERR_PTR(-ENODEV); mutex_lock(&iommu_sva_lock); /* Allocate mm->pasid if necessary.
*/ iommu_mm = iommu_alloc_mm_data(mm, dev); if (IS_ERR(iommu_mm)) { ret = PTR_ERR(iommu_mm); goto out_unlock; } /* A bond already exists, just take a reference. */ attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA); if (!IS_ERR(attach_handle)) { handle = container_of(attach_handle, struct iommu_sva, handle); if (attach_handle->domain->mm != mm) { ret = -EBUSY; goto out_unlock; } refcount_inc(&handle->users); mutex_unlock(&iommu_sva_lock); return handle; } if (PTR_ERR(attach_handle) != -ENOENT) { ret = PTR_ERR(attach_handle); goto out_unlock; } handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (!handle) { ret = -ENOMEM; goto out_unlock; } /* Search for an existing domain. */ list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) { ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid, &handle->handle); if (!ret) { domain->users++; goto out; } } /* Allocate a new domain and set it on device pasid. */ domain = iommu_sva_domain_alloc(dev, mm); if (IS_ERR(domain)) { ret = PTR_ERR(domain); goto out_free_handle; } ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid, &handle->handle); if (ret) goto out_free_domain; domain->users = 1; list_add(&domain->next, &mm->iommu_mm->sva_domains); out: refcount_set(&handle->users, 1); mutex_unlock(&iommu_sva_lock); handle->dev = dev; return handle; out_free_domain: iommu_domain_free(domain); out_free_handle: kfree(handle); out_unlock: mutex_unlock(&iommu_sva_lock); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(iommu_sva_bind_device); /** * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device * @handle: the handle returned by iommu_sva_bind_device() * * Put reference to a bond between device and address space. The device should * not be issuing any more transactions for this PASID. All outstanding page * requests for this PASID must have been flushed to the IOMMU.
*/ void iommu_sva_unbind_device(struct iommu_sva *handle) { struct iommu_domain *domain = handle->handle.domain; struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm; struct device *dev = handle->dev; mutex_lock(&iommu_sva_lock); if (!refcount_dec_and_test(&handle->users)) { mutex_unlock(&iommu_sva_lock); return; } iommu_detach_device_pasid(domain, dev, iommu_mm->pasid); if (--domain->users == 0) { list_del(&domain->next); iommu_domain_free(domain); } mutex_unlock(&iommu_sva_lock); kfree(handle); } EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); u32 iommu_sva_get_pasid(struct iommu_sva *handle) { struct iommu_domain *domain = handle->handle.domain; return mm_get_enqcmd_pasid(domain->mm); } EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); void mm_pasid_drop(struct mm_struct *mm) { struct iommu_mm_data *iommu_mm = mm->iommu_mm; if (!iommu_mm) return; iommu_free_global_pasid(iommu_mm->pasid); kfree(iommu_mm); } /* * I/O page fault handler for SVA */ static enum iommu_page_response_code iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm) { vm_fault_t ret; struct vm_area_struct *vma; unsigned int access_flags = 0; unsigned int fault_flags = FAULT_FLAG_REMOTE; struct iommu_fault_page_request *prm = &fault->prm; enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID; if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID)) return status; if (!mmget_not_zero(mm)) return status; mmap_read_lock(mm); vma = vma_lookup(mm, prm->addr); if (!vma) /* Unmapped area */ goto out_put_mm; if (prm->perm & IOMMU_FAULT_PERM_READ) access_flags |= VM_READ; if (prm->perm & IOMMU_FAULT_PERM_WRITE) { access_flags |= VM_WRITE; fault_flags |= FAULT_FLAG_WRITE; } if (prm->perm & IOMMU_FAULT_PERM_EXEC) { access_flags |= VM_EXEC; fault_flags |= FAULT_FLAG_INSTRUCTION; } if (!(prm->perm & IOMMU_FAULT_PERM_PRIV)) fault_flags |= FAULT_FLAG_USER; if (access_flags & ~vma->vm_flags) /* Access fault */ goto out_put_mm; ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL); status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID : IOMMU_PAGE_RESP_SUCCESS; out_put_mm: mmap_read_unlock(mm); mmput(mm); return status; } static void iommu_sva_handle_iopf(struct work_struct *work) { struct iopf_fault *iopf; struct iopf_group *group; enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS; group = container_of(work, struct iopf_group, work); list_for_each_entry(iopf, &group->faults, list) { /* * For the moment, errors are sticky: don't handle subsequent * faults in the group if there is an error. */ if (status != IOMMU_PAGE_RESP_SUCCESS) break; status = iommu_sva_handle_mm(&iopf->fault, group->attach_handle->domain->mm); } iopf_group_response(group, status); iopf_free_group(group); } static int iommu_sva_iopf_handler(struct iopf_group *group) { struct iommu_fault_param *fault_param = group->fault_param; INIT_WORK(&group->work, iommu_sva_handle_iopf); if (!queue_work(fault_param->queue->wq, &group->work)) return -EBUSY; return 0; } static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm) { const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_domain *domain; if (ops->domain_alloc_sva) { domain = ops->domain_alloc_sva(dev, mm); if (IS_ERR(domain)) return domain; } else { domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); if (!domain) return ERR_PTR(-ENOMEM); } domain->type = IOMMU_DOMAIN_SVA; mmgrab(mm); domain->mm = mm; domain->owner = ops; domain->iopf_handler = iommu_sva_iopf_handler; return domain; }
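/*
 * A minimal usage sketch for the SVA API above, under stated assumptions:
 * "struct my_sva_dev" and the my_* functions are hypothetical driver code,
 * the "program the PASID" step is device-specific, and IOMMU_DEV_FEAT_SVA
 * is assumed to have been enabled with iommu_dev_enable_feature()
 * beforehand. Only iommu_sva_bind_device(), iommu_sva_get_pasid() and
 * iommu_sva_unbind_device() come from the code above.
 */
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

struct my_sva_dev {
	struct device *dev;		/* the physical device doing SVA DMA */
	struct iommu_sva *sva_handle;	/* bond returned by the bind call */
};

/* Runs in task context (e.g. an ioctl), so current->mm is valid. */
static int my_sva_enable(struct my_sva_dev *mydev)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(mydev->dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* ... program 'pasid' into the device so its DMA is tagged with it ... */

	mydev->sva_handle = handle;
	return 0;
}

static void my_sva_disable(struct my_sva_dev *mydev)
{
	/* ... quiesce DMA and flush outstanding page requests first ... */
	iommu_sva_unbind_device(mydev->sva_handle);
	mydev->sva_handle = NULL;
}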
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PERCPU_COUNTER_H #define _LINUX_PERCPU_COUNTER_H /* * A simple "approximate counter" for use in ext2 and ext3 superblocks. * * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. */ #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/list.h> #include <linux/threads.h> #include <linux/percpu.h> #include <linux/types.h> /* percpu_counter batch for local add or sub */ #define PERCPU_COUNTER_LOCAL_BATCH INT_MAX #ifdef CONFIG_SMP struct percpu_counter { raw_spinlock_t lock; s64 count; #ifdef CONFIG_HOTPLUG_CPU struct list_head list; /* All percpu_counters are on a list */ #endif s32 __percpu *counters; }; extern int percpu_counter_batch; int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount, gfp_t gfp, u32 nr_counters, struct lock_class_key *key); #define percpu_counter_init_many(fbc, value, gfp, nr_counters) \ ({ \ static struct lock_class_key __key; \ \ __percpu_counter_init_many(fbc, value, gfp, nr_counters,\ &__key); \ }) #define percpu_counter_init(fbc, value, gfp) \ percpu_counter_init_many(fbc, value, gfp, 1) void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters); static inline void percpu_counter_destroy(struct percpu_counter *fbc) { percpu_counter_destroy_many(fbc, 1); } void percpu_counter_set(struct percpu_counter *fbc, s64 amount); void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount, s32 batch); void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); } static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { percpu_counter_add_batch(fbc, amount, percpu_counter_batch); } static inline bool percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) { return __percpu_counter_limited_add(fbc, limit, amount, percpu_counter_batch); } /* * With percpu_counter_add_local() and percpu_counter_sub_local(), counts * are accumulated in the local per-CPU counter and not in fbc->count until * the local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter * writes efficient.
* But percpu_counter_sum(), instead of percpu_counter_read(), needs to be * used to add up the counts from each CPU to account for all the local * counts. So percpu_counter_add_local() and percpu_counter_sub_local() * should be used when a counter is updated frequently and read rarely. */ static inline void percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) { percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH); } static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { s64 ret = __percpu_counter_sum(fbc); return ret < 0 ? 0 : ret; } static inline s64 percpu_counter_sum(struct percpu_counter *fbc) { return __percpu_counter_sum(fbc); } static inline s64 percpu_counter_read(struct percpu_counter *fbc) { return fbc->count; } /* * It is possible for percpu_counter_read() to return a small negative * number for some counter which should never be negative. */ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) { /* Prevent reloads of fbc->count */ s64 ret = READ_ONCE(fbc->count); if (ret >= 0) return ret; return 0; } static inline bool percpu_counter_initialized(struct percpu_counter *fbc) { return (fbc->counters != NULL); } #else /* !CONFIG_SMP */ struct percpu_counter { s64 count; }; static inline int percpu_counter_init_many(struct percpu_counter *fbc, s64 amount, gfp_t gfp, u32 nr_counters) { u32 i; for (i = 0; i < nr_counters; i++) fbc[i].count = amount; return 0; } static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp) { return percpu_counter_init_many(fbc, amount, gfp, 1); } static inline void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters) { } static inline void percpu_counter_destroy(struct percpu_counter *fbc) { } static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount) { fbc->count = amount; } static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { if (fbc->count > rhs) return 1; else if (fbc->count < rhs) return -1; else return 0; } static inline int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) { return percpu_counter_compare(fbc, rhs); } static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { unsigned long flags; local_irq_save(flags); fbc->count += amount; local_irq_restore(flags); } static inline bool percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) { unsigned long flags; bool good = false; s64 count; if (amount == 0) return true; local_irq_save(flags); count = fbc->count + amount; if ((amount > 0 && count <= limit) || (amount < 0 && count >= limit)) { fbc->count = count; good = true; } local_irq_restore(flags); return good; } /* non-SMP percpu_counter_add_local is the same as percpu_counter_add */ static inline void percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) { percpu_counter_add(fbc, amount); } static inline void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) { percpu_counter_add(fbc, amount); } static inline s64 percpu_counter_read(struct percpu_counter *fbc) { return fbc->count; } /* * percpu_counter is intended to track positive numbers. In the UP case the * number should never be negative.
*/ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) { return fbc->count; } static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { return percpu_counter_read_positive(fbc); } static inline s64 percpu_counter_sum(struct percpu_counter *fbc) { return percpu_counter_read(fbc); } static inline bool percpu_counter_initialized(struct percpu_counter *fbc) { return true; } static inline void percpu_counter_sync(struct percpu_counter *fbc) { } #endif /* CONFIG_SMP */ static inline void percpu_counter_inc(struct percpu_counter *fbc) { percpu_counter_add(fbc, 1); } static inline void percpu_counter_dec(struct percpu_counter *fbc) { percpu_counter_add(fbc, -1); } static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) { percpu_counter_add(fbc, -amount); } static inline void percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount) { percpu_counter_add_local(fbc, -amount); } #endif /* _LINUX_PERCPU_COUNTER_H */
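/*
 * A minimal usage sketch for the interface above, with hypothetical names
 * (my_nr_inflight, the my_* functions) and an illustrative threshold of
 * 1024. Hot-path updates stay in the per-CPU counters; the exact but
 * O(nr_cpus) percpu_counter_sum() walk is only paid when precision
 * matters. percpu_counter_compare() makes that trade-off internally: it
 * trusts the approximate fbc->count when it is further from the threshold
 * than batch * num_online_cpus(), and falls back to a full sum otherwise.
 */
#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static struct percpu_counter my_nr_inflight;

static int my_subsys_init(void)
{
	/* On SMP this allocates the per-CPU counters and may fail. */
	return percpu_counter_init(&my_nr_inflight, 0, GFP_KERNEL);
}

static void my_request_start(void)
{
	percpu_counter_inc(&my_nr_inflight);	/* usually touches only this CPU's slot */
}

static void my_request_done(void)
{
	percpu_counter_dec(&my_nr_inflight);
}

static bool my_subsys_congested(void)
{
	/* Returns > 0 only when the counter is strictly above 1024. */
	return percpu_counter_compare(&my_nr_inflight, 1024) > 0;
}

static void my_subsys_exit(void)
{
	percpu_counter_destroy(&my_nr_inflight);
}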
/* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. Copyright 2023-2024 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED.
*/ #ifndef __HCI_CORE_H #define __HCI_CORE_H #include <linux/idr.h> #include <linux/leds.h> #include <linux/rculist.h> #include <net/bluetooth/hci.h> #include <net/bluetooth/hci_sync.h> #include <net/bluetooth/hci_sock.h> #include <net/bluetooth/coredump.h> /* HCI priority */ #define HCI_PRIO_MAX 7 /* HCI maximum id value */ #define HCI_MAX_ID 10000 /* HCI Core structures */ struct inquiry_data { bdaddr_t bdaddr; __u8 pscan_rep_mode; __u8 pscan_period_mode; __u8 pscan_mode; __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; __u8 ssp_mode; }; struct inquiry_entry { struct list_head all; /* inq_cache.all */ struct list_head list; /* unknown or resolve */ enum { NAME_NOT_KNOWN, NAME_NEEDED, NAME_PENDING, NAME_KNOWN, } name_state; __u32 timestamp; struct inquiry_data data; }; struct discovery_state { int type; enum { DISCOVERY_STOPPED, DISCOVERY_STARTING, DISCOVERY_FINDING, DISCOVERY_RESOLVING, DISCOVERY_STOPPING, } state; struct list_head all; /* All devices found during inquiry */ struct list_head unknown; /* Name state not known */ struct list_head resolve; /* Name needs to be resolved */ __u32 timestamp; bdaddr_t last_adv_addr; u8 last_adv_addr_type; s8 last_adv_rssi; u32 last_adv_flags; u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH]; u8 last_adv_data_len; bool report_invalid_rssi; bool result_filtering; bool limited; s8 rssi; u16 uuid_count; u8 (*uuids)[16]; unsigned long name_resolve_timeout; }; #define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ enum suspend_tasks { SUSPEND_PAUSE_DISCOVERY, SUSPEND_UNPAUSE_DISCOVERY, SUSPEND_PAUSE_ADVERTISING, SUSPEND_UNPAUSE_ADVERTISING, SUSPEND_SCAN_DISABLE, SUSPEND_SCAN_ENABLE, SUSPEND_DISCONNECTING, SUSPEND_POWERING_DOWN, SUSPEND_PREPARE_NOTIFIER, SUSPEND_SET_ADV_FILTER, __SUSPEND_NUM_TASKS }; enum suspended_state { BT_RUNNING = 0, BT_SUSPEND_DISCONNECT, BT_SUSPEND_CONFIGURE_WAKE, }; struct hci_conn_hash { struct list_head list; unsigned int acl_num; unsigned int sco_num; unsigned int iso_num; unsigned int le_num; unsigned int le_num_peripheral; }; struct bdaddr_list { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; }; struct codec_list { struct list_head list; u8 id; __u16 cid; __u16 vid; u8 transport; u8 num_caps; u32 len; struct hci_codec_caps caps[]; }; struct bdaddr_list_with_irk { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; u8 peer_irk[16]; u8 local_irk[16]; }; /* Bitmask of connection flags */ enum hci_conn_flags { HCI_CONN_FLAG_REMOTE_WAKEUP = 1, HCI_CONN_FLAG_DEVICE_PRIVACY = 2, }; typedef u8 hci_conn_flags_t; struct bdaddr_list_with_flags { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; hci_conn_flags_t flags; }; struct bt_uuid { struct list_head list; u8 uuid[16]; u8 size; u8 svc_hint; }; struct blocked_key { struct list_head list; struct rcu_head rcu; u8 type; u8 val[16]; }; struct smp_csrk { bdaddr_t bdaddr; u8 bdaddr_type; u8 type; u8 val[16]; }; struct smp_ltk { struct list_head list; struct rcu_head rcu; bdaddr_t bdaddr; u8 bdaddr_type; u8 authenticated; u8 type; u8 enc_size; __le16 ediv; __le64 rand; u8 val[16]; }; struct smp_irk { struct list_head list; struct rcu_head rcu; bdaddr_t rpa; bdaddr_t bdaddr; u8 addr_type; u8 val[16]; }; struct link_key { struct list_head list; struct rcu_head rcu; bdaddr_t bdaddr; u8 type; u8 val[HCI_LINK_KEY_SIZE]; u8 pin_len; }; struct oob_data { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; u8 present; u8 hash192[16]; u8 rand192[16]; u8 hash256[16]; u8 rand256[16]; }; struct adv_info { struct list_head list; bool enabled; bool pending; bool 
periodic; __u8 mesh; __u8 instance; __u8 handle; __u32 flags; __u16 timeout; __u16 remaining_time; __u16 duration; __u16 adv_data_len; __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; bool adv_data_changed; __u16 scan_rsp_len; __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; bool scan_rsp_changed; __u16 per_adv_data_len; __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; __s8 tx_power; __u32 min_interval; __u32 max_interval; bdaddr_t random_addr; bool rpa_expired; struct delayed_work rpa_expired_cb; }; #define HCI_MAX_ADV_INSTANCES 5 #define HCI_DEFAULT_ADV_DURATION 2 #define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F #define DATA_CMP(_d1, _l1, _d2, _l2) \ (_l1 == _l2 ? memcmp(_d1, _d2, _l1) : _l1 - _l2) #define ADV_DATA_CMP(_adv, _data, _len) \ DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len) #define SCAN_RSP_CMP(_adv, _data, _len) \ DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len) struct monitored_device { struct list_head list; bdaddr_t bdaddr; __u8 addr_type; __u16 handle; bool notified; }; struct adv_pattern { struct list_head list; __u8 ad_type; __u8 offset; __u8 length; __u8 value[HCI_MAX_EXT_AD_LENGTH]; }; struct adv_rssi_thresholds { __s8 low_threshold; __s8 high_threshold; __u16 low_threshold_timeout; __u16 high_threshold_timeout; __u8 sampling_period; }; struct adv_monitor { struct list_head patterns; struct adv_rssi_thresholds rssi; __u16 handle; enum { ADV_MONITOR_STATE_NOT_REGISTERED, ADV_MONITOR_STATE_REGISTERED, ADV_MONITOR_STATE_OFFLOADED } state; }; #define HCI_MIN_ADV_MONITOR_HANDLE 1 #define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 #define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 #define HCI_ADV_MONITOR_EXT_NONE 1 #define HCI_ADV_MONITOR_EXT_MSFT 2 #define HCI_MAX_SHORT_NAME_LENGTH 10 #define HCI_CONN_HANDLE_MAX 0x0eff #define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX) /* Min encryption key size to match with SMP */ #define HCI_MIN_ENC_KEY_SIZE 7 /* Default LE RPA expiry time, 15 minutes */ #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60) /* Default min/max age of connection information (1s/3s) */ #define DEFAULT_CONN_INFO_MIN_AGE 1000 #define DEFAULT_CONN_INFO_MAX_AGE 3000 /* Default authenticated payload timeout 30s */ #define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8 #define HCI_MAX_PAGES 3 struct hci_dev { struct list_head list; struct mutex lock; struct ida unset_handle_ida; const char *name; unsigned long flags; __u16 id; __u8 bus; bdaddr_t bdaddr; bdaddr_t setup_addr; bdaddr_t public_addr; bdaddr_t random_addr; bdaddr_t static_addr; __u8 adv_addr_type; __u8 dev_name[HCI_MAX_NAME_LENGTH]; __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH]; __u8 eir[HCI_MAX_EIR_LENGTH]; __u16 appearance; __u8 dev_class[3]; __u8 major_class; __u8 minor_class; __u8 max_page; __u8 features[HCI_MAX_PAGES][8]; __u8 le_features[8]; __u8 le_accept_list_size; __u8 le_resolv_list_size; __u8 le_num_of_adv_sets; __u8 le_states[8]; __u8 mesh_ad_types[16]; __u8 mesh_send_ref; __u8 commands[64]; __u8 hci_ver; __u16 hci_rev; __u8 lmp_ver; __u16 manufacturer; __u16 lmp_subver; __u16 voice_setting; __u8 num_iac; __u16 stored_max_keys; __u16 stored_num_keys; __u8 io_capability; __s8 inq_tx_power; __u8 err_data_reporting; __u16 page_scan_interval; __u16 page_scan_window; __u8 page_scan_type; __u8 le_adv_channel_map; __u16 le_adv_min_interval; __u16 le_adv_max_interval; __u8 le_scan_type; __u16 le_scan_interval; __u16 le_scan_window; __u16 le_scan_int_suspend; __u16 le_scan_window_suspend; __u16 le_scan_int_discovery; __u16 le_scan_window_discovery; __u16 le_scan_int_adv_monitor; __u16 le_scan_window_adv_monitor; 
__u16 le_scan_int_connect; __u16 le_scan_window_connect; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_latency; __u16 le_supv_timeout; __u16 le_def_tx_len; __u16 le_def_tx_time; __u16 le_max_tx_len; __u16 le_max_tx_time; __u16 le_max_rx_len; __u16 le_max_rx_time; __u8 le_max_key_size; __u8 le_min_key_size; __u16 discov_interleaved_timeout; __u16 conn_info_min_age; __u16 conn_info_max_age; __u16 auth_payload_timeout; __u8 min_enc_key_size; __u8 max_enc_key_size; __u8 pairing_opts; __u8 ssp_debug_mode; __u8 hw_error_code; __u32 clock; __u16 advmon_allowlist_duration; __u16 advmon_no_filter_duration; __u8 enable_advmon_interleave_scan; __u16 devid_source; __u16 devid_vendor; __u16 devid_product; __u16 devid_version; __u8 def_page_scan_type; __u16 def_page_scan_int; __u16 def_page_scan_window; __u8 def_inq_scan_type; __u16 def_inq_scan_int; __u16 def_inq_scan_window; __u16 def_br_lsto; __u16 def_page_timeout; __u16 def_multi_adv_rotation_duration; __u16 def_le_autoconnect_timeout; __s8 min_le_tx_power; __s8 max_le_tx_power; __u16 pkt_type; __u16 esco_type; __u16 link_policy; __u16 link_mode; __u32 idle_timeout; __u16 sniff_min_interval; __u16 sniff_max_interval; unsigned int auto_accept_delay; unsigned long quirks; atomic_t cmd_cnt; unsigned int acl_cnt; unsigned int sco_cnt; unsigned int le_cnt; unsigned int iso_cnt; unsigned int acl_mtu; unsigned int sco_mtu; unsigned int le_mtu; unsigned int iso_mtu; unsigned int acl_pkts; unsigned int sco_pkts; unsigned int le_pkts; unsigned int iso_pkts; unsigned long acl_last_tx; unsigned long le_last_tx; __u8 le_tx_def_phys; __u8 le_rx_def_phys; struct workqueue_struct *workqueue; struct workqueue_struct *req_workqueue; struct work_struct power_on; struct delayed_work power_off; struct work_struct error_reset; struct work_struct cmd_sync_work; struct list_head cmd_sync_work_list; struct mutex cmd_sync_work_lock; struct mutex unregister_lock; struct work_struct cmd_sync_cancel_work; struct work_struct reenable_adv_work; __u16 discov_timeout; struct delayed_work discov_off; struct delayed_work service_cache; struct delayed_work cmd_timer; struct delayed_work ncmd_timer; struct work_struct rx_work; struct work_struct cmd_work; struct work_struct tx_work; struct delayed_work le_scan_disable; struct sk_buff_head rx_q; struct sk_buff_head raw_q; struct sk_buff_head cmd_q; struct sk_buff *sent_cmd; struct sk_buff *recv_event; struct mutex req_lock; wait_queue_head_t req_wait_q; __u32 req_status; __u32 req_result; struct sk_buff *req_skb; struct sk_buff *req_rsp; void *smp_data; void *smp_bredr_data; struct discovery_state discovery; bool discovery_paused; int advertising_old_state; bool advertising_paused; struct notifier_block suspend_notifier; enum suspended_state suspend_state_next; enum suspended_state suspend_state; bool scanning_paused; bool suspended; u8 wake_reason; bdaddr_t wake_addr; u8 wake_addr_type; struct hci_conn_hash conn_hash; struct list_head mesh_pending; struct list_head mgmt_pending; struct list_head reject_list; struct list_head accept_list; struct list_head uuids; struct list_head link_keys; struct list_head long_term_keys; struct list_head identity_resolving_keys; struct list_head remote_oob_data; struct list_head le_accept_list; struct list_head le_resolv_list; struct list_head le_conn_params; struct list_head pend_le_conns; struct list_head pend_le_reports; struct list_head blocked_keys; struct list_head local_codecs; struct hci_dev_stats stat; atomic_t promisc; const char *hw_info; const char *fw_info; struct 
dentry *debugfs; struct hci_devcoredump dump; struct device dev; struct rfkill *rfkill; DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); hci_conn_flags_t conn_flags; __s8 adv_tx_power; __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 adv_data_len; __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; __u8 scan_rsp_data_len; __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; __u8 per_adv_data_len; struct list_head adv_instances; unsigned int adv_instance_cnt; __u8 cur_adv_instance; __u16 adv_instance_timeout; struct delayed_work adv_instance_expire; struct idr adv_monitors_idr; unsigned int adv_monitors_cnt; __u8 irk[16]; __u32 rpa_timeout; struct delayed_work rpa_expired; bdaddr_t rpa; struct delayed_work mesh_send_done; enum { INTERLEAVE_SCAN_NONE, INTERLEAVE_SCAN_NO_FILTER, INTERLEAVE_SCAN_ALLOWLIST } interleave_scan_state; struct delayed_work interleave_scan; struct list_head monitored_devices; bool advmon_pend_notify; #if IS_ENABLED(CONFIG_BT_LEDS) struct led_trigger *power_led; #endif #if IS_ENABLED(CONFIG_BT_MSFTEXT) __u16 msft_opcode; void *msft_data; bool msft_curve_validity; #endif #if IS_ENABLED(CONFIG_BT_AOSPEXT) bool aosp_capable; bool aosp_quality_report; #endif int (*open)(struct hci_dev *hdev); int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); int (*setup)(struct hci_dev *hdev); int (*shutdown)(struct hci_dev *hdev); int (*send)(struct hci_dev *hdev, struct sk_buff *skb); void (*notify)(struct hci_dev *hdev, unsigned int evt); void (*hw_error)(struct hci_dev *hdev, u8 code); int (*post_init)(struct hci_dev *hdev); int (*set_diag)(struct hci_dev *hdev, bool enable); int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); void (*cmd_timeout)(struct hci_dev *hdev); void (*reset)(struct hci_dev *hdev); bool (*wakeup)(struct hci_dev *hdev); int (*set_quality_report)(struct hci_dev *hdev, bool enable); int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path); int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type, struct bt_codec *codec, __u8 *vnd_len, __u8 **vnd_data); u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb); }; #define HCI_PHY_HANDLE(handle) (handle & 0xff) enum conn_reasons { CONN_REASON_PAIR_DEVICE, CONN_REASON_L2CAP_CHAN, CONN_REASON_SCO_CONNECT, CONN_REASON_ISO_CONNECT, }; struct hci_conn { struct list_head list; atomic_t refcnt; bdaddr_t dst; __u8 dst_type; bdaddr_t src; __u8 src_type; bdaddr_t init_addr; __u8 init_addr_type; bdaddr_t resp_addr; __u8 resp_addr_type; __u8 adv_instance; __u16 handle; __u16 sync_handle; __u8 sid; __u16 state; __u16 mtu; __u8 mode; __u8 type; __u8 role; bool out; __u8 attempt; __u8 dev_class[3]; __u8 features[HCI_MAX_PAGES][8]; __u16 pkt_type; __u16 link_policy; __u8 key_type; __u8 auth_type; __u8 sec_level; __u8 pending_sec_level; __u8 pin_length; __u8 enc_key_size; __u8 io_capability; __u32 passkey_notify; __u8 passkey_entered; __u16 disc_timeout; __u16 conn_timeout; __u16 setting; __u16 auth_payload_timeout; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_interval; __u16 le_conn_latency; __u16 le_supv_timeout; __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 le_adv_data_len; __u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN]; __u16 le_per_adv_data_len; __u16 le_per_adv_data_offset; __u8 le_adv_phy; __u8 le_adv_sec_phy; __u8 le_tx_phy; __u8 le_rx_phy; __s8 rssi; __s8 tx_power; __s8 max_tx_power; struct bt_iso_qos iso_qos; __u8 num_bis; __u8 bis[HCI_MAX_ISO_BIS]; unsigned long flags; enum conn_reasons conn_reason; __u8 abort_reason; __u32 clock; __u16 clock_accuracy; unsigned long 
conn_info_timestamp; __u8 remote_cap; __u8 remote_auth; __u8 remote_id; unsigned int sent; struct sk_buff_head data_q; struct list_head chan_list; struct delayed_work disc_work; struct delayed_work auto_accept_work; struct delayed_work idle_work; struct delayed_work le_conn_timeout; struct device dev; struct dentry *debugfs; struct hci_dev *hdev; void *l2cap_data; void *sco_data; void *iso_data; struct list_head link_list; struct hci_conn *parent; struct hci_link *link; struct bt_codec codec; void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); void (*security_cfm_cb) (struct hci_conn *conn, u8 status); void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); void (*cleanup)(struct hci_conn *conn); }; struct hci_link { struct list_head list; struct hci_conn *conn; }; struct hci_chan { struct list_head list; __u16 handle; struct hci_conn *conn; struct sk_buff_head data_q; unsigned int sent; __u8 state; }; struct hci_conn_params { struct list_head list; struct list_head action; bdaddr_t addr; u8 addr_type; u16 conn_min_interval; u16 conn_max_interval; u16 conn_latency; u16 supervision_timeout; enum { HCI_AUTO_CONN_DISABLED, HCI_AUTO_CONN_REPORT, HCI_AUTO_CONN_DIRECT, HCI_AUTO_CONN_ALWAYS, HCI_AUTO_CONN_LINK_LOSS, HCI_AUTO_CONN_EXPLICIT, } auto_connect; struct hci_conn *conn; bool explicit_connect; /* Accessed without hdev->lock: */ hci_conn_flags_t flags; u8 privacy_mode; }; extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) #define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags) #define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags) #define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags) #define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags) #define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags) #define hci_dev_clear_volatile_flags(hdev) \ do { \ hci_dev_clear_flag(hdev, HCI_LE_SCAN); \ hci_dev_clear_flag(hdev, HCI_LE_ADV); \ hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \ } while (0) #define hci_dev_le_state_simultaneous(hdev) \ (!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \ (hdev->le_states[4] & 0x08) && /* Central */ \ (hdev->le_states[4] & 0x40) && /* Peripheral */ \ (hdev->le_states[3] & 0x10)) /* Simultaneous */ /* ----- HCI interface to upper protocols ----- */ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); int l2cap_disconn_ind(struct hci_conn *hcon); void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); #if IS_ENABLED(CONFIG_BT_BREDR) int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); #else static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { return 0; } static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) { } #endif #if IS_ENABLED(CONFIG_BT_LE) int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); #else static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { return 0; } static inline void iso_recv(struct hci_conn *hcon, struct 
sk_buff *skb, u16 flags) { } #endif /* ----- Inquiry cache ----- */ #define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ #define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ static inline void discovery_init(struct hci_dev *hdev) { hdev->discovery.state = DISCOVERY_STOPPED; INIT_LIST_HEAD(&hdev->discovery.all); INIT_LIST_HEAD(&hdev->discovery.unknown); INIT_LIST_HEAD(&hdev->discovery.resolve); hdev->discovery.report_invalid_rssi = true; hdev->discovery.rssi = HCI_RSSI_INVALID; } static inline void hci_discovery_filter_clear(struct hci_dev *hdev) { hdev->discovery.result_filtering = false; hdev->discovery.report_invalid_rssi = true; hdev->discovery.rssi = HCI_RSSI_INVALID; hdev->discovery.uuid_count = 0; kfree(hdev->discovery.uuids); hdev->discovery.uuids = NULL; } bool hci_discovery_active(struct hci_dev *hdev); void hci_discovery_set_state(struct hci_dev *hdev, int state); static inline int inquiry_cache_empty(struct hci_dev *hdev) { return list_empty(&hdev->discovery.all); } static inline long inquiry_cache_age(struct hci_dev *hdev) { struct discovery_state *c = &hdev->discovery; return jiffies - c->timestamp; } static inline long inquiry_entry_age(struct inquiry_entry *e) { return jiffies - e->timestamp; } struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, bdaddr_t *bdaddr); struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, bdaddr_t *bdaddr, int state); void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, struct inquiry_entry *ie); u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, bool name_known); void hci_inquiry_cache_flush(struct hci_dev *hdev); /* ----- HCI Connections ----- */ enum { HCI_CONN_AUTH_PEND, HCI_CONN_ENCRYPT_PEND, HCI_CONN_RSWITCH_PEND, HCI_CONN_MODE_CHANGE_PEND, HCI_CONN_SCO_SETUP_PEND, HCI_CONN_MGMT_CONNECTED, HCI_CONN_SSP_ENABLED, HCI_CONN_SC_ENABLED, HCI_CONN_AES_CCM, HCI_CONN_POWER_SAVE, HCI_CONN_FLUSH_KEY, HCI_CONN_ENCRYPT, HCI_CONN_AUTH, HCI_CONN_SECURE, HCI_CONN_FIPS, HCI_CONN_STK_ENCRYPT, HCI_CONN_AUTH_INITIATOR, HCI_CONN_DROP, HCI_CONN_CANCEL, HCI_CONN_PARAM_REMOVAL_PEND, HCI_CONN_NEW_LINK_KEY, HCI_CONN_SCANNING, HCI_CONN_AUTH_FAILURE, HCI_CONN_PER_ADV, HCI_CONN_BIG_CREATED, HCI_CONN_CREATE_CIS, HCI_CONN_CREATE_BIG_SYNC, HCI_CONN_BIG_SYNC, HCI_CONN_BIG_SYNC_FAILED, HCI_CONN_CREATE_PA_SYNC, HCI_CONN_PA_SYNC, HCI_CONN_PA_SYNC_FAILED, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); } static inline bool hci_conn_sc_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; return hci_dev_test_flag(hdev, HCI_SC_ENABLED) && test_bit(HCI_CONN_SC_ENABLED, &conn->flags); } static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; list_add_tail_rcu(&c->list, &h->list); switch (c->type) { case ACL_LINK: h->acl_num++; break; case LE_LINK: h->le_num++; if (c->role == HCI_ROLE_SLAVE) h->le_num_peripheral++; break; case SCO_LINK: case ESCO_LINK: h->sco_num++; break; case ISO_LINK: h->iso_num++; break; } } static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; list_del_rcu(&c->list); synchronize_rcu(); switch (c->type) { case ACL_LINK: h->acl_num--; break; case LE_LINK: h->le_num--; if (c->role == 
HCI_ROLE_SLAVE) h->le_num_peripheral--; break; case SCO_LINK: case ESCO_LINK: h->sco_num--; break; case ISO_LINK: h->iso_num--; break; } } static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; switch (type) { case ACL_LINK: return h->acl_num; case LE_LINK: return h->le_num; case SCO_LINK: case ESCO_LINK: return h->sco_num; case ISO_LINK: return h->iso_num; default: return 0; } } static inline unsigned int hci_conn_count(struct hci_dev *hdev) { struct hci_conn_hash *c = &hdev->conn_hash; return c->acl_num + c->sco_num + c->le_num + c->iso_num; } static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c == conn) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; __u8 type = INVALID_LINK; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->handle == handle) { type = c->type; break; } } rcu_read_unlock(); return type; } static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, bdaddr_t *ba, __u8 bis) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, ba) || c->type != ISO_LINK) continue; if (c->iso_qos.bcast.bis == bis) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev, __u8 sid, bdaddr_t *dst, __u8 dst_type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || bacmp(&c->dst, dst) || c->dst_type != dst_type || c->sid != sid) continue; rcu_read_unlock(); return c; } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev, bdaddr_t *ba, __u8 big, __u8 bis) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, ba) || c->type != ISO_LINK || !test_bit(HCI_CONN_PER_ADV, &c->flags)) continue; if (c->iso_qos.bcast.big == big && c->iso_qos.bcast.bis == bis) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->handle == handle) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, __u8 type, bdaddr_t *ba) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && !bacmp(&c->dst, ba)) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, bdaddr_t *ba, __u8 ba_type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != LE_LINK) continue; if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return 
NULL; } static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, bdaddr_t *ba, __u8 ba_type, __u8 cig, __u8 id) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) continue; /* Match CIG ID if set */ if (cig != c->iso_qos.ucast.cig) continue; /* Match CIS ID if set */ if (id != c->iso_qos.ucast.cis) continue; /* Match destination address if set */ if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev, __u8 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) continue; if (handle == c->iso_qos.ucast.cig) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, __u8 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK) continue; /* An ISO_LINK hcon with BDADDR_ANY as destination * address is a Broadcast connection. A Broadcast * slave connection is associated with a PA train, * so the sync_handle can be used to differentiate * from unicast. */ if (bacmp(&c->dst, BDADDR_ANY) && c->sync_handle == HCI_SYNC_HANDLE_INVALID) continue; if (handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev, __u8 handle, __u8 num_bis) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK) continue; if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK || c->state != state) continue; if (handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || !test_bit(HCI_CONN_PA_SYNC, &c->flags)) continue; if (c->iso_qos.bcast.big == big) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK) continue; /* Ignore the listen hcon, we are looking * for the child hcon that was created as * a result of the PA sync established event. 
*/ if (c->state == BT_LISTEN) continue; if (c->sync_handle == sync_handle) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->state == state) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data); static inline void hci_conn_hash_list_state(struct hci_dev *hdev, hci_conn_func_t func, __u8 type, __u16 state, void *data) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; if (!func) return; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->state == state) func(c, data); } rcu_read_unlock(); } static inline void hci_conn_hash_list_flag(struct hci_dev *hdev, hci_conn_func_t func, __u8 type, __u8 flag, void *data) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; if (!func) return; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && test_bit(flag, &c->flags)) func(c, data); } rcu_read_unlock(); } static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == LE_LINK && c->state == BT_CONNECT && !test_bit(HCI_CONN_SCANNING, &c->flags)) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } /* Returns true if an le connection is in the scanning state */ static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == LE_LINK && c->state == BT_CONNECT && test_bit(HCI_CONN_SCANNING, &c->flags)) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } int hci_disconnect(struct hci_conn *conn, __u8 reason); bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); bool hci_iso_setup_path(struct hci_conn *conn); int hci_le_create_cis_pending(struct hci_dev *hdev); int hci_pa_create_sync_pending(struct hci_dev *hdev); int hci_le_big_create_sync_pending(struct hci_dev *hdev); int hci_conn_check_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role, u16 handle); struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role); void hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); struct hci_chan *hci_chan_create(struct hci_conn *conn); void hci_chan_del(struct hci_chan *chan); void hci_chan_list_flush(struct hci_conn *conn); struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, enum conn_reasons conn_reason); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, bool dst_resolved, u8 sec_level, u16 conn_timeout, u8 role, u8 phy, u8 sec_phy); void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status); struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, u8 sec_level, u8 auth_type, enum conn_reasons conn_reason, u16 timeout); struct hci_conn 
*hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting, struct bt_codec *codec, u16 timeout); struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos); struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, struct bt_iso_qos *qos, __u8 base_len, __u8 *base); struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos); struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos, __u8 data_len, __u8 *data); struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, __u8 sid, struct bt_iso_qos *qos); int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, struct bt_iso_qos *qos, __u16 sync_handle, __u8 num_bis, __u8 bis[]); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, bool initiator); int hci_conn_switch_role(struct hci_conn *conn, __u8 role); void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); void hci_conn_failed(struct hci_conn *conn, u8 status); u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle); /* * hci_conn_get() and hci_conn_put() are used to control the life-time of an * "hci_conn" object. They do not guarantee that the hci_conn object is running, * working or anything else. They just guarantee that the object is available * and can be dereferenced. So you can use its locks, local variables and any * other constant data. * Before accessing runtime data, you _must_ lock the object and then check that * it is still running. As soon as you release the locks, the connection might * get dropped, though. * * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control * how long the underlying connection is held. So every channel that runs on the * hci_conn object calls this to prevent the connection from disappearing. As * long as you hold a device, you must also guarantee that you have a valid * reference to the device via hci_conn_get() (or the initial reference from * hci_conn_add()). * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't * break because nobody cares for that. But this means, we cannot use * _get()/_drop() in it, but require the caller to have a valid ref (FIXME). 
*/ static inline struct hci_conn *hci_conn_get(struct hci_conn *conn) { get_device(&conn->dev); return conn; } static inline void hci_conn_put(struct hci_conn *conn) { put_device(&conn->dev); } static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn) { BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); atomic_inc(&conn->refcnt); cancel_delayed_work(&conn->disc_work); return conn; } static inline void hci_conn_drop(struct hci_conn *conn) { BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); if (atomic_dec_and_test(&conn->refcnt)) { unsigned long timeo; switch (conn->type) { case ACL_LINK: case LE_LINK: cancel_delayed_work(&conn->idle_work); if (conn->state == BT_CONNECTED) { timeo = conn->disc_timeout; if (!conn->out) timeo *= 2; } else { timeo = 0; } break; default: timeo = 0; break; } cancel_delayed_work(&conn->disc_work); queue_delayed_work(conn->hdev->workqueue, &conn->disc_work, timeo); } } /* ----- HCI Devices ----- */ static inline void hci_dev_put(struct hci_dev *d) { BT_DBG("%s orig refcnt %d", d->name, kref_read(&d->dev.kobj.kref)); put_device(&d->dev); } static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) { BT_DBG("%s orig refcnt %d", d->name, kref_read(&d->dev.kobj.kref)); get_device(&d->dev); return d; } #define hci_dev_lock(d) mutex_lock(&d->lock) #define hci_dev_unlock(d) mutex_unlock(&d->lock) #define to_hci_dev(d) container_of(d, struct hci_dev, dev) #define to_hci_conn(c) container_of(c, struct hci_conn, dev) static inline void *hci_get_drvdata(struct hci_dev *hdev) { return dev_get_drvdata(&hdev->dev); } static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) { dev_set_drvdata(&hdev->dev, data); } static inline void *hci_get_priv(struct hci_dev *hdev) { return (char *)hdev + sizeof(*hdev); } struct hci_dev *hci_dev_get(int index); struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); struct hci_dev *hci_alloc_dev_priv(int sizeof_priv); static inline struct hci_dev *hci_alloc_dev(void) { return hci_alloc_dev_priv(0); } void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); void hci_release_dev(struct hci_dev *hdev); int hci_register_suspend_notifier(struct hci_dev *hdev); int hci_unregister_suspend_notifier(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int hci_reset_dev(struct hci_dev *hdev); int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb); __printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...); __printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...); static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode) { #if IS_ENABLED(CONFIG_BT_MSFTEXT) hdev->msft_opcode = opcode; #endif } static inline void hci_set_aosp_capable(struct hci_dev *hdev) { #if IS_ENABLED(CONFIG_BT_AOSPEXT) hdev->aosp_capable = true; #endif } static inline void hci_devcd_setup(struct hci_dev *hdev) { #ifdef CONFIG_DEV_COREDUMP INIT_WORK(&hdev->dump.dump_rx, hci_devcd_rx); INIT_DELAYED_WORK(&hdev->dump.dump_timeout, hci_devcd_timeout); skb_queue_head_init(&hdev->dump.dump_q); #endif } int hci_dev_open(__u16 dev); int hci_dev_close(__u16 dev); int hci_dev_do_close(struct hci_dev *hdev); int hci_dev_reset(__u16 dev); int hci_dev_reset_stat(__u16 dev); int hci_dev_cmd(unsigned int cmd, void __user *arg); int hci_get_dev_list(void __user *arg); int 
hci_get_dev_info(void __user *arg); int hci_get_conn_list(void __user *arg); int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); int hci_inquiry(void __user *arg); struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, bdaddr_t *bdaddr, u8 type); struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct list_head *list, bdaddr_t *bdaddr, u8 type); struct bdaddr_list_with_flags * hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type, u8 *peer_irk, u8 *local_irk); int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type, u32 flags); int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type); void hci_bdaddr_list_clear(struct list_head *list); struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); void hci_conn_params_clear_disabled(struct hci_dev *hdev); void hci_conn_params_free(struct hci_conn_params *param); void hci_pend_le_list_del_init(struct hci_conn_params *param); void hci_pend_le_list_add(struct hci_conn_params *param, struct list_head *list); struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, bdaddr_t *addr, u8 addr_type); void hci_uuids_clear(struct hci_dev *hdev); void hci_link_keys_clear(struct hci_dev *hdev); struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len, bool *persistent); struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand); struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 role); int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); void hci_smp_ltks_clear(struct hci_dev *hdev); int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa); struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 val[16], bdaddr_t *rpa); void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]); void hci_blocked_keys_clear(struct hci_dev *hdev); void hci_smp_irks_clear(struct hci_dev *hdev); bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); void hci_remote_oob_data_clear(struct hci_dev *hdev); struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 *hash192, u8 *rand192, u8 *hash256, u8 *rand256); int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); void 
hci_adv_instances_clear(struct hci_dev *hdev); struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance); struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance); struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data, u16 timeout, u16 duration, s8 tx_power, u32 min_interval, u32 max_interval, u8 mesh_handle); struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u32 flags, u8 data_len, u8 *data, u32 min_interval, u32 max_interval); int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data); int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance); bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance); void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle); int hci_remove_all_adv_monitor(struct hci_dev *hdev); bool hci_is_adv_monitoring(struct hci_dev *hdev); int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); void hci_init_sysfs(struct hci_dev *hdev); void hci_conn_init_sysfs(struct hci_conn *conn); void hci_conn_add_sysfs(struct hci_conn *conn); void hci_conn_del_sysfs(struct hci_conn *conn); #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev)) #define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent) /* ----- LMP capabilities ----- */ #define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT) #define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH) #define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD) #define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF) #define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK) #define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ) #define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO) #define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR)) #define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE) #define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR) #define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC) #define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M) #define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ) #define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR)) #define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR) #define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH) #define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO) #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) #define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) #define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) #define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M) #define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M) #define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT) #define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT) /* 
----- Extended LMP capabilities ----- */ #define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL) #define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL) #define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN) #define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN) #define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC) #define lmp_ping_capable(dev) ((dev)->features[2][1] & LMP_PING) /* ----- Host capabilities ----- */ #define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP) #define lmp_host_sc_capable(dev) ((dev)->features[1][0] & LMP_HOST_SC) #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) #define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \ !hci_dev_test_flag(dev, HCI_AUTO_OFF)) #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ hci_dev_test_flag(dev, HCI_SC_ENABLED)) #define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \ !hci_dev_test_flag(dev, HCI_RPA_EXPIRED)) #define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \ !adv->rpa_expired) #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M)) #define le_2m_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_2M)) #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) #define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \ !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \ &(dev)->quirks)) #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) #define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) /* Use LL Privacy based address resolution if supported */ #define use_ll_privacy(dev) (ll_privacy_capable(dev) && \ hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY)) #define privacy_mode_capable(dev) (use_ll_privacy(dev) && \ (hdev->commands[39] & 0x04)) #define read_key_size_capable(dev) \ ((dev)->commands[20] & 0x10 && \ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) /* Use enhanced synchronous connection if command is supported and its quirk * has not been set. */ #define enhanced_sync_conn_capable(dev) \ (((dev)->commands[29] & 0x08) && \ !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks)) /* Use ext scanning if set ext scan param and ext scan enable is supported */ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ ((dev)->commands[37] & 0x40) && \ !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks)) /* Use ext create connection if command is supported */ #define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \ !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &(dev)->quirks)) /* Extended advertising support */ #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV)) /* Maximum advertising length */ #define max_adv_len(dev) \ (ext_adv_capable(dev) ? 
HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH) /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789: * * C24: Mandatory if the LE Controller supports Connection State and either * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported */ #define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \ ext_adv_capable(dev)) && \ !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, \ &(dev)->quirks)) /* Periodic advertising support */ #define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV)) /* CIS Master/Slave and BIS support */ #define iso_capable(dev) (cis_capable(dev) || bis_capable(dev)) #define cis_capable(dev) \ (cis_central_capable(dev) || cis_peripheral_capable(dev)) #define cis_central_capable(dev) \ ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL) #define cis_peripheral_capable(dev) \ ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL) #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER) #define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER) #define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \ (!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks))) /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type, __u8 *flags) { switch (type) { case ACL_LINK: return l2cap_connect_ind(hdev, bdaddr); case SCO_LINK: case ESCO_LINK: return sco_connect_ind(hdev, bdaddr, flags); case ISO_LINK: return iso_connect_ind(hdev, bdaddr, flags); default: BT_ERR("unknown link type %d", type); return -EINVAL; } } static inline int hci_proto_disconn_ind(struct hci_conn *conn) { if (conn->type != ACL_LINK && conn->type != LE_LINK) return HCI_ERROR_REMOTE_USER_TERM; return l2cap_disconn_ind(conn); } /* ----- HCI callbacks ----- */ struct hci_cb { struct list_head list; char *name; bool (*match) (struct hci_conn *conn); void (*connect_cfm) (struct hci_conn *conn, __u8 status); void (*disconn_cfm) (struct hci_conn *conn, __u8 status); void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); void (*key_change_cfm) (struct hci_conn *conn, __u8 status); void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); }; static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list) { struct hci_cb *cb, *cpy; rcu_read_lock(); list_for_each_entry_rcu(cb, &hci_cb_list, list) { if (cb->match && cb->match(conn)) { cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC); if (!cpy) break; *cpy = *cb; INIT_LIST_HEAD(&cpy->list); list_add_rcu(&cpy->list, list); } } rcu_read_unlock(); } static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->connect_cfm) cb->connect_cfm(conn, status); kfree(cb); } if (conn->connect_cfm_cb) conn->connect_cfm_cb(conn, status); } static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->disconn_cfm) cb->disconn_cfm(conn, reason); kfree(cb); } if (conn->disconn_cfm_cb) conn->disconn_cfm_cb(conn, reason); } static inline void hci_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, 
tmp, &list, list) { if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); kfree(cb); } if (conn->security_cfm_cb) conn->security_cfm_cb(conn, status); } static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) { __u8 encrypt; if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) return; encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00; hci_security_cfm(conn, status, encrypt); } static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { __u8 encrypt; if (conn->state == BT_CONFIG) { if (!status) conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); hci_conn_drop(conn); return; } if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) encrypt = 0x00; else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) encrypt = 0x02; else encrypt = 0x01; if (!status) { if (conn->sec_level == BT_SECURITY_SDP) conn->sec_level = BT_SECURITY_LOW; if (conn->pending_sec_level > conn->sec_level) conn->sec_level = conn->pending_sec_level; } hci_security_cfm(conn, status, encrypt); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->key_change_cfm) cb->key_change_cfm(conn, status); kfree(cb); } } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) { struct list_head list; struct hci_cb *cb, *tmp; INIT_LIST_HEAD(&list); hci_cb_lookup(conn, &list); list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); kfree(cb); } } static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) { if (addr_type != ADDR_LE_DEV_RANDOM) return false; if ((bdaddr->b[5] & 0xc0) == 0x40) return true; return false; } static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type) { if (addr_type == ADDR_LE_DEV_PUBLIC) return true; /* Check for Random Static address type */ if ((addr->b[5] & 0xc0) == 0xc0) return true; return false; } static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { if (!hci_bdaddr_is_rpa(bdaddr, addr_type)) return NULL; return hci_find_irk_by_rpa(hdev, bdaddr); } static inline int hci_check_conn_params(u16 min, u16 max, u16 latency, u16 to_multiplier) { u16 max_latency; if (min > max) { BT_WARN("min %d > max %d", min, max); return -EINVAL; } if (min < 6) { BT_WARN("min %d < 6", min); return -EINVAL; } if (max > 3200) { BT_WARN("max %d > 3200", max); return -EINVAL; } if (to_multiplier < 10) { BT_WARN("to_multiplier %d < 10", to_multiplier); return -EINVAL; } if (to_multiplier > 3200) { BT_WARN("to_multiplier %d > 3200", to_multiplier); return -EINVAL; } if (max >= to_multiplier * 8) { BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier); return -EINVAL; } max_latency = (to_multiplier * 4 / max) - 1; if (latency > 499) { BT_WARN("latency %d > 499", latency); return -EINVAL; } if (latency > max_latency) { BT_WARN("latency %d > max_latency %d", latency, max_latency); return -EINVAL; } return 0; } int hci_register_cb(struct hci_cb *hcb); int hci_unregister_cb(struct hci_cb *hcb); int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param); int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, const void *param); void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags); void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb); void 
*hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); void *hci_recv_event_data(struct hci_dev *hdev, __u8 event); u32 hci_conn_get_phy(struct hci_conn *conn); /* ----- HCI Sockets ----- */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, int flag, struct sock *skip_sk); void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event, void *data, u16 data_len, ktime_t tstamp, int flag, struct sock *skip_sk); void hci_sock_dev_event(struct hci_dev *hdev, int event); #define HCI_MGMT_VAR_LEN BIT(0) #define HCI_MGMT_NO_HDEV BIT(1) #define HCI_MGMT_UNTRUSTED BIT(2) #define HCI_MGMT_UNCONFIGURED BIT(3) #define HCI_MGMT_HDEV_OPTIONAL BIT(4) struct hci_mgmt_handler { int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len); size_t data_len; unsigned long flags; }; struct hci_mgmt_chan { struct list_head list; unsigned short channel; size_t handler_count; const struct hci_mgmt_handler *handlers; void (*hdev_init) (struct sock *sk, struct hci_dev *hdev); }; int hci_mgmt_chan_register(struct hci_mgmt_chan *c); void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); /* Management interface */ #define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR)) #define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \ BIT(BDADDR_LE_RANDOM)) #define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \ BIT(BDADDR_LE_PUBLIC) | \ BIT(BDADDR_LE_RANDOM)) /* These LE scan and inquiry parameters were chosen according to LE General * Discovery Procedure specification. */ #define DISCOV_LE_SCAN_WIN 0x0012 /* 11.25 msec */ #define DISCOV_LE_SCAN_INT 0x0012 /* 11.25 msec */ #define DISCOV_LE_SCAN_INT_FAST 0x0060 /* 60 msec */ #define DISCOV_LE_SCAN_WIN_FAST 0x0030 /* 30 msec */ #define DISCOV_LE_SCAN_INT_CONN 0x0060 /* 60 msec */ #define DISCOV_LE_SCAN_WIN_CONN 0x0060 /* 60 msec */ #define DISCOV_LE_SCAN_INT_SLOW1 0x0800 /* 1.28 sec */ #define DISCOV_LE_SCAN_WIN_SLOW1 0x0012 /* 11.25 msec */ #define DISCOV_LE_SCAN_INT_SLOW2 0x1000 /* 2.56 sec */ #define DISCOV_LE_SCAN_WIN_SLOW2 0x0024 /* 22.5 msec */ #define DISCOV_CODED_SCAN_INT_FAST 0x0120 /* 180 msec */ #define DISCOV_CODED_SCAN_WIN_FAST 0x0090 /* 90 msec */ #define DISCOV_CODED_SCAN_INT_SLOW1 0x1800 /* 3.84 sec */ #define DISCOV_CODED_SCAN_WIN_SLOW1 0x0036 /* 33.75 msec */ #define DISCOV_CODED_SCAN_INT_SLOW2 0x3000 /* 7.68 sec */ #define DISCOV_CODED_SCAN_WIN_SLOW2 0x006c /* 67.5 msec */ #define DISCOV_LE_TIMEOUT 10240 /* msec */ #define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */ #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 #define DISCOV_BREDR_INQUIRY_LEN 0x08 #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ #define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */ #define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */ #define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */ #define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */ #define DISCOV_LE_ADV_MESH_MIN 0x00A0 /* 100 msec */ #define DISCOV_LE_ADV_MESH_MAX 0x00A0 /* 100 msec */ #define INTERVAL_TO_MS(x) (((x) * 10) / 0x10) #define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */ void mgmt_fill_version_info(void *ver); int mgmt_new_settings(struct hci_dev *hdev); void mgmt_index_added(struct hci_dev *hdev); void mgmt_index_removed(struct hci_dev *hdev); void mgmt_set_powered_failed(struct hci_dev *hdev, int err); void mgmt_power_on(struct hci_dev *hdev, int err); void __mgmt_power_off(struct hci_dev *hdev); void 
mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent); void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, u8 *name, u8 name_len); void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected); void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status); void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 value, u8 confirm_hint); int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type); int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 passkey, u8 entered); void mgmt_auth_failed(struct hci_conn *conn, u8 status); void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status); void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status); void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u64 instant); void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len); void mgmt_discovering(struct hci_dev *hdev, u8 discovering); void mgmt_suspending(struct hci_dev *hdev, u8 state); void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr, u8 addr_type); bool mgmt_powering_down(struct hci_dev *hdev); void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent); void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, bool persistent); void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 store_hint, u16 min_interval, u16 max_interval, u16 latency, u16 timeout); void mgmt_smp_complete(struct hci_conn *conn, bool complete); bool mgmt_get_connectable(struct hci_dev *hdev); u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev); void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle); int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, bdaddr_t *bdaddr, u8 addr_type); int 
hci_abort_conn(struct hci_conn *conn, u8 reason); u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u16 to_multiplier); void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, __u8 ltk[16], __u8 key_size); void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *bdaddr_type); #define SCO_AIRMODE_MASK 0x0003 #define SCO_AIRMODE_CVSD 0x0000 #define SCO_AIRMODE_TRANSP 0x0003 #define LOCAL_CODEC_ACL_MASK BIT(0) #define LOCAL_CODEC_SCO_MASK BIT(1) #define TRANSPORT_TYPE_MAX 0x04 #endif /* __HCI_CORE_H */
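The lifetime rules documented above distinguish object validity (hci_conn_get()/hci_conn_put()) from how long the underlying link is held (hci_conn_hold()/hci_conn_drop()). A minimal sketch of a caller honoring both, assuming a hypothetical channel attaching to an existing connection; my_channel_attach()/my_channel_detach() and the error policy are illustrative, only the ref-count helpers and hci_dev_lock()/hci_dev_unlock() come from this header:

/* Hypothetical caller, sketch only. */
static int my_channel_attach(struct hci_conn *conn)
{
	/* Object reference first: guarantees *conn stays dereferenceable. */
	hci_conn_get(conn);

	/* Runtime state must be re-checked under the lock; the connection
	 * may have been dropped between lookup and here.
	 */
	hci_dev_lock(conn->hdev);
	if (conn->state != BT_CONNECTED) {
		hci_dev_unlock(conn->hdev);
		hci_conn_put(conn);
		return -ENOTCONN;
	}

	/* Pin the link for as long as the channel runs on it. Per the
	 * FIXME above, hold()/drop() takes no object reference of its own,
	 * so the hci_conn_get() above is what keeps *conn valid.
	 */
	hci_conn_hold(conn);
	hci_dev_unlock(conn->hdev);
	return 0;
}

static void my_channel_detach(struct hci_conn *conn)
{
	hci_conn_drop(conn);	/* may arm the disconnect timeout */
	hci_conn_put(conn);	/* *conn must not be touched afterwards */
}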
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/alloc.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * HPFS bitmap operations */ #include "hpfs_fn.h" static void hpfs_claim_alloc(struct super_block *s, secno sec) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free != (unsigned)-1) { if (unlikely(!sbi->sb_n_free)) { hpfs_error(s, "free count underflow, allocating sector %08x", sec); sbi->sb_n_free = -1; return; } sbi->sb_n_free--; } } static void hpfs_claim_free(struct super_block *s, secno sec) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free != (unsigned)-1) { if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) { hpfs_error(s, "free count overflow, freeing sector %08x", sec); sbi->sb_n_free = -1; return; } sbi->sb_n_free++; } } static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free_dnodes != (unsigned)-1) { if (unlikely(!sbi->sb_n_free_dnodes)) { hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec); sbi->sb_n_free_dnodes = -1; return; } sbi->sb_n_free_dnodes--; } } static void hpfs_claim_dirband_free(struct super_block *s, secno sec) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free_dnodes != (unsigned)-1) { if (unlikely(sbi->sb_n_free_dnodes >=
sbi->sb_dirband_size / 4)) { hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec); sbi->sb_n_free_dnodes = -1; return; } sbi->sb_n_free_dnodes++; } } /* * Check if a sector is allocated in bitmap * This is really slow. Turned on only if chk==2 */ static int chk_if_allocated(struct super_block *s, secno sec, char *msg) { struct quad_buffer_head qbh; __le32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail; if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec); goto fail1; } hpfs_brelse4(&qbh); if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4; if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail; if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec); goto fail1; } hpfs_brelse4(&qbh); } return 0; fail1: hpfs_brelse4(&qbh); fail: return 1; } /* * Check if sector(s) have proper number and additionally check if they're * allocated in bitmap. */ int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg) { if (start + len < start || start < 0x12 || start + len > hpfs_sb(s)->sb_fs_size) { hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start); return 1; } if (hpfs_sb(s)->sb_chk>=2) { int i; for (i = 0; i < len; i++) if (chk_if_allocated(s, start + i, msg)) return 1; } return 0; } static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward) { struct quad_buffer_head qbh; __le32 *bmp; unsigned bs = near & ~0x3fff; unsigned nr = (near & 0x3fff) & ~(n - 1); /*unsigned mnr;*/ unsigned i, q; int a, b; secno ret = 0; if (n != 1 && n != 4) { hpfs_error(s, "Bad allocation size: %d", n); return 0; } if (bs != ~0x3fff) { if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls; } else { if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto uls; } if (!tstbits(bmp, nr, n + forward)) { ret = bs + nr; goto rt; } q = nr + n; b = 0; while ((a = tstbits(bmp, q, n + forward)) != 0) { q += a; if (n != 1) q = ((q-1)&~(n-1))+n; if (!b) { if (q>>5 != nr>>5) { b = 1; q = nr & 0x1f; } } else if (q > nr) break; } if (!a) { ret = bs + q; goto rt; } nr >>= 5; /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */ i = nr; do { if (!le32_to_cpu(bmp[i])) goto cont; if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont; q = i<<5; if (i > 0) { unsigned k = le32_to_cpu(bmp[i-1]); while (k & 0x80000000) { q--; k <<= 1; } } if (n != 1) q = ((q-1)&~(n-1))+n; while ((a = tstbits(bmp, q, n + forward)) != 0) { q += a; if (n != 1) q = ((q-1)&~(n-1))+n; if (q>>5 > i) break; } if (!a) { ret = bs + q; goto rt; } cont: i++, i &= 0x1ff; } while (i != nr); rt: if (ret) { if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { hpfs_error(s, "Allocation doesn't work! 
Wanted %d, allocated at %08x", n, ret); ret = 0; goto b; } bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f))); hpfs_mark_4buffers_dirty(&qbh); } b: hpfs_brelse4(&qbh); uls: return ret; } /* * Allocation strategy: 1) search place near the sector specified * 2) search bitmap where free sectors last found * 3) search all bitmaps * 4) search all bitmaps ignoring number of pre-allocated * sectors */ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward) { secno sec; int i; unsigned n_bmps; struct hpfs_sb_info *sbi = hpfs_sb(s); int f_p = 0; int near_bmp; if (forward < 0) { forward = -forward; f_p = 1; } n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14; if (near && near < sbi->sb_fs_size) { if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret; near_bmp = near >> 14; } else near_bmp = n_bmps / 2; /* if (b != -1) { if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) { b &= 0x0fffffff; goto ret; } if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret; */ if (!f_p) if (forward > sbi->sb_max_fwd_alloc) forward = sbi->sb_max_fwd_alloc; less_fwd: for (i = 0; i < n_bmps; i++) { if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) { sbi->sb_c_bitmap = near_bmp+i; goto ret; } if (!forward) { if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) { sbi->sb_c_bitmap = near_bmp-i-1; goto ret; } } else { if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) { sbi->sb_c_bitmap = near_bmp+i-n_bmps; goto ret; } } if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) { goto ret; } } if (!f_p) { if (forward) { sbi->sb_max_fwd_alloc = forward * 3 / 4; forward /= 2; goto less_fwd; } } sec = 0; ret: if (sec) { i = 0; do hpfs_claim_alloc(s, sec + i); while (unlikely(++i < n)); } if (sec && f_p) { for (i = 0; i < forward; i++) { if (!hpfs_alloc_if_possible(s, sec + n + i)) { hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i); sec = 0; break; } } } return sec; } static secno alloc_in_dirband(struct super_block *s, secno near) { unsigned nr = near; secno sec; struct hpfs_sb_info *sbi = hpfs_sb(s); if (nr < sbi->sb_dirband_start) nr = sbi->sb_dirband_start; if (nr >= sbi->sb_dirband_start + sbi->sb_dirband_size) nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4; nr -= sbi->sb_dirband_start; nr >>= 2; sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0); if (!sec) return 0; hpfs_claim_dirband_alloc(s, sec); return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start; } /* Alloc sector if it's free */ int hpfs_alloc_if_possible(struct super_block *s, secno sec) { struct quad_buffer_head qbh; __le32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end; if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) { bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f))); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_claim_alloc(s, sec); return 1; } hpfs_brelse4(&qbh); end: return 0; } /* Free sectors in bitmaps */ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) { struct quad_buffer_head qbh; __le32 *bmp; struct hpfs_sb_info *sbi = hpfs_sb(s); /*pr_info("2 - ");*/ if (!n) return; if (sec < 0x12) { hpfs_error(s, "Trying to free reserved sector %08x", sec); return; } sbi->sb_max_fwd_alloc += n > 0xffff ? 
0xffff : n; if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff; new_map: if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) { return; } new_tst: if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) { hpfs_error(s, "sector %08x not allocated", sec); hpfs_brelse4(&qbh); return; } bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f)); hpfs_claim_free(s, sec); if (!--n) { hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); return; } if (!(++sec & 0x3fff)) { hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); goto new_map; } goto new_tst; } /* * Check if there are at least n free dnodes on the filesystem. * Called before adding to dnode. If we run out of space while * splitting dnodes, it would corrupt dnode tree. */ int hpfs_check_free_dnodes(struct super_block *s, int n) { int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14; int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff; int i, j; __le32 *bmp; struct quad_buffer_head qbh; if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) { for (j = 0; j < 512; j++) { unsigned k; if (!le32_to_cpu(bmp[j])) continue; for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) { hpfs_brelse4(&qbh); return 0; } } } hpfs_brelse4(&qbh); i = 0; if (hpfs_sb(s)->sb_c_bitmap != -1) { bmp = hpfs_map_bitmap(s, b, &qbh, "chkdn1"); goto chk_bmp; } chk_next: if (i == b) i++; if (i >= n_bmps) return 1; bmp = hpfs_map_bitmap(s, i, &qbh, "chkdn2"); chk_bmp: if (bmp) { for (j = 0; j < 512; j++) { u32 k; if (!le32_to_cpu(bmp[j])) continue; for (k = 0xf; k; k <<= 4) if ((le32_to_cpu(bmp[j]) & k) == k) { if (!--n) { hpfs_brelse4(&qbh); return 0; } } } hpfs_brelse4(&qbh); } i++; goto chk_next; } void hpfs_free_dnode(struct super_block *s, dnode_secno dno) { if (hpfs_sb(s)->sb_chk) if (dno & 3) { hpfs_error(s, "hpfs_free_dnode: dnode %08x not aligned", dno); return; } if (dno < hpfs_sb(s)->sb_dirband_start || dno >= hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { hpfs_free_sectors(s, dno, 4); } else { struct quad_buffer_head qbh; __le32 *bmp; unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4; if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { return; } bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f)); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_claim_dirband_free(s, dno); } } struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near, dnode_secno *dno, struct quad_buffer_head *qbh) { struct dnode *d; if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) { if (!(*dno = alloc_in_dirband(s, near))) if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL; } else { if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) if (!(*dno = alloc_in_dirband(s, near))) return NULL; } if (!(d = hpfs_get_4sectors(s, *dno, qbh))) { hpfs_free_dnode(s, *dno); return NULL; } memset(d, 0, 2048); d->magic = cpu_to_le32(DNODE_MAGIC); d->first_free = cpu_to_le32(52); d->dirent[0] = 32; d->dirent[2] = 8; d->dirent[30] = 1; d->dirent[31] = 255; d->self = cpu_to_le32(*dno); return d; } struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *fno, struct buffer_head **bh) { struct fnode *f; if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL; if (!(f = hpfs_get_sector(s, *fno, bh))) { hpfs_free_sectors(s, *fno, 1); return NULL; } memset(f, 0, 512); f->magic = cpu_to_le32(FNODE_MAGIC); f->ea_offs = cpu_to_le16(0xc4); f->btree.n_free_nodes = 8; f->btree.first_free = cpu_to_le16(8); return f; } struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *ano, struct buffer_head **bh) 
{ struct anode *a; if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL; if (!(a = hpfs_get_sector(s, *ano, bh))) { hpfs_free_sectors(s, *ano, 1); return NULL; } memset(a, 0, 512); a->magic = cpu_to_le32(ANODE_MAGIC); a->self = cpu_to_le32(*ano); a->btree.n_free_nodes = 40; a->btree.n_used_nodes = 0; a->btree.first_free = cpu_to_le16(8); return a; } static unsigned find_run(__le32 *bmp, unsigned *idx) { unsigned len; while (tstbits(bmp, *idx, 1)) { (*idx)++; if (unlikely(*idx >= 0x4000)) return 0; } len = 1; while (!tstbits(bmp, *idx + len, 1)) len++; return len; } static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result) { int err; secno end; if (fatal_signal_pending(current)) return -EINTR; end = start + len; if (start < limit_start) start = limit_start; if (end > limit_end) end = limit_end; if (start >= end) return 0; if (end - start < minlen) return 0; err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0); if (err) return err; *result += end - start; return 0; } int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result) { int err = 0; struct hpfs_sb_info *sbi = hpfs_sb(s); unsigned idx, len, start_bmp, end_bmp; __le32 *bmp; struct quad_buffer_head qbh; *result = 0; if (!end || end > sbi->sb_fs_size) end = sbi->sb_fs_size; if (start >= sbi->sb_fs_size) return 0; if (minlen > 0x4000) return 0; if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) { hpfs_lock(s); if (sb_rdonly(s)) { err = -EROFS; goto unlock_1; } if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { err = -EIO; goto unlock_1; } idx = 0; while ((len = find_run(bmp, &idx)) && !err) { err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result); idx += len; } hpfs_brelse4(&qbh); unlock_1: hpfs_unlock(s); } start_bmp = start >> 14; end_bmp = (end + 0x3fff) >> 14; while (start_bmp < end_bmp && !err) { hpfs_lock(s); if (sb_rdonly(s)) { err = -EROFS; goto unlock_2; } if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) { err = -EIO; goto unlock_2; } idx = 0; while ((len = find_run(bmp, &idx)) && !err) { err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result); idx += len; } hpfs_brelse4(&qbh); unlock_2: hpfs_unlock(s); start_bmp++; } return err; }
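Nearly all of the arithmetic in this file decomposes a sector number the same way: each allocation bitmap covers 0x4000 (16384) sectors, each __le32 word covers 32 of them, and a set bit means "free" (allocation clears the bit, freeing sets it); directory-band sectors are tracked separately in the dnode bitmap at one bit per four sectors, hence the (sec - sb_dirband_start) / 4 conversions. A standalone sketch of that decomposition, with hypothetical helper names mirroring expressions such as le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hpfs_bit_pos {		/* hypothetical helper type */
	uint32_t bitmap;	/* which 16384-sector bitmap */
	uint32_t word;		/* which 32-bit word within it */
	uint32_t bit;		/* which bit within that word */
};

static struct hpfs_bit_pos sector_to_bit(uint32_t sec)
{
	struct hpfs_bit_pos p;

	p.bitmap = sec >> 14;		/* 0x4000 sectors per bitmap */
	p.word = (sec & 0x3fff) >> 5;	/* 32 sectors per word */
	p.bit = sec & 0x1f;
	return p;
}

/* A set bit means the sector is free (host-endian sketch; the real code
 * goes through le32_to_cpu() because on-disk words are little-endian).
 */
static bool sector_is_free(const uint32_t *bmp, uint32_t sec)
{
	return (bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1;
}

int main(void)
{
	uint32_t bmp[512] = { 0 };	/* one full bitmap: 512 words */
	struct hpfs_bit_pos p = sector_to_bit(0x12345);

	/* Prints: bitmap 4, word 282, bit 5 */
	printf("bitmap %u, word %u, bit %u\n", p.bitmap, p.word, p.bit);

	bmp[p.word] = 1u << p.bit;	/* mark sector 0x12345 free */
	printf("free: %d\n", sector_is_free(bmp, 0x12345));
	return 0;
}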
// SPDX-License-Identifier: GPL-2.0-or-later /* * Cryptographic API. * * Null algorithms, aka Much Ado About Nothing. * * These are needed for IPsec, and may be useful in general for * testing & debugging. * * The null cipher is compliant with RFC2410. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> */ #include <crypto/null.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/string.h> static DEFINE_MUTEX(crypto_default_null_skcipher_lock); static struct crypto_sync_skcipher *crypto_default_null_skcipher; static int crypto_default_null_skcipher_refcnt; static int null_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { if (slen > *dlen) return -EINVAL; memcpy(dst, src, slen); *dlen = slen; return 0; } static int null_init(struct shash_desc *desc) { return 0; } static int null_update(struct shash_desc *desc, const u8 *data, unsigned int len) { return 0; } static int null_final(struct shash_desc *desc, u8 *out) { return 0; } static int null_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return 0; } static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { return 0; } static int null_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return 0; } static int null_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { return 0; } static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { memcpy(dst, src, NULL_BLOCK_SIZE); } static int null_skcipher_crypt(struct skcipher_request *req) { struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes) { if (walk.src.virt.addr != walk.dst.virt.addr) memcpy(walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes); err = skcipher_walk_done(&walk, 0); } return err; } static struct shash_alg digest_null = { .digestsize = NULL_DIGEST_SIZE, .setkey = null_hash_setkey, .init = null_init, .update = null_update, .finup = null_digest, .digest = null_digest, .final = null_final, .base = { .cra_name = "digest_null", .cra_driver_name = "digest_null-generic", .cra_blocksize = NULL_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static struct skcipher_alg skcipher_null = { .base.cra_name = "ecb(cipher_null)", .base.cra_driver_name = "ecb-cipher_null", .base.cra_priority = 100, .base.cra_blocksize = NULL_BLOCK_SIZE, .base.cra_ctxsize = 0, .base.cra_module = THIS_MODULE, .min_keysize = NULL_KEY_SIZE, .max_keysize = NULL_KEY_SIZE, .ivsize = NULL_IV_SIZE, .setkey = null_skcipher_setkey, .encrypt = null_skcipher_crypt,
.decrypt = null_skcipher_crypt, }; static struct crypto_alg null_algs[] = { { .cra_name = "cipher_null", .cra_driver_name = "cipher_null-generic", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = NULL_BLOCK_SIZE, .cra_ctxsize = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = NULL_KEY_SIZE, .cia_max_keysize = NULL_KEY_SIZE, .cia_setkey = null_setkey, .cia_encrypt = null_crypt, .cia_decrypt = null_crypt } } }, { .cra_name = "compress_null", .cra_driver_name = "compress_null-generic", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_blocksize = NULL_BLOCK_SIZE, .cra_ctxsize = 0, .cra_module = THIS_MODULE, .cra_u = { .compress = { .coa_compress = null_compress, .coa_decompress = null_compress } } } }; MODULE_ALIAS_CRYPTO("compress_null"); MODULE_ALIAS_CRYPTO("digest_null"); MODULE_ALIAS_CRYPTO("cipher_null"); struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void) { struct crypto_sync_skcipher *tfm; mutex_lock(&crypto_default_null_skcipher_lock); tfm = crypto_default_null_skcipher; if (!tfm) { tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0); if (IS_ERR(tfm)) goto unlock; crypto_default_null_skcipher = tfm; } crypto_default_null_skcipher_refcnt++; unlock: mutex_unlock(&crypto_default_null_skcipher_lock); return tfm; } EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher); void crypto_put_default_null_skcipher(void) { mutex_lock(&crypto_default_null_skcipher_lock); if (!--crypto_default_null_skcipher_refcnt) { crypto_free_sync_skcipher(crypto_default_null_skcipher); crypto_default_null_skcipher = NULL; } mutex_unlock(&crypto_default_null_skcipher_lock); } EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); static int __init crypto_null_mod_init(void) { int ret = 0; ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs)); if (ret < 0) goto out; ret = crypto_register_shash(&digest_null); if (ret < 0) goto out_unregister_algs; ret = crypto_register_skcipher(&skcipher_null); if (ret < 0) goto out_unregister_shash; return 0; out_unregister_shash: crypto_unregister_shash(&digest_null); out_unregister_algs: crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); out: return ret; } static void __exit crypto_null_mod_fini(void) { crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); crypto_unregister_shash(&digest_null); crypto_unregister_skcipher(&skcipher_null); } subsys_initcall(crypto_null_mod_init); module_exit(crypto_null_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Null Cryptographic Algorithms");
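crypto_get_default_null_skcipher() above hands out a single shared, refcounted "ecb(cipher_null)" transform for callers that structurally need an skcipher but no actual transformation, e.g. RFC 2410 NULL encryption in IPsec. A hedged usage sketch following the usual sync-skcipher request pattern (needs <crypto/null.h>, <crypto/skcipher.h> and <linux/scatterlist.h> in kernel context); null_skcipher_demo() and its buffer are illustrative:

/* Sketch only: take a reference on the shared null tfm, run an in-place
 * "encryption" (a no-op for cipher_null), then drop the reference.
 * buf must be scatterlist-addressable memory (not on the stack).
 */
static int null_skcipher_demo(u8 *buf, unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_get_default_null_skcipher();
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, len);
		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, NULL);

		/* cipher_null: returns 0, buf is byte-for-byte unchanged */
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_put_default_null_skcipher();
	return err;
}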
2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 
2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 
/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#include <linux/dma-fence.h>
#include <linux/ktime.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "drm_crtc_helper_internal.h"
#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * This helper library provides implementations of check and commit functions on
 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
 * also provides convenience implementations for the atomic state handling
 * callbacks for drivers which don't need to subclass the drm core structures to
 * add their own additional internal state.
 *
 * This library also provides default implementations for the check callback in
 * drm_atomic_helper_check() and for the commit callback with
 * drm_atomic_helper_commit(). But the individual stages and callbacks are
 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
 * together with a driver private modeset implementation.
 *
 * This library also provides implementations for all the legacy driver
 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
 * drm_atomic_helper_disable_plane(), and the various functions to implement
 * set_property callbacks. New drivers must not implement these functions
 * themselves but must use the provided helpers.
 *
 * The atomic helper uses the same function table structures as all other
 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
 * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
 * also shares the &struct drm_plane_helper_funcs function table with the plane
 * helpers.
 */
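
/*
 * Illustrative sketch (not part of the original file, guarded out so it does
 * not affect the build): a driver that is happy with the default check and
 * commit paths wires the two helpers documented above into its
 * &struct drm_mode_config_funcs. The structure name and the GEM-backed
 * fb_create callback are assumptions made for the sketch, not requirements.
 */
#if 0
#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_mode_config_funcs example_mode_config_funcs = {
	.fb_create	= drm_gem_fb_create,		/* assumed GEM-based driver */
	.atomic_check	= drm_atomic_helper_check,	/* default check helper */
	.atomic_commit	= drm_atomic_helper_commit,	/* default commit helper */
};
#endif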
static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *plane_state,
				struct drm_plane *plane)
{
	struct drm_crtc_state *crtc_state;

	if (old_plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   old_plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}
}

static int handle_conflicting_encoders(struct drm_atomic_state *state,
				       bool disable_conflicting_encoders)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_encoder *encoder;
	unsigned int encoder_mask = 0;
	int i, ret = 0;

	/*
	 * First loop, find all newly assigned encoders from the connectors
	 * part of the state. If the same encoder is assigned to multiple
	 * connectors bail out.
	 */
	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
		struct drm_encoder *new_encoder;

		if (!new_conn_state->crtc)
			continue;

		if (funcs->atomic_best_encoder)
			new_encoder = funcs->atomic_best_encoder(connector, state);
		else if (funcs->best_encoder)
			new_encoder = funcs->best_encoder(connector);
		else
			new_encoder = drm_connector_get_single_encoder(connector);

		if (new_encoder) {
			if (encoder_mask & drm_encoder_mask(new_encoder)) {
				drm_dbg_atomic(connector->dev,
					       "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
					       new_encoder->base.id, new_encoder->name,
					       connector->base.id, connector->name);

				return -EINVAL;
			}

			encoder_mask |= drm_encoder_mask(new_encoder);
		}
	}

	if (!encoder_mask)
		return 0;

	/*
	 * Second loop, iterate over all connectors not part of the state.
	 *
	 * If a conflicting encoder is found and disable_conflicting_encoders
	 * is not set, an error is returned. Userspace can provide a solution
	 * through the atomic ioctl.
	 *
	 * If the flag is set conflicting connectors are removed from the CRTC
	 * and the CRTC is disabled if no encoder is left. This preserves
	 * compatibility with the legacy set_config behavior.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_crtc_state *crtc_state;

		if (drm_atomic_get_new_connector_state(state, connector))
			continue;

		encoder = connector->state->best_encoder;
		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
			continue;

		if (!disable_conflicting_encoders) {
			drm_dbg_atomic(connector->dev,
				       "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
				       encoder->base.id, encoder->name,
				       connector->state->crtc->base.id,
				       connector->state->crtc->name,
				       connector->base.id, connector->name);
			ret = -EINVAL;
			goto out;
		}

		new_conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(new_conn_state)) {
			ret = PTR_ERR(new_conn_state);
			goto out;
		}

		drm_dbg_atomic(connector->dev,
			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
			       encoder->base.id, encoder->name,
			       new_conn_state->crtc->base.id,
			       new_conn_state->crtc->name,
			       connector->base.id, connector->name);

		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
		if (ret)
			goto out;

		if (!crtc_state->connector_mask) {
			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
								NULL);
			if (ret < 0)
				goto out;

			crtc_state->active = false;
		}
	}
out:
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
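
/*
 * Illustrative note on the duplicate check in the first loop above:
 * drm_encoder_mask() maps an encoder to a single bit
 * (1 << drm_encoder_index(encoder)), so a single pass over the connectors
 * in the state is enough to catch an encoder that two connectors claim:
 *
 *	if (encoder_mask & drm_encoder_mask(new_encoder))
 *		return -EINVAL;				(bit already set: conflict)
 *	encoder_mask |= drm_encoder_mask(new_encoder);	(remember this encoder)
 */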
static void
set_best_encoder(struct drm_atomic_state *state,
		 struct drm_connector_state *conn_state,
		 struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;

	if (conn_state->best_encoder) {
		/* Unset the encoder_mask in the old crtc state. */
		crtc = conn_state->connector->state->crtc;

		/* A NULL crtc is an error here because we should have
		 * duplicated a NULL best_encoder when crtc was NULL.
		 * As an exception restoring duplicated atomic state
		 * during resume is allowed, so don't warn when
		 * best_encoder is equal to encoder we intend to set.
		 */
		WARN_ON(!crtc && encoder != conn_state->best_encoder);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask &=
				~drm_encoder_mask(conn_state->best_encoder);
		}
	}

	if (encoder) {
		crtc = conn_state->crtc;
		WARN_ON(!crtc);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask |=
				drm_encoder_mask(encoder);
		}
	}

	conn_state->best_encoder = encoder;
}

static void
steal_encoder(struct drm_atomic_state *state,
	      struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state,
					   new_connector_state, i) {
		struct drm_crtc *encoder_crtc;

		if (new_connector_state->best_encoder != encoder)
			continue;

		encoder_crtc = old_connector_state->crtc;

		drm_dbg_atomic(encoder->dev,
			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
			       encoder->base.id, encoder->name,
			       encoder_crtc->base.id, encoder_crtc->name);

		set_best_encoder(state, new_connector_state, NULL);

		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
		crtc_state->connectors_changed = true;

		return;
	}
}
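
/*
 * Illustrative walk-through (assumed scenario, not part of the original
 * file): when a new state moves an encoder from connector A to connector B,
 * the two helpers above cooperate roughly as follows:
 *
 *	1. the routing update for B picks the encoder;
 *	2. steal_encoder() finds A, whose new state still carries the
 *	   duplicated encoder pointer, clears it via
 *	   set_best_encoder(state, a_conn_state, NULL) and marks A's old
 *	   CRTC state as connectors_changed;
 *	3. set_best_encoder(state, b_conn_state, encoder) then moves the
 *	   encoder's bit into the new CRTC's &drm_crtc_state.encoder_mask.
 */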
static int
update_connector_routing(struct drm_atomic_state *state,
			 struct drm_connector *connector,
			 struct drm_connector_state *old_connector_state,
			 struct drm_connector_state *new_connector_state,
			 bool added_by_user)
{
	const struct drm_connector_helper_funcs *funcs;
	struct drm_encoder *new_encoder;
	struct drm_crtc_state *crtc_state;

	drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
		       connector->base.id, connector->name);

	if (old_connector_state->crtc != new_connector_state->crtc) {
		if (old_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state,
								   old_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}

		if (new_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state,
								   new_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}
	}

	if (!new_connector_state->crtc) {
		drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
			       connector->base.id, connector->name);

		set_best_encoder(state, new_connector_state, NULL);

		return 0;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state,
						   new_connector_state->crtc);
	/*
	 * For compatibility with legacy users, we want to make sure that
	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
	 * which would result in anything else must be considered invalid, to
	 * avoid turning on new displays on dead connectors.
	 *
	 * Since the connector can be unregistered at any point during an
	 * atomic check or commit, this is racy. But that's OK: all we care
	 * about is ensuring that userspace can't do anything but shut off the
	 * display on a connector that was destroyed after it's been notified,
	 * not before.
	 *
	 * Additionally, we also want to ignore connector registration when
	 * we're trying to restore an atomic state during system resume since
	 * there's a chance the connector may have been destroyed during the
	 * process, but it's better to ignore that than cause
	 * drm_atomic_helper_resume() to fail.
	 *
	 * Last, we want to ignore connector registration when the connector
	 * was not pulled in the atomic state by user-space (ie, was pulled
	 * in by the driver, e.g. when updating a DP-MST stream).
	 */
	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
	    added_by_user && crtc_state->active) {
		drm_dbg_atomic(connector->dev, "[CONNECTOR:%d:%s] is not registered\n",
			       connector->base.id, connector->name);
		return -EINVAL;
	}

	funcs = connector->helper_private;

	if (funcs->atomic_best_encoder)
		new_encoder = funcs->atomic_best_encoder(connector, state);
	else if (funcs->best_encoder)
		new_encoder = funcs->best_encoder(connector);
	else
		new_encoder = drm_connector_get_single_encoder(connector);

	if (!new_encoder) {
		drm_dbg_atomic(connector->dev,
			       "No suitable encoder found for [CONNECTOR:%d:%s]\n",
			       connector->base.id, connector->name);
		return -EINVAL;
	}

	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
		drm_dbg_atomic(connector->dev,
			       "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
			       new_encoder->base.id, new_encoder->name,
			       new_connector_state->crtc->base.id,
			       new_connector_state->crtc->name);
		return -EINVAL;
	}

	if (new_encoder == new_connector_state->best_encoder) {
		set_best_encoder(state, new_connector_state, new_encoder);

		drm_dbg_atomic(connector->dev,
			       "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
			       connector->base.id, connector->name,
			       new_encoder->base.id, new_encoder->name,
			       new_connector_state->crtc->base.id,
			       new_connector_state->crtc->name);

		return 0;
	}

	steal_encoder(state, new_encoder);

	set_best_encoder(state, new_connector_state, new_encoder);

	crtc_state->connectors_changed = true;

	drm_dbg_atomic(connector->dev,
		       "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
		       connector->base.id, connector->name,
		       new_encoder->base.id, new_encoder->name,
		       new_connector_state->crtc->base.id,
		       new_connector_state->crtc->name);

	return 0;
}
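
/*
 * Illustrative note (assumed detail, not part of the original file):
 * drm_encoder_crtc_ok() used above boils down to a possible_crtcs bitmask
 * test, roughly
 *
 *	!!(encoder->possible_crtcs & drm_crtc_mask(crtc))
 *
 * so routing fails with -EINVAL when the chosen encoder cannot drive the
 * CRTC the connector was assigned to. Drivers with a one-to-one
 * connector/encoder wiring need none of the best-encoder hooks consulted
 * above, since drm_connector_get_single_encoder() serves as the fallback.
 */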
static int
mode_fixup(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;
	int ret;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
	}

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);

		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
			continue;

		new_crtc_state =
			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call ->mode_fixup twice.
		 */
		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		bridge = drm_bridge_chain_get_first_bridge(encoder);
		ret = drm_atomic_bridge_chain_check(bridge,
						    new_crtc_state,
						    new_conn_state);
		if (ret) {
			drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
			return ret;
		}

		if (funcs && funcs->atomic_check) {
			ret = funcs->atomic_check(encoder, new_crtc_state,
						  new_conn_state);
			if (ret) {
				drm_dbg_atomic(encoder->dev,
					       "[ENCODER:%d:%s] check failed\n",
					       encoder->base.id, encoder->name);
				return ret;
			}
		} else if (funcs && funcs->mode_fixup) {
			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
						&new_crtc_state->adjusted_mode);
			if (!ret) {
				drm_dbg_atomic(encoder->dev,
					       "[ENCODER:%d:%s] fixup failed\n",
					       encoder->base.id, encoder->name);
				return -EINVAL;
			}
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if