// SPDX-License-Identifier: GPL-2.0-or-later
      /*
       *        IPV4 GSO/GRO offload support
       *        Linux INET implementation
       *
       *        GRE GSO support
       */
      
      #include <linux/skbuff.h>
      #include <linux/init.h>
      #include <net/protocol.h>
      #include <net/gre.h>
      
      static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                                             netdev_features_t features)
      {
              int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
              struct sk_buff *segs = ERR_PTR(-EINVAL);
              u16 mac_offset = skb->mac_header;
              __be16 protocol = skb->protocol;
              u16 mac_len = skb->mac_len;
              int gre_offset, outer_hlen;
              bool need_csum, gso_partial;
      
        if (!skb->encapsulation)
                      goto out;
      
              if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
                      goto out;
      
              if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
                      goto out;
      
              /* setup inner skb. */
              skb->encapsulation = 0;
              SKB_GSO_CB(skb)->encap_level = 0;
              __skb_pull(skb, tnl_hlen);
              skb_reset_mac_header(skb);
              skb_set_network_header(skb, skb_inner_network_offset(skb));
              skb->mac_len = skb_inner_network_offset(skb);
              skb->protocol = skb->inner_protocol;
      
              need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
              skb->encap_hdr_csum = need_csum;
      
              features &= skb->dev->hw_enc_features;
      
              /* segment inner packet. */
              segs = skb_mac_gso_segment(skb, features);
              if (IS_ERR_OR_NULL(segs)) {
                      skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
                                           mac_len);
                      goto out;
              }
      
              gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
      
              outer_hlen = skb_tnl_header_len(skb);
              gre_offset = outer_hlen - tnl_hlen;
              skb = segs;
              do {
                      struct gre_base_hdr *greh;
                      __sum16 *pcsum;
      
                      /* Set up inner headers if we are offloading inner checksum */
                      if (skb->ip_summed == CHECKSUM_PARTIAL) {
                              skb_reset_inner_headers(skb);
                              skb->encapsulation = 1;
                      }
      
                      skb->mac_len = mac_len;
                      skb->protocol = protocol;
      
                      __skb_push(skb, outer_hlen);
                      skb_reset_mac_header(skb);
                      skb_set_network_header(skb, mac_len);
                      skb_set_transport_header(skb, gre_offset);
      
                      if (!need_csum)
                              continue;
      
                      greh = (struct gre_base_hdr *)skb_transport_header(skb);
                      pcsum = (__sum16 *)(greh + 1);
      
                      if (gso_partial && skb_is_gso(skb)) {
                              unsigned int partial_adj;
      
                              /* Adjust checksum to account for the fact that
                               * the partial checksum is based on actual size
                               * whereas headers should be based on MSS size.
                               */
                              partial_adj = skb->len + skb_headroom(skb) -
                                            SKB_GSO_CB(skb)->data_offset -
                                            skb_shinfo(skb)->gso_size;
                              *pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
                      } else {
                              *pcsum = 0;
                      }
      
                      *(pcsum + 1) = 0;
                      *pcsum = gso_make_checksum(skb, 0);
              } while ((skb = skb->next));
      out:
        return segs;
      }
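
/*
 * Illustrative example (not from the original source): for a typical
 * Ethernet + IPv4 + GRE-with-checksum packet, the offsets used above work
 * out as follows, assuming mac_len = 14 and a 20 byte outer IPv4 header:
 *
 *      tnl_hlen   = 4 (GRE base) + 4 (checksum word)    =  8
 *      outer_hlen = skb_tnl_header_len() = 14 + 20 + 8  = 42
 *      gre_offset = outer_hlen - tnl_hlen = 14 + 20     = 34
 *
 * i.e. after __skb_push(skb, outer_hlen) the transport header set at
 * gre_offset points at the GRE base header of each segment.
 */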
      
      static struct sk_buff *gre_gro_receive(struct list_head *head,
                                             struct sk_buff *skb)
      {
              struct sk_buff *pp = NULL;
              struct sk_buff *p;
              const struct gre_base_hdr *greh;
              unsigned int hlen, grehlen;
              unsigned int off;
              int flush = 1;
              struct packet_offload *ptype;
              __be16 type;
      
        if (NAPI_GRO_CB(skb)->encap_mark)
                goto out;

        NAPI_GRO_CB(skb)->encap_mark = 1;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*greh);
        greh = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                greh = skb_gro_header_slow(skb, hlen, off);
                      if (unlikely(!greh))
                              goto out;
              }
      
        /* Only support version 0 and the K (key) and C (csum) flags. Note
         * that although support for the S (seq#) flag could easily be added
         * for GRO, it is problematic for GSO and hence cannot be enabled
         * here: a GRO'd packet may end up in the forwarding path and would
         * then require GSO support to break it up correctly.
         */
        if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
                      goto out;
      
              /* We can only support GRE_CSUM if we can track the location of
               * the GRE header.  In the case of FOU/GUE we cannot because the
               * outer UDP header displaces the GRE header leaving us in a state
               * of limbo.
               */
        if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
                      goto out;
      
        type = greh->protocol;

        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
              if (!ptype)
                      goto out_unlock;
      
              grehlen = GRE_HEADER_SECTION;
      
        if (greh->flags & GRE_KEY)
                grehlen += GRE_HEADER_SECTION;

        if (greh->flags & GRE_CSUM)
                grehlen += GRE_HEADER_SECTION;

        hlen = off + grehlen;
        if (skb_gro_header_hard(skb, hlen)) {
                greh = skb_gro_header_slow(skb, hlen, off);
                      if (unlikely(!greh))
                              goto out_unlock;
              }
      
              /* Don't bother verifying checksum if we're going to flush anyway. */
        if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
                      if (skb_gro_checksum_simple_validate(skb))
                              goto out_unlock;
      
                      skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
                                                   null_compute_pseudo);
              }
      
        list_for_each_entry(p, head, list) {
                      const struct gre_base_hdr *greh2;
      
                      if (!NAPI_GRO_CB(p)->same_flow)
                              continue;
      
                      /* The following checks are needed to ensure only pkts
                       * from the same tunnel are considered for aggregation.
                       * The criteria for "the same tunnel" includes:
                       * 1) same version (we only support version 0 here)
                       * 2) same protocol (we only support ETH_P_IP for now)
                       * 3) same set of flags
                       * 4) same key if the key field is present.
                       */
                      greh2 = (struct gre_base_hdr *)(p->data + off);
      
                      if (greh2->flags != greh->flags ||
                          greh2->protocol != greh->protocol) {
                              NAPI_GRO_CB(p)->same_flow = 0;
                              continue;
                      }
                      if (greh->flags & GRE_KEY) {
                              /* compare keys */
                              if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
                                      NAPI_GRO_CB(p)->same_flow = 0;
                                      continue;
                              }
                      }
              }
      
        skb_gro_pull(skb, grehlen);

        /* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
        skb_gro_postpull_rcsum(skb, greh, grehlen);

        pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
              flush = 0;
      
      out_unlock:
        rcu_read_unlock();
out:
        skb_gro_flush_final(skb, pp, flush);

        return pp;
      }
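
/*
 * Illustrative example (not from the original source): with both GRE_CSUM
 * and GRE_KEY set, grehlen above accumulates to 4 (base) + 4 (checksum and
 * reserved word) + 4 (key) = 12 bytes, which is exactly what is pulled
 * before handing the inner packet to the next gro_receive callback.  The
 * checksum word, when present, always occupies the first optional section
 * right after the base header, which is why gre_gso_segment() can treat
 * (greh + 1) as a __sum16 pointer.
 */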
      
      static int gre_gro_complete(struct sk_buff *skb, int nhoff)
      {
              struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
              struct packet_offload *ptype;
              unsigned int grehlen = sizeof(*greh);
              int err = -ENOENT;
              __be16 type;
      
              skb->encapsulation = 1;
              skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
      
              type = greh->protocol;
              if (greh->flags & GRE_KEY)
                      grehlen += GRE_HEADER_SECTION;
      
              if (greh->flags & GRE_CSUM)
                      grehlen += GRE_HEADER_SECTION;
      
              rcu_read_lock();
              ptype = gro_find_complete_by_type(type);
              if (ptype)
                      err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
      
              rcu_read_unlock();
      
              skb_set_inner_mac_header(skb, nhoff + grehlen);
      
              return err;
      }
      
      static const struct net_offload gre_offload = {
              .callbacks = {
                      .gso_segment = gre_gso_segment,
                      .gro_receive = gre_gro_receive,
                      .gro_complete = gre_gro_complete,
              },
      };
      
      static int __init gre_offload_init(void)
      {
              int err;
      
              err = inet_add_offload(&gre_offload, IPPROTO_GRE);
      #if IS_ENABLED(CONFIG_IPV6)
              if (err)
                      return err;
      
              err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
              if (err)
                      inet_del_offload(&gre_offload, IPPROTO_GRE);
      #endif
      
              return err;
      }
      device_initcall(gre_offload_init);
      // SPDX-License-Identifier: GPL-2.0-or-later
      /*
       *        Anycast support for IPv6
       *        Linux INET6 implementation
       *
       *        Authors:
       *        David L Stevens (dlstevens@us.ibm.com)
       *
       *        based heavily on net/ipv6/mcast.c
       */
      
      #include <linux/capability.h>
      #include <linux/module.h>
      #include <linux/errno.h>
      #include <linux/types.h>
      #include <linux/random.h>
      #include <linux/string.h>
      #include <linux/socket.h>
      #include <linux/sockios.h>
      #include <linux/net.h>
      #include <linux/in6.h>
      #include <linux/netdevice.h>
      #include <linux/if_arp.h>
      #include <linux/route.h>
      #include <linux/init.h>
      #include <linux/proc_fs.h>
      #include <linux/seq_file.h>
      #include <linux/slab.h>
      
      #include <net/net_namespace.h>
      #include <net/sock.h>
      #include <net/snmp.h>
      
      #include <net/ipv6.h>
      #include <net/protocol.h>
      #include <net/if_inet6.h>
      #include <net/ndisc.h>
      #include <net/addrconf.h>
      #include <net/ip6_route.h>
      
      #include <net/checksum.h>
      
      #define IN6_ADDR_HSIZE_SHIFT        8
      #define IN6_ADDR_HSIZE                BIT(IN6_ADDR_HSIZE_SHIFT)
/* anycast address hash table */
      static struct hlist_head inet6_acaddr_lst[IN6_ADDR_HSIZE];
      static DEFINE_SPINLOCK(acaddr_hash_lock);
      
      static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
      
      static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr)
      {
        u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
      
              return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
      }
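
/*
 * Illustrative note (not from the original source): hash_32(val, 8) returns
 * a bucket index in [0, 255], matching the IN6_ADDR_HSIZE (BIT(8) == 256)
 * buckets of inet6_acaddr_lst above.
 */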
      
      /*
       *        socket join an anycast group
       */
      
      int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
      {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net_device *dev = NULL;
        struct inet6_dev *idev;
        struct ipv6_ac_socklist *pac;
        struct net *net = sock_net(sk);
              int        ishost = !net->ipv6.devconf_all->forwarding;
              int        err = 0;
      
              ASSERT_RTNL();
      
        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
        if (ipv6_addr_is_multicast(addr))
                return -EINVAL;

        if (ifindex)
                dev = __dev_get_by_index(net, ifindex);

        if (ipv6_chk_addr_and_flags(net, addr, dev, true, 0, IFA_F_TENTATIVE))
                return -EINVAL;

        pac = sock_kmalloc(sk, sizeof(struct ipv6_ac_socklist), GFP_KERNEL);
        if (!pac)
                return -ENOMEM;
        pac->acl_next = NULL;
              pac->acl_addr = *addr;
      
              if (ifindex == 0) {
                      struct rt6_info *rt;
      
                rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
                if (rt) {
                        dev = rt->dst.dev;
                        ip6_rt_put(rt);
                } else if (ishost) {
                              err = -EADDRNOTAVAIL;
                              goto error;
                      } else {
                              /* router, no matching interface: just pick one */
                              dev = __dev_get_by_flags(net, IFF_UP,
                                                       IFF_UP | IFF_LOOPBACK);
                      }
              }
      
        if (!dev) {
                      err = -ENODEV;
                      goto error;
              }
      
        idev = __in6_dev_get(dev);
        if (!idev) {
                if (ifindex)
                              err = -ENODEV;
                      else
                              err = -EADDRNOTAVAIL;
                      goto error;
              }
              /* reset ishost, now that we have a specific device */
              ishost = !idev->cnf.forwarding;
      
              pac->acl_ifindex = dev->ifindex;
      
              /* XXX
               * For hosts, allow link-local or matching prefix anycasts.
               * This obviates the need for propagating anycast routes while
               * still allowing some non-router anycast participation.
               */
              if (!ipv6_chk_prefix(addr, dev)) {
                      if (ishost)
                              err = -EADDRNOTAVAIL;
                      if (err)
                              goto error;
              }
      
        err = __ipv6_dev_ac_inc(idev, addr);
        if (!err) {
                pac->acl_next = np->ipv6_ac_list;
                      np->ipv6_ac_list = pac;
                      pac = NULL;
              }
      
      error:
              if (pac)
                sock_kfree_s(sk, pac, sizeof(*pac));
              return err;
      }
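
/*
 * Illustrative userspace sketch (not part of this file): ipv6_sock_ac_join()
 * is reached via the IPV6_JOIN_ANYCAST socket option, roughly as below.
 * Error handling is omitted and the interface/address values are made up.
 *
 *      struct ipv6_mreq mreq = {
 *              .ipv6mr_interface = if_nametoindex("eth0"),
 *      };
 *
 *      inet_pton(AF_INET6, "2001:db8::1", &mreq.ipv6mr_multiaddr);
 *      setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_ANYCAST, &mreq, sizeof(mreq));
 */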
      
      /*
       *        socket leave an anycast group
       */
      int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
      {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net_device *dev;
        struct ipv6_ac_socklist *pac, *prev_pac;
        struct net *net = sock_net(sk);
      
              ASSERT_RTNL();
      
              prev_pac = NULL;
        for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) {
                if ((ifindex == 0 || pac->acl_ifindex == ifindex) &&
                     ipv6_addr_equal(&pac->acl_addr, addr))
                              break;
                      prev_pac = pac;
              }
              if (!pac)
                      return -ENOENT;
        if (prev_pac)
                prev_pac->acl_next = pac->acl_next;
        else
                np->ipv6_ac_list = pac->acl_next;

        dev = __dev_get_by_index(net, pac->acl_ifindex);
        if (dev)
                ipv6_dev_ac_dec(dev, &pac->acl_addr);

        sock_kfree_s(sk, pac, sizeof(*pac));
        return 0;
      }
      
      void ipv6_sock_ac_close(struct sock *sk)
      {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net_device *dev = NULL;
        struct ipv6_ac_socklist *pac;
        struct net *net = sock_net(sk);
        int     prev_index;

        if (!np->ipv6_ac_list)
                      return;
      
              rtnl_lock();
              pac = np->ipv6_ac_list;
              np->ipv6_ac_list = NULL;
      
              prev_index = 0;
              while (pac) {
                struct ipv6_ac_socklist *next = pac->acl_next;

                if (pac->acl_ifindex != prev_index) {
                        dev = __dev_get_by_index(net, pac->acl_ifindex);
                        prev_index = pac->acl_ifindex;
                }
                if (dev)
                        ipv6_dev_ac_dec(dev, &pac->acl_addr);
                sock_kfree_s(sk, pac, sizeof(*pac));
                      pac = next;
              }
        rtnl_unlock();
      }
      
      static void ipv6_add_acaddr_hash(struct net *net, struct ifacaddr6 *aca)
      {
              unsigned int hash = inet6_acaddr_hash(net, &aca->aca_addr);
      
              spin_lock(&acaddr_hash_lock);
        hlist_add_head_rcu(&aca->aca_addr_lst, &inet6_acaddr_lst[hash]);
        spin_unlock(&acaddr_hash_lock);
      }
      
      static void ipv6_del_acaddr_hash(struct ifacaddr6 *aca)
      {
        spin_lock(&acaddr_hash_lock);
        hlist_del_init_rcu(&aca->aca_addr_lst);
        spin_unlock(&acaddr_hash_lock);
      }
      
      static void aca_get(struct ifacaddr6 *aca)
      {
        refcount_inc(&aca->aca_refcnt);
      }
      
      static void aca_free_rcu(struct rcu_head *h)
      {
              struct ifacaddr6 *aca = container_of(h, struct ifacaddr6, rcu);
      
              fib6_info_release(aca->aca_rt);
              kfree(aca);
      }
      
      static void aca_put(struct ifacaddr6 *ac)
      {
        if (refcount_dec_and_test(&ac->aca_refcnt)) {
                      call_rcu(&ac->rcu, aca_free_rcu);
              }
      }
      
      static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
                                         const struct in6_addr *addr)
      {
              struct ifacaddr6 *aca;
      
        aca = kzalloc(sizeof(*aca), GFP_ATOMIC);
              if (!aca)
                      return NULL;
      
        aca->aca_addr = *addr;
        fib6_info_hold(f6i);
        aca->aca_rt = f6i;
              INIT_HLIST_NODE(&aca->aca_addr_lst);
              aca->aca_users = 1;
              /* aca_tstamp should be updated upon changes */
              aca->aca_cstamp = aca->aca_tstamp = jiffies;
              refcount_set(&aca->aca_refcnt, 1);
      
              return aca;
      }
      
      /*
       *        device anycast group inc (add if not found)
       */
      int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
      {
              struct ifacaddr6 *aca;
              struct fib6_info *f6i;
              struct net *net;
              int err;
      
        ASSERT_RTNL();

        write_lock_bh(&idev->lock);
              if (idev->dead) {
                      err = -ENODEV;
                      goto out;
              }
      
        for (aca = idev->ac_list; aca; aca = aca->aca_next) {
                if (ipv6_addr_equal(&aca->aca_addr, addr)) {
                        aca->aca_users++;
                              err = 0;
                              goto out;
                      }
              }
      
        net = dev_net(idev->dev);
              f6i = addrconf_f6i_alloc(net, idev, addr, true, GFP_ATOMIC);
              if (IS_ERR(f6i)) {
                      err = PTR_ERR(f6i);
                      goto out;
              }
        aca = aca_alloc(f6i, addr);
              if (!aca) {
                      fib6_info_release(f6i);
                      err = -ENOMEM;
                      goto out;
              }
      
              aca->aca_next = idev->ac_list;
              idev->ac_list = aca;
      
        /* Hold a reference for the addrconf_join_solict() call below before
         * we unlock; the entry is already exposed via idev->ac_list.
         */
        aca_get(aca);
        write_unlock_bh(&idev->lock);

        ipv6_add_acaddr_hash(net, aca);
      
              ip6_ins_rt(net, f6i);
      
              addrconf_join_solict(idev->dev, &aca->aca_addr);
      
              aca_put(aca);
              return 0;
      out:
        write_unlock_bh(&idev->lock);
        return err;
      }
      
      /*
       *        device anycast group decrement
       */
      int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
      {
              struct ifacaddr6 *aca, *prev_aca;
      
        ASSERT_RTNL();

        write_lock_bh(&idev->lock);
        prev_aca = NULL;
        for (aca = idev->ac_list; aca; aca = aca->aca_next) {
                if (ipv6_addr_equal(&aca->aca_addr, addr))
                              break;
                      prev_aca = aca;
              }
              if (!aca) {
                      write_unlock_bh(&idev->lock);
                      return -ENOENT;
              }
        if (--aca->aca_users > 0) {
                write_unlock_bh(&idev->lock);
                return 0;
        }
        if (prev_aca)
                prev_aca->aca_next = aca->aca_next;
        else
                idev->ac_list = aca->aca_next;
        write_unlock_bh(&idev->lock);
              ipv6_del_acaddr_hash(aca);
              addrconf_leave_solict(idev, &aca->aca_addr);
      
              ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
      
              aca_put(aca);
        return 0;
      }
      
      /* called with rtnl_lock() */
      static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
      {
        struct inet6_dev *idev = __in6_dev_get(dev);

        if (!idev)
                return -ENODEV;
        return __ipv6_dev_ac_dec(idev, addr);
      }
      
      void ipv6_ac_destroy_dev(struct inet6_dev *idev)
      {
              struct ifacaddr6 *aca;
      
        write_lock_bh(&idev->lock);
              while ((aca = idev->ac_list) != NULL) {
                      idev->ac_list = aca->aca_next;
                      write_unlock_bh(&idev->lock);
      
                      ipv6_del_acaddr_hash(aca);
      
                      addrconf_leave_solict(idev, &aca->aca_addr);
      
                      ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
      
                      aca_put(aca);
      
                      write_lock_bh(&idev->lock);
              }
        write_unlock_bh(&idev->lock);
      }
      
      /*
       *        check if the interface has this anycast address
       *        called with rcu_read_lock()
       */
      static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
      {
              struct inet6_dev *idev;
              struct ifacaddr6 *aca;
      
        idev = __in6_dev_get(dev);
        if (idev) {
                read_lock_bh(&idev->lock);
                for (aca = idev->ac_list; aca; aca = aca->aca_next)
                        if (ipv6_addr_equal(&aca->aca_addr, addr))
                                break;
                read_unlock_bh(&idev->lock);
                      return aca != NULL;
              }
              return false;
      }
      
      /*
       *        check if given interface (or any, if dev==0) has this anycast address
       */
      bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                               const struct in6_addr *addr)
      {
              struct net_device *nh_dev;
              struct ifacaddr6 *aca;
              bool found = false;
      
        rcu_read_lock();
        if (dev)
                found = ipv6_chk_acast_dev(dev, addr);
        else {
                unsigned int hash = inet6_acaddr_hash(net, addr);
      
                      hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
                                               aca_addr_lst) {
                              nh_dev = fib6_info_nh_dev(aca->aca_rt);
                              if (!nh_dev || !net_eq(dev_net(nh_dev), net))
                                      continue;
                              if (ipv6_addr_equal(&aca->aca_addr, addr)) {
                                      found = true;
                                      break;
                              }
                      }
              }
        rcu_read_unlock();
              return found;
      }
      
      /*        check if this anycast address is link-local on given interface or
       *        is global
       */
      bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
                                   const struct in6_addr *addr)
      {
        return ipv6_chk_acast_addr(net,
                                   (ipv6_addr_type(addr) & IPV6_ADDR_LINKLOCAL ?
                                          dev : NULL),
                                         addr);
      }
      
      #ifdef CONFIG_PROC_FS
      struct ac6_iter_state {
              struct seq_net_private p;
              struct net_device *dev;
              struct inet6_dev *idev;
      };
      
      #define ac6_seq_private(seq)        ((struct ac6_iter_state *)(seq)->private)
      
      static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
      {
              struct ifacaddr6 *im = NULL;
              struct ac6_iter_state *state = ac6_seq_private(seq);
              struct net *net = seq_file_net(seq);
      
              state->idev = NULL;
              for_each_netdev_rcu(net, state->dev) {
                      struct inet6_dev *idev;
                      idev = __in6_dev_get(state->dev);
                      if (!idev)
                              continue;
                      read_lock_bh(&idev->lock);
                      im = idev->ac_list;
                      if (im) {
                              state->idev = idev;
                              break;
                      }
                      read_unlock_bh(&idev->lock);
              }
              return im;
      }
      
      static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im)
      {
              struct ac6_iter_state *state = ac6_seq_private(seq);
      
              im = im->aca_next;
              while (!im) {
                      if (likely(state->idev != NULL))
                              read_unlock_bh(&state->idev->lock);
      
                      state->dev = next_net_device_rcu(state->dev);
                      if (!state->dev) {
                              state->idev = NULL;
                              break;
                      }
                      state->idev = __in6_dev_get(state->dev);
                      if (!state->idev)
                              continue;
                      read_lock_bh(&state->idev->lock);
                      im = state->idev->ac_list;
              }
              return im;
      }
      
      static struct ifacaddr6 *ac6_get_idx(struct seq_file *seq, loff_t pos)
      {
              struct ifacaddr6 *im = ac6_get_first(seq);
              if (im)
                      while (pos && (im = ac6_get_next(seq, im)) != NULL)
                              --pos;
              return pos ? NULL : im;
      }
      
      static void *ac6_seq_start(struct seq_file *seq, loff_t *pos)
              __acquires(RCU)
      {
              rcu_read_lock();
              return ac6_get_idx(seq, *pos);
      }
      
      static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
      {
              struct ifacaddr6 *im = ac6_get_next(seq, v);
      
              ++*pos;
              return im;
      }
      
      static void ac6_seq_stop(struct seq_file *seq, void *v)
              __releases(RCU)
      {
              struct ac6_iter_state *state = ac6_seq_private(seq);
      
              if (likely(state->idev != NULL)) {
                      read_unlock_bh(&state->idev->lock);
                      state->idev = NULL;
              }
              rcu_read_unlock();
      }
      
      static int ac6_seq_show(struct seq_file *seq, void *v)
      {
              struct ifacaddr6 *im = (struct ifacaddr6 *)v;
              struct ac6_iter_state *state = ac6_seq_private(seq);
      
              seq_printf(seq, "%-4d %-15s %pi6 %5d\n",
                         state->dev->ifindex, state->dev->name,
                         &im->aca_addr, im->aca_users);
              return 0;
      }
      
      static const struct seq_operations ac6_seq_ops = {
              .start        =        ac6_seq_start,
              .next        =        ac6_seq_next,
              .stop        =        ac6_seq_stop,
              .show        =        ac6_seq_show,
      };
      
      int __net_init ac6_proc_init(struct net *net)
      {
        if (!proc_create_net("anycast6", 0444, net->proc_net, &ac6_seq_ops,
                        sizeof(struct ac6_iter_state)))
                return -ENOMEM;
      
              return 0;
      }
      
      void ac6_proc_exit(struct net *net)
      {
              remove_proc_entry("anycast6", net->proc_net);
      }
      #endif
      
      /*        Init / cleanup code
       */
      int __init ipv6_anycast_init(void)
      {
              int i;
      
              for (i = 0; i < IN6_ADDR_HSIZE; i++)
                      INIT_HLIST_HEAD(&inet6_acaddr_lst[i]);
              return 0;
      }
      
      void ipv6_anycast_cleanup(void)
      {
              int i;
      
              spin_lock(&acaddr_hash_lock);
              for (i = 0; i < IN6_ADDR_HSIZE; i++)
                      WARN_ON(!hlist_empty(&inet6_acaddr_lst[i]));
              spin_unlock(&acaddr_hash_lock);
      }
      /* SPDX-License-Identifier: GPL-2.0-only */
      #ifndef __LICENSE_H
      #define __LICENSE_H
      
      static inline int license_is_gpl_compatible(const char *license)
      {
              return (strcmp(license, "GPL") == 0
                || strcmp(license, "GPL v2") == 0
                || strcmp(license, "GPL and additional rights") == 0
                || strcmp(license, "Dual BSD/GPL") == 0
                || strcmp(license, "Dual MIT/GPL") == 0
                || strcmp(license, "Dual MPL/GPL") == 0);
      }
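
/*
 * Illustrative usage sketch (not from the original header): a caller such
 * as the module loader might apply this check roughly as follows; "mod" is
 * a hypothetical structure carrying the string from the MODULE_LICENSE()
 * tag.
 *
 *      if (!license_is_gpl_compatible(mod->license))
 *              pr_warn("%s: module license '%s' taints kernel.\n",
 *                      mod->name, mod->license);
 */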
      
      #endif
      // SPDX-License-Identifier: GPL-2.0
      /*
       * Detect hard lockups on a system
       *
       * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
       *
       * Note: Most of this code is borrowed heavily from the original softlockup
       * detector, so thanks to Ingo for the initial implementation.
       * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
       * to those contributors as well.
       */
      
      #define pr_fmt(fmt) "NMI watchdog: " fmt
      
      #include <linux/nmi.h>
      #include <linux/atomic.h>
      #include <linux/module.h>
      #include <linux/sched/debug.h>
      
      #include <asm/irq_regs.h>
      #include <linux/perf_event.h>
      
      static DEFINE_PER_CPU(bool, hard_watchdog_warn);
      static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
      static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
      static DEFINE_PER_CPU(struct perf_event *, dead_event);
      static struct cpumask dead_events_mask;
      
      static unsigned long hardlockup_allcpu_dumped;
      static atomic_t watchdog_cpus = ATOMIC_INIT(0);
      
      notrace void arch_touch_nmi_watchdog(void)
      {
              /*
               * Using __raw here because some code paths have
               * preemption enabled.  If preemption is enabled
               * then interrupts should be enabled too, in which
               * case we shouldn't have to worry about the watchdog
               * going off.
               */
        raw_cpu_write(watchdog_nmi_touch, true);
      }
      EXPORT_SYMBOL(arch_touch_nmi_watchdog);
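
/*
 * Illustrative note (not from the original source): callers normally reach
 * this through touch_nmi_watchdog() from long-running code paths (e.g.
 * lengthy console output) so that the next perf NMI sample is ignored
 * instead of being reported as a hard lockup.
 */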
      
      #ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
      static DEFINE_PER_CPU(ktime_t, last_timestamp);
      static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
      static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;
      
      void watchdog_update_hrtimer_threshold(u64 period)
      {
              /*
               * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
               *
               * So it runs effectively with 2.5 times the rate of the NMI
               * watchdog. That means the hrtimer should fire 2-3 times before
               * the NMI watchdog expires. The NMI watchdog on x86 is based on
               * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
               * might run way faster than expected and the NMI fires in a
               * smaller period than the one deduced from the nominal CPU
               * frequency. Depending on the Turbo-Mode factor this might be fast
               * enough to get the NMI period smaller than the hrtimer watchdog
               * period and trigger false positives.
               *
               * The sample threshold is used to check in the NMI handler whether
               * the minimum time between two NMI samples has elapsed. That
               * prevents false positives.
               *
               * Set this to 4/5 of the actual watchdog threshold period so the
               * hrtimer is guaranteed to fire at least once within the real
               * watchdog threshold.
               */
              watchdog_hrtimer_sample_threshold = period * 2;
      }
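
/*
 * Illustrative example (not from the original source): with the default
 * watchdog_thresh of 10 seconds, the hrtimer period passed in here is
 * 10 * 2 / 5 = 4 seconds, so the sample threshold becomes 4 * 2 = 8
 * seconds, i.e. the 4/5 of the watchdog threshold described above.
 */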
      
      static bool watchdog_check_timestamp(void)
      {
              ktime_t delta, now = ktime_get_mono_fast_ns();
      
              delta = now - __this_cpu_read(last_timestamp);
              if (delta < watchdog_hrtimer_sample_threshold) {
                      /*
                       * If ktime is jiffies based, a stalled timer would prevent
                       * jiffies from being incremented and the filter would look
                       * at a stale timestamp and never trigger.
                       */
                      if (__this_cpu_inc_return(nmi_rearmed) < 10)
                              return false;
              }
              __this_cpu_write(nmi_rearmed, 0);
              __this_cpu_write(last_timestamp, now);
              return true;
      }
      #else
      static inline bool watchdog_check_timestamp(void)
      {
              return true;
      }
      #endif
      
      static struct perf_event_attr wd_hw_attr = {
              .type                = PERF_TYPE_HARDWARE,
              .config                = PERF_COUNT_HW_CPU_CYCLES,
              .size                = sizeof(struct perf_event_attr),
              .pinned                = 1,
              .disabled        = 1,
      };
      
      /* Callback function for perf event subsystem */
      static void watchdog_overflow_callback(struct perf_event *event,
                                             struct perf_sample_data *data,
                                             struct pt_regs *regs)
      {
              /* Ensure the watchdog never gets throttled */
              event->hw.interrupts = 0;
      
              if (__this_cpu_read(watchdog_nmi_touch) == true) {
                      __this_cpu_write(watchdog_nmi_touch, false);
                      return;
              }
      
              if (!watchdog_check_timestamp())
                      return;
      
        /* Check for a hard lockup by making sure our timer interrupt is
         * incrementing. The timer interrupt should have fired multiple
         * times before the perf counter overflowed. If it hasn't, that is
         * a good indication the CPU is stuck.
         */
              if (is_hardlockup()) {
                      int this_cpu = smp_processor_id();
      
                      /* only print hardlockups once */
                      if (__this_cpu_read(hard_watchdog_warn) == true)
                              return;
      
                      pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
                               this_cpu);
                      print_modules();
                      print_irqtrace_events(current);
                      if (regs)
                              show_regs(regs);
                      else
                              dump_stack();
      
                      /*
                       * Perform all-CPU dump only once to avoid multiple hardlockups
                       * generating interleaving traces
                       */
                      if (sysctl_hardlockup_all_cpu_backtrace &&
                                      !test_and_set_bit(0, &hardlockup_allcpu_dumped))
                              trigger_allbutself_cpu_backtrace();
      
                      if (hardlockup_panic)
                              nmi_panic(regs, "Hard LOCKUP");
      
                      __this_cpu_write(hard_watchdog_warn, true);
                      return;
              }
      
              __this_cpu_write(hard_watchdog_warn, false);
              return;
      }
      
      static int hardlockup_detector_event_create(void)
      {
              unsigned int cpu = smp_processor_id();
              struct perf_event_attr *wd_attr;
              struct perf_event *evt;
      
              wd_attr = &wd_hw_attr;
              wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
      
              /* Try to register using hardware perf events */
              evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
                                                     watchdog_overflow_callback, NULL);
              if (IS_ERR(evt)) {
                      pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
                               PTR_ERR(evt));
                      return PTR_ERR(evt);
              }
              this_cpu_write(watchdog_ev, evt);
              return 0;
      }
      
      /**
       * hardlockup_detector_perf_enable - Enable the local event
       */
      void hardlockup_detector_perf_enable(void)
      {
              if (hardlockup_detector_event_create())
                      return;
      
              /* use original value for check */
              if (!atomic_fetch_inc(&watchdog_cpus))
                      pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
      
              perf_event_enable(this_cpu_read(watchdog_ev));
      }
      
      /**
       * hardlockup_detector_perf_disable - Disable the local event
       */
      void hardlockup_detector_perf_disable(void)
      {
              struct perf_event *event = this_cpu_read(watchdog_ev);
      
              if (event) {
                      perf_event_disable(event);
                      this_cpu_write(watchdog_ev, NULL);
                      this_cpu_write(dead_event, event);
                      cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
                      atomic_dec(&watchdog_cpus);
              }
      }
      
      /**
       * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
       *
       * Called from lockup_detector_cleanup(). Serialized by the caller.
       */
      void hardlockup_detector_perf_cleanup(void)
      {
              int cpu;
      
              for_each_cpu(cpu, &dead_events_mask) {
                      struct perf_event *event = per_cpu(dead_event, cpu);
      
                /*
                 * Required because for_each_cpu() unconditionally reports
                 * CPU0 as set on UP kernels. Sigh.
                 */
                      if (event)
                              perf_event_release_kernel(event);
                      per_cpu(dead_event, cpu) = NULL;
              }
              cpumask_clear(&dead_events_mask);
      }
      
      /**
       * hardlockup_detector_perf_stop - Globally stop watchdog events
       *
       * Special interface for x86 to handle the perf HT bug.
       */
      void __init hardlockup_detector_perf_stop(void)
      {
              int cpu;
      
              lockdep_assert_cpus_held();
      
              for_each_online_cpu(cpu) {
                      struct perf_event *event = per_cpu(watchdog_ev, cpu);
      
                      if (event)
                              perf_event_disable(event);
              }
      }
      
      /**
       * hardlockup_detector_perf_restart - Globally restart watchdog events
       *
       * Special interface for x86 to handle the perf HT bug.
       */
      void __init hardlockup_detector_perf_restart(void)
      {
              int cpu;
      
              lockdep_assert_cpus_held();
      
              if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                      return;
      
              for_each_online_cpu(cpu) {
                      struct perf_event *event = per_cpu(watchdog_ev, cpu);
      
                      if (event)
                              perf_event_enable(event);
              }
      }
      
      /**
       * hardlockup_detector_perf_init - Probe whether NMI event is available at all
       */
      int __init hardlockup_detector_perf_init(void)
      {
              int ret = hardlockup_detector_event_create();
      
              if (ret) {
                      pr_info("Perf NMI watchdog permanently disabled\n");
              } else {
                      perf_event_release_kernel(this_cpu_read(watchdog_ev));
                      this_cpu_write(watchdog_ev, NULL);
              }
              return ret;
      }
      /* SPDX-License-Identifier: GPL-2.0 */
      #ifndef _SCSI_SCSI_CMND_H
      #define _SCSI_SCSI_CMND_H
      
      #include <linux/dma-mapping.h>
      #include <linux/blkdev.h>
      #include <linux/t10-pi.h>
      #include <linux/list.h>
      #include <linux/types.h>
      #include <linux/timer.h>
      #include <linux/scatterlist.h>
      #include <scsi/scsi_device.h>
      #include <scsi/scsi_request.h>
      
      struct Scsi_Host;
      struct scsi_driver;
      
/*
 * MAX_COMMAND_SIZE is:
 * the longest fixed-length SCSI CDB defined by the SCSI standard.
 * "Fixed-length" means commands whose size is determined by their opcode
 * and whose CDB does not carry a length specifier (unlike the
 * VARIABLE_LENGTH_CMD(0x7f) command). This is not strictly true, since the
 * SCSI standard also defines extended and vendor-specific commands that can
 * be bigger than 16 bytes; the kernel supports these using the same
 * infrastructure used for VARLEN CDBs. So in effect MAX_COMMAND_SIZE is the
 * largest command scsi-ml supports without a ULD specifying a cmd_len.
 */
      #define MAX_COMMAND_SIZE 16
      #if (MAX_COMMAND_SIZE > BLK_MAX_CDB)
      # error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB
      #endif
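
/*
 * Illustrative note (not from the original header): READ(16) (opcode 0x88)
 * is an example of the largest fixed-length CDB that fits here, while a
 * VARIABLE_LENGTH_CMD(0x7f) CDB carries an explicit ADDITIONAL CDB LENGTH
 * byte and is handled through the VARLEN infrastructure mentioned above.
 */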
      
      struct scsi_data_buffer {
              struct sg_table table;
              unsigned length;
      };
      
      /* embedded in scsi_cmnd */
      struct scsi_pointer {
              char *ptr;                /* data pointer */
              int this_residual;        /* left in this buffer */
              struct scatterlist *buffer;        /* which buffer */
              int buffers_residual;        /* how many buffers left */
      
              dma_addr_t dma_handle;
      
              volatile int Status;
              volatile int Message;
              volatile int have_data_in;
              volatile int sent_command;
              volatile int phase;
      };
      
      /* for scmd->flags */
      #define SCMD_TAGGED                (1 << 0)
      #define SCMD_UNCHECKED_ISA_DMA        (1 << 1)
      #define SCMD_INITIALIZED        (1 << 2)
      #define SCMD_LAST                (1 << 3)
      /* flags preserved across unprep / reprep */
      #define SCMD_PRESERVED_FLAGS        (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
      
      /* for scmd->state */
      #define SCMD_STATE_COMPLETE        0
      #define SCMD_STATE_INFLIGHT        1
      
      struct scsi_cmnd {
              struct scsi_request req;
              struct scsi_device *device;
              struct list_head list;  /* scsi_cmnd participates in queue lists */
              struct list_head eh_entry; /* entry for the host eh_cmd_q */
              struct delayed_work abort_work;
      
              struct rcu_head rcu;
      
        int eh_eflags;                /* Used by error handler */
      
              /*