// SPDX-License-Identifier: GPL-2.0-or-later
      /*
       * udp_diag.c        Module for monitoring UDP transport protocols sockets.
       *
       * Authors:        Pavel Emelyanov, <xemul@parallels.com>
       */
      
      
      #include <linux/module.h>
      #include <linux/inet_diag.h>
      #include <linux/udp.h>
      #include <net/udp.h>
      #include <net/udplite.h>
      #include <linux/sock_diag.h>
      
      static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
                              struct netlink_callback *cb,
                              const struct inet_diag_req_v2 *req,
                              struct nlattr *bc, bool net_admin)
      {
              if (!inet_diag_bc_sk(bc, sk))
                      return 0;
      
              return inet_sk_diag_fill(sk, NULL, skb, req,
                              sk_user_ns(NETLINK_CB(cb->skb).sk),
                              NETLINK_CB(cb->skb).portid,
                              cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin);
      }
      
      static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
                              const struct nlmsghdr *nlh,
                              const struct inet_diag_req_v2 *req)
      {
              int err = -EINVAL;
              struct sock *sk = NULL;
              struct sk_buff *rep;
              struct net *net = sock_net(in_skb->sk);
      
              rcu_read_lock();
              if (req->sdiag_family == AF_INET)
                      /* src and dst are swapped for historical reasons */
                      sk = __udp4_lib_lookup(net,
                                      req->id.idiag_src[0], req->id.idiag_sport,
                                      req->id.idiag_dst[0], req->id.idiag_dport,
                                      req->id.idiag_if, 0, tbl, NULL);
      #if IS_ENABLED(CONFIG_IPV6)
              else if (req->sdiag_family == AF_INET6)
                      sk = __udp6_lib_lookup(net,
                                      (struct in6_addr *)req->id.idiag_src,
                                      req->id.idiag_sport,
                                      (struct in6_addr *)req->id.idiag_dst,
                                      req->id.idiag_dport,
                                      req->id.idiag_if, 0, tbl, NULL);
      #endif
              if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                      sk = NULL;
              rcu_read_unlock();
              err = -ENOENT;
              if (!sk)
                      goto out_nosk;
      
              err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
              if (err)
                      goto out;
      
              err = -ENOMEM;
              rep = nlmsg_new(sizeof(struct inet_diag_msg) +
                              sizeof(struct inet_diag_meminfo) + 64,
                              GFP_KERNEL);
              if (!rep)
                      goto out;
      
              err = inet_sk_diag_fill(sk, NULL, rep, req,
                                 sk_user_ns(NETLINK_CB(in_skb).sk),
                                 NETLINK_CB(in_skb).portid,
                                 nlh->nlmsg_seq, 0, nlh,
                                 netlink_net_capable(in_skb, CAP_NET_ADMIN));
              if (err < 0) {
                      WARN_ON(err == -EMSGSIZE);
                      kfree_skb(rep);
                      goto out;
              }
              err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                                    MSG_DONTWAIT);
              if (err > 0)
                      err = 0;
      out:
              if (sk)
                      sock_put(sk);
      out_nosk:
              return err;
      }
      
/*
 * Walk every hash slot of @table and emit one diag record per socket
 * matching the filter @r, resuming from the position saved in
 * cb->args[] by a previous partial dump.
 */
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
		     struct netlink_callback *cb,
		     const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;

	/* cb->args[0]/[1]: hash slot and in-slot index to resume from */
	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	/* s_num only applies to the first (resumed) slot; reset it after */
	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
		struct udp_hslot *hslot = &table->hash[slot];
		struct sock *sk;

		num = 0;

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			struct inet_sock *inet = inet_sk(sk);

			/* foreign-netns sockets are invisible and not counted */
			if (!net_eq(sock_net(sk), net))
				continue;
			/* skip entries already emitted by a previous pass */
			if (num < s_num)
				goto next;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next;
			if (r->sdiag_family != AF_UNSPEC &&
					sk->sk_family != r->sdiag_family)
				goto next;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next;

			/* out of skb space: save position and stop the dump */
			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
				spin_unlock_bh(&hslot->lock);
				goto done;
			}
next:
			num++;
		}
		spin_unlock_bh(&hslot->lock);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;
}
      
/* inet_diag "dump all" callback for IPPROTO_UDP sockets. */
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	udp_dump(&udp_table, skb, cb, r, bc);
}
      
/* inet_diag "dump one" callback for IPPROTO_UDP sockets. */
static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
			     const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udp_table, in_skb, nlh, req);
}
      
/* Fill the rx/tx queue sizes reported in struct inet_diag_msg. */
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
		void *info)
{
	r->idiag_rqueue = udp_rqueue_get(sk);
	r->idiag_wqueue = sk_wmem_alloc_get(sk);
}
      
      #ifdef CONFIG_INET_DIAG_DESTROY
/*
 * Look up the socket described by @req in @tbl and destroy it
 * (SOCK_DESTROY).  Unlike the dump path, src/dst are used in their
 * natural (non-swapped) order here.
 *
 * Returns 0 on success, -EINVAL for an unsupported address family,
 * -ENOENT if no matching live socket (or cookie mismatch) is found,
 * otherwise the error from sock_diag_destroy().
 */
static int __udp_diag_destroy(struct sk_buff *in_skb,
			      const struct inet_diag_req_v2 *req,
			      struct udp_table *tbl)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	rcu_read_lock();

	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_if, 0, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		/* v4-mapped addresses must be looked up via the v4 path */
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = __udp4_lib_lookup(net,
					req->id.idiag_dst[3], req->id.idiag_dport,
					req->id.idiag_src[3], req->id.idiag_sport,
					req->id.idiag_if, 0, tbl, NULL);

		else
			sk = __udp6_lib_lookup(net,
					(struct in6_addr *)req->id.idiag_dst,
					req->id.idiag_dport,
					(struct in6_addr *)req->id.idiag_src,
					req->id.idiag_sport,
					req->id.idiag_if, 0, tbl, NULL);
	}
#endif
	else {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* Lockless lookup: take a reference unless the socket is dying. */
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;

	rcu_read_unlock();

	if (!sk)
		return -ENOENT;

	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_put(sk);
		return -ENOENT;
	}

	err = sock_diag_destroy(sk, ECONNABORTED);

	sock_put(sk);

	return err;
}
      
/* SOCK_DESTROY callback for IPPROTO_UDP sockets. */
static int udp_diag_destroy(struct sk_buff *in_skb,
			    const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udp_table);
}
      
/* SOCK_DESTROY callback for IPPROTO_UDPLITE sockets. */
static int udplite_diag_destroy(struct sk_buff *in_skb,
				const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udplite_table);
}
      
      #endif
      
/* inet_diag handler glue for IPPROTO_UDP. */
static const struct inet_diag_handler udp_diag_handler = {
	.dump		 = udp_diag_dump,
	.dump_one	 = udp_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDP,
	.idiag_info_size = 0,	/* no protocol-specific INFO payload */
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udp_diag_destroy,
#endif
};
      
/* inet_diag "dump all" callback for IPPROTO_UDPLITE sockets. */
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *r,
			      struct nlattr *bc)
{
	udp_dump(&udplite_table, skb, cb, r, bc);
}
      
/* inet_diag "dump one" callback for IPPROTO_UDPLITE sockets. */
static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
				 const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udplite_table, in_skb, nlh, req);
}
      
/* inet_diag handler glue for IPPROTO_UDPLITE. */
static const struct inet_diag_handler udplite_diag_handler = {
	.dump		 = udplite_diag_dump,
	.dump_one	 = udplite_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDPLITE,
	.idiag_info_size = 0,	/* no protocol-specific INFO payload */
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udplite_diag_destroy,
#endif
};
      
      static int __init udp_diag_init(void)
      {
              int err;
      
              err = inet_diag_register(&udp_diag_handler);
              if (err)
                      goto out;
              err = inet_diag_register(&udplite_diag_handler);
              if (err)
                      goto out_lite;
      out:
              return err;
      out_lite:
              inet_diag_unregister(&udp_diag_handler);
              goto out;
      }
      
/* Unregister both handlers, in reverse order of registration. */
static void __exit udp_diag_exit(void)
{
	inet_diag_unregister(&udplite_diag_handler);
	inet_diag_unregister(&udp_diag_handler);
}
      
      module_init(udp_diag_init);
      module_exit(udp_diag_exit);
      MODULE_LICENSE("GPL");
      MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
      MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);
      /* SPDX-License-Identifier: GPL-2.0-or-later */
      /* delayacct.h - per-task delay accounting
       *
       * Copyright (C) Shailabh Nagar, IBM Corp. 2006
       */
      
      #ifndef _LINUX_DELAYACCT_H
      #define _LINUX_DELAYACCT_H
      
      #include <uapi/linux/taskstats.h>
      
      /*
       * Per-task flags relevant to delay accounting
       * maintained privately to avoid exhausting similar flags in sched.h:PF_*
       * Used to set current->delays->flags
       */
      #define DELAYACCT_PF_SWAPIN        0x00000001        /* I am doing a swapin */
      #define DELAYACCT_PF_BLKIO        0x00000002        /* I am waiting on IO */
      
      #ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	raw_spinlock_t	lock;		/* protects the counters below */
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	u64 blkio_start;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	u64 freepages_start;
	u64 freepages_delay;	/* wait for memory reclaim */

	u64 thrashing_start;
	u64 thrashing_delay;	/* wait for thrashing page */

	u32 freepages_count;	/* total count of memory reclaim */
	u32 thrashing_count;	/* total count of thrash waits */
};
      #endif
      
      #include <linux/sched.h>
      #include <linux/slab.h>
      
      #ifdef CONFIG_TASK_DELAY_ACCT
      extern int delayacct_on;        /* Delay accounting turned on/off */
      extern struct kmem_cache *delayacct_cache;
      extern void delayacct_init(void);
      extern void __delayacct_tsk_init(struct task_struct *);
      extern void __delayacct_tsk_exit(struct task_struct *);
      extern void __delayacct_blkio_start(void);
      extern void __delayacct_blkio_end(struct task_struct *);
      extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
      extern __u64 __delayacct_blkio_ticks(struct task_struct *);
      extern void __delayacct_freepages_start(void);
      extern void __delayacct_freepages_end(void);
      extern void __delayacct_thrashing_start(void);
      extern void __delayacct_thrashing_end(void);
      
      static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
      {
              if (p->delays)
                      return (p->delays->flags & DELAYACCT_PF_BLKIO);
              else
                      return 0;
      }
      
      static inline void delayacct_set_flag(int flag)
      {
              if (current->delays)
                      current->delays->flags |= flag;
      }
      
      static inline void delayacct_clear_flag(int flag)
      {
              if (current->delays)
                      current->delays->flags &= ~flag;
      }
      
      static inline void delayacct_tsk_init(struct task_struct *tsk)
      {
              /* reinitialize in case parent's non-null pointer was dup'ed*/
              tsk->delays = NULL;
              if (delayacct_on)
                      __delayacct_tsk_init(tsk);
      }
      
      /* Free tsk->delays. Called from bad fork and __put_task_struct
       * where there's no risk of tsk->delays being accessed elsewhere
       */
      static inline void delayacct_tsk_free(struct task_struct *tsk)
      {
              if (tsk->delays)
                      kmem_cache_free(delayacct_cache, tsk->delays);
              tsk->delays = NULL;
      }
      
/* Mark the start of a block-I/O (or swapin) wait for current. */
static inline void delayacct_blkio_start(void)
{
	delayacct_set_flag(DELAYACCT_PF_BLKIO);
	if (current->delays)
		__delayacct_blkio_start();
}
      
/*
 * Account the just-finished block-I/O wait of @p.
 * NOTE(review): the delay is charged to @p but the flag is cleared on
 * current (delayacct_clear_flag operates on current) — confirm callers
 * only pass a @p for which this is intended.
 */
static inline void delayacct_blkio_end(struct task_struct *p)
{
	if (p->delays)
		__delayacct_blkio_end(p);
	delayacct_clear_flag(DELAYACCT_PF_BLKIO);
}
      
      static inline int delayacct_add_tsk(struct taskstats *d,
                                              struct task_struct *tsk)
      {
              if (!delayacct_on || !tsk->delays)
                      return 0;
              return __delayacct_add_tsk(d, tsk);
      }
      
      static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
      {
              if (tsk->delays)
                      return __delayacct_blkio_ticks(tsk);
              return 0;
      }
      
/* Mark the start of a memory-reclaim wait for current. */
static inline void delayacct_freepages_start(void)
{
	if (current->delays)
		__delayacct_freepages_start();
}
      
/* Account the just-finished memory-reclaim wait of current. */
static inline void delayacct_freepages_end(void)
{
	if (current->delays)
		__delayacct_freepages_end();
}
      
/* Mark the start of a thrashing-page wait for current. */
static inline void delayacct_thrashing_start(void)
{
	if (current->delays)
		__delayacct_thrashing_start();
}
      
/* Account the just-finished thrashing-page wait of current. */
static inline void delayacct_thrashing_end(void)
{
	if (current->delays)
		__delayacct_thrashing_end();
}
      
      #else
/* CONFIG_TASK_DELAY_ACCT=n: all hooks compile down to no-ops. */
static inline void delayacct_set_flag(int flag)
{}
static inline void delayacct_clear_flag(int flag)
{}
static inline void delayacct_init(void)
{}
static inline void delayacct_tsk_init(struct task_struct *tsk)
{}
static inline void delayacct_tsk_free(struct task_struct *tsk)
{}
static inline void delayacct_blkio_start(void)
{}
static inline void delayacct_blkio_end(struct task_struct *p)
{}
static inline int delayacct_add_tsk(struct taskstats *d,
					struct task_struct *tsk)
{ return 0; }
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
{ return 0; }
static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
{ return 0; }
static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
static inline void delayacct_thrashing_start(void)
{}
static inline void delayacct_thrashing_end(void)
{}
      
      #endif /* CONFIG_TASK_DELAY_ACCT */
      
      #endif
      /* SPDX-License-Identifier: GPL-2.0-only */
      /*
       * kernfs.h - pseudo filesystem decoupled from vfs locking
       */
      
      #ifndef __LINUX_KERNFS_H
      #define __LINUX_KERNFS_H
      
      #include <linux/kernel.h>
      #include <linux/err.h>
      #include <linux/list.h>
      #include <linux/mutex.h>
      #include <linux/idr.h>
      #include <linux/lockdep.h>
      #include <linux/rbtree.h>
      #include <linux/atomic.h>
      #include <linux/uidgid.h>
      #include <linux/wait.h>
      
      struct file;
      struct dentry;
      struct iattr;
      struct seq_file;
      struct vm_area_struct;
      struct super_block;
      struct file_system_type;
      struct poll_table_struct;
      struct fs_context;
      
      struct kernfs_fs_context;
      struct kernfs_open_node;
      struct kernfs_iattrs;
      
      enum kernfs_node_type {
              KERNFS_DIR                = 0x0001,
              KERNFS_FILE                = 0x0002,
              KERNFS_LINK                = 0x0004,
      };
      
      #define KERNFS_TYPE_MASK        0x000f
      #define KERNFS_FLAG_MASK        ~KERNFS_TYPE_MASK
      
      enum kernfs_node_flag {
              KERNFS_ACTIVATED        = 0x0010,
              KERNFS_NS                = 0x0020,
              KERNFS_HAS_SEQ_SHOW        = 0x0040,
              KERNFS_HAS_MMAP                = 0x0080,
              KERNFS_LOCKDEP                = 0x0100,
              KERNFS_SUICIDAL                = 0x0400,
              KERNFS_SUICIDED                = 0x0800,
              KERNFS_EMPTY_DIR        = 0x1000,
              KERNFS_HAS_RELEASE        = 0x2000,
      };
      
      /* @flags for kernfs_create_root() */
      enum kernfs_root_flag {
              /*
               * kernfs_nodes are created in the deactivated state and invisible.
               * They require explicit kernfs_activate() to become visible.  This
               * can be used to make related nodes become visible atomically
               * after all nodes are created successfully.
               */
              KERNFS_ROOT_CREATE_DEACTIVATED                = 0x0001,
      
              /*
               * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
               * succeeds regardless of the RW permissions.  sysfs had an extra
               * layer of enforcement where open(2) fails with -EACCES regardless
               * of CAP_DAC_OVERRIDE if the permission doesn't have the
               * respective read or write access at all (none of S_IRUGO or
               * S_IWUGO) or the respective operation isn't implemented.  The
               * following flag enables that behavior.
               */
              KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK        = 0x0002,
      
              /*
               * The filesystem supports exportfs operation, so userspace can use
               * fhandle to access nodes of the fs.
               */
              KERNFS_ROOT_SUPPORT_EXPORTOP                = 0x0004,
      };
      
/* type-specific structures for kernfs_node union members */
struct kernfs_elem_dir {
	unsigned long		subdirs;	/* number of child directories */
	/* children rbtree starts here and goes through kn->rb */
	struct rb_root		children;

	/*
	 * The kernfs hierarchy this directory belongs to.  This fits
	 * better directly in kernfs_node but is here to save space.
	 */
	struct kernfs_root	*root;
};
      
      struct kernfs_elem_symlink {
              struct kernfs_node        *target_kn;
      };
      
/* payload of a KERNFS_FILE node */
struct kernfs_elem_attr {
	const struct kernfs_ops	*ops;		/* file operations */
	struct kernfs_open_node	*open;		/* open-instance tracking */
	loff_t			size;		/* file size */
	struct kernfs_node	*notify_next;	/* for kernfs_notify() */
};
      
/* represent a kernfs node */
union kernfs_node_id {
	struct {
		/*
		 * blktrace will export this struct as a simplified 'struct
		 * fid' (which is a big data structure), so userspace can use
		 * it to find kernfs node. The layout must match the first two
		 * fields of 'struct fid' exactly.
		 */
		u32		ino;
		u32		generation;
	};
	u64			id;	/* ino + generation as one 64-bit id */
};
      
/*
 * kernfs_node - the building block of kernfs hierarchy.  Each and every
 * kernfs node is represented by single kernfs_node.  Most fields are
 * private to kernfs and shouldn't be accessed directly by kernfs users.
 *
 * As long as s_count reference is held, the kernfs_node itself is
 * accessible.  Dereferencing elem or any other outer entity requires
 * active reference.
 */
struct kernfs_node {
	atomic_t		count;		/* existence (s_count) reference */
	atomic_t		active;		/* active reference, see above */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
	/*
	 * Use kernfs_get_parent() and kernfs_name/path() instead of
	 * accessing the following two fields directly.  If the node is
	 * never moved to a different parent, it is safe to access the
	 * parent directly.
	 */
	struct kernfs_node	*parent;
	const char		*name;

	struct rb_node		rb;	/* node in parent's children rbtree */

	const void		*ns;	/* namespace tag */
	unsigned int		hash;	/* ns + name hash */
	union {
		/* payload, selected by the KERNFS_TYPE_MASK bits of flags */
		struct kernfs_elem_dir		dir;
		struct kernfs_elem_symlink	symlink;
		struct kernfs_elem_attr		attr;
	};

	void			*priv;

	union kernfs_node_id	id;
	unsigned short		flags;	/* kernfs_node_type | kernfs_node_flag */
	umode_t			mode;
	struct kernfs_iattrs	*iattr;
};
      
/*
 * kernfs_syscall_ops may be specified on kernfs_create_root() to support
 * syscalls.  These optional callbacks are invoked on the matching syscalls
 * and can perform any kernfs operations which don't necessarily have to be
 * the exact operation requested.  An active reference is held for each
 * kernfs_node parameter.
 */
struct kernfs_syscall_ops {
	int (*show_options)(struct seq_file *sf, struct kernfs_root *root);

	int (*mkdir)(struct kernfs_node *parent, const char *name,
		     umode_t mode);
	int (*rmdir)(struct kernfs_node *kn);
	int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
		      const char *new_name);
	int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
			 struct kernfs_root *root);
};
      
/* root of a kernfs hierarchy, created by kernfs_create_root() */
struct kernfs_root {
	/* published fields */
	struct kernfs_node	*kn;	/* the root directory node */
	unsigned int		flags;	/* KERNFS_ROOT_* flags */

	/* private fields, do not use outside kernfs proper */
	struct idr		ino_idr;
	u32			next_generation;
	struct kernfs_syscall_ops *syscall_ops;

	/* list of kernfs_super_info of this root, protected by kernfs_mutex */
	struct list_head	supers;

	wait_queue_head_t	deactivate_waitq;
};
      
/* per-open state tying a struct file to the kernfs_node it opened */
struct kernfs_open_file {
	/* published fields */
	struct kernfs_node	*kn;
	struct file		*file;
	struct seq_file		*seq_file;
	void			*priv;

	/* private fields, do not use outside kernfs proper */
	struct mutex		mutex;
	struct mutex		prealloc_mutex;
	int			event;
	struct list_head	list;
	char			*prealloc_buf;

	size_t			atomic_write_len;
	bool			mmapped:1;
	bool			released:1;
	const struct vm_operations_struct *vm_ops;
};
      
/* operations for a kernfs file node, attached via kernfs_elem_attr.ops */
struct kernfs_ops {
	/*
	 * Optional open/release methods.  Both are called with
	 * @of->seq_file populated.
	 */
	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * Read is handled by either seq_file or raw_read().
	 *
	 * If seq_show() is present, seq_file path is active.  Other seq
	 * operations are optional and if not implemented, the behavior is
	 * equivalent to single_open().  @sf->private points to the
	 * associated kernfs_open_file.
	 *
	 * read() is bounced through kernel buffer and a read larger than
	 * PAGE_SIZE results in partial operation of PAGE_SIZE.
	 */
	int (*seq_show)(struct seq_file *sf, void *v);

	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);

	ssize_t (*read)(struct kernfs_open_file *of, char *buf, size_t bytes,
			loff_t off);

	/*
	 * write() is bounced through kernel buffer.  If atomic_write_len
	 * is not set, a write larger than PAGE_SIZE results in partial
	 * operations of PAGE_SIZE chunks.  If atomic_write_len is set,
	 * writes up to the specified size are executed atomically but
	 * larger ones are rejected with -E2BIG.
	 */
	size_t atomic_write_len;
	/*
	 * "prealloc" causes a buffer to be allocated at open for
	 * all read/write requests.  As ->seq_show uses seq_read()
	 * which does its own allocation, it is incompatible with
	 * ->prealloc.  Provide ->read and ->write with ->prealloc.
	 */
	bool prealloc;
	ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
			 loff_t off);

	__poll_t (*poll)(struct kernfs_open_file *of,
			 struct poll_table_struct *pt);

	int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key	lockdep_key;
#endif
};
      
/*
 * The kernfs superblock creation/mount parameter context.
 */
struct kernfs_fs_context {
	struct kernfs_root	*root;		/* Root of the hierarchy being mounted */
	void			*ns_tag;	/* Namespace tag of the mount (or NULL) */
	unsigned long		magic;		/* File system specific magic number */

	/* The following are set/used by kernfs_mount() */
	bool			new_sb_created;	/* Set to T if we allocated a new sb */
};
      
      #ifdef CONFIG_KERNFS
      
      static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
      {
  478         return kn->flags & KERNFS_TYPE_MASK;
      }
      
/**
 * kernfs_enable_ns - enable namespace under a directory
 * @kn: directory of interest, should be empty
 *
 * This is to be called right after @kn is created to enable namespace
 * under it.  All children of @kn must have non-NULL namespace tags and
 * only the ones which match the super_block's tag will be visible.
 */
static inline void kernfs_enable_ns(struct kernfs_node *kn)
{
	/* Only valid on an empty directory: warn (once) otherwise. */
	WARN_ON_ONCE(kernfs_type(kn) != KERNFS_DIR);
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&kn->dir.children));
	kn->flags |= KERNFS_NS;
}
      
      /**
       * kernfs_ns_enabled - test whether namespace is enabled
       * @kn: the node to test
       *
       * Test whether namespace filtering is enabled for the children of @ns.
       */
      static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
      {
              return kn->flags & KERNFS_NS;
      }
      
/* Naming and path reporting */
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
			  char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);

/* Lookup and reference counting */
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
					   const char *name, const void *ns);
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
					   const char *path, const void *ns);
void kernfs_get(struct kernfs_node *kn);
void kernfs_put(struct kernfs_node *kn);

/* VFS glue */
struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry);
struct kernfs_root *kernfs_root_from_sb(struct super_block *sb);
struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn);

struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
				  struct super_block *sb);

/* Hierarchy construction and teardown */
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
				       unsigned int flags, void *priv);
void kernfs_destroy_root(struct kernfs_root *root);

struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
					 const char *name, umode_t mode,
					 kuid_t uid, kgid_t gid,
					 void *priv, const void *ns);
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
					    const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name, umode_t mode,
					 kuid_t uid, kgid_t gid,
					 loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 struct lock_class_key *key);
struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
				       const char *name,
				       struct kernfs_node *target);
void kernfs_activate(struct kernfs_node *kn);
void kernfs_remove(struct kernfs_node *kn);
void kernfs_break_active_protection(struct kernfs_node *kn);
void kernfs_unbreak_active_protection(struct kernfs_node *kn);
bool kernfs_remove_self(struct kernfs_node *kn);
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
			     const void *ns);
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns);
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
__poll_t kernfs_generic_poll(struct kernfs_open_file *of,
			     struct poll_table_struct *pt);
void kernfs_notify(struct kernfs_node *kn);

/* Extended attributes */
int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
		     void *value, size_t size);
int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
		     const void *value, size_t size, int flags);

/* Superblock / fs_context lifecycle */
const void *kernfs_super_ns(struct super_block *sb);
int kernfs_get_tree(struct fs_context *fc);
void kernfs_free_fs_context(struct fs_context *fc);
void kernfs_kill_sb(struct super_block *sb);

void kernfs_init(void);

struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root,
	const union kernfs_node_id *id);
      #else        /* CONFIG_KERNFS */
      
/*
 * !CONFIG_KERNFS stubs so callers compile unchanged: lookups return
 * NULL, constructors return ERR_PTR(-ENOSYS), int-returning operations
 * return -ENOSYS and void operations are no-ops.
 */
static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
{ return 0; }	/* whatever */

static inline void kernfs_enable_ns(struct kernfs_node *kn) { }

static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
{ return false; }

static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{ return -ENOSYS; }

static inline int kernfs_path_from_node(struct kernfs_node *root_kn,
					struct kernfs_node *kn,
					char *buf, size_t buflen)
{ return -ENOSYS; }

static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
static inline void pr_cont_kernfs_path(struct kernfs_node *kn) { }

static inline struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{ return NULL; }

static inline struct kernfs_node *
kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name,
		       const void *ns)
{ return NULL; }
static inline struct kernfs_node *
kernfs_walk_and_get_ns(struct kernfs_node *parent, const char *path,
		       const void *ns)
{ return NULL; }

static inline void kernfs_get(struct kernfs_node *kn) { }
static inline void kernfs_put(struct kernfs_node *kn) { }

static inline struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
{ return NULL; }

static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
{ return NULL; }

static inline struct inode *
kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
{ return NULL; }

static inline struct kernfs_root *
kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags,
		   void *priv)
{ return ERR_PTR(-ENOSYS); }

static inline void kernfs_destroy_root(struct kernfs_root *root) { }

static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
		     umode_t mode, kuid_t uid, kgid_t gid,
		     void *priv, const void *ns)
{ return ERR_PTR(-ENOSYS); }

static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
		     umode_t mode, kuid_t uid, kgid_t gid,
		     loff_t size, const struct kernfs_ops *ops,
		     void *priv, const void *ns, struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }

static inline struct kernfs_node *
kernfs_create_link(struct kernfs_node *parent, const char *name,
		   struct kernfs_node *target)
{ return ERR_PTR(-ENOSYS); }

static inline void kernfs_activate(struct kernfs_node *kn) { }

static inline void kernfs_remove(struct kernfs_node *kn) { }

static inline bool kernfs_remove_self(struct kernfs_node *kn)
{ return false; }

/* NOTE(review): first parameter is named @kn here but @parent in the
 * real declaration above — harmless, but inconsistent. */
static inline int kernfs_remove_by_name_ns(struct kernfs_node *kn,
					   const char *name, const void *ns)
{ return -ENOSYS; }

static inline int kernfs_rename_ns(struct kernfs_node *kn,
				   struct kernfs_node *new_parent,
				   const char *new_name, const void *new_ns)
{ return -ENOSYS; }

static inline int kernfs_setattr(struct kernfs_node *kn,
				 const struct iattr *iattr)
{ return -ENOSYS; }

static inline void kernfs_notify(struct kernfs_node *kn) { }

static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
				   void *value, size_t size)
{ return -ENOSYS; }

static inline int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
				   const void *value, size_t size, int flags)
{ return -ENOSYS; }

static inline const void *kernfs_super_ns(struct super_block *sb)
{ return NULL; }

static inline int kernfs_get_tree(struct fs_context *fc)
{ return -ENOSYS; }

static inline void kernfs_free_fs_context(struct fs_context *fc) { }

static inline void kernfs_kill_sb(struct super_block *sb) { }

static inline void kernfs_init(void) { }
      
      #endif        /* CONFIG_KERNFS */
      
/**
 * kernfs_path - build full path of a given node
 * @kn: kernfs_node of interest
 * @buf: buffer to copy @kn's name into
 * @buflen: size of @buf
 *
 * If @kn is NULL result will be "(null)".
 *
 * Returns the length of the full path.  If the full length is equal to or
 * greater than @buflen, @buf contains the truncated path with the trailing
 * '\0'.  On error, -errno is returned.
 *
 * Convenience wrapper: a NULL root means "path from the hierarchy root".
 */
static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
{
	return kernfs_path_from_node(kn, NULL, buf, buflen);
}
      
/* Look up child @name of @kn without namespace filtering; returned node
 * (if any) has an elevated refcount the caller must kernfs_put(). */
static inline struct kernfs_node *
kernfs_find_and_get(struct kernfs_node *kn, const char *name)
{
	return kernfs_find_and_get_ns(kn, name, NULL);
}
      
/* Walk '/'-separated @path below @kn without namespace filtering. */
static inline struct kernfs_node *
kernfs_walk_and_get(struct kernfs_node *kn, const char *path)
{
	return kernfs_walk_and_get_ns(kn, path, NULL);
}
      
/* Create a directory owned by root:root and outside any namespace. */
static inline struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
		  void *priv)
{
	return kernfs_create_dir_ns(parent, name, mode,
				    GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				    priv, NULL);
}
      
      static inline struct kernfs_node *
      kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
                            umode_t mode, kuid_t uid, kgid_t gid,
                            loff_t size, const struct kernfs_ops *ops,
                            void *priv, const void *ns)
      {
              struct lock_class_key *key = NULL;
      
      #ifdef CONFIG_DEBUG_LOCK_ALLOC
              key = (struct lock_class_key *)&ops->lockdep_key;
      #endif
              return __kernfs_create_file(parent, name, mode, uid, gid,
                                          size, ops, priv, ns, key);
      }
      
/* Create a root:root-owned file outside any namespace. */
static inline struct kernfs_node *
kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
		   loff_t size, const struct kernfs_ops *ops, void *priv)
{
	return kernfs_create_file_ns(parent, name, mode,
				     GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				     size, ops, priv, NULL);
}
      
/* Remove child @name of @parent, ignoring namespace tags. */
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
					const char *name)
{
	return kernfs_remove_by_name_ns(parent, name, NULL);
}
      
/* Move/rename @kn under @new_parent with a NULL namespace tag. */
static inline int kernfs_rename(struct kernfs_node *kn,
				struct kernfs_node *new_parent,
				const char *new_name)
{
	return kernfs_rename_ns(kn, new_parent, new_name, NULL);
}
      
      #endif        /* __LINUX_KERNFS_H */
      // SPDX-License-Identifier: GPL-2.0
      /*
       * INET                An implementation of the TCP/IP protocol suite for the LINUX
       *                operating system.  INET is implemented using the  BSD Socket
       *                interface as the means of communication with the user level.
       *
       *                The IP to API glue.
       *
       * Authors:        see ip.c
       *
       * Fixes:
       *                Many                :        Split from ip.c , see ip.c for history.
       *                Martin Mares        :        TOS setting fixed.
       *                Alan Cox        :        Fixed a couple of oopses in Martin's
       *                                        TOS tweaks.
       *                Mike McLagan        :        Routing by source
       */
      
      #include <linux/module.h>
      #include <linux/types.h>
      #include <linux/mm.h>
      #include <linux/skbuff.h>
      #include <linux/ip.h>
      #include <linux/icmp.h>
      #include <linux/inetdevice.h>
      #include <linux/netdevice.h>
      #include <linux/slab.h>
      #include <net/sock.h>
      #include <net/ip.h>
      #include <net/icmp.h>
      #include <net/tcp_states.h>
      #include <linux/udp.h>
      #include <linux/igmp.h>
      #include <linux/netfilter.h>
      #include <linux/route.h>
      #include <linux/mroute.h>
      #include <net/inet_ecn.h>
      #include <net/route.h>
      #include <net/xfrm.h>
      #include <net/compat.h>
      #include <net/checksum.h>
      #if IS_ENABLED(CONFIG_IPV6)
      #include <net/transp_v6.h>
      #endif
      #include <net/ip_fib.h>
      
      #include <linux/errqueue.h>
      #include <linux/uaccess.h>
      
      #include <linux/bpfilter.h>
      
      /*
       *        SOL_IP control messages.
       */
      
      static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
      {
              struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
      
              info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
      
              put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
      }
      
      static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
      {
              int ttl = ip_hdr(skb)->ttl;
              put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
      }
      
      static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
      {
              put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
      }
      
      static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
      {
              if (IPCB(skb)->opt.optlen == 0)
                      return;
      
              put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
                       ip_hdr(skb) + 1);
      }
      
      
/* Deliver IP_RETOPTS: the received options rewritten so they can be
 * passed back on a send.  A stack buffer sized for struct ip_options
 * plus the 40-byte IPv4 option maximum avoids an allocation. */
static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg,
				 struct sk_buff *skb)
{
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;

	if (IPCB(skb)->opt.optlen == 0)
		return;

	/* If the options can't be echoed, signal control-data truncation
	 * rather than handing up partial data. */
	if (ip_options_echo(net, opt, skb)) {
		msg->msg_flags |= MSG_CTRUNC;
		return;
	}
	ip_options_undo(opt);

	put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}
      
      static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
      {
              int val;
      
              if (IPCB(skb)->frag_max_size == 0)
                      return;
      
              val = IPCB(skb)->frag_max_size;
              put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
      }
      
/* Deliver IP_CHECKSUM ancillary data.  Only CHECKSUM_COMPLETE skbs carry
 * a usable full checksum in skb->csum.  When @offset is non-zero the
 * checksum of @offset bytes located @tlen bytes past the transport
 * header is subtracted out, so the reported value matches the data
 * actually handed to the caller.
 * NOTE(review): @tlen/@offset semantics depend on the UDP receive path
 * that calls ip_cmsg_recv_offset() — confirm against those callers. */
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
				  int tlen, int offset)
{
	__wsum csum = skb->csum;

	if (skb->ip_summed != CHECKSUM_COMPLETE)
		return;

	if (offset != 0) {
		int tend_off = skb_transport_offset(skb) + tlen;
		csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
	}

	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}
      
/* Deliver SCM_SECURITY: the LSM security context of the sender.
 * Failures from the security hooks are silently ignored — the cmsg is
 * simply not emitted. */
static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
	char *secdata;
	u32 seclen, secid;
	int err;

	err = security_socket_getpeersec_dgram(NULL, skb, &secid);
	if (err)
		return;

	err = security_secid_to_secctx(secid, &secdata, &seclen);
	if (err)
		return;

	put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
	/* secdata was allocated by the LSM; release it after copying. */
	security_release_secctx(secdata, seclen);
}
      
/* Deliver IP_ORIGDSTADDR: the original destination address and port,
 * useful for transparent-proxy setups. */
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
	__be16 _ports[2], *ports;
	struct sockaddr_in sin;

	/* All current transport protocols have the port numbers in the
	 * first four bytes of the transport header and this function is
	 * written with this assumption in mind.
	 */
	ports = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_ports), &_ports);
	if (!ports)
		return;

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
	sin.sin_port = ports[1];	/* destination port */
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
      
/* Emit every ancillary data item requested via inet->cmsg_flags for
 * @skb into @msg.  @tlen and @offset are forwarded only to the
 * IP_CHECKSUM handler.  Each handled flag is cleared so the function
 * can return as soon as no requested items remain. */
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset)
{
	struct inet_sock *inet = inet_sk(sk);
	unsigned int flags = inet->cmsg_flags;

	/* Ordered by supposed usage frequency */
	if (flags & IP_CMSG_PKTINFO) {
		ip_cmsg_recv_pktinfo(msg, skb);

		flags &= ~IP_CMSG_PKTINFO;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_TTL) {
		ip_cmsg_recv_ttl(msg, skb);

		flags &= ~IP_CMSG_TTL;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_TOS) {
		ip_cmsg_recv_tos(msg, skb);

		flags &= ~IP_CMSG_TOS;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_RECVOPTS) {
		ip_cmsg_recv_opts(msg, skb);

		flags &= ~IP_CMSG_RECVOPTS;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_RETOPTS) {
		ip_cmsg_recv_retopts(sock_net(sk), msg, skb);

		flags &= ~IP_CMSG_RETOPTS;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_PASSSEC) {
		ip_cmsg_recv_security(msg, skb);

		flags &= ~IP_CMSG_PASSSEC;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_ORIGDSTADDR) {
		ip_cmsg_recv_dstaddr(msg, skb);

		flags &= ~IP_CMSG_ORIGDSTADDR;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_CHECKSUM)
		ip_cmsg_recv_checksum(msg, skb, tlen, offset);

	if (flags & IP_CMSG_RECVFRAGSIZE)
		ip_cmsg_recv_fragsize(msg, skb);
}
EXPORT_SYMBOL(ip_cmsg_recv_offset);
      
/* Parse the control messages attached to a sendmsg() call and fill
 * @ipc accordingly.  Handles SOL_SOCKET cmsgs via __sock_cmsg_send(),
 * SOL_IP cmsgs (IP_RETOPTS/IP_PKTINFO/IP_TTL/IP_TOS) directly, and —
 * when @allow_ipv6 — IPV6_PKTINFO carrying a v4-mapped address.
 * Returns 0 on success or a negative errno; on IP_RETOPTS success the
 * caller owns (and must free) ipc->opt. */
int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
		 bool allow_ipv6)
{
	int err, val;
	struct cmsghdr *cmsg;
	struct net *net = sock_net(sk);

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
#if IS_ENABLED(CONFIG_IPV6)
		/* Accept IPV6_PKTINFO on dual-stack sockets only when the
		 * supplied address is v4-mapped; extract its IPv4 part. */
		if (allow_ipv6 &&
		    cmsg->cmsg_level == SOL_IPV6 &&
		    cmsg->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo *src_info;

			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
				return -EINVAL;
			src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
				return -EINVAL;
			if (src_info->ipi6_ifindex)
				ipc->oif = src_info->ipi6_ifindex;
			ipc->addr = src_info->ipi6_addr.s6_addr32[3];
			continue;
		}
#endif
		if (cmsg->cmsg_level == SOL_SOCKET) {
			err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
			if (err)
				return err;
			continue;
		}

		if (cmsg->cmsg_level != SOL_IP)
			continue;
		switch (cmsg->cmsg_type) {
		case IP_RETOPTS:
			/* err temporarily holds the option payload length,
			 * capped at the 40-byte IPv4 option maximum. */
			err = cmsg->cmsg_len - sizeof(struct cmsghdr);

			/* Our caller is responsible for freeing ipc->opt */
			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
					     err < 40 ? err : 40);
			if (err)
				return err;
			break;
		case IP_PKTINFO:
		{
			struct in_pktinfo *info;
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
				return -EINVAL;
			info = (struct in_pktinfo *)CMSG_DATA(cmsg);
			if (info->ipi_ifindex)
				ipc->oif = info->ipi_ifindex;
			ipc->addr = info->ipi_spec_dst.s_addr;
			break;
		}
		case IP_TTL:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				return -EINVAL;
			val = *(int *)CMSG_DATA(cmsg);
			if (val < 1 || val > 255)
				return -EINVAL;
			ipc->ttl = val;
			break;
		case IP_TOS:
			/* IP_TOS may be passed as either an int or a u8. */
			if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
				val = *(int *)CMSG_DATA(cmsg);
			else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
				val = *(u8 *)CMSG_DATA(cmsg);
			else
				return -EINVAL;
			if (val < 0 || val > 255)
				return -EINVAL;
			ipc->tos = val;
			ipc->priority = rt_tos2priority(ipc->tos);
			break;

		default:
			return -EINVAL;
		}
	}
	return 0;
}
      
      static void ip_ra_destroy_rcu(struct rcu_head *head)
      {
              struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
      
              sock_put(ra->saved_sk);
              kfree(ra);
      }
      
/* Add (@on != 0) or remove this raw socket from the per-netns router
 * alert chain.  The chain is written under net->ipv4.ra_mutex and read
 * under RCU by ip_call_ra_chain(); removal defers the final sock_put()
 * and kfree() one grace period via ip_ra_destroy_rcu().
 * Returns 0, -EINVAL for non-raw/IPPROTO_RAW sockets, -ENOMEM,
 * -EADDRINUSE if already registered, or -ENOBUFS when asked to remove
 * an entry that does not exist. */
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *))
{
	struct ip_ra_chain *ra, *new_ra;
	struct ip_ra_chain __rcu **rap;
	struct net *net = sock_net(sk);

	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
		return -EINVAL;

	/* Allocate before taking the mutex; NULL means removal. */
	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
	if (on && !new_ra)
		return -ENOMEM;

	mutex_lock(&net->ipv4.ra_mutex);
	for (rap = &net->ipv4.ra_chain;
	     (ra = rcu_dereference_protected(*rap,
			lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
	     rap = &ra->next) {
		if (ra->sk == sk) {
			if (on) {
				mutex_unlock(&net->ipv4.ra_mutex);
				kfree(new_ra);
				return -EADDRINUSE;
			}
			/* dont let ip_call_ra_chain() use sk again */
			ra->sk = NULL;
			RCU_INIT_POINTER(*rap, ra->next);
			mutex_unlock(&net->ipv4.ra_mutex);

			if (ra->destructor)
				ra->destructor(sk);
			/*
			 * Delay sock_put(sk) and kfree(ra) after one rcu grace
			 * period. This guarantee ip_call_ra_chain() dont need
			 * to mess with socket refcounts.
			 */
			ra->saved_sk = sk;
			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
			return 0;
		}
	}
	/* Not found: removal of a non-registered socket fails. */
	if (!new_ra) {
		mutex_unlock(&net->ipv4.ra_mutex);
		return -ENOBUFS;
	}
	/* Append the new entry at the tail (rap points at the last ->next). */
	new_ra->sk = sk;
	new_ra->destructor = destructor;

	RCU_INIT_POINTER(new_ra->next, ra);
	rcu_assign_pointer(*rap, new_ra);
	sock_hold(sk);
	mutex_unlock(&net->ipv4.ra_mutex);

	return 0;
}
      
/* Queue an ICMP-originated error for @sk's error queue (MSG_ERRQUEUE).
 * Clones @skb, fills in a sock_extended_err describing the ICMP
 * type/code and @info, records where the offending destination address
 * lives relative to the network header, then trims the clone down to
 * @payload before queueing.  The clone is freed on any failure. */
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
		   __be16 port, u32 info, u8 *payload)
{
	struct sock_exterr_skb *serr;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
	serr->ee.ee_type = icmp_hdr(skb)->type;
	serr->ee.ee_code = icmp_hdr(skb)->code;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	/* Offset of the daddr field of the IP header embedded in the
	 * ICMP payload, relative to the network header. */
	serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
				   skb_network_header(skb);
	serr->port = port;

	if (skb_pull(skb, payload - skb->data)) {
		skb_reset_transport_header(skb);
		if (sock_queue_err_skb(sk, skb) == 0)
			return;
	}
	kfree_skb(skb);
}
      
/* Queue a locally generated error for @sk's error queue.  Only acts if
 * the socket enabled IP_RECVERR.  Builds a minimal skb containing just
 * an IP header (so the reader can recover @daddr via addr_offset) and a
 * sock_extended_err with SO_EE_ORIGIN_LOCAL. */
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sock_exterr_skb *serr;
	struct iphdr *iph;
	struct sk_buff *skb;

	if (!inet->recverr)
		return;

	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->daddr = daddr;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = port;

	/* Leave an empty payload: pull everything and mark the (empty)
	 * transport header. */
	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}
      
      /* For some errors we have valid addr_offset even with zero payload and
       * zero port. Also, addr_offset should be supported if port is set.
       */
      static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
      {
              return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
                     serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
      }
      
/* IPv4 supports cmsg on all icmp errors and some timestamps
 *
 * Timestamp code paths do not initialize the fields expected by cmsg:
 * the PKTINFO fields in skb->cb[]. Fill those in here.
 */
static bool ipv4_datagram_support_cmsg(const struct sock *sk,
				       struct sk_buff *skb,
				       int ee_origin)
{
	struct in_pktinfo *info;

	/* ICMP errors: control block already populated by the rx path. */
	if (ee_origin == SO_EE_ORIGIN_ICMP)
		return true;

	/* Local errors carry no packet metadata to report. */
	if (ee_origin == SO_EE_ORIGIN_LOCAL)
		return false;

	/* Support IP_PKTINFO on tstamp packets if requested, to correlate
	 * timestamp with egress dev. Not possible for packets without iif
	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
	 */
	info = PKTINFO_SKB_CB(skb);
	if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
	    !info->ipi_ifindex)
		return false;

	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
	return true;
}
      
/*
 *	Handle MSG_ERRQUEUE: dequeue one error skb, copy its payload to
 *	@msg, fill in the original destination (msg_name) when available,
 *	and emit an IP_RECVERR cmsg carrying the extended error plus the
 *	offender's address.  Returns bytes copied or -EAGAIN if the error
 *	queue is empty.
 */
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in	 offender;
	} errhdr;
	int err;
	int copied;

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}
	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	/* Report the original destination address when the error type
	 * provides a valid addr_offset. */
	if (sin && ipv4_datagram_support_addr(serr)) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
						   serr->addr_offset);
		sin->sin_port = serr->port;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	/* Reuse 'sin' to build the offender field of the errhdr. */
	sin = &errhdr.offender;
	memset(sin, 0, sizeof(*sin));

	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		if (inet_sk(sk)->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	}

	put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	consume_skb(skb);
out:
	return err;
}
      
      
      /*
       *        Socket option code for IP. This is the end of the line after any
       *        TCP,UDP etc options on an IP socket.
       */
      static bool setsockopt_needs_rtnl(int optname)
      {
              switch (optname) {
              case IP_ADD_MEMBERSHIP:
              case IP_ADD_SOURCE_MEMBERSHIP:
              case IP_BLOCK_SOURCE:
              case IP_DROP_MEMBERSHIP:
              case IP_DROP_SOURCE_MEMBERSHIP:
              case IP_MSFILTER:
              case IP_UNBLOCK_SOURCE:
              case MCAST_BLOCK_SOURCE:
              case MCAST_MSFILTER:
              case MCAST_JOIN_GROUP:
              case MCAST_JOIN_SOURCE_GROUP:
              case MCAST_LEAVE_GROUP:
              case MCAST_LEAVE_SOURCE_GROUP:
              case MCAST_UNBLOCK_SOURCE:
                      return true;
              }
              return false;
      }
      
/*
 * Apply one SOL_IP socket option.  Runs under lock_sock() and, for the
 * multicast membership/filter options, under RTNL as well (see
 * setsockopt_needs_rtnl()).  Returns 0 or a negative errno.
 */
static int do_ip_setsockopt(struct sock *sk, int level,
			    int optname, char __user *optval, unsigned int optlen)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int val = 0, err;
	bool needs_rtnl = setsockopt_needs_rtnl(optname);

	/* For the simple integer-valued options, fetch the value up
	 * front.  A full int is preferred, but a single byte is also
	 * accepted for BSD compatibility.
	 */
	switch (optname) {
	case IP_PKTINFO:
	case IP_RECVTTL:
	case IP_RECVOPTS:
	case IP_RECVTOS:
	case IP_RETOPTS:
	case IP_TOS:
	case IP_TTL:
	case IP_HDRINCL:
	case IP_MTU_DISCOVER:
	case IP_RECVERR:
	case IP_ROUTER_ALERT:
	case IP_FREEBIND:
	case IP_PASSSEC:
	case IP_TRANSPARENT:
	case IP_MINTTL:
	case IP_NODEFRAG:
	case IP_BIND_ADDRESS_NO_PORT:
	case IP_UNICAST_IF:
	case IP_MULTICAST_TTL:
	case IP_MULTICAST_ALL:
	case IP_MULTICAST_LOOP:
	case IP_RECVORIGDSTADDR:
	case IP_CHECKSUM:
	case IP_RECVFRAGSIZE:
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else if (optlen >= sizeof(char)) {
			unsigned char ucval;

			if (get_user(ucval, (unsigned char __user *) optval))
				return -EFAULT;
			val = (int) ucval;
		}
	}

	/* If optlen==0, it is equivalent to val == 0 */

	/* These two options take their own locks, so dispatch them
	 * before lock_sock().
	 */
	if (optname == IP_ROUTER_ALERT)
		return ip_ra_control(sk, val ? 1 : 0, NULL);
	if (ip_mroute_opt(optname))
		return ip_mroute_setsockopt(sk, optname, optval, optlen);

	err = 0;
	/* Lock ordering: RTNL first, then the socket lock. */
	if (needs_rtnl)
		rtnl_lock();
	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		struct ip_options_rcu *old, *opt = NULL;

		/* IP options occupy at most 40 bytes of the header. */
		if (optlen > 40)
			goto e_inval;
		err = ip_options_get_from_user(sock_net(sk), &opt,
					       optval, optlen);
		if (err)
			break;
		old = rcu_dereference_protected(inet->inet_opt,
						lockdep_sock_is_held(sk));
		if (inet->is_icsk) {
			struct inet_connection_sock *icsk = inet_csk(sk);
#if IS_ENABLED(CONFIG_IPV6)
			if (sk->sk_family == PF_INET ||
			    (!((1 << sk->sk_state) &
			       (TCPF_LISTEN | TCPF_CLOSE)) &&
			     inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
				/* Options change the IP header length, so
				 * connection sockets must account for them
				 * in the cached header size and re-sync MSS.
				 */
				if (old)
					icsk->icsk_ext_hdr_len -= old->opt.optlen;
				if (opt)
					icsk->icsk_ext_hdr_len += opt->opt.optlen;
				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if IS_ENABLED(CONFIG_IPV6)
			}
#endif
		}
		rcu_assign_pointer(inet->inet_opt, opt);
		if (old)
			kfree_rcu(old, rcu);
		break;
	}
	/* The next group of cases toggle cmsg_flags bits that control
	 * which ancillary data ip_cmsg_recv() reports on receive.
	 */
	case IP_PKTINFO:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PKTINFO;
		else
			inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
		break;
	case IP_RECVTTL:
		if (val)
			inet->cmsg_flags |=  IP_CMSG_TTL;
		else
			inet->cmsg_flags &= ~IP_CMSG_TTL;
		break;
	case IP_RECVTOS:
		if (val)
			inet->cmsg_flags |=  IP_CMSG_TOS;
		else
			inet->cmsg_flags &= ~IP_CMSG_TOS;
		break;
	case IP_RECVOPTS:
		if (val)
			inet->cmsg_flags |=  IP_CMSG_RECVOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
		break;
	case IP_RETOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RETOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
		break;
	case IP_PASSSEC:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PASSSEC;
		else
			inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
		break;
	case IP_RECVORIGDSTADDR:
		if (val)
			inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
		else
			inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
		break;
	case IP_CHECKSUM:
		/* Besides the flag, checksum conversion is refcounted on
		 * the socket, so only transition on actual state changes.
		 */
		if (val) {
			if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
				inet_inc_convert_csum(sk);
				inet->cmsg_flags |= IP_CMSG_CHECKSUM;
			}
		} else {
			if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
				inet_dec_convert_csum(sk);
				inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
			}
		}
		break;
	case IP_RECVFRAGSIZE:
		if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
			goto e_inval;
		if (val)
			inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE;
		else
			inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE;
		break;
	case IP_TOS:	/* This sets both TOS and Precedence */
		if (sk->sk_type == SOCK_STREAM) {
			/* TCP owns the ECN bits; keep the current ones. */
			val &= ~INET_ECN_MASK;
			val |= inet->tos & INET_ECN_MASK;
		}
		if (inet->tos != val) {
			inet->tos = val;
			sk->sk_priority = rt_tos2priority(val);
			sk_dst_reset(sk);
		}
		break;
	case IP_TTL:
		if (optlen < 1)
			goto e_inval;
		/* -1 means "use the route/sysctl default". */
		if (val != -1 && (val < 1 || val > 255))
			goto e_inval;
		inet->uc_ttl = val;
		break;
	case IP_HDRINCL:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->hdrincl = val ? 1 : 0;
		break;
	case IP_NODEFRAG:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->nodefrag = val ? 1 : 0;
		break;
	case IP_BIND_ADDRESS_NO_PORT:
		inet->bind_address_no_port = val ? 1 : 0;
		break;
	case IP_MTU_DISCOVER:
		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
			goto e_inval;
		inet->pmtudisc = val;
		break;
	case IP_RECVERR:
		inet->recverr = !!val;
		/* Disabling error reporting discards any queued errors. */
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		break;
	case IP_MULTICAST_TTL:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (optlen < 1)
			goto e_inval;
		if (val == -1)
			val = 1;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->mc_ttl = val;
		break;
	case IP_MULTICAST_LOOP:
		if (optlen < 1)
			goto e_inval;
		inet->mc_loop = !!val;
		break;
	case IP_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;
		int midx;

		if (optlen != sizeof(int))
			goto e_inval;

		/* The ifindex arrives in network byte order. */
		ifindex = (__force int)ntohl((__force __be32)val);
		if (ifindex == 0) {
			inet->uc_index = 0;
			err = 0;
			break;
		}

		dev = dev_get_by_index(sock_net(sk), ifindex);
		err = -EADDRNOTAVAIL;
		if (!dev)
			break;

		midx = l3mdev_master_ifindex(dev);
		dev_put(dev);

		/* On a bound socket, only allow devices whose L3 master
		 * matches the bound device.
		 */
		err = -EINVAL;
		if (sk->sk_bound_dev_if &&
		    (!midx || midx != sk->sk_bound_dev_if))
			break;

		inet->uc_index = ifindex;
		err = 0;
		break;
	}
	case IP_MULTICAST_IF:
	{
		struct ip_mreqn mreq;
		struct net_device *dev = NULL;
		int midx;

		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		/*
		 *	Check the arguments are allowable
		 */

		if (optlen < sizeof(struct in_addr))
			goto e_inval;

		/* Accept struct ip_mreqn, ip_mreq, or bare in_addr,
		 * distinguished by optlen.
		 */
		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (optlen >= sizeof(struct ip_mreq)) {
				if (copy_from_user(&mreq, optval,
						   sizeof(struct ip_mreq)))
					break;
			} else if (optlen >= sizeof(struct in_addr)) {
				if (copy_from_user(&mreq.imr_address, optval,
						   sizeof(struct in_addr)))
					break;
			}
		}

		if (!mreq.imr_ifindex) {
			if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
				inet->mc_index = 0;
				inet->mc_addr  = 0;
				err = 0;
				break;
			}
			/* No ifindex given: resolve it from the address. */
			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
			if (dev)
				mreq.imr_ifindex = dev->ifindex;
		} else
			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);


		err = -EADDRNOTAVAIL;
		if (!dev)
			break;

		midx = l3mdev_master_ifindex(dev);

		dev_put(dev);

		/* On a bound socket, the device must be the bound one or
		 * a slave of the bound L3 master device.
		 */
		err = -EINVAL;
		if (sk->sk_bound_dev_if &&
		    mreq.imr_ifindex != sk->sk_bound_dev_if &&
		    (!midx || midx != sk->sk_bound_dev_if))
			break;

		inet->mc_index = mreq.imr_ifindex;
		inet->mc_addr  = mreq.imr_address.s_addr;
		err = 0;
		break;
	}

	case IP_ADD_MEMBERSHIP:
	case IP_DROP_MEMBERSHIP:
	{
		struct ip_mreqn mreq;

		err = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		if (optlen < sizeof(struct ip_mreq))
			goto e_inval;
		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
				break;
		}

		if (optname == IP_ADD_MEMBERSHIP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
	case IP_MSFILTER:
	{
		struct ip_msfilter *msf;

		if (optlen < IP_MSFILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		msf = memdup_user(optval, optlen);
		if (IS_ERR(msf)) {
			err = PTR_ERR(msf);
			break;
		}
		/* numsrc >= (1G-4) overflow in 32 bits */
		if (msf->imsf_numsrc >= 0x3ffffffcU ||
		    msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
			kfree(msf);
			err = -ENOBUFS;
			break;
		}
		/* The declared source count must fit in what was copied. */
		if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
			kfree(msf);
			err = -EINVAL;
			break;
		}
		err = ip_mc_msfilter(sk, msf, 0);
		kfree(msf);
		break;
	}
	case IP_BLOCK_SOURCE:
	case IP_UNBLOCK_SOURCE:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	{
		struct ip_mreq_source mreqs;
		int omode, add;

		if (optlen != sizeof(struct ip_mreq_source))
			goto e_inval;
		if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
			err = -EFAULT;
			break;
		}
		/* Map the option to a filter mode plus add/remove flag. */
		if (optname == IP_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == IP_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
			struct ip_mreqn mreq;

			mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
			mreq.imr_address.s_addr = mreqs.imr_interface;
			mreq.imr_ifindex = 0;
			/* Joining an already-joined group is fine here. */
			err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
			if (err && err != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs, 0);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in *psin;
		struct ip_mreqn mreq;

		if (optlen < sizeof(struct group_req))
			goto e_inval;
		err = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(greq)))
			break;
		psin = (struct sockaddr_in *)&greq.gr_group;
		if (psin->sin_family != AF_INET)
			goto e_inval;
		/* Translate the protocol-independent group_req into the
		 * IPv4-specific ip_mreqn.
		 */
		memset(&mreq, 0, sizeof(mreq));
		mreq.imr_multiaddr = psin->sin_addr;
		mreq.imr_ifindex = greq.gr_interface;

		if (optname == MCAST_JOIN_GROUP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		struct ip_mreq_source mreqs;
		struct sockaddr_in *psin;
		int omode, add;

		if (optlen != sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			err = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET ||
		    greqs.gsr_source.ss_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			break;
		}
		psin = (struct sockaddr_in *)&greqs.gsr_group;
		mreqs.imr_multiaddr = psin->sin_addr.s_addr;
		psin = (struct sockaddr_in *)&greqs.gsr_source;
		mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
		mreqs.imr_interface = 0; /* use index for mc_source */

		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct ip_mreqn mreq;

			psin = (struct sockaddr_in *)&greqs.gsr_group;
			mreq.imr_multiaddr = psin->sin_addr;
			mreq.imr_address.s_addr = 0;
			mreq.imr_ifindex = greqs.gsr_interface;
			/* Joining an already-joined group is fine here. */
			err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
			if (err && err != -EADDRINUSE)
				break;
			greqs.gsr_interface = mreq.imr_ifindex;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs,
				   greqs.gsr_interface);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct sockaddr_in *psin;
		struct ip_msfilter *msf = NULL;
		struct group_filter *gsf = NULL;
		int msize, i, ifindex;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		gsf = memdup_user(optval, optlen);
		if (IS_ERR(gsf)) {
			err = PTR_ERR(gsf);
			break;
		}

		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffff ||
		    gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			err = -EINVAL;
			goto mc_msf_out;
		}
		/* Translate the protocol-independent group_filter into
		 * the IPv4-specific ip_msfilter.
		 */
		msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
		msf = kmalloc(msize, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		ifindex = gsf->gf_interface;
		psin = (struct sockaddr_in *)&gsf->gf_group;
		if (psin->sin_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			goto mc_msf_out;
		}
		msf->imsf_multiaddr = psin->sin_addr.s_addr;
		msf->imsf_interface = 0;
		msf->imsf_fmode = gsf->gf_fmode;
		msf->imsf_numsrc = gsf->gf_numsrc;
		err = -EADDRNOTAVAIL;
		for (i = 0; i < gsf->gf_numsrc; ++i) {
			psin = (struct sockaddr_in *)&gsf->gf_slist[i];

			if (psin->sin_family != AF_INET)
				goto mc_msf_out;
			msf->imsf_slist[i] = psin->sin_addr.s_addr;
		}
		kfree(gsf);
		gsf = NULL;

		err = ip_mc_msfilter(sk, msf, ifindex);
mc_msf_out:
		kfree(msf);
		kfree(gsf);
		break;
	}
	case IP_MULTICAST_ALL:
		if (optlen < 1)
			goto e_inval;
		if (val != 0 && val != 1)
			goto e_inval;
		inet->mc_all = val;
		break;

	case IP_FREEBIND:
		if (optlen < 1)
			goto e_inval;
		inet->freebind = !!val;
		break;

	case IP_IPSEC_POLICY:
	case IP_XFRM_POLICY:
		err = -EPERM;
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			break;
		err = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IP_TRANSPARENT:
		/* Enabling transparency is privileged; disabling is not. */
		if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (optlen < 1)
			goto e_inval;
		inet->transparent = !!val;
		break;

	case IP_MINTTL:
		if (optlen < 1)
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->min_ttl = val;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return err;

e_inval:
	/* Common exit for malformed arguments; drops both locks. */
	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return -EINVAL;
}
      
      /**
       * ipv4_pktinfo_prepare - transfer some info from rtable to skb
       * @sk: socket
       * @skb: buffer
       *
       * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
       * destination in skb->cb[] before dst drop.
       * This way, receiver doesn't make cache line misses to read rtable.
       */
      void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
      {
              struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
              bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
                             ipv6_sk_rxinfo(sk);
      
              if (prepare && skb_rtable(skb)) {
                      /* skb->cb is overloaded: prior to this point it is IP{6}CB
                       * which has interface index (iif) as the first member of the
                       * underlying inet{6}_skb_parm struct. This code then overlays
                       * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
                       * element so the iif is picked up from the prior IPCB. If iif
                       * is the loopback interface, then return the sending interface
                       * (e.g., process binds socket to eth0 for Tx which is
                       * redirected to loopback in the rtable/dst).
                       */
                      struct rtable *rt = skb_rtable(skb);
                      bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
      
                      if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
                              pktinfo->ipi_ifindex = inet_iif(skb);
                      else if (l3slave && rt && rt->rt_iif)
                              pktinfo->ipi_ifindex = rt->rt_iif;
      
                      pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
              } else {
                      pktinfo->ipi_ifindex = 0;
                      pktinfo->ipi_spec_dst.s_addr = 0;
              }
              skb_dst_drop(skb);
      }
      
      int ip_setsockopt(struct sock *sk, int level,
                      int optname, char __user *optval, unsigned int optlen)
      {
              int err;
      
    5         if (level != SOL_IP)
                      return -ENOPROTOOPT;
      
              err = do_ip_setsockopt(sk, level, optname, optval, optlen);
      #if IS_ENABLED(CONFIG_BPFILTER_UMH)
              if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
                  optname < BPFILTER_IPT_SET_MAX)
                      err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
      #endif
      #ifdef CONFIG_NETFILTER
              /* we need to exclude all possible ENOPROTOOPTs except default case */
    5         if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
                              optname != IP_IPSEC_POLICY &&
                              optname != IP_XFRM_POLICY &&
                              !ip_mroute_opt(optname))
                      err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
      #endif
              return err;
      }
      EXPORT_SYMBOL(ip_setsockopt);
      
      #ifdef CONFIG_COMPAT
/* 32-bit compat setsockopt(SOL_IP) entry point.
 *
 * The MCAST_* group options need layout translation and are diverted
 * through compat_mc_setsockopt(); everything else follows the same
 * core-then-netfilter path as ip_setsockopt().
 */
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	/* Group filter structs differ between 32- and 64-bit ABIs. */
	if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
		return compat_mc_setsockopt(sk, level, optname, optval, optlen,
			ip_setsockopt);

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
			optname != IP_IPSEC_POLICY &&
			optname != IP_XFRM_POLICY &&
			!ip_mroute_opt(optname))
		err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
					   optlen);
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_setsockopt);
      #endif
      
/*
 *	Get the options. Note for future reference: the GET of IP options
 *	gets the _received_ ones; the SET sets the ones to be _sent_.
 */
      
      static bool getsockopt_needs_rtnl(int optname)
      {
              switch (optname) {
              case IP_MSFILTER:
              case MCAST_MSFILTER:
                      return true;
              }
              return false;
      }
      
/* Fetch the current value of one SOL_IP option for getsockopt().
 *
 * @flags is forwarded into msg.msg_flags for IP_PKTOPTIONS; the compat
 * entry point passes MSG_CMSG_COMPAT here.
 *
 * Most options are read under lock_sock(); IP_MSFILTER and
 * MCAST_MSFILTER additionally take the RTNL lock first (see
 * getsockopt_needs_rtnl()).  Returns 0 on success or a negative errno.
 */
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	bool needs_rtnl = getsockopt_needs_rtnl(optname);
	int val, err = 0;
	int len;

	if (level != SOL_IP)
		return -EOPNOTSUPP;

	/* Multicast-routing options have their own handler. */
	if (ip_mroute_opt(optname))
		return ip_mroute_getsockopt(sk, optname, optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	/* Lock ordering: RTNL is always taken before the socket lock. */
	if (needs_rtnl)
		rtnl_lock();
	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		/* Header plus the 40-byte IPv4 options maximum. */
		unsigned char optbuf[sizeof(struct ip_options)+40];
		struct ip_options *opt = (struct ip_options *)optbuf;
		struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference_protected(inet->inet_opt,
						     lockdep_sock_is_held(sk));
		opt->optlen = 0;
		if (inet_opt)
			memcpy(optbuf, &inet_opt->opt,
			       sizeof(struct ip_options) +
			       inet_opt->opt.optlen);
		release_sock(sk);

		if (opt->optlen == 0)
			return put_user(0, optlen);

		/* Convert the stored options back to wire format. */
		ip_options_undo(opt);

		len = min_t(unsigned int, len, opt->optlen);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, opt->__data, len))
			return -EFAULT;
		return 0;
	}
	/* The IP_RECV*-style options report whether the corresponding
	 * cmsg flag is currently enabled on the socket.
	 */
	case IP_PKTINFO:
		val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
		break;
	case IP_RECVTTL:
		val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
		break;
	case IP_RECVTOS:
		val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
		break;
	case IP_RECVOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
		break;
	case IP_RETOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
		break;
	case IP_PASSSEC:
		val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
		break;
	case IP_RECVORIGDSTADDR:
		val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
		break;
	case IP_CHECKSUM:
		val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
		break;
	case IP_RECVFRAGSIZE:
		val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0;
		break;
	case IP_TOS:
		val = inet->tos;
		break;
	case IP_TTL:
	{
		/* uc_ttl == -1 means "not set"; fall back to the
		 * per-netns default TTL sysctl.
		 */
		struct net *net = sock_net(sk);
		val = (inet->uc_ttl == -1 ?
		       net->ipv4.sysctl_ip_default_ttl :
		       inet->uc_ttl);
		break;
	}
	case IP_HDRINCL:
		val = inet->hdrincl;
		break;
	case IP_NODEFRAG:
		val = inet->nodefrag;
		break;
	case IP_BIND_ADDRESS_NO_PORT:
		val = inet->bind_address_no_port;
		break;
	case IP_MTU_DISCOVER:
		val = inet->pmtudisc;
		break;
	case IP_MTU:
	{
		/* Report the path MTU of the cached route, if any;
		 * -ENOTCONN when no route/MTU is available.
		 */
		struct dst_entry *dst;
		val = 0;
		dst = sk_dst_get(sk);
		if (dst) {
			val = dst_mtu(dst);
			dst_release(dst);
		}
		if (!val) {
			release_sock(sk);
			return -ENOTCONN;
		}
		break;
	}
	case IP_RECVERR:
		val = inet->recverr;
		break;
	case IP_MULTICAST_TTL:
		val = inet->mc_ttl;
		break;
	case IP_MULTICAST_LOOP:
		val = inet->mc_loop;
		break;
	case IP_UNICAST_IF:
		/* Reported in network byte order, matching setsockopt. */
		val = (__force int)htonl((__u32) inet->uc_index);
		break;
	case IP_MULTICAST_IF:
	{
		/* Returns a struct in_addr, not an int. */
		struct in_addr addr;
		len = min_t(unsigned int, len, sizeof(struct in_addr));
		addr.s_addr = inet->mc_addr;
		release_sock(sk);

		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &addr, len))
			return -EFAULT;
		return 0;
	}
	case IP_MSFILTER:
	{
		struct ip_msfilter msf;

		if (len < IP_MSFILTER_SIZE(0)) {
			err = -EINVAL;
			goto out;
		}
		if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
			err = -EFAULT;
			goto out;
		}
		/* ip_mc_msfget() copies the filter list to userspace
		 * and updates *optlen itself.
		 */
		err = ip_mc_msfget(sk, &msf,
				   (struct ip_msfilter __user *)optval, optlen);
		goto out;
	}
	case MCAST_MSFILTER:
	{
		struct group_filter gsf;

		if (len < GROUP_FILTER_SIZE(0)) {
			err = -EINVAL;
			goto out;
		}
		if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
			err = -EFAULT;
			goto out;
		}
		err = ip_mc_gsfget(sk, &gsf,
				   (struct group_filter __user *)optval,
				   optlen);
		goto out;
	}
	case IP_MULTICAST_ALL:
		val = inet->mc_all;
		break;
	case IP_PKTOPTIONS:
	{
		/* TCP-style only: synthesize the enabled ancillary data
		 * (pktinfo/ttl/tos) directly into the user buffer.
		 */
		struct msghdr msg;

		release_sock(sk);

		if (sk->sk_type != SOCK_STREAM)
			return -ENOPROTOOPT;

		msg.msg_control = (__force void *) optval;
		msg.msg_controllen = len;
		msg.msg_flags = flags;

		if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
			struct in_pktinfo info;

			info.ipi_addr.s_addr = inet->inet_rcv_saddr;
			info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
			info.ipi_ifindex = inet->mc_index;
			put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
		}
		if (inet->cmsg_flags & IP_CMSG_TTL) {
			int hlim = inet->mc_ttl;
			put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
		}
		if (inet->cmsg_flags & IP_CMSG_TOS) {
			int tos = inet->rcv_tos;
			put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
		}
		/* put_cmsg() decremented msg_controllen as it consumed
		 * space; report the number of bytes actually written.
		 */
		len -= msg.msg_controllen;
		return put_user(len, optlen);
	}
	case IP_FREEBIND:
		val = inet->freebind;
		break;
	case IP_TRANSPARENT:
		val = inet->transparent;
		break;
	case IP_MINTTL:
		val = inet->min_ttl;
		break;
	default:
		release_sock(sk);
		return -ENOPROTOOPT;
	}
	release_sock(sk);

	/* A small non-negative value requested with 0 < len < sizeof(int)
	 * is copied out as a single byte; otherwise copy out up to
	 * sizeof(int) bytes of the int value.
	 */
	if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
		unsigned char ucval = (unsigned char)val;
		len = 1;
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &ucval, 1))
			return -EFAULT;
	} else {
		len = min_t(unsigned int, sizeof(int), len);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &val, len))
			return -EFAULT;
	}
	return 0;

out:
	/* Error exit for the filter options, which may hold RTNL. */
	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return err;
}
      
/* getsockopt(SOL_IP) entry point: try the core handler first, then the
 * bpfilter and netfilter extension hooks for options the core does not
 * implement.  Returns 0 or a negative errno.
 */
int ip_getsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, int __user *optlen)
{
	int err;

	err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
#if IS_ENABLED(CONFIG_BPFILTER_UMH)
	/* The iptables SO_GET_* range is serviced by the bpfilter
	 * usermode helper.
	 */
	if (optname >= BPFILTER_IPT_SO_GET_INFO &&
	    optname < BPFILTER_IPT_GET_MAX)
		err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
#endif
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
			!ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_getsockopt);
      
      #ifdef CONFIG_COMPAT
/* 32-bit compat getsockopt(SOL_IP) entry point.
 *
 * MCAST_MSFILTER needs layout translation and is diverted through
 * compat_mc_getsockopt(); otherwise this mirrors ip_getsockopt(),
 * passing MSG_CMSG_COMPAT so IP_PKTOPTIONS emits compat cmsg records.
 */
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen)
{
	int err;

	if (optname == MCAST_MSFILTER)
		return compat_mc_getsockopt(sk, level, optname, optval, optlen,
			ip_getsockopt);

	err = do_ip_getsockopt(sk, level, optname, optval, optlen,
		MSG_CMSG_COMPAT);

#if IS_ENABLED(CONFIG_BPFILTER_UMH)
	/* The iptables SO_GET_* range is serviced by the bpfilter
	 * usermode helper.
	 */
	if (optname >= BPFILTER_IPT_SO_GET_INFO &&
	    optname < BPFILTER_IPT_GET_MAX)
		err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
#endif
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
			!ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_getsockopt);
      #endif
      // SPDX-License-Identifier: GPL-2.0-or-later
      /* SCTP kernel implementation
       * (C) Copyright IBM Corp. 2001, 2004
       * Copyright (c) 1999-2000 Cisco, Inc.
       * Copyright (c) 1999-2001 Motorola, Inc.
       * Copyright (c) 2001 Intel Corp.
       * Copyright (c) 2001 La Monte H.P. Yarroll
       *
       * This file is part of the SCTP kernel implementation
       *
       * This module provides the abstraction for an SCTP association.
       *
       * Please send any bug reports or fixes you make to the
       * email address(es):
       *    lksctp developers <linux-sctp@vger.kernel.org>
       *
       * Written or modified by:
       *    La Monte H.P. Yarroll <piggy@acm.org>
       *    Karl Knutson          <karl@athena.chicago.il.us>
       *    Jon Grimm             <jgrimm@us.ibm.com>
       *    Xingang Guo           <xingang.guo@intel.com>
       *    Hui Huang             <hui.huang@nokia.com>
       *    Sridhar Samudrala            <sri@us.ibm.com>
       *    Daisy Chang            <daisyc@us.ibm.com>
       *    Ryan Layer            <rmlayer@us.ibm.com>
       *    Kevin Gao             <kevin.gao@intel.com>
       */
      
      #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      
      #include <linux/types.h>
      #include <linux/fcntl.h>
      #include <linux/poll.h>
      #include <linux/init.h>
      
      #include <linux/slab.h>
      #include <linux/in.h>
      #include <net/ipv6.h>
      #include <net/sctp/sctp.h>
      #include <net/sctp/sm.h>
      
      /* Forward declarations for internal functions. */
      static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
      static void sctp_assoc_bh_rcv(struct work_struct *work);
      static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
      static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
      
      /* 1st Level Abstractions. */
      
/* Initialize a new association from provided (zeroed) memory.
 *
 * @asoc:  memory to initialize (see sctp_association_new())
 * @ep:    owning endpoint; a reference is taken
 * @sk:    owning socket; a reference is taken
 * @scope: address scope for the association
 * @gfp:   allocation flags for stream/auth setup
 *
 * Most defaults are inherited from the socket's sctp_sock settings,
 * which have already been validated at the socket layer.
 *
 * Returns @asoc on success, or NULL on failure; on failure the
 * endpoint and socket references taken here are dropped again.
 */
static struct sctp_association *sctp_association_init(
					struct sctp_association *asoc,
					const struct sctp_endpoint *ep,
					const struct sock *sk,
					enum sctp_scope scope, gfp_t gfp)
{
	struct sctp_sock *sp;
	struct sctp_paramhdr *p;
	int i;

	/* Retrieve the SCTP per socket area.  */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here.  */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;

	/* Pin the endpoint and socket for the association's lifetime. */
	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure.  */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields.  */
	refcount_set(&asoc->base.refcnt, 1);

	/* Initialize the bind addr area.  */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans  = sp->pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	asoc->flowlabel = sp->flowlabel;
	asoc->dscp = sp->dscp;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	asoc->subscribe = sp->subscribe;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;

	/* Use my own max window until I learn something better.  */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;
	asoc->strreset_outseq = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses.  */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk.  Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* Create an input queue.  */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue.  */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	/* NOTE(review): a sctp_stream_init() failure takes fail_init and
	 * skips sctp_stream_free(); confirm the partially set up stream
	 * needs no teardown on this path.
	 */
	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
			     0, gfp))
		goto fail_init;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;
	sctp_assoc_update_frag_point(asoc);

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
		goto stream_free;

	asoc->active_key_id = ep->active_key_id;
	asoc->strreset_enable = ep->strreset_enable;

	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (struct sctp_paramhdr *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

stream_free:
	sctp_stream_free(&asoc->stream);
fail_init:
	/* Drop the references taken at the top of this function. */
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}
      
      /* Allocate and initialize a new association */
      struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
                                                    const struct sock *sk,
                                                    enum sctp_scope scope, gfp_t gfp)
      {
              struct sctp_association *asoc;
      
  161         asoc = kzalloc(sizeof(*asoc), gfp);
              if (!asoc)
                      goto fail;
      
  161         if (!sctp_association_init(asoc, ep, sk, scope, gfp))
                      goto fail_init;
      
              SCTP_DBG_OBJCNT_INC(assoc);
      
              pr_debug("Created asoc %p\n", asoc);
      
              return asoc;
      
      fail_init:
              kfree(asoc);
      fail:
              return NULL;
      }
      
/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother for if this is a temporary association.
	 */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free stream information. */
	sctp_stream_free(&asoc->stream);

	if (asoc->strreset_chunk)
		sctp_chunk_free(asoc->strreset_chunk);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them?   To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		/* Each pending timer holds a reference on the
		 * association; drop it when the timer is cancelled.
		 */
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_unhash_transport(transport);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	/* Drop the initial reference taken in sctp_association_init();
	 * the final holder triggers sctp_association_destroy().
	 */
	sctp_association_put(asoc);
}
      
      /* Cleanup and free up an association. */
      static void sctp_association_destroy(struct sctp_association *asoc)
      {
   56         if (unlikely(!asoc->base.dead)) {
                      WARN(1, "Attempt to destroy undead association %p!\n", asoc);
                      return;
              }
      
   56         sctp_endpoint_put(asoc->ep);
              sock_put(asoc->base.sk);
      
   56         if (asoc->assoc_id != 0) {
                      spin_lock_bh(&sctp_assocs_id_lock);
                      idr_remove(&sctp_assocs_id, asoc->assoc_id);
                      spin_unlock_bh(&sctp_assocs_id_lock);
              }
      
   56         WARN_ON(atomic_read(&asoc->rmem_alloc));
      
   56         kfree_rcu(asoc, rcu);
              SCTP_DBG_OBJCNT_DEC(assoc);
      }
      
      /* Change the primary destination address for the peer. */
      void sctp_assoc_set_primary(struct sctp_association *asoc,
                                  struct sctp_transport *transport)
      {
              int changeover = 0;
      
              /* it's a changeover only if we already have a primary path
               * that we are changing
               */
  155         if (asoc->peer.primary_path != NULL &&
                  asoc->peer.primary_path != transport)
                      changeover = 1 ;
      
  155         asoc->peer.primary_path = transport;
              sctp_ulpevent_nofity_peer_addr_change(transport,
                                                    SCTP_ADDR_MADE_PRIM, 0);
      
              /* Set a default msg_name for events. */
              memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
                     sizeof(union sctp_addr));
      
              /* If the primary path is changing, assume that the
               * user wants to use this new path.
               */
  155         if ((transport->state == SCTP_ACTIVE) ||
                  (transport->state == SCTP_UNKNOWN))
  155                 asoc->peer.active_path = transport;
      
              /*
               * SFR-CACC algorithm:
               * Upon the receipt of a request to change the primary
               * destination address, on the data structure for the new
               * primary destination, the sender MUST do the following:
               *
               * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
               * to this destination address earlier. The sender MUST set
               * CYCLING_CHANGEOVER to indicate that this switch is a
               * double switch to the same destination address.
               *
               * Really, only bother is we have data queued or outstanding on
               * the association.
               */
  155         if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
                      return;
      
              if (transport->cacc.changeover_active)
                      transport->cacc.cycling_changeover = changeover;
      
              /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
               * a changeover has occurred.
               */
              transport->cacc.changeover_active = changeover;
      
              /* 3) The sender MUST store the next TSN to be sent in
               * next_tsn_at_change.
               */
  155         transport->cacc.next_tsn_at_change = asoc->next_tsn;
      }
      
/* Remove a transport from an association.
 *
 * Unlinks @peer from the association's peer list and the global
 * transport hashtable, rehomes every association pointer and queued
 * chunk that still references it onto a surviving transport, notifies
 * the ULP, and finally frees the transport.
 */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct sctp_transport *transport;
	struct list_head *pos;
	struct sctp_chunk *ch;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);
	/* Remove this peer from the transport hashtable */
	sctp_unhash_transport(peer);

	/* Get the first transport of asoc.
	 * NOTE(review): assumes the list is non-empty after the removal
	 * above, i.e. callers never remove the last remaining transport.
	 */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* A pending stream-reset (RE-CONFIG) chunk bound to this peer
	 * moves to the surviving transport, and its timer is restarted
	 * there.
	 */
	if (asoc->strreset_chunk &&
	    asoc->strreset_chunk->transport == peer) {
		asoc->strreset_chunk->transport = transport;
		sctp_transport_reset_reconf_timer(transport);
	}

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
					transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
					&active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	/* Outqueue chunks not yet transmitted lose their binding to the
	 * removed transport; they will be re-routed when sent.
	 */
	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
		if (ch->transport == peer)
			ch->transport = NULL;

	asoc->peer.transport_count--;

	/* Tell the ULP the address is gone, then drop our reference. */
	sctp_ulpevent_nofity_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0);
	sctp_transport_free(peer);
}
      
/* Add a transport address to an association.
 *
 * Creates (or finds) the transport for @addr, initializes it from the
 * association's configured parameters, hashes and links it, and wires
 * up primary/active/retran path bookkeeping.  Returns the transport,
 * or NULL on allocation/hashing failure.
 */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet.  */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call.  Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans.  */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* IPv6: take the flow label from the address itself when one
	 * was supplied, otherwise inherit the association's.
	 */
	if (addr->sa.sa_family == AF_INET6) {
		__be32 info = addr->v6.sin6_flowinfo;

		if (info) {
			peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
			peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
		} else {
			peer->flowlabel = asoc->flowlabel;
		}
	}
	peer->dscp = asoc->dscp;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	/* Initialize the pmtu of the transport. */
	sctp_transport_route(peer, NULL, sp);

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
				  min_t(int, peer->pathmtu, asoc->pathmtu) :
				  peer->pathmtu);

	peer->pmtu_pending = 0;

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *      min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Add this peer into the transport hashtable */
	if (sctp_hash_transport(peer)) {
		sctp_transport_free(peer);
		return NULL;
	}

	/* Attach the remote transport to our asoc.  */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* Tell the ULP a new peer address is usable. */
	sctp_ulpevent_nofity_peer_addr_change(peer, SCTP_ADDR_ADDED, 0);

	/* If we do not yet have a primary path, set one.  */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	/* Prefer a confirmed new peer as the retransmission path when
	 * the current retran path is simply the active path.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}
      
      /* Delete a transport address from an association.  */
      void sctp_assoc_del_peer(struct sctp_association *asoc,
                               const union sctp_addr *addr)
      {
              struct list_head        *pos;
              struct list_head        *temp;
              struct sctp_transport        *transport;
      
              list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
                      transport = list_entry(pos, struct sctp_transport, transports);
                      if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
                              /* Do book keeping for removing the peer and free it. */
                              sctp_assoc_rm_peer(asoc, transport);
                              break;
                      }
              }
      }
      
      /* Lookup a transport by address. */
      struct sctp_transport *sctp_assoc_lookup_paddr(
                                              const struct sctp_association *asoc,
                                              const union sctp_addr *address)
      {
              struct sctp_transport *t;
      
              /* Cycle through all transports searching for a peer address. */
      
  159         list_for_each_entry(t, &asoc->peer.transport_addr_list,
                              transports) {
   13                 if (sctp_cmp_addr_exact(address, &t->ipaddr))
                              return t;
              }
      
              return NULL;
      }
      
      /* Remove all transports except a give one */
      void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
                                           struct sctp_transport *primary)
      {
              struct sctp_transport        *temp;
              struct sctp_transport        *t;
      
              list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
                                       transports) {
                      /* if the current transport is not the primary one, delete it */
                      if (t != primary)
                              sctp_assoc_rm_peer(asoc, t);
              }
      }
      
      /* Engage in transport control operations.
       * Mark the transport up or down and send a notification to the user.
       * Select and update the new active and retran paths.
       */
      void sctp_assoc_control_transport(struct sctp_association *asoc,
                                        struct sctp_transport *transport,
                                        enum sctp_transport_cmd command,
                                        sctp_sn_error_t error)
      {
              bool ulp_notify = true;
              int spc_state = 0;
      
              /* Record the transition on the transport.  */
              switch (command) {
              case SCTP_TRANSPORT_UP:
                      /* If we are moving from UNCONFIRMED state due
                       * to heartbeat success, report the SCTP_ADDR_CONFIRMED
                       * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
                       */
                      if (SCTP_UNCONFIRMED == transport->state &&
                          SCTP_HEARTBEAT_SUCCESS == error)
                              spc_state = SCTP_ADDR_CONFIRMED;
                      else
                              spc_state = SCTP_ADDR_AVAILABLE;
                      /* Don't inform ULP about transition from PF to
                       * active state and set cwnd to 1 MTU, see SCTP
                       * Quick failover draft section 5.1, point 5
                       */
                      if (transport->state == SCTP_PF) {
                              ulp_notify = false;
                              transport->cwnd = asoc->pathmtu;
                      }
                      transport->state = SCTP_ACTIVE;
                      break;
      
              case SCTP_TRANSPORT_DOWN:
                      /* If the transport was never confirmed, do not transition it
                       * to inactive state.  Also, release the cached route since
                       * there may be a better route next time.
                       */
                      if (transport->state != SCTP_UNCONFIRMED)
                              transport->state = SCTP_INACTIVE;
                      else {
                              sctp_transport_dst_release(transport);
                              ulp_notify = false;
                      }
      
                      spc_state = SCTP_ADDR_UNREACHABLE;
                      break;
      
              case SCTP_TRANSPORT_PF:
                      transport->state = SCTP_PF;
                      ulp_notify = false;
                      break;
      
              default:
                      return;
              }
      
              /* Generate and send a SCTP_PEER_ADDR_CHANGE notification
               * to the user.
               */
              if (ulp_notify)
                      sctp_ulpevent_nofity_peer_addr_change(transport,
                                                            spc_state, error);
      
              /* Select new active and retran paths. */
              sctp_select_active_and_retran_path(asoc);
      }
      
      /* Hold a reference to an association. */
      void sctp_association_hold(struct sctp_association *asoc)
      {
  157         refcount_inc(&asoc->base.refcnt);