/* SPDX-License-Identifier: GPL-2.0 */
      #ifndef __LINUX_BACKING_DEV_DEFS_H
      #define __LINUX_BACKING_DEV_DEFS_H
      
      #include <linux/list.h>
      #include <linux/radix-tree.h>
      #include <linux/rbtree.h>
      #include <linux/spinlock.h>
      #include <linux/percpu_counter.h>
      #include <linux/percpu-refcount.h>
      #include <linux/flex_proportions.h>
      #include <linux/timer.h>
      #include <linux/workqueue.h>
      #include <linux/kref.h>
      #include <linux/refcount.h>
      
      struct page;
      struct device;
      struct dentry;
      
      /*
       * Bits in bdi_writeback.state
       */
      enum wb_state {
              WB_registered,                /* bdi_register() was done */
              WB_writeback_running,        /* Writeback is in progress */
              WB_has_dirty_io,        /* Dirty inodes on ->b_{dirty|io|more_io} */
              WB_start_all,                /* nr_pages == 0 (all) work pending */
      };
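
/*
 * These are bit numbers rather than masks, so bdi_writeback.state must
 * only be manipulated with atomic bitops.  A minimal sketch (editor's
 * illustration, not from the original source; queue_writeback() is a
 * hypothetical helper):
 *
 *	set_bit(WB_registered, &wb->state);
 *	if (test_bit(WB_has_dirty_io, &wb->state))
 *		queue_writeback(wb);
 *	clear_bit(WB_writeback_running, &wb->state);
 */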
      
      enum wb_congested_state {
              WB_async_congested,        /* The async (write) queue is getting full */
              WB_sync_congested,        /* The sync queue is getting full */
      };
      
      typedef int (congested_fn)(void *, int);
      
      enum wb_stat_item {
              WB_RECLAIMABLE,
              WB_WRITEBACK,
              WB_DIRTIED,
              WB_WRITTEN,
              NR_WB_STAT_ITEMS
      };
      
      #define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
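
/*
 * WB_STAT_BATCH bounds the per-CPU drift of the writeback counters as
 * the CPU count grows.  A hedged sketch of a stat update (editor's
 * illustration; the real wb_stat helpers live in backing-dev.h):
 *
 *	percpu_counter_add_batch(&wb->stat[WB_RECLAIMABLE], amount,
 *				 WB_STAT_BATCH);
 */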
      
      /*
       * why some writeback work was initiated
       */
      enum wb_reason {
              WB_REASON_BACKGROUND,
              WB_REASON_VMSCAN,
              WB_REASON_SYNC,
              WB_REASON_PERIODIC,
              WB_REASON_LAPTOP_TIMER,
              WB_REASON_FREE_MORE_MEM,
              WB_REASON_FS_FREE_SPACE,
        /*
         * The bdi forker thread is gone and this work is now done by an
         * emergency worker; however, the reason is visible to userland
         * via tracepoints, and we keep exposing exactly the same
         * information, so the name is retained even though it no longer
         * matches.
         */
              WB_REASON_FORKER_THREAD,
      
              WB_REASON_MAX,
      };
      
      /*
       * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
       * wb's can operate mostly independently but should share the congested
       * state.  To facilitate such sharing, the congested state is tracked using
       * the following struct which is created on demand, indexed by blkcg ID on
       * its bdi, and refcounted.
       */
      struct bdi_writeback_congested {
              unsigned long state;                /* WB_[a]sync_congested flags */
              refcount_t refcnt;                /* nr of attached wb's and blkg */
      
      #ifdef CONFIG_CGROUP_WRITEBACK
              struct backing_dev_info *__bdi;        /* the associated bdi, set to NULL
                                               * on bdi unregistration. For memcg-wb
                                               * internal use only! */
              int blkcg_id;                        /* ID of the associated blkcg */
        struct rb_node rb_node;                /* on bdi->cgwb_congested_tree */
      #endif
      };
      
      /*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback, each
 * bdi (backing_dev_info) is served by its embedded bdi->wb.
       *
       * On the default hierarchy, blkcg implicitly enables memcg.  This allows
       * using memcg's page ownership for attributing writeback IOs, and every
       * memcg - blkcg combination can be served by its own wb by assigning a
       * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up to
       * the tasks which are generating the dirty pages to be written back.
       *
       * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
       * refcounted with the number of inodes attached to it, and pins the memcg
       * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
       * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
       * is tested for blkcg after lookup and removed from index on mismatch so
       * that a new wb for the combination can be created.
       */
      struct bdi_writeback {
              struct backing_dev_info *bdi;        /* our parent bdi */
      
              unsigned long state;                /* Always use atomic bitops on this */
              unsigned long last_old_flush;        /* last old data flush */
      
              struct list_head b_dirty;        /* dirty inodes */
              struct list_head b_io;                /* parked for writeback */
              struct list_head b_more_io;        /* parked for more writeback */
              struct list_head b_dirty_time;        /* time stamps are dirty */
              spinlock_t list_lock;                /* protects the b_* lists */
      
              struct percpu_counter stat[NR_WB_STAT_ITEMS];
      
              struct bdi_writeback_congested *congested;
      
              unsigned long bw_time_stamp;        /* last time write bw is updated */
              unsigned long dirtied_stamp;
              unsigned long written_stamp;        /* pages written at bw_time_stamp */
              unsigned long write_bandwidth;        /* the estimated write bandwidth */
              unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */
      
              /*
         * The base dirty throttle rate, recalculated every 200ms.
         * All the bdi tasks' dirty rate will be curbed under it.
         * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
         * in small steps and is much smoother and more stable than the latter.
               */
              unsigned long dirty_ratelimit;
              unsigned long balanced_dirty_ratelimit;
      
              struct fprop_local_percpu completions;
              int dirty_exceeded;
              enum wb_reason start_all_reason;
      
              spinlock_t work_lock;                /* protects work_list & dwork scheduling */
              struct list_head work_list;
              struct delayed_work dwork;        /* work item used for writeback */
      
              unsigned long dirty_sleep;        /* last wait */
      
              struct list_head bdi_node;        /* anchored at bdi->wb_list */
      
      #ifdef CONFIG_CGROUP_WRITEBACK
              struct percpu_ref refcnt;        /* used only for !root wb's */
              struct fprop_local_percpu memcg_completions;
              struct cgroup_subsys_state *memcg_css; /* the associated memcg */
              struct cgroup_subsys_state *blkcg_css; /* and blkcg */
              struct list_head memcg_node;        /* anchored at memcg->cgwb_list */
              struct list_head blkcg_node;        /* anchored at blkcg->cgwb_list */
      
              union {
                      struct work_struct release_work;
                      struct rcu_head rcu;
              };
      #endif
      };
      
      struct backing_dev_info {
              struct list_head bdi_list;
              unsigned long ra_pages;        /* max readahead in PAGE_SIZE units */
              unsigned long io_pages;        /* max allowed IO size */
              congested_fn *congested_fn; /* Function pointer if device is md/dm */
              void *congested_data;        /* Pointer to aux data for congested func */
      
              const char *name;
      
              struct kref refcnt;        /* Reference counter for the structure */
              unsigned int capabilities; /* Device capabilities */
              unsigned int min_ratio;
              unsigned int max_ratio, max_prop_frac;
      
              /*
               * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
         * any dirty wbs, which bdi_has_dirty_io() depends upon.
               */
              atomic_long_t tot_write_bandwidth;
      
              struct bdi_writeback wb;  /* the root writeback info for this bdi */
              struct list_head wb_list; /* list of all wbs */
      #ifdef CONFIG_CGROUP_WRITEBACK
              struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
              struct rb_root cgwb_congested_tree; /* their congested states */
              struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
              struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
      #else
              struct bdi_writeback_congested *wb_congested;
      #endif
              wait_queue_head_t wb_waitq;
      
              struct device *dev;
              struct device *owner;
      
              struct timer_list laptop_mode_wb_timer;
      
      #ifdef CONFIG_DEBUG_FS
              struct dentry *debug_dir;
      #endif
      };
      
      enum {
              BLK_RW_ASYNC        = 0,
              BLK_RW_SYNC        = 1,
      };
      
      void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
      void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
      
      static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
      {
              clear_wb_congested(bdi->wb.congested, sync);
      }
      
      static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
      {
              set_wb_congested(bdi->wb.congested, sync);
      }
      
      struct wb_lock_cookie {
              bool locked;
              unsigned long flags;
      };
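
/*
 * A wb_lock_cookie records whether the lock was actually taken, plus the
 * saved irq flags, so the matching end helper can undo exactly what the
 * begin helper did.  Illustrative pattern only (editor's sketch; the
 * begin/end helpers are declared in backing-dev.h):
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... operate on wb ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */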
      
      #ifdef CONFIG_CGROUP_WRITEBACK
      
      /**
       * wb_tryget - try to increment a wb's refcount
       * @wb: bdi_writeback to get
       */
      static inline bool wb_tryget(struct bdi_writeback *wb)
      {
        if (wb != &wb->bdi->wb)
                return percpu_ref_tryget(&wb->refcnt);
              return true;
      }
      
      /**
       * wb_get - increment a wb's refcount
       * @wb: bdi_writeback to get
       */
      static inline void wb_get(struct bdi_writeback *wb)
      {
              if (wb != &wb->bdi->wb)
                percpu_ref_get(&wb->refcnt);
      }
      
      /**
       * wb_put - decrement a wb's refcount
       * @wb: bdi_writeback to put
       */
      static inline void wb_put(struct bdi_writeback *wb)
      {
        if (WARN_ON_ONCE(!wb->bdi)) {
                      /*
                       * A driver bug might cause a file to be removed before bdi was
                       * initialized.
                       */
                      return;
              }
      
        if (wb != &wb->bdi->wb)
                percpu_ref_put(&wb->refcnt);
      }
      
      /**
       * wb_dying - is a wb dying?
       * @wb: bdi_writeback of interest
       *
       * Returns whether @wb is unlinked and being drained.
       */
      static inline bool wb_dying(struct bdi_writeback *wb)
      {
              return percpu_ref_is_dying(&wb->refcnt);
      }
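
/*
 * Typical refcounting pattern for a looked-up cgroup wb (editor's
 * sketch; the lookup itself is done via helpers defined elsewhere):
 *
 *	if (wb_tryget(wb)) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 *
 * The root wb is embedded in its bdi and shares its lifetime, which is
 * why all of the helpers above short-circuit for wb == &wb->bdi->wb.
 */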
      
      #else        /* CONFIG_CGROUP_WRITEBACK */
      
      static inline bool wb_tryget(struct bdi_writeback *wb)
      {
              return true;
      }
      
      static inline void wb_get(struct bdi_writeback *wb)
      {
      }
      
      static inline void wb_put(struct bdi_writeback *wb)
      {
      }
      
      static inline bool wb_dying(struct bdi_writeback *wb)
      {
              return false;
      }
      
      #endif        /* CONFIG_CGROUP_WRITEBACK */
      
      #endif        /* __LINUX_BACKING_DEV_DEFS_H */
      /* SPDX-License-Identifier: GPL-2.0-only */
      /*
       * include/linux/idr.h
       * 
       * 2002-10-18  written by Jim Houston jim.houston@ccur.com
       *        Copyright (C) 2002 by Concurrent Computer Corporation
       *
       * Small id to pointer translation service avoiding fixed sized
       * tables.
       */
      
      #ifndef __IDR_H__
      #define __IDR_H__
      
      #include <linux/radix-tree.h>
      #include <linux/gfp.h>
      #include <linux/percpu.h>
      
      struct idr {
              struct radix_tree_root        idr_rt;
              unsigned int                idr_base;
              unsigned int                idr_next;
      };
      
      /*
       * The IDR API does not expose the tagging functionality of the radix tree
       * to users.  Use tag 0 to track whether a node has free space below it.
       */
      #define IDR_FREE        0
      
      /* Set the IDR flag and the IDR_FREE tag */
      #define IDR_RT_MARKER        (ROOT_IS_IDR | (__force gfp_t)                        \
                                              (1 << (ROOT_TAG_SHIFT + IDR_FREE)))
      
      #define IDR_INIT_BASE(name, base) {                                        \
              .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER),                        \
              .idr_base = (base),                                                \
              .idr_next = 0,                                                        \
      }
      
      /**
       * IDR_INIT() - Initialise an IDR.
       * @name: Name of IDR.
       *
       * A freshly-initialised IDR contains no IDs.
       */
      #define IDR_INIT(name)        IDR_INIT_BASE(name, 0)
      
      /**
       * DEFINE_IDR() - Define a statically-allocated IDR.
       * @name: Name of IDR.
       *
       * An IDR defined using this macro is ready for use with no additional
       * initialisation required.  It contains no IDs.
       */
      #define DEFINE_IDR(name)        struct idr name = IDR_INIT(name)
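
/*
 * Minimal usage sketch (editor's illustration; my_idr and ptr are
 * hypothetical, and an @end of 0 means "no upper limit"):
 *
 *	DEFINE_IDR(my_idr);
 *
 *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	idr_remove(&my_idr, id);
 */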
      
      /**
       * idr_get_cursor - Return the current position of the cyclic allocator
       * @idr: idr handle
       *
       * The value returned is the value that will be next returned from
       * idr_alloc_cyclic() if it is free (otherwise the search will start from
       * this position).
       */
      static inline unsigned int idr_get_cursor(const struct idr *idr)
      {
              return READ_ONCE(idr->idr_next);
      }
      
      /**
       * idr_set_cursor - Set the current position of the cyclic allocator
       * @idr: idr handle
       * @val: new position
       *
       * The next call to idr_alloc_cyclic() will return @val if it is free
       * (otherwise the search will start from this position).
       */
      static inline void idr_set_cursor(struct idr *idr, unsigned int val)
      {
              WRITE_ONCE(idr->idr_next, val);
      }
      
      /**
       * DOC: idr sync
       * idr synchronization (stolen from radix-tree.h)
       *
       * idr_find() is able to be called locklessly, using RCU. The caller must
       * ensure calls to this function are made within rcu_read_lock() regions.
       * Other readers (lock-free or otherwise) and modifications may be running
       * concurrently.
       *
       * It is still required that the caller manage the synchronization and
       * lifetimes of the items. So if RCU lock-free lookups are used, typically
       * this would mean that the items have their own locks, or are amenable to
       * lock-free access; and that the items are freed by RCU (or only freed after
       * having been deleted from the idr tree *and* a synchronize_rcu() grace
       * period).
       */
      
      #define idr_lock(idr)                xa_lock(&(idr)->idr_rt)
      #define idr_unlock(idr)                xa_unlock(&(idr)->idr_rt)
      #define idr_lock_bh(idr)        xa_lock_bh(&(idr)->idr_rt)
      #define idr_unlock_bh(idr)        xa_unlock_bh(&(idr)->idr_rt)
      #define idr_lock_irq(idr)        xa_lock_irq(&(idr)->idr_rt)
      #define idr_unlock_irq(idr)        xa_unlock_irq(&(idr)->idr_rt)
      #define idr_lock_irqsave(idr, flags) \
                                      xa_lock_irqsave(&(idr)->idr_rt, flags)
      #define idr_unlock_irqrestore(idr, flags) \
                                      xa_unlock_irqrestore(&(idr)->idr_rt, flags)
      
      void idr_preload(gfp_t gfp_mask);
      
      int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
      int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
                                      unsigned long max, gfp_t);
      int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
      void *idr_remove(struct idr *, unsigned long id);
      void *idr_find(const struct idr *, unsigned long id);
      int idr_for_each(const struct idr *,
                       int (*fn)(int id, void *p, void *data), void *data);
      void *idr_get_next(struct idr *, int *nextid);
      void *idr_get_next_ul(struct idr *, unsigned long *nextid);
      void *idr_replace(struct idr *, void *, unsigned long id);
      void idr_destroy(struct idr *);
      
      /**
       * idr_init_base() - Initialise an IDR.
       * @idr: IDR handle.
       * @base: The base value for the IDR.
       *
       * This variation of idr_init() creates an IDR which will allocate IDs
       * starting at %base.
       */
      static inline void idr_init_base(struct idr *idr, int base)
      {
              INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
              idr->idr_base = base;
              idr->idr_next = 0;
      }
      
      /**
       * idr_init() - Initialise an IDR.
       * @idr: IDR handle.
       *
       * Initialise a dynamically allocated IDR.  To initialise a
       * statically allocated IDR, use DEFINE_IDR().
       */
      static inline void idr_init(struct idr *idr)
      {
              idr_init_base(idr, 0);
      }
      
      /**
       * idr_is_empty() - Are there any IDs allocated?
       * @idr: IDR handle.
       *
 * Return: %true if no IDs are currently allocated from this IDR.
       */
      static inline bool idr_is_empty(const struct idr *idr)
      {
              return radix_tree_empty(&idr->idr_rt) &&
                      radix_tree_tagged(&idr->idr_rt, IDR_FREE);
      }
      
      /**
       * idr_preload_end - end preload section started with idr_preload()
       *
       * Each idr_preload() should be matched with an invocation of this
       * function.  See idr_preload() for details.
       */
      static inline void idr_preload_end(void)
      {
        preempt_enable();
      }
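
/*
 * The preload pair allows an allocation under a spinlock to dip into a
 * preallocated per-CPU buffer.  A hedged sketch of the usual pattern
 * (editor's illustration; my_idr and my_lock are hypothetical):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */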
      
      /**
       * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
       * @idr: IDR handle.
       * @entry: The type * to use as cursor
       * @id: Entry ID.
       *
       * @entry and @id do not need to be initialized before the loop, and
       * after normal termination @entry is left with the value NULL.  This
       * is convenient for a "not found" value.
       */
      #define idr_for_each_entry(idr, entry, id)                        \
              for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
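
/*
 * Iteration sketch (editor's illustration; struct foo and my_idr are
 * hypothetical):
 *
 *	struct foo *entry;
 *	int id;
 *
 *	idr_for_each_entry(&my_idr, entry, id)
 *		pr_info("id %d -> %p\n", id, entry);
 */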
      
      /**
       * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
       * @idr: IDR handle.
       * @entry: The type * to use as cursor.
       * @tmp: A temporary placeholder for ID.
       * @id: Entry ID.
       *
       * @entry and @id do not need to be initialized before the loop, and
       * after normal termination @entry is left with the value NULL.  This
       * is convenient for a "not found" value.
       */
      #define idr_for_each_entry_ul(idr, entry, tmp, id)                        \
              for (tmp = 0, id = 0;                                                \
                   tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
                   tmp = id, ++id)
      
      /**
       * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
       * @idr: IDR handle.
       * @entry: The type * to use as a cursor.
       * @id: Entry ID.
       *
       * Continue to iterate over entries, continuing after the current position.
       */
      #define idr_for_each_entry_continue(idr, entry, id)                        \
              for ((entry) = idr_get_next((idr), &(id));                        \
                   entry;                                                        \
                   ++id, (entry) = idr_get_next((idr), &(id)))
      
      /**
       * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
       * @idr: IDR handle.
       * @entry: The type * to use as a cursor.
       * @tmp: A temporary placeholder for ID.
       * @id: Entry ID.
       *
       * Continue to iterate over entries, continuing after the current position.
       */
      #define idr_for_each_entry_continue_ul(idr, entry, tmp, id)                \
              for (tmp = id;                                                        \
                   tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
                   tmp = id, ++id)
      
      /*
       * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
       */
      #define IDA_CHUNK_SIZE                128        /* 128 bytes per chunk */
      #define IDA_BITMAP_LONGS        (IDA_CHUNK_SIZE / sizeof(long))
      #define IDA_BITMAP_BITS         (IDA_BITMAP_LONGS * sizeof(long) * 8)
      
      struct ida_bitmap {
              unsigned long                bitmap[IDA_BITMAP_LONGS];
      };
      
      struct ida {
              struct xarray xa;
      };
      
      #define IDA_INIT_FLAGS        (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
      
      #define IDA_INIT(name)        {                                                \
              .xa = XARRAY_INIT(name, IDA_INIT_FLAGS)                                \
      }
      #define DEFINE_IDA(name)        struct ida name = IDA_INIT(name)
      
      int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
      void ida_free(struct ida *, unsigned int id);
      void ida_destroy(struct ida *ida);
      
      /**
       * ida_alloc() - Allocate an unused ID.
       * @ida: IDA handle.
       * @gfp: Memory allocation flags.
       *
       * Allocate an ID between 0 and %INT_MAX, inclusive.
       *
       * Context: Any context.
       * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
       * or %-ENOSPC if there are no free IDs.
       */
      static inline int ida_alloc(struct ida *ida, gfp_t gfp)
      {
              return ida_alloc_range(ida, 0, ~0, gfp);
      }
      
      /**
       * ida_alloc_min() - Allocate an unused ID.
       * @ida: IDA handle.
       * @min: Lowest ID to allocate.
       * @gfp: Memory allocation flags.
       *
       * Allocate an ID between @min and %INT_MAX, inclusive.
       *
       * Context: Any context.
       * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
       * or %-ENOSPC if there are no free IDs.
       */
      static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
      {
              return ida_alloc_range(ida, min, ~0, gfp);
      }
      
      /**
       * ida_alloc_max() - Allocate an unused ID.
       * @ida: IDA handle.
       * @max: Highest ID to allocate.
       * @gfp: Memory allocation flags.
       *
       * Allocate an ID between 0 and @max, inclusive.
       *
       * Context: Any context.
       * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
       * or %-ENOSPC if there are no free IDs.
       */
      static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
      {
              return ida_alloc_range(ida, 0, max, gfp);
      }
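
/*
 * Typical IDA usage (editor's sketch; my_ida is hypothetical): allocate
 * the lowest free ID up to a bound, then release it when done.
 *
 *	DEFINE_IDA(my_ida);
 *
 *	id = ida_alloc_max(&my_ida, 255, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_free(&my_ida, id);
 */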
      
      static inline void ida_init(struct ida *ida)
      {
              xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
      }
      
      #define ida_simple_get(ida, start, end, gfp)        \
                              ida_alloc_range(ida, start, (end) - 1, gfp)
      #define ida_simple_remove(ida, id)        ida_free(ida, id)
      
      static inline bool ida_is_empty(const struct ida *ida)
      {
              return xa_empty(&ida->xa);
      }
      #endif /* __IDR_H__ */
      // SPDX-License-Identifier: GPL-2.0
      #include <linux/kernel.h>
      #include <linux/export.h>
      #include <linux/uaccess.h>
      
      #include <asm/word-at-a-time.h>
      
      /* Set bits in the first 'n' bytes when loaded from memory */
      #ifdef __LITTLE_ENDIAN
      #  define aligned_byte_mask(n) ((1ul << 8*(n))-1)
      #else
      #  define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
      #endif
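
/*
 * Worked example (editor's note): on 64-bit little-endian,
 * aligned_byte_mask(2) == (1ul << 16) - 1 == 0xffff, i.e. the low two
 * bytes, which are the first two bytes in memory.  On big-endian the
 * first bytes in memory are the high-order ones, hence the shifted form.
 */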
      
      /*
       * Do a strnlen, return length of string *with* final '\0'.
       * 'count' is the user-supplied count, while 'max' is the
       * address space maximum.
       *
       * Return 0 for exceptions (which includes hitting the address
       * space maximum), or 'count+1' if hitting the user-supplied
       * maximum count.
       *
       * NOTE! We can sometimes overshoot the user-supplied maximum
 * if it fits in an aligned 'long'. The caller needs to check
       * the return value against "> max".
       */
      static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
      {
              const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
              unsigned long align, res = 0;
              unsigned long c;
      
              /*
               * Truncate 'max' to the user-specified limit, so that
               * we only have one limit we need to check in the loop
               */
              if (max > count)
                      max = count;
      
              /*
               * Do everything aligned. But that means that we
               * need to also expand the maximum..
               */
              align = (sizeof(unsigned long) - 1) & (unsigned long)src;
              src -= align;
              max += align;
      
              unsafe_get_user(c, (unsigned long __user *)src, efault);
        c |= aligned_byte_mask(align);
      
              for (;;) {
                      unsigned long data;
                if (has_zero(c, &data, &constants)) {
                        data = prep_zero_mask(c, data, &constants);
                        data = create_zero_mask(data);
                        return res + find_zero(data) + 1 - align;
                }
                res += sizeof(unsigned long);
                /* We already handled 'unsigned long' bytes. Did we do it all ? */
                if (unlikely(max <= sizeof(unsigned long)))
                        break;
                max -= sizeof(unsigned long);
                      unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
              }
              res -= align;
      
              /*
               * Uhhuh. We hit 'max'. But was that the user-specified maximum
               * too? If so, return the marker for "too long".
               */
              if (res >= count)
                      return count+1;
      
              /*
               * Nope: we hit the address space limit, and we still had more
               * characters the caller would have wanted. That's 0.
               */
      efault:
              return 0;
      }
      
      /**
       * strnlen_user: - Get the size of a user string INCLUDING final NUL.
       * @str: The string to measure.
       * @count: Maximum count (including NUL character)
       *
       * Context: User context only. This function may sleep if pagefaults are
       *          enabled.
       *
       * Get the size of a NUL-terminated string in user space.
       *
       * Returns the size of the string INCLUDING the terminating NUL.
       * If the string is too long, returns a number larger than @count. User
       * has to check the return value against "> count".
       * On exception (or invalid count), returns 0.
       *
       * NOTE! You should basically never use this function. There is
       * almost never any valid case for using the length of a user space
       * string, since the string can be changed at any time by other
       * threads. Use "strncpy_from_user()" instead to get a stable copy
       * of the string.
       */
      long strnlen_user(const char __user *str, long count)
      {
              unsigned long max_addr, src_addr;
      
        if (unlikely(count <= 0))
                return 0;

        max_addr = user_addr_max();
        src_addr = (unsigned long)str;
        if (likely(src_addr < max_addr)) {
                unsigned long max = max_addr - src_addr;
                long retval;

                if (user_access_begin(str, max)) {
                        retval = do_strnlen_user(str, count, max);
                        user_access_end();
                        return retval;
                      }
              }
              return 0;
      }
      EXPORT_SYMBOL(strnlen_user);
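
/*
 * Caller-side sketch (editor's illustration; kbuf and the error choices
 * are hypothetical).  Note the "> count" check the comment above insists
 * on:
 *
 *	len = strnlen_user(ustr, sizeof(kbuf));
 *	if (!len)
 *		return -EFAULT;
 *	if (len > sizeof(kbuf))
 *		return -E2BIG;
 */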
      /* SPDX-License-Identifier: GPL-2.0 */
      #ifndef _LINUX_RCULIST_BL_H
      #define _LINUX_RCULIST_BL_H
      
      /*
       * RCU-protected bl list version. See include/linux/list_bl.h.
       */
      #include <linux/list_bl.h>
      #include <linux/rcupdate.h>
      
      static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
                                              struct hlist_bl_node *n)
      {
              LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
        LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
                                                        LIST_BL_LOCKMASK);
        rcu_assign_pointer(h->first,
                (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
      }
      
      static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
      {
              return (struct hlist_bl_node *)
                ((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK);
      }
      
      /**
       * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
       * @n: the element to delete from the hash list.
       *
       * Note: hlist_bl_unhashed() on the node returns true after this. It is
       * useful for RCU based read lockfree traversal if the writer side
       * must know if the list entry is still hashed or already unhashed.
       *
       * In particular, it means that we can not poison the forward pointers
       * that may still be used for walking the hash list and we can only
       * zero the pprev pointer so list_unhashed() will return true after
       * this.
       *
       * The caller must take whatever precautions are necessary (such as
       * holding appropriate locks) to avoid racing with another
       * list-mutation primitive, such as hlist_bl_add_head_rcu() or
       * hlist_bl_del_rcu(), running on this same list.  However, it is
       * perfectly legal to run concurrently with the _rcu list-traversal
       * primitives, such as hlist_bl_for_each_entry_rcu().
       */
      static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
      {
              if (!hlist_bl_unhashed(n)) {
                      __hlist_bl_del(n);
                      n->pprev = NULL;
              }
      }
      
      /**
       * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
       * @n: the element to delete from the hash list.
       *
       * Note: hlist_bl_unhashed() on entry does not return true after this,
       * the entry is in an undefined state. It is useful for RCU based
       * lockfree traversal.
       *
       * In particular, it means that we can not poison the forward
       * pointers that may still be used for walking the hash list.
       *
       * The caller must take whatever precautions are necessary
       * (such as holding appropriate locks) to avoid racing
       * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
       * or hlist_bl_del_rcu(), running on this same list.
       * However, it is perfectly legal to run concurrently with
       * the _rcu list-traversal primitives, such as
       * hlist_bl_for_each_entry().
       */
      static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
      {
              __hlist_bl_del(n);
              n->pprev = LIST_POISON2;
      }
      
      /**
       * hlist_bl_add_head_rcu
       * @n: the element to add to the hash list.
       * @h: the list to add to.
       *
       * Description:
       * Adds the specified element to the specified hlist_bl,
       * while permitting racing traversals.
       *
       * The caller must take whatever precautions are necessary
       * (such as holding appropriate locks) to avoid racing
       * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
       * or hlist_bl_del_rcu(), running on this same list.
       * However, it is perfectly legal to run concurrently with
       * the _rcu list-traversal primitives, such as
       * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
       * problems on Alpha CPUs.  Regardless of the type of CPU, the
       * list-traversal primitive must be guarded by rcu_read_lock().
       */
      static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
                                              struct hlist_bl_head *h)
      {
              struct hlist_bl_node *first;
      
              /* don't need hlist_bl_first_rcu because we're under lock */
              first = hlist_bl_first(h);
      
              n->next = first;
              if (first)
                first->pprev = &n->next;
        n->pprev = &h->first;

        /* need _rcu because we can have concurrent lock free readers */
        hlist_bl_set_first_rcu(h, n);
      }
      /**
       * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
       * @tpos:        the type * to use as a loop cursor.
       * @pos:        the &struct hlist_bl_node to use as a loop cursor.
       * @head:        the head for your list.
       * @member:        the name of the hlist_bl_node within the struct.
       *
       */
      #define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)                \
              for (pos = hlist_bl_first_rcu(head);                                \
                      pos &&                                                        \
                      ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
                      pos = rcu_dereference_raw(pos->next))
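
/*
 * Putting the pieces together (editor's sketch; struct obj, head and
 * match() are hypothetical).  Writers serialize on the bit lock, readers
 * only need rcu_read_lock():
 *
 *	hlist_bl_lock(head);
 *	hlist_bl_add_head_rcu(&obj->node, head);
 *	hlist_bl_unlock(head);
 *
 *	rcu_read_lock();
 *	hlist_bl_for_each_entry_rcu(obj, pos, head, node)
 *		if (match(obj))
 *			break;
 *	rcu_read_unlock();
 */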
      
      #endif
      /*
       * net/tipc/socket.c: TIPC socket API
       *
       * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
       * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
       * All rights reserved.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions are met:
       *
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. Neither the names of the copyright holders nor the names of its
       *    contributors may be used to endorse or promote products derived from
       *    this software without specific prior written permission.
       *
       * Alternatively, this software may be distributed under the terms of the
       * GNU General Public License ("GPL") version 2 as published by the Free
       * Software Foundation.
       *
       * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
       * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
       * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
       * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
       * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
       * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
       * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
       * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
       * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
       * POSSIBILITY OF SUCH DAMAGE.
       */
      
      #include <linux/rhashtable.h>
      #include <linux/sched/signal.h>
      
      #include "core.h"
      #include "name_table.h"
      #include "node.h"
      #include "link.h"
      #include "name_distr.h"
      #include "socket.h"
      #include "bcast.h"
      #include "netlink.h"
      #include "group.h"
      #include "trace.h"
      
      #define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
      #define CONN_PROBING_INTV        msecs_to_jiffies(3600000)  /* [ms] => 1 h */
      #define TIPC_FWD_MSG                1
      #define TIPC_MAX_PORT                0xffffffff
      #define TIPC_MIN_PORT                1
#define TIPC_ACK_RATE                4       /* ACK at 1/4 of rcv window size */
      
      enum {
              TIPC_LISTEN = TCP_LISTEN,
              TIPC_ESTABLISHED = TCP_ESTABLISHED,
              TIPC_OPEN = TCP_CLOSE,
              TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
              TIPC_CONNECTING = TCP_SYN_SENT,
      };
      
      struct sockaddr_pair {
              struct sockaddr_tipc sock;
              struct sockaddr_tipc member;
      };
      
      /**
       * struct tipc_sock - TIPC socket structure
       * @sk: socket - interacts with 'port' and with user via the socket API
       * @conn_type: TIPC type used when connection was established
       * @conn_instance: TIPC instance used when connection was established
       * @published: non-zero if port has one or more associated names
       * @max_pkt: maximum packet size "hint" used when building messages sent by port
       * @portid: unique port identity in TIPC socket hash table
       * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
       * @publications: list of publications for port
       * @blocking_link: address of the congested link we are currently sleeping on
       * @pub_count: total # of publications port has made during its lifetime
       * @conn_timeout: the time we can wait for an unresponded setup request
       * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
       * @cong_link_cnt: number of congested links
       * @snt_unacked: # messages sent by socket, and not yet acked by peer
       * @rcv_unacked: # messages read by user, but not yet acked back to peer
       * @peer: 'connected' peer for dgram/rdm
       * @node: hash table node
       * @mc_method: cookie for use between socket and broadcast layer
       * @rcu: rcu struct for tipc_sock
       */
      struct tipc_sock {
              struct sock sk;
              u32 conn_type;
              u32 conn_instance;
              int published;
              u32 max_pkt;
              u32 portid;
              struct tipc_msg phdr;
              struct list_head cong_links;
              struct list_head publications;
              u32 pub_count;
              atomic_t dupl_rcvcnt;
              u16 conn_timeout;
              bool probe_unacked;
              u16 cong_link_cnt;
              u16 snt_unacked;
              u16 snd_win;
              u16 peer_caps;
              u16 rcv_unacked;
              u16 rcv_win;
              struct sockaddr_tipc peer;
              struct rhash_head node;
              struct tipc_mc_method mc_method;
              struct rcu_head rcu;
              struct tipc_group *group;
              bool group_is_open;
      };
      
      static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
      static void tipc_data_ready(struct sock *sk);
      static void tipc_write_space(struct sock *sk);
      static void tipc_sock_destruct(struct sock *sk);
      static int tipc_release(struct socket *sock);
      static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
                             bool kern);
      static void tipc_sk_timeout(struct timer_list *t);
      static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                                 struct tipc_name_seq const *seq);
      static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                                  struct tipc_name_seq const *seq);
      static int tipc_sk_leave(struct tipc_sock *tsk);
      static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
      static int tipc_sk_insert(struct tipc_sock *tsk);
      static void tipc_sk_remove(struct tipc_sock *tsk);
      static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
      static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
      
      static const struct proto_ops packet_ops;
      static const struct proto_ops stream_ops;
      static const struct proto_ops msg_ops;
      static struct proto tipc_proto;
      static const struct rhashtable_params tsk_rht_params;
      
      static u32 tsk_own_node(struct tipc_sock *tsk)
      {
              return msg_prevnode(&tsk->phdr);
      }
      
      static u32 tsk_peer_node(struct tipc_sock *tsk)
      {
        return msg_destnode(&tsk->phdr);
      }
      
      static u32 tsk_peer_port(struct tipc_sock *tsk)
      {
              return msg_destport(&tsk->phdr);
      }
      
      static  bool tsk_unreliable(struct tipc_sock *tsk)
      {
              return msg_src_droppable(&tsk->phdr) != 0;
      }
      
      static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
      {
        msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
      }
      
      static bool tsk_unreturnable(struct tipc_sock *tsk)
      {
              return msg_dest_droppable(&tsk->phdr) != 0;
      }
      
      static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
      {
        msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}
      
      static int tsk_importance(struct tipc_sock *tsk)
      {
              return msg_importance(&tsk->phdr);
      }
      
      static int tsk_set_importance(struct tipc_sock *tsk, int imp)
      {
              if (imp > TIPC_CRITICAL_IMPORTANCE)
                      return -EINVAL;
              msg_set_importance(&tsk->phdr, (u32)imp);
              return 0;
      }
      
      static struct tipc_sock *tipc_sk(const struct sock *sk)
      {
              return container_of(sk, struct tipc_sock, sk);
      }
      
      static bool tsk_conn_cong(struct tipc_sock *tsk)
      {
        return tsk->snt_unacked > tsk->snd_win;
      }
      
      static u16 tsk_blocks(int len)
      {
              return ((len / FLOWCTL_BLK_SZ) + 1);
      }
      
/* tsk_adv_blocks(): translate a buffer size in bytes to number of
       * advertisable blocks, taking into account the ratio truesize(len)/len
       * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
       */
      static u16 tsk_adv_blocks(int len)
      {
              return len / FLOWCTL_BLK_SZ / 4;
      }
      
      /* tsk_inc(): increment counter for sent or received data
 * - If block-based flow control is not supported by the peer we
 *   fall back to message-based flow control, incrementing the counter
       */
      static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
      {
              if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
                return ((msglen / FLOWCTL_BLK_SZ) + 1);
              return 1;
      }
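
/* Worked example (editor's note): assuming FLOWCTL_BLK_SZ is 1024 as
 * defined in msg.h, a 1460-byte message counts as 1460 / 1024 + 1 = 2
 * advertisable blocks, while a peer without TIPC_BLOCK_FLOWCTL support
 * is charged exactly one unit per message.
 */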
      
      /**
       * tsk_advance_rx_queue - discard first buffer in socket receive queue
       *
       * Caller must hold socket lock
       */
      static void tsk_advance_rx_queue(struct sock *sk)
      {
        trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
        kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
      }
      
      /* tipc_sk_respond() : send response message back to sender
       */
      static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
      {
              u32 selector;
              u32 dnode;
        u32 onode = tipc_own_addr(sock_net(sk));

        if (!tipc_msg_reverse(onode, &skb, err))
                return;

        trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
        dnode = msg_destnode(buf_msg(skb));
        selector = msg_origport(buf_msg(skb));
              tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
      }
      
      /**
       * tsk_rej_rx_queue - reject all buffers in socket receive queue
       *
       * Caller must hold socket lock
       */
      static void tsk_rej_rx_queue(struct sock *sk)
      {
              struct sk_buff *skb;
      
              while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
                      tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
      }
      
      static bool tipc_sk_connected(struct sock *sk)
      {
        return sk->sk_state == TIPC_ESTABLISHED;
      }
      
      /* tipc_sk_type_connectionless - check if the socket is datagram socket
       * @sk: socket
       *
       * Returns true if connection less, false otherwise
       */
      static bool tipc_sk_type_connectionless(struct sock *sk)
      {
        return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
      }
      
      /* tsk_peer_msg - verify if message was sent by connected port's peer
       *
       * Handles cases where the node's network address has changed from
       * the default of <0.0.0> to its configured setting.
       */
      static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
      {
              struct sock *sk = &tsk->sk;
        u32 self = tipc_own_addr(sock_net(sk));
              u32 peer_port = tsk_peer_port(tsk);
              u32 orig_node, peer_node;
      
              if (unlikely(!tipc_sk_connected(sk)))
                      return false;
      
        if (unlikely(msg_origport(msg) != peer_port))
                return false;

        orig_node = msg_orignode(msg);
        peer_node = tsk_peer_node(tsk);

        if (likely(orig_node == peer_node))
                return true;
      
              if (!orig_node && peer_node == self)
                      return true;
      
              if (!peer_node && orig_node == self)
                      return true;
      
              return false;
      }
      
      /* tipc_set_sk_state - set the sk_state of the socket
       * @sk: socket
       *
       * Caller must hold socket lock
       *
       * Returns 0 on success, errno otherwise
       */
      static int tipc_set_sk_state(struct sock *sk, int state)
      {
        int oldsk_state = sk->sk_state;
              int res = -EINVAL;
      
              switch (state) {
              case TIPC_OPEN:
                      res = 0;
                      break;
              case TIPC_LISTEN:
              case TIPC_CONNECTING:
                      if (oldsk_state == TIPC_OPEN)
                              res = 0;
                      break;
              case TIPC_ESTABLISHED:
                if (oldsk_state == TIPC_CONNECTING ||
                          oldsk_state == TIPC_OPEN)
                              res = 0;
                      break;
              case TIPC_DISCONNECTING:
                      if (oldsk_state == TIPC_CONNECTING ||
                          oldsk_state == TIPC_ESTABLISHED)
                              res = 0;
                      break;
              }
      
              if (!res)
                sk->sk_state = state;
      
              return res;
      }
      
      static int tipc_sk_sock_err(struct socket *sock, long *timeout)
      {
              struct sock *sk = sock->sk;
        int err = sock_error(sk);
        int typ = sock->type;

        if (err)
                return err;
        if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
                if (sk->sk_state == TIPC_DISCONNECTING)
                        return -EPIPE;
                else if (!tipc_sk_connected(sk))
                        return -ENOTCONN;
        }
        if (!*timeout)
                return -EAGAIN;
        if (signal_pending(current))
                      return sock_intr_errno(*timeout);
      
              return 0;
      }
      
      #define tipc_wait_for_cond(sock_, timeo_, condition_)                               \
      ({                                                                             \
              DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
              struct sock *sk_;                                                       \
              int rc_;                                                               \
                                                                                     \
              while ((rc_ = !(condition_))) {                                               \
                      /* coupled with smp_wmb() in tipc_sk_proto_rcv() */            \
                      smp_rmb();                                                     \
                      sk_ = (sock_)->sk;                                               \
                      rc_ = tipc_sk_sock_err((sock_), timeo_);                       \
                      if (rc_)                                                       \
                              break;                                                       \
                      add_wait_queue(sk_sleep(sk_), &wait_);                         \
                      release_sock(sk_);                                               \
                      *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
                      sched_annotate_sleep();                                               \
                      lock_sock(sk_);                                                       \
                      remove_wait_queue(sk_sleep(sk_), &wait_);                       \
              }                                                                       \
              rc_;                                                                       \
      })
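
/* Usage sketch (editor's note): the macro re-evaluates @condition_ under
 * the socket lock, dropping the lock while sleeping, e.g.
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (rc)
 *		return rc;
 *
 * as __tipc_shutdown() below does for link and connection congestion.
 */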
      
      /**
       * tipc_sk_create - create a TIPC socket
       * @net: network namespace (must be default network)
       * @sock: pre-allocated socket structure
       * @protocol: protocol indicator (must be 0)
       * @kern: caused by kernel or by userspace?
       *
       * This routine creates additional data structures used by the TIPC socket,
       * initializes them, and links them together.
       *
       * Returns 0 on success, errno otherwise
       */
      static int tipc_sk_create(struct net *net, struct socket *sock,
                                int protocol, int kern)
      {
              const struct proto_ops *ops;
              struct sock *sk;
              struct tipc_sock *tsk;
              struct tipc_msg *msg;
      
              /* Validate arguments */
        if (unlikely(protocol != 0))
                return -EPROTONOSUPPORT;

        switch (sock->type) {
              case SOCK_STREAM:
                      ops = &stream_ops;
                      break;
              case SOCK_SEQPACKET:
                      ops = &packet_ops;
                      break;
              case SOCK_DGRAM:
              case SOCK_RDM:
                      ops = &msg_ops;
                      break;
              default:
                      return -EPROTOTYPE;
              }
      
              /* Allocate socket's protocol area */
        sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
              if (sk == NULL)
                      return -ENOMEM;
      
              tsk = tipc_sk(sk);
        tsk->max_pkt = MAX_PKT_DEFAULT;
              INIT_LIST_HEAD(&tsk->publications);
              INIT_LIST_HEAD(&tsk->cong_links);
              msg = &tsk->phdr;
      
              /* Finish initializing socket data structures */
              sock->ops = ops;
              sock_init_data(sock, sk);
              tipc_set_sk_state(sk, TIPC_OPEN);
        if (tipc_sk_insert(tsk)) {
                      pr_warn("Socket create failed; port number exhausted\n");
                      return -EINVAL;
              }
      
              /* Ensure tsk is visible before we read own_addr. */
        smp_mb();

        tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
                            TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
      
              msg_set_origport(msg, tsk->portid);
              timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
              sk->sk_shutdown = 0;
              sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
              sk->sk_rcvbuf = sysctl_tipc_rmem[1];
              sk->sk_data_ready = tipc_data_ready;
              sk->sk_write_space = tipc_write_space;
              sk->sk_destruct = tipc_sock_destruct;
              tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
              tsk->group_is_open = true;
              atomic_set(&tsk->dupl_rcvcnt, 0);
      
              /* Start out with safe limits until we receive an advertised window */
              tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
              tsk->rcv_win = tsk->snd_win;
      
              if (tipc_sk_type_connectionless(sk)) {
                tsk_set_unreturnable(tsk, true);
                if (sock->type == SOCK_DGRAM)
                        tsk_set_unreliable(tsk, true);
        }
        __skb_queue_head_init(&tsk->mc_method.deferredq);
        trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
              return 0;
      }
      
      static void tipc_sk_callback(struct rcu_head *head)
      {
              struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
      
              sock_put(&tsk->sk);
      }
      
      /* Caller should hold socket lock for the socket. */
      static void __tipc_shutdown(struct socket *sock, int error)
      {
        struct sock *sk = sock->sk;
              struct tipc_sock *tsk = tipc_sk(sk);
              struct net *net = sock_net(sk);
              long timeout = CONN_TIMEOUT_DEFAULT;
              u32 dnode = tsk_peer_node(tsk);
              struct sk_buff *skb;
      
              /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
        tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
                                                  !tsk_conn_cong(tsk)));
      
              /* Remove any pending SYN message */
              __skb_queue_purge(&sk->sk_write_queue);
      
              /* Reject all unreceived messages, except on an active connection
               * (which disconnects locally & sends a 'FIN+' to peer).
               */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (TIPC_SKB_CB(skb)->bytes_read) {
                        kfree_skb(skb);
                        continue;
                }
                if (!tipc_sk_type_connectionless(sk) &&
                    sk->sk_state != TIPC_DISCONNECTING) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        tipc_node_remove_conn(net, dnode, tsk->portid);
                }
                tipc_sk_respond(sk, skb, error);
              }
      
        if (tipc_sk_type_connectionless(sk))
                return;

        if (sk->sk_state != TIPC_DISCONNECTING) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
                                      tsk_own_node(tsk), tsk_peer_port(tsk),
                                      tsk->portid, error);
                if (skb)
                        tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
                tipc_node_remove_conn(net, dnode, tsk->portid);
                tipc_set_sk_state(sk, TIPC_DISCONNECTING);
              }
      }
      
      /**
       * tipc_release - destroy a TIPC socket
       * @sock: socket to destroy
       *
       * This routine cleans up any messages that are still queued on the socket.
       * For DGRAM and RDM socket types, all queued messages are rejected.
       * For SEQPACKET and STREAM socket types, the first message is rejected
       * and any others are discarded.  (If the first message on a STREAM socket
       * is partially-read, it is discarded and the next one is rejected instead.)
       *
       * NOTE: Rejected messages are not necessarily returned to the sender!  They
       * are returned or discarded according to the "destination droppable" setting
       * specified for the message by the sender.
       *
       * Returns 0 on success, errno otherwise
       */
      static int tipc_release(struct socket *sock)
      {
  120         struct sock *sk = sock->sk;
              struct tipc_sock *tsk;
      
              /*
               * Exit if socket isn't fully initialized (occurs when a failed accept()
               * releases a pre-allocated child socket that was never used)
               */
              if (sk == NULL)
                      return 0;
      
              tsk = tipc_sk(sk);
  120         lock_sock(sk);
      
  119         trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
  119         __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
              sk->sk_shutdown = SHUTDOWN_MASK;
              tipc_sk_leave(tsk);
              tipc_sk_withdraw(tsk, 0, NULL);
              __skb_queue_purge(&tsk->mc_method.deferredq);
  112         sk_stop_timer(sk, &sk->sk_timer);
  112         tipc_sk_remove(tsk);
      
  112         sock_orphan(sk);
              /* Reject any messages that accumulated in backlog queue */
              release_sock(sk);
              tipc_dest_list_purge(&tsk->cong_links);
              tsk->cong_link_cnt = 0;
              call_rcu(&tsk->rcu, tipc_sk_callback);
              sock->sk = NULL;
      
  112         return 0;
      }
      
      /**
        * tipc_bind - associate or disassociate TIPC name(s) with a socket
       * @sock: socket structure
       * @uaddr: socket address describing name(s) and desired operation
       * @uaddr_len: size of socket address data structure
       *
       * Name and name sequence binding is indicated using a positive scope value;
       * a negative scope value unbinds the specified name.  Specifying no name
       * (i.e. a socket address length of 0) unbinds all names from the socket.
       *
       * Returns 0 on success, errno otherwise
       *
        * NOTE: This routine takes the socket lock in order to serialize
        *       concurrent publish/withdraw updates.
       */
      static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
                           int uaddr_len)
      {
              struct sock *sk = sock->sk;
              struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
              struct tipc_sock *tsk = tipc_sk(sk);
              int res = -EINVAL;
      
              lock_sock(sk);
              if (unlikely(!uaddr_len)) {
                      res = tipc_sk_withdraw(tsk, 0, NULL);
                      goto exit;
              }
              if (tsk->group) {
                      res = -EACCES;
                      goto exit;
              }
              if (uaddr_len < sizeof(struct sockaddr_tipc)) {
                      res = -EINVAL;
                      goto exit;
              }
              if (addr->family != AF_TIPC) {
                      res = -EAFNOSUPPORT;
                      goto exit;
              }
      
              if (addr->addrtype == TIPC_ADDR_NAME)
                      addr->addr.nameseq.upper = addr->addr.nameseq.lower;
              else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
                      res = -EAFNOSUPPORT;
                      goto exit;
              }
      
              if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
                  (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
                  (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
                      res = -EACCES;
                      goto exit;
              }
      
              res = (addr->scope >= 0) ?
                      tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
                      tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
      exit:
              release_sock(sk);
              return res;
      }
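
       /*
        * Illustrative sketch (not part of the kernel build): the publish and
        * withdraw semantics above as seen from user space. A positive scope
        * publishes the binding; re-binding with the scope negated withdraws
        * it. Service type 4711 and range 0-99 are arbitrary example values:
        *
        *	struct sockaddr_tipc addr = {
        *		.family = AF_TIPC,
        *		.addrtype = TIPC_ADDR_NAMESEQ,
        *		.scope = TIPC_CLUSTER_SCOPE,
        *	};
        *
        *	addr.addr.nameseq.type = 4711;
        *	addr.addr.nameseq.lower = 0;
        *	addr.addr.nameseq.upper = 99;
        *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
        *
        *	addr.scope = -TIPC_CLUSTER_SCOPE;
        *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
        */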
      
      /**
       * tipc_getname - get port ID of socket or peer socket
       * @sock: socket structure
       * @uaddr: area for returned socket address
       * @uaddr_len: area for returned length of socket address
       * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
       *
       * Returns 0 on success, errno otherwise
       *
       * NOTE: This routine doesn't need to take the socket lock since it only
       *       accesses socket information that is unchanging (or which changes in
       *       a completely predictable manner).
       */
      static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
                              int peer)
      {
              struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
              struct sock *sk = sock->sk;
              struct tipc_sock *tsk = tipc_sk(sk);
      
              memset(addr, 0, sizeof(*addr));
              if (peer) {
                      if ((!tipc_sk_connected(sk)) &&
                          ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
                              return -ENOTCONN;
                      addr->addr.id.ref = tsk_peer_port(tsk);
                      addr->addr.id.node = tsk_peer_node(tsk);
              } else {
                      addr->addr.id.ref = tsk->portid;
                      addr->addr.id.node = tipc_own_addr(sock_net(sk));
              }
      
              addr->addrtype = TIPC_ADDR_ID;
              addr->family = AF_TIPC;
              addr->scope = 0;
              addr->addr.name.domain = 0;
      
              return sizeof(*addr);
      }
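
       /*
        * Illustrative sketch (not part of the kernel build): retrieving the
        * own port ID filled in above; "sd" is a hypothetical descriptor:
        *
        *	struct sockaddr_tipc self;
        *	socklen_t len = sizeof(self);
        *
        *	if (!getsockname(sd, (struct sockaddr *)&self, &len))
        *		printf("port %u on node %u\n",
        *		       self.addr.id.ref, self.addr.id.node);
        */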
      
      /**
       * tipc_poll - read and possibly block on pollmask
       * @file: file structure associated with the socket
       * @sock: socket for which to calculate the poll bits
        * @wait: poll table
       *
       * Returns pollmask value
       *
       * COMMENTARY:
       * It appears that the usual socket locking mechanisms are not useful here
       * since the pollmask info is potentially out-of-date the moment this routine
       * exits.  TCP and other protocols seem to rely on higher level poll routines
       * to handle any preventable race conditions, so TIPC will do the same ...
       *
       * IMPORTANT: The fact that a read or write operation is indicated does NOT
       * imply that the operation will succeed, merely that it should be performed
       * and will not block.
       */
      static __poll_t tipc_poll(struct file *file, struct socket *sock,
                                    poll_table *wait)
      {
              struct sock *sk = sock->sk;
              struct tipc_sock *tsk = tipc_sk(sk);
              __poll_t revents = 0;
      
              sock_poll_wait(file, sock, wait);
              trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
      
              if (sk->sk_shutdown & RCV_SHUTDOWN)
                      revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
              if (sk->sk_shutdown == SHUTDOWN_MASK)
                      revents |= EPOLLHUP;
      
              switch (sk->sk_state) {
              case TIPC_ESTABLISHED:
                      if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
                              revents |= EPOLLOUT;
                      /* fall through */
              case TIPC_LISTEN:
              case TIPC_CONNECTING:
                      if (!skb_queue_empty(&sk->sk_receive_queue))
                              revents |= EPOLLIN | EPOLLRDNORM;
                      break;
              case TIPC_OPEN:
                      if (tsk->group_is_open && !tsk->cong_link_cnt)
                              revents |= EPOLLOUT;
                      if (!tipc_sk_type_connectionless(sk))
                              break;
                      if (skb_queue_empty(&sk->sk_receive_queue))
                              break;
                      revents |= EPOLLIN | EPOLLRDNORM;
                      break;
              case TIPC_DISCONNECTING:
                      revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
                      break;
              }
              return revents;
      }
      
      /**
       * tipc_sendmcast - send multicast message
       * @sock: socket structure
       * @seq: destination address
       * @msg: message to send
       * @dlen: length of data to send
       * @timeout: timeout to wait for wakeup
       *
       * Called from function tipc_sendmsg(), which has done all sanity checks
       * Returns the number of bytes sent on success, or errno
       */
       static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
                                struct msghdr *msg, size_t dlen, long timeout)
      {
   18         struct sock *sk = sock->sk;
              struct tipc_sock *tsk = tipc_sk(sk);
              struct tipc_msg *hdr = &tsk->phdr;
              struct net *net = sock_net(sk);
              int mtu = tipc_bcast_get_mtu(net);
              struct tipc_mc_method *method = &tsk->mc_method;
              struct sk_buff_head pkts;
              struct tipc_nlist dsts;
              int rc;
      
              if (tsk->group)
                      return -EACCES;
      
              /* Block or return if any destination link is congested */
   18         rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
              if (unlikely(rc))
                      return rc;
      
              /* Lookup destination nodes */
   18         tipc_nlist_init(&dsts, tipc_own_addr(net));
              tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
                                            seq->upper, &dsts);
    4         if (!dsts.local && !dsts.remote)
                      return -EHOSTUNREACH;
      
              /* Build message header */
   14         msg_set_type(hdr, TIPC_MCAST_MSG);
              msg_set_hdr_sz(hdr, MCAST_H_SIZE);
              msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
              msg_set_destport(hdr, 0);
              msg_set_destnode(hdr, 0);
              msg_set_nametype(hdr, seq->type);
              msg_set_namelower(hdr, seq->lower);
              msg_set_nameupper(hdr, seq->upper);
      
              /* Build message as chain of buffers */
              skb_queue_head_init(&pkts);
              rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
      
              /* Send message if build was successful */
              if (unlikely(rc == dlen)) {
   13                 trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
                                              TIPC_DUMP_SK_SNDQ, " ");
   13                 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
                                           &tsk->cong_link_cnt);
              }
      
   14         tipc_nlist_purge(&dsts);
      
   18         return rc ? rc : dlen;
      }
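
       /*
        * Illustrative sketch (not part of the kernel build): the user-space
        * counterpart of the multicast path above. The destination is a name
        * sequence, and every socket bound within the range receives a copy;
        * type 4711 and range 0-99 are arbitrary example values:
        *
        *	struct sockaddr_tipc dst = {
        *		.family = AF_TIPC,
        *		.addrtype = TIPC_ADDR_MCAST,
        *	};
        *
        *	dst.addr.nameseq.type = 4711;
        *	dst.addr.nameseq.lower = 0;
        *	dst.addr.nameseq.upper = 99;
        *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
        */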
      
      /**
       * tipc_send_group_msg - send a message to a member in the group
        * @net: network namespace
        * @tsk: tipc socket to send from
        * @m: message to send
        * @mb: group member
        * @dnode: destination node
        * @dport: destination port
        * @dlen: total length of message data
        *
        * Returns the number of bytes sent on success, or errno otherwise
      static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
                                     struct msghdr *m, struct tipc_member *mb,
                                     u32 dnode, u32 dport, int dlen)
      {
   18         u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
              struct tipc_mc_method *method = &tsk->mc_method;
              int blks = tsk_blocks(GROUP_H_SIZE + dlen);
              struct tipc_msg *hdr = &tsk->phdr;
              struct sk_buff_head pkts;
              int mtu, rc;
      
              /* Complete message header */
              msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
              msg_set_hdr_sz(hdr, GROUP_H_SIZE);
              msg_set_destport(hdr, dport);
              msg_set_destnode(hdr, dnode);
              msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
      
              /* Build message as chain of buffers */
              skb_queue_head_init(&pkts);
              mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
              rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
              if (unlikely(rc != dlen))
                      return rc;
      
              /* Send message */
   15         rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
              if (unlikely(rc == -ELINKCONG)) {
                      tipc_dest_push(&tsk->cong_links, dnode, 0);
                      tsk->cong_link_cnt++;
              }
      
              /* Update send window */
   15         tipc_group_update_member(mb, blks);
      
              /* A broadcast sent within next EXPIRE period must follow same path */
              method->rcast = true;
              method->mandatory = true;
   16         return dlen;
      }
      
      /**
       * tipc_send_group_unicast - send message to a member in the group
       * @sock: socket structure
       * @m: message to send
       * @dlen: total length of message data
       * @timeout: timeout to wait for wakeup
       *
       * Called from function tipc_sendmsg(), which has done all sanity checks
       * Returns the number of bytes sent on success, or errno
       */
      static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
                                         int dlen, long timeout)
      {
   15         struct sock *sk = sock->sk;
              DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
              int blks = tsk_blocks(GROUP_H_SIZE + dlen);
              struct tipc_sock *tsk = tipc_sk(sk);
              struct net *net = sock_net(sk);
              struct tipc_member *mb = NULL;
              u32 node, port;
              int rc;
      
              node = dest->addr.id.node;
              port = dest->addr.id.ref;
              if (!port && !node)
                      return -EHOSTUNREACH;
      
              /* Block or return if destination link or member is congested */
   14         rc = tipc_wait_for_cond(sock, &timeout,
                                      !tipc_dest_find(&tsk->cong_links, node, 0) &&
                                      tsk->group &&
                                      !tipc_group_cong(tsk->group, node, port, blks,
                                                       &mb));
              if (unlikely(rc))
                      return rc;
      
              if (unlikely(!mb))
                      return -EHOSTUNREACH;
      
   12         rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
      
   14         return rc ? rc : dlen;
      }
      
      /**
       * tipc_send_group_anycast - send message to any member with given identity
       * @sock: socket structure
       * @m: message to send
       * @dlen: total length of message data
       * @timeout: timeout to wait for wakeup
       *
       * Called from function tipc_sendmsg(), which has done all sanity checks
       * Returns the number of bytes sent on success, or errno
       */
      static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
                                         int dlen, long timeout)
      {
    8         DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
              struct sock *sk = sock->sk;
              struct tipc_sock *tsk = tipc_sk(sk);
              struct list_head *cong_links = &tsk->cong_links;
              int blks = tsk_blocks(GROUP_H_SIZE + dlen);
              struct tipc_msg *hdr = &tsk->phdr;
              struct tipc_member *first = NULL;
              struct tipc_member *mbr = NULL;
              struct net *net = sock_net(sk);
              u32 node, port, exclude;
              struct list_head dsts;
              u32 type, inst, scope;
              int lookups = 0;
              int dstcnt, rc;
              bool cong;
      
              INIT_LIST_HEAD(&dsts);
      
              type = msg_nametype(hdr);
              inst = dest->addr.name.name.instance;
              scope = msg_lookup_scope(hdr);
      
    1         while (++lookups < 4) {
    8                 exclude = tipc_group_exclude(tsk->group);
      
                      first = NULL;
      
                      /* Look for a non-congested destination member, if any */
                      while (1) {
    8                         if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
                                                       &dstcnt, exclude, false))
    7                                 return -EHOSTUNREACH;
    6                         tipc_dest_pop(&dsts, &node, &port);
                              cong = tipc_group_cong(tsk->group, node, port, blks,
                                                     &mbr);
                              if (!cong)
                                      break;
                              if (mbr == first)
                                      break;
                              if (!first)
                                      first = mbr;
                      }
      
                      /* Start over if destination was not in member list */
    6                 if (unlikely(!mbr))
                              continue;
      
    6                 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
                              break;
      
                      /* Block or return if destination link or member is congested */
                      rc = tipc_wait_for_cond(sock, &timeout,
                                              !tipc_dest_find(cong_links, node, 0) &&
                                              tsk->group &&
                                              !tipc_group_cong(tsk->group, node, port,
                                                               blks, &mbr));
                      if (unlikely(rc))
                              return rc;
      
                      /* Send, unless destination disappeared while waiting */
                      if (likely(mbr))
                              break;
              }
      
              if (unlikely(lookups >= 4))
                      return -EHOSTUNREACH;
      
    6         rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
      
              return rc ? rc : dlen;
      }
      
      /**
       * tipc_send_group_bcast - send message to all members in communication group
        * @sock: socket structure
       * @m: message to send
       * @dlen: total length of message data
       * @timeout: timeout to wait for wakeup
       *
       * Called from function tipc_sendmsg(), which has done all sanity checks
       * Returns the number of bytes sent on success, or errno
       */
      static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
                                       int dlen, long timeout)
      {
   52         DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
              struct sock *sk = sock->sk;
              struct net *net = sock_net(sk);
              struct tipc_sock *tsk = tipc_sk(sk);
              struct tipc_nlist *dsts;
              struct tipc_mc_method *method = &tsk->mc_method;
   52         bool ack = method->mandatory && method->rcast;
   52         int blks = tsk_blocks(MCAST_H_SIZE + dlen);
              struct tipc_msg *hdr = &tsk->phdr;
              int mtu = tipc_bcast_get_mtu(net);
              struct sk_buff_head pkts;
              int rc = -EHOSTUNREACH;
      
              /* Block or return if any destination link or member is congested */
   52         rc = tipc_wait_for_cond(sock, &timeout,
                                      !tsk->cong_link_cnt && tsk->group &&
                                      !tipc_group_bc_cong(tsk->group, blks));
              if (unlikely(rc))
                      return rc;
      
              dsts = tipc_group_dests(tsk->group);
              if (!dsts->local && !dsts->remote)
                      return -EHOSTUNREACH;
      
              /* Complete message header */
   52         if (dest) {
   52                 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
                      msg_set_nameinst(hdr, dest->addr.name.name.instance);
              } else {
                      msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
                      msg_set_nameinst(hdr, 0);
              }
   52         msg_set_hdr_sz(hdr, GROUP_H_SIZE);
              msg_set_destport(hdr, 0);
              msg_set_destnode(hdr, 0);
              msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
      
              /* Avoid getting stuck with repeated forced replicasts */
              msg_set_grp_bc_ack_req(hdr, ack);
      
              /* Build message as chain of buffers */
              skb_queue_head_init(&pkts);
              rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
              if (unlikely(rc != dlen))
                      return rc;
      
              /* Send message */
   51         rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
              if (unlikely(rc))
                      return rc;
      
              /* Update broadcast sequence number and send windows */
   48         tipc_group_update_bc_members(tsk->group, blks, ack);
      
              /* Broadcast link is now free to choose method for next broadcast */
              method->mandatory = false;
              method->expires = jiffies;
      
   49         return dlen;
      }
      
      /**
       * tipc_send_group_mcast - send message to all members with given identity
       * @sock: socket structure
       * @m: message to send
       * @dlen: total length of message data
       * @timeout: timeout to wait for wakeup
       *
       * Called from function tipc_sendmsg(), which has done all sanity checks
       * Returns the number of bytes sent on success, or errno
       */
      static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
                                       int dlen, long timeout)
      {
              struct sock *sk = sock->sk;
              DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
              struct tipc_sock *tsk = tipc_sk(sk);
              struct tipc_group *grp = tsk->group;
              struct tipc_msg *hdr = &tsk->phdr;
              struct net *net = sock_net(sk);
              u32 type, inst, scope, exclude;
              struct list_head dsts;
              u32 dstcnt;
      
              INIT_LIST_HEAD(&dsts);
      
              type = msg_nametype(hdr);
              inst = dest->addr.name.name.instance;
              scope = msg_lookup_scope(hdr);
              exclude = tipc_group_exclude(grp);
      
              if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
                                       &dstcnt, exclude, true))
                      return -EHOSTUNREACH;
      
   62         if (dstcnt == 1) {
   12                 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
                      return tipc_send_group_unicast(sock, m, dlen, timeout);
              }
      
   52         tipc_dest_list_purge(&dsts);
              return tipc_send_group_bcast(sock, m, dlen, timeout);
      }
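
       /*
        * Illustrative sketch (not part of the kernel build): the group
        * send/receive paths above only apply once a socket has joined a
        * communication group, which user space does with the TIPC_GROUP_JOIN
        * socket option; type and instance values are arbitrary examples:
        *
        *	struct tipc_group_req req = {
        *		.type = 4711,
        *		.instance = 17,
        *		.scope = TIPC_CLUSTER_SCOPE,
        *		.flags = TIPC_GROUP_MEMBER_EVTS,
        *	};
        *
        *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
        */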
      
      /**
       * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
        * @net: the associated network namespace
        * @arrvq: queue with arriving messages, to be cloned after destination lookup
       * @inputq: queue with cloned messages, delivered to socket after dest lookup
       *
       * Multi-threaded: parallel calls with reference to same queues may occur
       */
      void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                             struct sk_buff_head *inputq)
      {
   64         u32 self = tipc_own_addr(net);
              u32 type, lower, upper, scope;
              struct sk_buff *skb, *_skb;
              u32 portid, onode;
              struct sk_buff_head tmpq;
              struct list_head dports;
              struct tipc_msg *hdr;
              int user, mtyp, hlen;
              bool exact;
      
              __skb_queue_head_init(&tmpq);
              INIT_LIST_HEAD(&dports);
      
   64         skb = tipc_skb_peek(arrvq, &inputq->lock);
   63         for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
   64                 hdr = buf_msg(skb);
                      user = msg_user(hdr);
   64                 mtyp = msg_type(hdr);
   64                 hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
   64                 onode = msg_orignode(hdr);
                      type = msg_nametype(hdr);
      
   64                 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
                              spin_lock_bh(&inputq->lock);
                              if (skb_peek(arrvq) == skb) {
                                      __skb_dequeue(arrvq);
                                      __skb_queue_tail(inputq, skb);
                              }
                              kfree_skb(skb);
                              spin_unlock_bh(&inputq->lock);
                              continue;
                      }
      
                      /* Group messages require exact scope match */
                      if (msg_in_group(hdr)) {
                              lower = 0;
                              upper = ~0;
   51                         scope = msg_lookup_scope(hdr);
                              exact = true;
                      } else {
                              /* TIPC_NODE_SCOPE means "any scope" in this context */
   13                         if (onode == self)
                                      scope = TIPC_NODE_SCOPE;
                              else
                                      scope = TIPC_CLUSTER_SCOPE;
                              exact = false;
   13                         lower = msg_namelower(hdr);
                              upper = msg_nameupper(hdr);
                      }
      
                      /* Create destination port list: */
                      tipc_nametbl_mc_lookup(net, type, lower, upper,
                                             scope, exact, &dports);
      
                      /* Clone message per destination */
   64                 while (tipc_dest_pop(&dports, NULL, &portid)) {
                              _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
                              if (_skb) {
   64                                 msg_set_destport(buf_msg(_skb), portid);
                                      __skb_queue_tail(&tmpq, _skb);
                                      continue;
                              }
                              pr_warn("Failed to clone mcast rcv buffer\n");
                      }
                      /* Append to inputq if not already done by other thread */
   64                 spin_lock_bh(&inputq->lock);
   64                 if (skb_peek(arrvq) == skb) {
   64                         skb_queue_splice_tail_init(&tmpq, inputq);
   64                         kfree_skb(__skb_dequeue(arrvq));
                      }
   64                 spin_unlock_bh(&inputq->lock);
                      __skb_queue_purge(&tmpq);
   64                 kfree_skb(skb);
              }
   63         tipc_sk_rcv(net, inputq);
      }
      
      /**
        * tipc_sk_conn_proto_rcv - receive a connection management protocol message
        * @tsk: receiving socket
        * @skb: pointer to message buffer
        * @inputq: queue where a converted abort message may be appended
        * @xmitq: queue where a probe reply may be appended
       */
      static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
                                         struct sk_buff_head *inputq,
                                         struct sk_buff_head *xmitq)
      {
              struct tipc_msg *hdr = buf_msg(skb);
              u32 onode = tsk_own_node(tsk);
              struct sock *sk = &tsk->sk;
   60         int mtyp = msg_type(hdr);
              bool conn_cong;
      
              /* Ignore if connection cannot be validated: */
              if (!tsk_peer_msg(tsk, hdr)) {
                      trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
                      goto exit;
              }
      
   60         if (unlikely(msg_errcode(hdr))) {
                      tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                      tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
                                            tsk_peer_port(tsk));
                      sk->sk_state_change(sk);
      
                       /* The state change is not noticed if the socket is
                        * already awake, so also convert the msg to an abort
                        * msg and add it to the inqueue
                        */
                      msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
                      msg_set_type(hdr, TIPC_CONN_MSG);
                      msg_set_size(hdr, BASIC_H_SIZE);
                      msg_set_hdr_sz(hdr, BASIC_H_SIZE);
                      __skb_queue_tail(inputq, skb);
                      return;
              }
      
              tsk->probe_unacked = false;
      
              if (mtyp == CONN_PROBE) {
                      msg_set_type(hdr, CONN_PROBE_REPLY);
                      if (tipc_msg_reverse(onode, &skb, TIPC_OK))
                              __skb_queue_tail(xmitq, skb);
                      return;
              } else if (mtyp == CONN_ACK) {
   60                 conn_cong = tsk_conn_cong(tsk);
                      tsk->snt_unacked -= msg_conn_ack(hdr);
                      if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
   60                         tsk->snd_win = msg_adv_win(hdr);
   60                 if (conn_cong)
   58                         sk->sk_write_space(sk);
              } else if (mtyp != CONN_PROBE_REPLY) {
                      pr_warn("Received unknown CONN_PROTO msg\n");
              }
      exit:
   60         kfree_skb(skb);
      }
      
      /**
       * tipc_sendmsg - send message in connectionless manner
       * @sock: socket structure
       * @m: message to send
       * @dsz: amount of user data to be sent
       *
        * The message must have a destination specified explicitly.
       * Used for SOCK_RDM and SOCK_DGRAM messages,
       * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
       * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
       *
       * Returns the number of bytes sent on success, or errno otherwise
       */
      static int tipc_sendmsg(struct socket *sock,
                              struct msghdr *m, size_t dsz)
      {
   51         struct sock *sk = sock->sk;
              int ret;
      
              lock_sock(sk);
              ret = __tipc_sendmsg(sock, m, dsz);
              release_sock(sk);
      
              return ret;
      }
      
      static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
      {
  132         struct sock *sk = sock->sk;
              struct net *net = sock_net(sk);
              struct tipc_sock *tsk = tipc_sk(sk);
              DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
  132         long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
   29         struct list_head *clinks = &tsk->cong_links;
              bool syn = !tipc_sk_type_connectionless(sk);
              struct tipc_group *grp = tsk->group;
              struct tipc_msg *hdr = &tsk->phdr;
              struct tipc_name_seq *seq;
              struct sk_buff_head pkts;
              u32 dport, dnode = 0;
              u32 type, inst;
              int mtu, rc;
      
              if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
                      return -EMSGSIZE;
      
  131         if (likely(dest)) {
  118                 if (unlikely(m->msg_namelen < sizeof(*dest)))
                              return -EINVAL;
  117                 if (unlikely(dest->family != AF_TIPC))
                              return -EINVAL;
              }
      
  128         if (grp) {
                      if (!dest)
                              return tipc_send_group_bcast(sock, m, dlen, timeout);
   77                 if (dest->addrtype == TIPC_ADDR_NAME)
    8                         return tipc_send_group_anycast(sock, m, dlen, timeout);
   70                 if (dest->addrtype == TIPC_ADDR_ID)
    3                         return tipc_send_group_unicast(sock, m, dlen, timeout);
   67                 if (dest->addrtype == TIPC_ADDR_MCAST)
   66                         return tipc_send_group_mcast(sock, m, dlen, timeout);
                      return -EINVAL;
              }
      
              if (unlikely(!dest)) {
   13                 dest = &tsk->peer;
   13                 if (!syn && dest->family != AF_TIPC)
                              return -EDESTADDRREQ;
              }
      
   38         if (unlikely(syn)) {
    2                 if (sk->sk_state == TIPC_LISTEN)
                              return -EPIPE;
    2                 if (sk->sk_state != TIPC_OPEN)
                              return -EISCONN;
                      if (tsk->published)
                              return -EOPNOTSUPP;
                      if (dest->addrtype == TIPC_ADDR_NAME) {
                              tsk->conn_type = dest->addr.name.name.type;
                              tsk->conn_instance = dest->addr.name.name.instance;
                      }
                      msg_set_syn(hdr, 1);
              }
      
   18         seq = &dest->addr.nameseq;
   49         if (dest->addrtype == TIPC_ADDR_MCAST)
                      return tipc_sendmcast(sock, seq, m, dlen, timeout);
      
   32         if (dest->addrtype == TIPC_ADDR_NAME) {
   13                 type = dest->addr.name.name.type;
                      inst = dest->addr.name.name.instance;
                      dnode = dest->addr.name.domain;
                      msg_set_type(hdr, TIPC_NAMED_MSG);
                      msg_set_hdr_sz(hdr, NAMED_H_SIZE);
                      msg_set_nametype(hdr, type);
                      msg_set_nameinst(hdr, inst);
   13                 msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
                      dport = tipc_nametbl_translate(net, type, inst, &dnode);
                      msg_set_destnode(hdr, dnode);
                      msg_set_destport(hdr, dport);
                      if (unlikely(!dport && !dnode))
                              return -EHOSTUNREACH;
   19         } else if (dest->addrtype == TIPC_ADDR_ID) {
   18                 dnode = dest->addr.id.node;
                      msg_set_type(hdr, TIPC_DIRECT_MSG);
                      msg_set_lookup_scope(hdr, 0);
                      msg_set_destnode(hdr, dnode);
                      msg_set_destport(hdr, dest->addr.id.ref);
                      msg_set_hdr_sz(hdr, BASIC_H_SIZE);
              } else {
                      return -EINVAL;
              }
      
              /* Block or return if destination link is congested */
   91         rc = tipc_wait_for_cond(sock, &timeout,
                                      !tipc_dest_find(clinks, dnode, 0));
  126         if (unlikely(rc))
                      return rc;
      
              skb_queue_head_init(&pkts);
              mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
              rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
              if (unlikely(rc != dlen))
                      return rc;
   26         if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
                      return -ENOMEM;
      
   26         trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
   26         rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
              if (unlikely(rc == -ELINKCONG)) {
                      tipc_dest_push(clinks, dnode, 0);
                      tsk->cong_link_cnt++;
                      rc = 0;
              }
      
   26         if (unlikely(syn && !rc))
                      tipc_set_sk_state(sk, TIPC_CONNECTING);
      
   26         return rc ? rc : dlen;
      }
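
       /*
        * Illustrative sketch (not part of the kernel build): the common
        * user-space trigger for the TIPC_ADDR_NAME branch above - a
        * connectionless datagram addressed by service name and resolved by
        * the name table at send time. Domain 0 requests a cluster-wide
        * lookup; type and instance values are arbitrary examples:
        *
        *	struct sockaddr_tipc dst = {
        *		.family = AF_TIPC,
        *		.addrtype = TIPC_ADDR_NAME,
        *	};
        *
        *	dst.addr.name.name.type = 4711;
        *	dst.addr.name.name.instance = 17;
        *	dst.addr.name.domain = 0;
        *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
        */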
      
      /**
       * tipc_sendstream - send stream-oriented data
       * @sock: socket structure
       * @m: data to send
       * @dsz: total length of data to be transmitted
       *
       * Used for SOCK_STREAM data.
       *
       * Returns the number of bytes sent on success (or partial success),
       * or errno if no data sent
       */
      static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
      {
  123         struct sock *sk = sock->sk;
              int ret;
      
              lock_sock(sk);
              ret = __tipc_sendstream(sock, m, dsz);
              release_sock(sk);
      
              return ret;
      }
      
      static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
      {
  123         struct sock *sk = sock->sk;
              DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
  123         long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
              struct tipc_sock *tsk = tipc_sk(sk);
              struct tipc_msg *hdr = &tsk->phdr;
              struct net *net = sock_net(sk);
              struct sk_buff_head pkts;
              u32 dnode = tsk_peer_node(tsk);
              int send, sent = 0;
              int rc = 0;
      
              skb_queue_head_init(&pkts);
      
              if (unlikely(dlen > INT_MAX))
                      return -EMSGSIZE;
      
              /* Handle implicit connection setup */
  123         if (unlikely(dest)) {
   81                 rc = __tipc_sendmsg(sock, m, dlen);
   29                 if (dlen && dlen == rc) {
   25                         tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
   25                         tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
                      }
                      return rc;
              }
      
              do {
   42                 rc = tipc_wait_for_cond(sock, &timeout,
                                              (!tsk->cong_link_cnt &&
                                               !tsk_conn_cong(tsk) &&
                                               tipc_sk_connected(sk)));
                      if (unlikely(rc))
                              break;
      
                      send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
                      rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
                      if (unlikely(rc != send))
                              break;
      
   38                 trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
                                               TIPC_DUMP_SK_SNDQ, " ");
   38                 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
                      if (unlikely(rc == -ELINKCONG)) {
                              tsk->cong_link_cnt = 1;
                              rc = 0;
                      }
   37                 if (likely(!rc)) {
   37                         tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
                              sent += send;
                      }
              } while (sent < dlen && !rc);
      
   90         return sent ? sent : rc;
      }
      
      /**
       * tipc_send_packet - send a connection-oriented message
       * @sock: socket structure
       * @m: message to send
       * @dsz: length of data to be transmitted
       *
       * Used for SOCK_SEQPACKET messages.
       *
       * Returns the number of bytes sent on success, or errno otherwise
       */
      static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
      {
   75         if (dsz > TIPC_MAX_USER_MSG_SIZE)
                      return -EMSGSIZE;
      
   75         return tipc_sendstream(sock, m, dsz);
      }
      
      /* tipc_sk_finish_conn - complete the setup of a connection
       */
      static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
                                      u32 peer_node)
      {
  111         struct sock *sk = &tsk->sk;
              struct net *net = sock_net(sk);
              struct tipc_msg *msg = &tsk->phdr;
      
              msg_set_syn(msg, 0);
              msg_set_destnode(msg, peer_node);
              msg_set_destport(msg, peer_port);
              msg_set_type(msg, TIPC_CONN_MSG);
              msg_set_lookup_scope(msg, 0);
              msg_set_hdr_sz(msg, SHORT_H_SIZE);
      
              sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
  111         tipc_set_sk_state(sk, TIPC_ESTABLISHED);
  111         tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
              tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
              tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
              __skb_queue_purge(&sk->sk_write_queue);
  111         if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
                      return;
      
              /* Fall back to message based flow control */
              tsk->rcv_win = FLOWCTL_MSG_WIN;
  111         tsk->snd_win = FLOWCTL_MSG_WIN;
      }
      
      /**
       * tipc_sk_set_orig_addr - capture sender's address for received message
       * @m: descriptor for message info
        * @skb: received message buffer
       *
       * Note: Address is not captured if not requested by receiver.
       */
      static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
      {
              DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
   40         struct tipc_msg *hdr = buf_msg(skb);
      
   86         if (!srcaddr)
                      return;
      
              srcaddr->sock.family = AF_TIPC;
              srcaddr->sock.addrtype = TIPC_ADDR_ID;
              srcaddr->sock.scope = 0;
   40         srcaddr->sock.addr.id.ref = msg_origport(hdr);
   40         srcaddr->sock.addr.id.node = msg_orignode(hdr);
              srcaddr->sock.addr.name.domain = 0;
              m->msg_namelen = sizeof(struct sockaddr_tipc);
      
              if (!msg_in_group(hdr))
                      return;
      
              /* Group message users may also want to know sending member's id */
              srcaddr->member.family = AF_TIPC;
              srcaddr->member.addrtype = TIPC_ADDR_NAME;
              srcaddr->member.scope = 0;
              srcaddr->member.addr.name.name.type = msg_nametype(hdr);
              srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
              srcaddr->member.addr.name.domain = 0;
   86         m->msg_namelen = sizeof(*srcaddr);
      }
      
      /**
       * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
       * @m: descriptor for message info
       * @skb: received message buffer
       * @tsk: TIPC port associated with message
       *
       * Note: Ancillary data is not captured if not requested by receiver.
       *
       * Returns 0 if successful, otherwise errno
       */
      static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
                                       struct tipc_sock *tsk)
      {
              struct tipc_msg *msg;
              u32 anc_data[3];
              u32 err;
              u32 dest_type;
              int has_name;
              int res;
      
   60         if (likely(m->msg_controllen == 0))
                      return 0;
   74         msg = buf_msg(skb);
      
              /* Optionally capture errored message object(s) */
   74         err = msg ? msg_errcode(msg) : 0;
              if (unlikely(err)) {
   24                 anc_data[0] = err;
                      anc_data[1] = msg_data_sz(msg);
                      res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
                      if (res)
                              return res;
   22                 if (anc_data[1]) {
   10                         if (skb_linearize(skb))
                                      return -ENOMEM;
   10                         msg = buf_msg(skb);
                              res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
                                             msg_data(msg));
                              if (res)
                                      return res;
                      }
              }
      
              /* Optionally capture message destination object */
   72         dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
   74         switch (dest_type) {
              case TIPC_NAMED_MSG:
                      has_name = 1;
                      anc_data[0] = msg_nametype(msg);
                      anc_data[1] = msg_namelower(msg);
                      anc_data[2] = msg_namelower(msg);
                      break;
              case TIPC_MCAST_MSG:
                      has_name = 1;
                      anc_data[0] = msg_nametype(msg);
                      anc_data[1] = msg_namelower(msg);
                      anc_data[2] = msg_nameupper(msg);
                      break;
              case TIPC_CONN_MSG:
   71                 has_name = (tsk->conn_type != 0);
                      anc_data[0] = tsk->conn_type;
                      anc_data[1] = tsk->conn_instance;
                      anc_data[2] = tsk->conn_instance;
                      break;
              default:
                      has_name = 0;
              }
              if (has_name) {
                      res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
                      if (res)
                              return res;
              }
      
              return 0;
      }
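
       /*
        * Illustrative sketch (not part of the kernel build): consuming the
        * ancillary objects produced above from user space. TIPC_ERRINFO
        * carries {error code, returned-data size}, TIPC_RETDATA the rejected
        * payload, and TIPC_DESTNAME the {type, lower, upper} destination
        * name; handle_errinfo() and handle_destname() are hypothetical:
        *
        *	char ctrl[256];
        *	struct msghdr m = { 0 };
        *	struct cmsghdr *cm;
        *
        *	m.msg_control = ctrl;
        *	m.msg_controllen = sizeof(ctrl);
        *	if (recvmsg(sd, &m, 0) < 0)
        *		return;
        *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
        *		if (cm->cmsg_level != SOL_TIPC)
        *			continue;
        *		if (cm->cmsg_type == TIPC_ERRINFO)
        *			handle_errinfo((__u32 *)CMSG_DATA(cm));
        *		else if (cm->cmsg_type == TIPC_DESTNAME)
        *			handle_destname((__u32 *)CMSG_DATA(cm));
        *	}
        */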
      
      static void tipc_sk_send_ack(struct tipc_sock *tsk)
      {
              struct sock *sk = &tsk->sk;
   53         struct net *net = sock_net(sk);
              struct sk_buff *skb = NULL;
              struct tipc_msg *msg;
              u32 peer_port = tsk_peer_port(tsk);
              u32 dnode = tsk_peer_node(tsk);
      
   53         if (!tipc_sk_connected(sk))
                      return;
              skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
                                    dnode, tsk_own_node(tsk), peer_port,
                                    tsk->portid, TIPC_OK);
              if (!skb)
                      return;
   52         msg = buf_msg(skb);
              msg_set_conn_ack(msg, tsk->rcv_unacked);
              tsk->rcv_unacked = 0;
      
               /* Adjust to and advertise the correct window limit */
              if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
   52                 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
                      msg_set_adv_win(msg, tsk->rcv_win);
              }
   52         tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
      }
      
      static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
      {
              struct sock *sk = sock->sk;
   89         DEFINE_WAIT_FUNC(wait, woken_wake_function);
              long timeo = *timeop;
              int err = sock_error(sk);
      
              if (err)
                      return err;
      
              for (;;) {
   89                 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
   63                         if (sk->sk_shutdown & RCV_SHUTDOWN) {
                                      err = -ENOTCONN;
                                      break;
                              }
   63                         add_wait_queue(sk_sleep(sk), &wait);
                              release_sock(sk);
                              timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
                              sched_annotate_sleep();
                              lock_sock(sk);
                              remove_wait_queue(sk_sleep(sk), &wait);
                      }
                      err = 0;
    6                 if (!skb_queue_empty(&sk->sk_receive_queue))
                              break;
                      err = -EAGAIN;
   13                 if (!timeo)
                              break;
   13                 err = sock_intr_errno(timeo);
   13                 if (signal_pending(current))
                              break;
      
   13                 err = sock_error(sk);
                      if (err)
                              break;
              }
   88         *timeop = timeo;
   88         return err;
      }
      
      /**
       * tipc_recvmsg - receive packet-oriented message
        * @sock: network socket
        * @m: descriptor for message info
       * @buflen: length of user buffer area
       * @flags: receive flags
       *
       * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
       * If the complete message doesn't fit in user area, truncate it.
       *
       * Returns size of returned message data, errno otherwise
       */
      static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
                               size_t buflen, int flags)
      {
   25         struct sock *sk = sock->sk;
   14         bool connected = !tipc_sk_type_connectionless(sk);
              struct tipc_sock *tsk = tipc_sk(sk);
              int rc, err, hlen, dlen, copy;
              struct sk_buff_head xmitq;
              struct tipc_msg *hdr;
              struct sk_buff *skb;
              bool grp_evt;
              long timeout;
      
              /* Catch invalid receive requests */
   25         if (unlikely(!buflen))
                      return -EINVAL;
      
   23         lock_sock(sk);
              if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
                      rc = -ENOTCONN;
                      goto exit;
              }
   23         timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
      
              /* Step rcv queue to first msg with data or error; wait if necessary */
              do {
   23                 rc = tipc_wait_for_rcvmsg(sock, &timeout);
                      if (unlikely(rc))
                              goto exit;
   22                 skb = skb_peek(&sk->sk_receive_queue);
   22                 hdr = buf_msg(skb);
                      dlen = msg_data_sz(hdr);
   20                 hlen = msg_hdr_sz(hdr);
                      err = msg_errcode(hdr);
   17                 grp_evt = msg_is_grp_evt(hdr);
                      if (likely(dlen || err))
                              break;
    3                 tsk_advance_rx_queue(sk);
              } while (1);
      
              /* Collect msg meta data, including error code and rejected data */
   21         tipc_sk_set_orig_addr(m, skb);
   14         rc = tipc_sk_anc_data_recv(m, skb, tsk);
              if (unlikely(rc))
                      goto exit;
              hdr = buf_msg(skb);
      
              /* Capture data if non-error msg, otherwise just set return value */
              if (likely(!err)) {
   10                 copy = min_t(int, dlen, buflen);
                      if (unlikely(copy != dlen))
    2                         m->msg_flags |= MSG_TRUNC;
   10                 rc = skb_copy_datagram_msg(skb, hlen, m, copy);
              } else {
                      copy = 0;
                      rc = 0;
   10                 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
                              rc = -ECONNRESET;
              }
              if (unlikely(rc))
                      goto exit;
      
              /* Mark message as group event if applicable */
              if (unlikely(grp_evt)) {
                      if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
                              m->msg_flags |= MSG_EOR;
                      m->msg_flags |= MSG_OOB;
                      copy = 0;
              }
      
               /* Capture of data or error code/rejected data was successful */
   17         if (unlikely(flags & MSG_PEEK))
                      goto exit;
      
              /* Send group flow control advertisement when applicable */
   16         if (tsk->group && msg_in_group(hdr) && !grp_evt) {
                      skb_queue_head_init(&xmitq);
                      tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
                                                msg_orignode(hdr), msg_origport(hdr),
                                                &xmitq);
                      tipc_node_distr_xmit(sock_net(sk), &xmitq);
              }
      
   16         tsk_advance_rx_queue(sk);
      
              if (likely(!connected))
                      goto exit;
      
              /* Send connection flow control advertisement when applicable */
   12         tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
              if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
    2                 tipc_sk_send_ack(tsk);
      exit:
   19         release_sock(sk);
              return rc ? rc : copy;
      }
      
      /**
       * tipc_recvstream - receive stream-oriented data
        * @sock: network socket
        * @m: descriptor for message info
       * @buflen: total size of user buffer area
       * @flags: receive flags
       *
        * Used for SOCK_STREAM messages only.  If not enough data is available,
        * it will optionally wait for more; never truncates data.
       *
       * Returns size of returned message data, errno otherwise
       */
      static int tipc_recvstream(struct socket *sock, struct msghdr *m,
                                 size_t buflen, int flags)
      {
   66         struct sock *sk = sock->sk;
              struct tipc_sock *tsk = tipc_sk(sk);
              struct sk_buff *skb;
              struct tipc_msg *hdr;
              struct tipc_skb_cb *skb_cb;
              bool peek = flags & MSG_PEEK;
              int offset, required, copy, copied = 0;
              int hlen, dlen, err, rc;
              long timeout;
      
              /* Catch invalid receive attempts */
              if (unlikely(!buflen))
                      return -EINVAL;
      
   66         lock_sock(sk);
      
              if (unlikely(sk->sk_state == TIPC_OPEN)) {
                      rc = -ENOTCONN;
                      goto exit;
              }
   66         required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
   66         timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
      
              do {
                      /* Look at first msg in receive queue; wait if necessary */
                       rc = tipc_wait_for_rcvmsg(sock, &timeout);
                       if (unlikely(rc))
                               break;
                       skb = skb_peek(&sk->sk_receive_queue);
                       skb_cb = TIPC_SKB_CB(skb);
                       hdr = buf_msg(skb);
                       dlen = msg_data_sz(hdr);
                       hlen = msg_hdr_sz(hdr);
                       err = msg_errcode(hdr);

                       /* Discard any empty non-errored (SYN-) message */
                       if (unlikely(!dlen && !err)) {
                               tsk_advance_rx_queue(sk);
                               continue;
                       }

                       /* Collect msg meta data, incl. error code and rejected data */
                       if (!copied) {
                               tipc_sk_set_orig_addr(m, skb);
                               rc = tipc_sk_anc_data_recv(m, skb, tsk);
                               if (rc)
                                       break;
                               hdr = buf_msg(skb);
                       }

                       /* Copy data if msg ok, otherwise return error/partial data */
                       if (likely(!err)) {
                               offset = skb_cb->bytes_read;
                               copy = min_t(int, dlen - offset, buflen - copied);
                               rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
                               if (unlikely(rc))
                                       break;
                               copied += copy;
                               offset += copy;
                               if (unlikely(offset < dlen)) {
                                       if (!peek)
                                               skb_cb->bytes_read = offset;
                                       break;
                               }
                       } else {
                               rc = 0;
                               if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
                                       rc = -ECONNRESET;
                               if (copied || rc)
                                       break;
                       }

                       if (unlikely(peek))
                               break;

                       tsk_advance_rx_queue(sk);

                       /* Send connection flow control advertisement when applicable */
                       tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
                       if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
                               tipc_sk_send_ack(tsk);

                       /* Exit if all requested data or FIN/error received */
                       if (copied == buflen || err)
                               break;

               } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
       exit:
               release_sock(sk);
               return copied ? copied : rc;
      }
      
      /**
       * tipc_write_space - wake up thread if port congestion is released
       * @sk: socket
       */
      static void tipc_write_space(struct sock *sk)
      {
              struct socket_wq *wq;
      
               rcu_read_lock();
               wq = rcu_dereference(sk->sk_wq);
               if (skwq_has_sleeper(wq))
                       wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
                                                       EPOLLWRNORM | EPOLLWRBAND);
               rcu_read_unlock();
      }
      
       /**
        * tipc_data_ready - wake up threads to indicate messages have been received
        * @sk: socket
        */
      static void tipc_data_ready(struct sock *sk)
      {
              struct socket_wq *wq;
      
               rcu_read_lock();
               wq = rcu_dereference(sk->sk_wq);
               if (skwq_has_sleeper(wq))
                       wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
                                                       EPOLLRDNORM | EPOLLRDBAND);
               rcu_read_unlock();
      }
      
      static void tipc_sock_destruct(struct sock *sk)
      {
              __skb_queue_purge(&sk->sk_receive_queue);
      }
      
      static void tipc_sk_proto_rcv(struct sock *sk,
                                    struct sk_buff_head *inputq,
                                    struct sk_buff_head *xmitq)
      {
               struct sk_buff *skb = __skb_dequeue(inputq);
               struct tipc_sock *tsk = tipc_sk(sk);
               struct tipc_msg *hdr = buf_msg(skb);
              struct tipc_group *grp = tsk->group;
              bool wakeup = false;
      
              switch (msg_user(hdr)) {
              case CONN_MANAGER:
                       tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
                      return;
              case SOCK_WAKEUP:
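                       /* A congested link has opened up again; remove the
                        * destination from the congestion list so blocked
                        * senders can retry.
                        */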
                      tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
                      /* coupled with smp_rmb() in tipc_wait_for_cond() */
                      smp_wmb();
                      tsk->cong_link_cnt--;
                      wakeup = true;
                      break;
              case GROUP_PROTOCOL:
                       tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
                      break;
              case TOP_SRV:
                       tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
                                             hdr, inputq, xmitq);
                      break;
              default:
                      break;
              }
      
               if (wakeup)
                       sk->sk_write_space(sk);

               kfree_skb(skb);
      }
      
      /**
       * tipc_sk_filter_connect - check incoming message for a connection-based socket
       * @tsk: TIPC socket
       * @skb: pointer to message buffer.
       * Returns true if message should be added to receive queue, false otherwise
       */
      static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
      {
               struct sock *sk = &tsk->sk;
               struct net *net = sock_net(sk);
               struct tipc_msg *hdr = buf_msg(skb);
               bool con_msg = msg_connected(hdr);
               u32 pport = tsk_peer_port(tsk);
               u32 pnode = tsk_peer_node(tsk);
               u32 oport = msg_origport(hdr);
               u32 onode = msg_orignode(hdr);
               int err = msg_errcode(hdr);
               unsigned long delay;

               if (unlikely(msg_mcast(hdr)))
                       return false;

               switch (sk->sk_state) {
              case TIPC_CONNECTING:
                      /* Setup ACK */
                      if (likely(con_msg)) {
                              if (err)
                                      break;
                              tipc_sk_finish_conn(tsk, oport, onode);
                              msg_set_importance(&tsk->phdr, msg_importance(hdr));
                              /* ACK+ message with data is added to receive queue */
                              if (msg_data_sz(hdr))
                                      return true;
                               /* Empty ACK-: wake up sleeping connect() and drop */
                              sk->sk_state_change(sk);
                              msg_set_dest_droppable(hdr, 1);
                              return false;
                      }
                      /* Ignore connectionless message if not from listening socket */
                      if (oport != pport || onode != pnode)
                              return false;
      
                      /* Rejected SYN */
                      if (err != TIPC_ERR_OVERLOAD)
                              break;
      
                      /* Prepare for new setup attempt if we have a SYN clone */
                      if (skb_queue_empty(&sk->sk_write_queue))
                              break;
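                       /* Rejected due to overload: retransmit the queued SYN
                        * after 100 ms plus a random share of a quarter of the
                        * connect timeout, spreading out the retries.
                        */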
                      get_random_bytes(&delay, 2);
                      delay %= (tsk->conn_timeout / 4);
                      delay = msecs_to_jiffies(delay + 100);
                      sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
                      return false;
              case TIPC_OPEN:
              case TIPC_DISCONNECTING:
                      return false;
              case TIPC_LISTEN:
                      /* Accept only SYN message */
                      if (!msg_is_syn(hdr) &&
                           tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
                               return false;
                       if (!con_msg && !err)
                              return true;
                      return false;
              case TIPC_ESTABLISHED:
                      /* Accept only connection-based messages sent by peer */
                       if (likely(con_msg && !err && pport == oport && pnode == onode))
                               return true;
                       if (!tsk_peer_msg(tsk, hdr))
                               return false;
                       if (!err)
                               return true;
                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                       tipc_node_remove_conn(net, pnode, tsk->portid);
                      sk->sk_state_change(sk);
                      return true;
              default:
                      pr_err("Unknown sk_state %u\n", sk->sk_state);
              }
              /* Abort connection setup attempt */
              tipc_set_sk_state(sk, TIPC_DISCONNECTING);
              sk->sk_err = ECONNREFUSED;
              sk->sk_state_change(sk);
              return true;
      }
      
      /**
       * rcvbuf_limit - get proper overload limit of socket receive queue
       * @sk: socket
       * @skb: message
       *
       * For connection oriented messages, irrespective of importance,
       * default queue limit is 2 MB.
       *
       * For connectionless messages, queue limits are based on message
       * importance as follows:
       *
       * TIPC_LOW_IMPORTANCE       (2 MB)
       * TIPC_MEDIUM_IMPORTANCE    (4 MB)
       * TIPC_HIGH_IMPORTANCE      (8 MB)
       * TIPC_CRITICAL_IMPORTANCE  (16 MB)
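        *
        * i.e. sk_rcvbuf << importance: the limit doubles for each
        * importance level, starting from the 2 MB default.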
       *
       * Returns overload limit according to corresponding message importance
       */
      static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
      {
              struct tipc_sock *tsk = tipc_sk(sk);
               struct tipc_msg *hdr = buf_msg(skb);

               if (unlikely(msg_in_group(hdr)))
                       return sk->sk_rcvbuf;

               if (unlikely(!msg_connected(hdr)))
                       return sk->sk_rcvbuf << msg_importance(hdr);

               if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
                      return sk->sk_rcvbuf;
      
              return FLOWCTL_MSG_LIM;
      }
      
       /**
        * tipc_sk_filter_rcv - validate incoming message
        * @sk: socket
        * @skb: pointer to message.
        * @xmitq: output queue for response/rejected messages
        *
        * Enqueues message on receive queue if acceptable; optionally handles
        * disconnect indication for a connected socket.
        *
        * Called with socket lock already taken
        */
      static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
                                     struct sk_buff_head *xmitq)
      {
               bool sk_conn = !tipc_sk_type_connectionless(sk);
               struct tipc_sock *tsk = tipc_sk(sk);
               struct tipc_group *grp = tsk->group;
               struct tipc_msg *hdr = buf_msg(skb);
               struct net *net = sock_net(sk);
               struct sk_buff_head inputq;
               int mtyp = msg_type(hdr);
               int limit, err = TIPC_OK;

               trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
               TIPC_SKB_CB(skb)->bytes_read = 0;
               __skb_queue_head_init(&inputq);
               __skb_queue_tail(&inputq, skb);

               if (unlikely(!msg_isdata(hdr)))
                       tipc_sk_proto_rcv(sk, &inputq, xmitq);

               if (unlikely(grp))
                       tipc_group_filter_msg(grp, &inputq, xmitq);

               if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
                       tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
      
              /* Validate and add to receive buffer if there is space */
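               /* Note: the limit is recomputed for each buffer, since group,
                * connectionless and connection messages have different limits
                * (see rcvbuf_limit())
                */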
               while ((skb = __skb_dequeue(&inputq))) {
                       hdr = buf_msg(skb);
                       limit = rcvbuf_limit(sk, skb);
                       if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
                           (!sk_conn && msg_connected(hdr)) ||
                           (!grp && msg_in_group(hdr)))
                               err = TIPC_ERR_NO_PORT;
                       else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
                               trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
                                                  "err_overload2!");
                               atomic_inc(&sk->sk_drops);
                               err = TIPC_ERR_OVERLOAD;
                       }

                       if (unlikely(err)) {
                               if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
                                       trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
                                                             "@filter_rcv!");
                                       __skb_queue_tail(xmitq, skb);
                               }
                               err = TIPC_OK;
                               continue;
                       }
                       __skb_queue_tail(&sk->sk_receive_queue, skb);
                       skb_set_owner_r(skb, sk);
                       trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
                                                "rcvq >90% allocated!");
                       sk->sk_data_ready(sk);
               }
              }
      }
      
      /**
       * tipc_sk_backlog_rcv - handle incoming message from backlog queue
       * @sk: socket
       * @skb: message
       *
       * Caller must hold socket lock
       */
      static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
      {
               unsigned int before = sk_rmem_alloc_get(sk);
              struct sk_buff_head xmitq;
              unsigned int added;
      
              __skb_queue_head_init(&xmitq);
      
              tipc_sk_filter_rcv(sk, skb, &xmitq);
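               /* Bytes moved onto the receive queue here are still counted in
                * the backlog; record them so tipc_sk_enqueue() can widen the
                * backlog limit to compensate.
                */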
              added = sk_rmem_alloc_get(sk) - before;
              atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
      
              /* Send pending response/rejected messages, if any */
              tipc_node_distr_xmit(sock_net(sk), &xmitq);
              return 0;
      }
      
       /**
        * tipc_sk_enqueue - extract all buffers with destination 'dport' from
        *                   inputq and try adding them to socket or backlog queue
        * @inputq: list of incoming buffers with potentially different destinations
        * @sk: socket where the buffers should be enqueued
        * @dport: port number for the socket
        * @xmitq: output queue for response/rejected messages
        *
        * Caller must hold socket lock
        */
      static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
                                  u32 dport, struct sk_buff_head *xmitq)
      {
               unsigned long time_limit = jiffies + 2;
              struct sk_buff *skb;
              unsigned int lim;
              atomic_t *dcnt;
              u32 onode;
      
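               /* Spend at most ~2 jiffies here per call, so that a long input
                * queue cannot monopolize this context (see time_limit above)
                */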
               while (skb_queue_len(inputq)) {
                       if (unlikely(time_after_eq(jiffies, time_limit)))
                               return;

                       skb = tipc_skb_dequeue(inputq, dport);
                       if (unlikely(!skb))
                               return;

                       /* Add message directly to receive queue if possible */
                       if (!sock_owned_by_user(sk)) {
                               tipc_sk_filter_rcv(sk, skb, xmitq);
                               continue;
                       }

                       /* Try backlog, compensating for double-counted bytes */
                       dcnt = &tipc_sk(sk)->dupl_rcvcnt;
                       if (!sk->sk_backlog.len)
                               atomic_set(dcnt, 0);
                       lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
                       if (likely(!sk_add_backlog(sk, skb, lim))) {
                               trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
                                                       "bklg & rcvq >90% allocated!");
                              continue;
                      }
      
                      trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
                      /* Overload => reject message back to sender */
                      onode = tipc_own_addr(sock_net(sk));
                      atomic_inc(&sk->sk_drops);
                      if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
                              trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
                                                    "@sk_enqueue!");
                              __skb_queue_tail(xmitq, skb);
                      }
                      break;
              }
      }
      
       /**
        * tipc_sk_rcv - handle a chain of incoming buffers
        * @net: the associated network namespace
        * @inputq: buffer list containing the buffers
        * Consumes all buffers in list until inputq is empty
        * Note: may be called in multiple threads referring to the same queue
        */
      void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
      {
              struct sk_buff_head xmitq;
              u32 dnode, dport = 0;
              int err;
              struct tipc_sock *tsk;
              struct sock *sk;
              struct sk_buff *skb;
      
               __skb_queue_head_init(&xmitq);
               while (skb_queue_len(inputq)) {
                       dport = tipc_skb_peek_port(inputq, dport);
                       tsk = tipc_sk_lookup(net, dport);

                       if (likely(tsk)) {
                               sk = &tsk->sk;
                               if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
                                       tipc_sk_enqueue(inputq, sk, dport, &xmitq);
                                       spin_unlock_bh(&sk->sk_lock.slock);
                               }
                               /* Send pending response/rejected messages, if any */
                               tipc_node_distr_xmit(sock_net(sk), &xmitq);
                               sock_put(sk);
                               continue;
                       }
                       /* No destination socket => dequeue skb if still there */
                       skb = tipc_skb_dequeue(inputq, dport);
                       if (!skb)
                               return;

                       /* Try secondary lookup if unresolved named message */
                       err = TIPC_ERR_NO_PORT;
                       if (tipc_msg_lookup_dest(net, skb, &err))
                               goto xmit;

                       /* Prepare for message rejection */
                       if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                               continue;

                       trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
       xmit:
                       dnode = msg_destnode(buf_msg(skb));
                      tipc_node_xmit_skb(net, skb, dnode, dport);
              }
      }
      
      static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
      {
              DEFINE_WAIT_FUNC(wait, woken_wake_function);
              struct sock *sk = sock->sk;
              int done;
      
              do {
                      int err = sock_error(sk);
                      if (err)
                              return err;
                      if (!*timeo_p)
                              return -ETIMEDOUT;
                      if (signal_pending(current))
                              return sock_intr_errno(*timeo_p);
      
                      add_wait_queue(sk_sleep(sk), &wait);
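                       /* sk_wait_event() releases the socket lock while
                        * sleeping and re-acquires it before returning
                        */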
                      done = sk_wait_event(sk, timeo_p,
                                           sk->sk_state != TIPC_CONNECTING, &wait);
                      remove_wait_queue(sk_sleep(sk), &wait);
              } while (!done);
              return 0;
      }
      
      static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
      {
              if (addr->family != AF_TIPC)
                      return false;
              if (addr->addrtype == TIPC_SERVICE_RANGE)
                      return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
              return (addr->addrtype == TIPC_SERVICE_ADDR ||
                      addr->addrtype == TIPC_SOCKET_ADDR);
      }
      
      /**
       * tipc_connect - establish a connection to another TIPC port
       * @sock: socket structure
       * @dest: socket address for destination port
       * @destlen: size of socket address data structure
       * @flags: file-related flags associated with socket
       *
       * Returns 0 on success, errno otherwise
       */
      static int tipc_connect(struct socket *sock, struct sockaddr *dest,
                              int destlen, int flags)
      {
              struct sock *sk = sock->sk;
              struct tipc_sock *tsk = tipc_sk(sk);
              struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
              struct msghdr m = {NULL,};
              long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
              int previous;
              int res = 0;
      
              if (destlen != sizeof(struct sockaddr_tipc))
                      return -EINVAL;
      
              lock_sock(sk);
      
              if (tsk->group) {
                      res = -EINVAL;
                      goto exit;
              }
      
              if (dst->family == AF_UNSPEC) {
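                       /* Per BSD convention, connect() with AF_UNSPEC dissolves
                        * any previously set default destination; this is only
                        * meaningful for connectionless (DGRAM/RDM) sockets.
                        */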
                      memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
                      if (!tipc_sk_type_connectionless(sk))
                              res = -EINVAL;
                      goto exit;
              }
              if (!tipc_sockaddr_is_sane(dst)) {
                      res = -EINVAL;
                      goto exit;
              }
              /* DGRAM/RDM connect(), just save the destaddr */
              if (tipc_sk_type_connectionless(sk)) {
                      memcpy(&tsk->peer, dest, destlen);
                      goto exit;
              } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
                      res = -EINVAL;
                      goto exit;
              }
      
              previous = sk->sk_state;
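               /* 'previous' selects between -EINPROGRESS and -EALREADY for a
                * non-blocking connect() below
                */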
      
              switch (sk->sk_state) {
              case TIPC_OPEN:
                      /* Send a 'SYN-' to destination */
                      m.msg_name = dest;
                      m.msg_namelen = destlen;
      
                       /* For a non-blocking connect(), set MSG_DONTWAIT so
                        * that __tipc_sendmsg() never blocks.
                        */
                      if (!timeout)
                              m.msg_flags = MSG_DONTWAIT;
      
                      res = __tipc_sendmsg(sock, &m, 0);
                      if ((res < 0) && (res != -EWOULDBLOCK))
                              goto exit;
      
                      /* Just entered TIPC_CONNECTING state; the only
                       * difference is that return value in non-blocking
                       * case is EINPROGRESS, rather than EALREADY.
                       */
                      res = -EINPROGRESS;
                      /* fall through */
              case TIPC_CONNECTING:
                      if (!timeout) {
                              if (previous == TIPC_CONNECTING)
                                      res = -EALREADY;
                              goto exit;
                      }
                      timeout = msecs_to_jiffies(timeout);
                      /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
                      res = tipc_wait_for_connect(sock, &timeout);
                      break;
              case TIPC_ESTABLISHED:
                      res = -EISCONN;
                      break;
              default:
                      res = -EINVAL;
              }
      
      exit:
              release_sock(sk);
              return res;
      }
      
      /**
       * tipc_listen - allow socket to listen for incoming connections
       * @sock: socket structure
       * @len: (unused)
       *
       * Returns 0 on success, errno otherwise
       */
      static int tipc_listen(struct socket *sock, int len)
      {
              struct sock *sk = sock->sk;
              int res;
      
              lock_sock(sk);
              res = tipc_set_sk_state(sk, TIPC_LISTEN);
              release_sock(sk);
      
              return res;
      }
      
      static int tipc_wait_for_accept(struct socket *sock, long timeo)
      {
              struct sock *sk = sock->sk;
              DEFINE_WAIT(wait);
              int err;
      
              /* True wake-one mechanism for incoming connections: only
               * one process gets woken up, not the 'whole herd'.
               * Since we do not 'race & poll' for established sockets
               * anymore, the common case will execute the loop only once.
                */
              for (;;) {
                      prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                                TASK_INTERRUPTIBLE);
                      if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
                              release_sock(sk);
                              timeo = schedule_timeout(timeo);
                              lock_sock(sk);
                      }
                      err = 0;
                      if (!skb_queue_empty(&sk->sk_receive_queue))
                              break;
                      err = -EAGAIN;
                      if (!timeo)
                              break;
                      err = sock_intr_errno(timeo);
                      if (signal_pending(current))
                              break;
              }
              finish_wait(sk_sleep(sk), &wait);
              return err;
      }
      
       /**
        * tipc_accept - wait for connection request
        * @sock: listening socket
        * @new_sock: new socket that is to be connected
        * @flags: file-related flags associated with socket
        * @kern: caused by kernel or by userspace?
        *
        * Returns 0 on success, errno otherwise
        */
      static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
                             bool kern)
      {
              struct sock *new_sk, *sk = sock->sk;
              struct sk_buff *buf;
              struct tipc_sock *new_tsock;
              struct tipc_msg *msg;
              long timeo;
              int res;
      
              lock_sock(sk);
      
              if (sk->sk_state != TIPC_LISTEN) {
                      res = -EINVAL;
                      goto exit;
              }
              timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
              res = tipc_wait_for_accept(sock, timeo);
              if (res)
                      goto exit;
      
              buf = skb_peek(&sk->sk_receive_queue);
      
              res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
              if (res)
                      goto exit;
              security_sk_clone(sock->sk, new_sock->sk);
      
              new_sk = new_sock->sk;
              new_tsock = tipc_sk(new_sk);
              msg = buf_msg(buf);
      
              /* we lock on new_sk; but lockdep sees the lock on sk */
              lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
      
              /*
               * Reject any stray messages received by new socket
               * before the socket lock was taken (very, very unlikely)
               */
              tsk_rej_rx_queue(new_sk);
      
               /* Connect new socket to its peer */
              tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
      
              tsk_set_importance(new_tsock, msg_importance(msg));
              if (msg_named(msg)) {
                      new_tsock->conn_type = msg_nametype(msg);
                      new_tsock->conn_instance = msg_nameinst(msg);
              }
      
              /*
               * Respond to 'SYN-' by discarding it & returning 'ACK'-.
               * Respond to 'SYN+' by queuing it on new socket.
               */
              if (!msg_data_sz(msg)) {
                      struct msghdr m = {NULL,};
      
                      tsk_advance_rx_queue(sk);
                      __tipc_sendstream(new_sock, &m, 0);
              } else {
                      __skb_dequeue(&sk->sk_receive_queue);
                      __skb_queue_head(&new_sk->sk_receive_queue, buf);
                      skb_set_owner_r(buf, new_sk);
              }
              release_sock(new_sk);
      exit:
              release_sock(sk);
              return res;
      }
      
      /**
       * tipc_shutdown - shutdown socket connection
       * @sock: socket structure
       * @how: direction to close (must be SHUT_RDWR)
       *
       * Terminates connection (if necessary), then purges socket's receive queue.
       *
       * Returns 0 on success, errno otherwise
       */
      static int tipc_shutdown(struct socket *sock, int how)
      {
              struct sock *sk = sock->sk;
              int res;
      
              if (how != SHUT_RDWR)
                      return -EINVAL;
      
              lock_sock(sk);
      
              trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
              __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
              sk->sk_shutdown = SEND_SHUTDOWN;
      
              if (sk->sk_state == TIPC_DISCONNECTING) {
                      /* Discard any unreceived messages */
                      __skb_queue_purge(&sk->sk_receive_queue);
      
                      /* Wake up anyone sleeping in poll */
                      sk->sk_state_change(sk);
                      res = 0;
              } else {
                      res = -ENOTCONN;
              }
      
              release_sock(sk);
              return res;
      }
      
      static void tipc_sk_check_probing_state(struct sock *sk,
                                              struct sk_buff_head *list)
      {
              struct tipc_sock *tsk = tipc_sk(sk);
              u32 pnode = tsk_peer_node(tsk);
              u32 pport = tsk_peer_port(tsk);
              u32 self = tsk_own_node(tsk);
              u32 oport = tsk->portid;
              struct sk_buff *skb;
      
              if (tsk->probe_unacked) {
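                       /* The previous probe was never acknowledged: consider
                        * the peer unreachable and abort the connection
                        */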
                      tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                      sk->sk_err = ECONNABORTED;
                      tipc_node_remove_conn(sock_net(sk), pnode, pport);
                      sk->sk_state_change(sk);
                      return;
              }
              /* Prepare new probe */
              skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
                                    pnode, self, pport, oport, TIPC_OK);
              if (skb)
                      __skb_queue_tail(list, skb);
              tsk->probe_unacked = true;
              sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
      }
      
      static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
      {
              struct tipc_sock *tsk = tipc_sk(sk);
      
              /* Try again later if dest link is congested */
              if (tsk->cong_link_cnt) {
                      sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
                      return;
              }
              /* Prepare SYN for retransmit */
              tipc_msg_skb_clone(&sk->sk_write_queue, list);
      }
      
      static void tipc_sk_timeout(struct timer_list *t)
      {
              struct sock *sk = from_timer(sk, t, sk_timer);
              struct tipc_sock *tsk = tipc_sk(sk);
              u32 pnode = tsk_peer_node(tsk);
              struct sk_buff_head list;
              int rc = 0;
      
              skb_queue_head_init(&list);
              bh_lock_sock(sk);
      
              /* Try again later if socket is busy */
              if (sock_owned_by_user(sk)) {
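                       /* Retry in 50 ms (HZ / 20) */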
                      sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
                      bh_unlock_sock(sk);
                      return;
              }
      
              if (sk->sk_state == TIPC_ESTABLISHED)
                      tipc_sk_check_probing_state(sk, &list);