/* SPDX-License-Identifier: GPL-2.0 */
      /*
       * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
       *
       * (C) SGI 2006, Christoph Lameter
       *         Cleaned up and restructured to ease the addition of alternative
       *         implementations of SLAB allocators.
       * (C) Linux Foundation 2008-2013
       *      Unified interface for all slab allocators
       */
      
      #ifndef _LINUX_SLAB_H
      #define        _LINUX_SLAB_H
      
      #include <linux/gfp.h>
      #include <linux/overflow.h>
      #include <linux/types.h>
      #include <linux/workqueue.h>
      #include <linux/percpu-refcount.h>
      
      
      /*
       * Flags to pass to kmem_cache_create().
       * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
       */
      /* DEBUG: Perform (expensive) checks on alloc/free */
      #define SLAB_CONSISTENCY_CHECKS        ((slab_flags_t __force)0x00000100U)
      /* DEBUG: Red zone objs in a cache */
      #define SLAB_RED_ZONE                ((slab_flags_t __force)0x00000400U)
      /* DEBUG: Poison objects */
      #define SLAB_POISON                ((slab_flags_t __force)0x00000800U)
      /* Align objs on cache lines */
      #define SLAB_HWCACHE_ALIGN        ((slab_flags_t __force)0x00002000U)
      /* Use GFP_DMA memory */
      #define SLAB_CACHE_DMA                ((slab_flags_t __force)0x00004000U)
      /* Use GFP_DMA32 memory */
      #define SLAB_CACHE_DMA32        ((slab_flags_t __force)0x00008000U)
      /* DEBUG: Store the last owner for bug hunting */
      #define SLAB_STORE_USER                ((slab_flags_t __force)0x00010000U)
      /* Panic if kmem_cache_create() fails */
      #define SLAB_PANIC                ((slab_flags_t __force)0x00040000U)
      /*
       * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
       *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free(),
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using it is relying on an independent
 * object validation pass. Something like:
       *
       *  rcu_read_lock()
       * again:
       *  obj = lockless_lookup(key);
       *  if (obj) {
       *    if (!try_get_ref(obj)) // might fail for free objects
       *      goto again;
       *
       *    if (obj->key != key) { // not the object we expected
       *      put_ref(obj);
       *      goto again;
       *    }
       *  }
       *  rcu_read_unlock();
       *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * Take rcu_read_lock() before reading the address, then rcu_read_unlock()
 * after taking the spinlock within the structure expected at that address.
       *
       * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
       */
      /* Defer freeing slabs to RCU */
      #define SLAB_TYPESAFE_BY_RCU        ((slab_flags_t __force)0x00080000U)
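
/*
 * A minimal usage sketch (hypothetical struct my_obj): because objects can
 * be reused under readers, fields that lockless readers rely on (e.g. the
 * lock used for validation) are set up in the constructor, which runs when
 * the backing memory is first allocated rather than on every allocation.
 *
 *  static void my_obj_ctor(void *p)
 *  {
 *    struct my_obj *obj = p;
 *
 *    spin_lock_init(&obj->lock);
 *  }
 *
 *  my_obj_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
 *                                    SLAB_TYPESAFE_BY_RCU, my_obj_ctor);
 */
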
      /* Spread some memory over cpuset */
      #define SLAB_MEM_SPREAD                ((slab_flags_t __force)0x00100000U)
      /* Trace allocations and frees */
      #define SLAB_TRACE                ((slab_flags_t __force)0x00200000U)
      
      /* Flag to prevent checks on free */
      #ifdef CONFIG_DEBUG_OBJECTS
      # define SLAB_DEBUG_OBJECTS        ((slab_flags_t __force)0x00400000U)
      #else
      # define SLAB_DEBUG_OBJECTS        0
      #endif
      
      /* Avoid kmemleak tracing */
      #define SLAB_NOLEAKTRACE        ((slab_flags_t __force)0x00800000U)
      
      /* Fault injection mark */
      #ifdef CONFIG_FAILSLAB
      # define SLAB_FAILSLAB                ((slab_flags_t __force)0x02000000U)
      #else
      # define SLAB_FAILSLAB                0
      #endif
      /* Account to memcg */
      #ifdef CONFIG_MEMCG_KMEM
      # define SLAB_ACCOUNT                ((slab_flags_t __force)0x04000000U)
      #else
      # define SLAB_ACCOUNT                0
      #endif
      
      #ifdef CONFIG_KASAN
      #define SLAB_KASAN                ((slab_flags_t __force)0x08000000U)
      #else
      #define SLAB_KASAN                0
      #endif
      
      /* The following flags affect the page allocator grouping pages by mobility */
      /* Objects are reclaimable */
      #define SLAB_RECLAIM_ACCOUNT        ((slab_flags_t __force)0x00020000U)
      #define SLAB_TEMPORARY                SLAB_RECLAIM_ACCOUNT        /* Objects are short-lived */
      
      /* Slab deactivation flag */
      #define SLAB_DEACTIVATED        ((slab_flags_t __force)0x10000000U)
      
      /*
       * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
       *
       * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
       *
       * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
       * Both make kfree a no-op.
       */
      #define ZERO_SIZE_PTR ((void *)16)
      
      #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                                      (unsigned long)ZERO_SIZE_PTR)
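
/*
 * For illustration:
 *
 *  buf = kmalloc(0, GFP_KERNEL);       // buf == ZERO_SIZE_PTR
 *  if (ZERO_OR_NULL_PTR(buf))
 *    ...                               // nothing usable behind buf
 *  kfree(buf);                         // no-op, like kfree(NULL)
 */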
      
      #include <linux/kasan.h>
      
      struct mem_cgroup;
      /*
       * struct kmem_cache related prototypes
       */
      void __init kmem_cache_init(void);
      bool slab_is_available(void);
      
      extern bool usercopy_fallback;
      
      struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
                              unsigned int align, slab_flags_t flags,
                              void (*ctor)(void *));
      struct kmem_cache *kmem_cache_create_usercopy(const char *name,
                              unsigned int size, unsigned int align,
                              slab_flags_t flags,
                              unsigned int useroffset, unsigned int usersize,
                              void (*ctor)(void *));
      void kmem_cache_destroy(struct kmem_cache *);
      int kmem_cache_shrink(struct kmem_cache *);
      
      void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
      void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
      
      /*
       * Please use this macro to create slab caches. Simply specify the
       * name of the structure and maybe some flags that are listed above.
       *
 * The alignment of the struct determines object alignment. If you,
 * for example, add ____cacheline_aligned_in_smp to the struct
 * declaration, then the objects will be properly aligned in SMP
 * configurations.
       */
      #define KMEM_CACHE(__struct, __flags)                                        \
                      kmem_cache_create(#__struct, sizeof(struct __struct),        \
                              __alignof__(struct __struct), (__flags), NULL)
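
/*
 * A minimal usage sketch with a made-up structure:
 *
 *  struct my_node {
 *    struct list_head list;
 *    unsigned long key;
 *  } ____cacheline_aligned_in_smp;
 *
 *  my_node_cachep = KMEM_CACHE(my_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 */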
      
      /*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
       */
      #define KMEM_CACHE_USERCOPY(__struct, __flags, __field)                        \
                      kmem_cache_create_usercopy(#__struct,                        \
                              sizeof(struct __struct),                        \
                              __alignof__(struct __struct), (__flags),        \
                              offsetof(struct __struct, __field),                \
                              sizeof_field(struct __struct, __field), NULL)
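
/*
 * A minimal usage sketch (hypothetical struct): only the @data field may be
 * copied to/from userspace; everything else stays off limits to usercopy.
 *
 *  struct my_req {
 *    struct list_head list;
 *    char data[64];
 *  };
 *
 *  my_req_cachep = KMEM_CACHE_USERCOPY(my_req, SLAB_HWCACHE_ALIGN, data);
 */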
      
      /*
       * Common kmalloc functions provided by all allocators
       */
      void * __must_check krealloc(const void *, size_t, gfp_t);
      void kfree(const void *);
      void kzfree(const void *);
      size_t __ksize(const void *);
      size_t ksize(const void *);
      
      #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
      void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
                              bool to_user);
      #else
      static inline void __check_heap_object(const void *ptr, unsigned long n,
                                             struct page *page, bool to_user) { }
      #endif
      
      /*
       * Some archs want to perform DMA into kmalloc caches and need a guaranteed
       * alignment larger than the alignment of a 64-bit integer.
       * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
       */
      #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
      #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
      #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
      #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
      #else
      #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
      #endif
      
      /*
       * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
       * Intended for arches that get misalignment faults even for 64 bit integer
       * aligned buffers.
       */
      #ifndef ARCH_SLAB_MINALIGN
      #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
      #endif
      
      /*
       * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
       * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
       * aligned pointers.
       */
      #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
      #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
      #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
      
      /*
       * Kmalloc array related definitions
       */
      
      #ifdef CONFIG_SLAB
      /*
       * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
       * less than 32 MB.
       *
 * WARNING: It's not easy to increase this value since the allocators have
       * to do various tricks to work around compiler limitations in order to
       * ensure proper constant folding.
       */
      #define KMALLOC_SHIFT_HIGH        ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
                                      (MAX_ORDER + PAGE_SHIFT - 1) : 25)
      #define KMALLOC_SHIFT_MAX        KMALLOC_SHIFT_HIGH
      #ifndef KMALLOC_SHIFT_LOW
      #define KMALLOC_SHIFT_LOW        5
      #endif
      #endif
      
      #ifdef CONFIG_SLUB
      /*
 * SLUB directly allocates requests fitting into an order-1 page
       * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
       */
      #define KMALLOC_SHIFT_HIGH        (PAGE_SHIFT + 1)
      #define KMALLOC_SHIFT_MAX        (MAX_ORDER + PAGE_SHIFT - 1)
      #ifndef KMALLOC_SHIFT_LOW
      #define KMALLOC_SHIFT_LOW        3
      #endif
      #endif
      
      #ifdef CONFIG_SLOB
      /*
       * SLOB passes all requests larger than one page to the page allocator.
       * No kmalloc array is necessary since objects of different sizes can
       * be allocated from the same page.
       */
      #define KMALLOC_SHIFT_HIGH        PAGE_SHIFT
      #define KMALLOC_SHIFT_MAX        (MAX_ORDER + PAGE_SHIFT - 1)
      #ifndef KMALLOC_SHIFT_LOW
      #define KMALLOC_SHIFT_LOW        3
      #endif
      #endif
      
      /* Maximum allocatable size */
      #define KMALLOC_MAX_SIZE        (1UL << KMALLOC_SHIFT_MAX)
      /* Maximum size for which we actually use a slab cache */
      #define KMALLOC_MAX_CACHE_SIZE        (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
      #define KMALLOC_MAX_ORDER        (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
      
      /*
       * Kmalloc subsystem.
       */
      #ifndef KMALLOC_MIN_SIZE
      #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
      #endif
      
      /*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using a byte sized index.
       */
      #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                                     (KMALLOC_MIN_SIZE) : 16)
      
      /*
 * Whenever changing this, take care that kmalloc_type() and
       * create_kmalloc_caches() still work as intended.
       */
      enum kmalloc_cache_type {
              KMALLOC_NORMAL = 0,
              KMALLOC_RECLAIM,
      #ifdef CONFIG_ZONE_DMA
              KMALLOC_DMA,
      #endif
              NR_KMALLOC_TYPES
      };
      
      #ifndef CONFIG_SLOB
      extern struct kmem_cache *
      kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
      
      static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
      {
      #ifdef CONFIG_ZONE_DMA
              /*
               * The most common case is KMALLOC_NORMAL, so test for it
               * with a single branch for both flags.
               */
        if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
                      return KMALLOC_NORMAL;
      
              /*
               * At least one of the flags has to be set. If both are, __GFP_DMA
               * is more important.
               */
              return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
      #else
              return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
      #endif
      }
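
/*
 * For example, with CONFIG_ZONE_DMA enabled:
 *
 *  kmalloc_type(GFP_KERNEL)                     == KMALLOC_NORMAL
 *  kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE) == KMALLOC_RECLAIM
 *  kmalloc_type(GFP_KERNEL | __GFP_DMA)         == KMALLOC_DMA
 */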
      
      /*
       * Figure out which kmalloc slab an allocation of a certain size
       * belongs to.
       * 0 = zero alloc
       * 1 =  65 .. 96 bytes
       * 2 = 129 .. 192 bytes
       * n = 2^(n-1)+1 .. 2^n
       */
      static __always_inline unsigned int kmalloc_index(size_t size)
      {
              if (!size)
                      return 0;
      
              if (size <= KMALLOC_MIN_SIZE)
                      return KMALLOC_SHIFT_LOW;
      
              if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
                      return 1;
              if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
                      return 2;
              if (size <=          8) return 3;
              if (size <=         16) return 4;
              if (size <=         32) return 5;
              if (size <=         64) return 6;
              if (size <=        128) return 7;
              if (size <=        256) return 8;
              if (size <=        512) return 9;
              if (size <=       1024) return 10;
              if (size <=   2 * 1024) return 11;
              if (size <=   4 * 1024) return 12;
              if (size <=   8 * 1024) return 13;
              if (size <=  16 * 1024) return 14;
              if (size <=  32 * 1024) return 15;
              if (size <=  64 * 1024) return 16;
              if (size <= 128 * 1024) return 17;
              if (size <= 256 * 1024) return 18;
              if (size <= 512 * 1024) return 19;
              if (size <= 1024 * 1024) return 20;
              if (size <=  2 * 1024 * 1024) return 21;
              if (size <=  4 * 1024 * 1024) return 22;
              if (size <=  8 * 1024 * 1024) return 23;
              if (size <=  16 * 1024 * 1024) return 24;
              if (size <=  32 * 1024 * 1024) return 25;
              if (size <=  64 * 1024 * 1024) return 26;
              BUG();
      
              /* Will never be reached. Needed because the compiler may complain */
              return -1;
      }
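
/*
 * For example, with KMALLOC_MIN_SIZE == 8:
 *
 *  kmalloc_index(72)  == 1    (96-byte cache)
 *  kmalloc_index(100) == 7    (128-byte cache)
 *  kmalloc_index(200) == 8    (256-byte cache)
 */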
      #endif /* !CONFIG_SLOB */
      
      void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
      void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
      void kmem_cache_free(struct kmem_cache *, void *);
      
      /*
       * Bulk allocation and freeing operations. These are accelerated in an
       * allocator specific way to avoid taking locks repeatedly or building
       * metadata structures unnecessarily.
       *
       * Note that interrupts must be enabled when calling these functions.
       */
      void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
      int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
      
      /*
       * Caller must not use kfree_bulk() on memory not originally allocated
       * by kmalloc(), because the SLOB allocator cannot handle this.
       */
      static __always_inline void kfree_bulk(size_t size, void **p)
      {
              kmem_cache_free_bulk(NULL, size, p);
      }
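
/*
 * A minimal usage sketch (process context, interrupts enabled; the cache
 * name is illustrative):
 *
 *  void *objs[16];
 *  int got;
 *
 *  got = kmem_cache_alloc_bulk(my_cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *  if (!got)
 *    return -ENOMEM;
 *  ...
 *  kmem_cache_free_bulk(my_cachep, got, objs);
 */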
      
      #ifdef CONFIG_NUMA
      void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
      void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
      #else
      static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
      {
              return __kmalloc(size, flags);
      }
      
      static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
      {
              return kmem_cache_alloc(s, flags);
      }
      #endif
      
      #ifdef CONFIG_TRACING
      extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
      
      #ifdef CONFIG_NUMA
      extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                                 gfp_t gfpflags,
                                                 int node, size_t size) __assume_slab_alignment __malloc;
      #else
      static __always_inline void *
      kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                    gfp_t gfpflags,
                                    int node, size_t size)
      {
              return kmem_cache_alloc_trace(s, gfpflags, size);
      }
      #endif /* CONFIG_NUMA */
      
      #else /* CONFIG_TRACING */
      static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
                      gfp_t flags, size_t size)
      {
              void *ret = kmem_cache_alloc(s, flags);
      
              ret = kasan_kmalloc(s, ret, size, flags);
              return ret;
      }
      
      static __always_inline void *
      kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                    gfp_t gfpflags,
                                    int node, size_t size)
      {
              void *ret = kmem_cache_alloc_node(s, gfpflags, node);
      
              ret = kasan_kmalloc(s, ret, size, gfpflags);
              return ret;
      }
      #endif /* CONFIG_TRACING */
      
      extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
      
      #ifdef CONFIG_TRACING
      extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
      #else
      static __always_inline void *
      kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
      {
              return kmalloc_order(size, flags, order);
      }
      #endif
      
      static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
      {
              unsigned int order = get_order(size);
              return kmalloc_order_trace(size, flags, order);
      }
      
      /**
       * kmalloc - allocate memory
       * @size: how many bytes of memory are required.
       * @flags: the type of memory to allocate.
       *
       * kmalloc is the normal method of allocating memory
       * for objects smaller than page size in the kernel.
       *
       * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For a @size that is a power of two, the alignment is also
 * guaranteed to be at least @size.
       *
       * The @flags argument may be one of the GFP flags defined at
       * include/linux/gfp.h and described at
       * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
       *
       * The recommended usage of the @flags is described at
       * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
       *
       * Below is a brief outline of the most useful GFP flags
       *
       * %GFP_KERNEL
 *        Allocate normal kernel RAM. May sleep.
       *
       * %GFP_NOWAIT
       *        Allocation will not sleep.
       *
       * %GFP_ATOMIC
       *        Allocation will not sleep.  May use emergency pools.
       *
       * %GFP_HIGHUSER
       *        Allocate memory from high memory on behalf of user.
       *
       * Also it is possible to set different flags by OR'ing
       * in one or more of the following additional @flags:
       *
       * %__GFP_HIGH
       *        This allocation has high priority and may use emergency pools.
       *
       * %__GFP_NOFAIL
       *        Indicate that this allocation is in no way allowed to fail
       *        (think twice before using).
       *
       * %__GFP_NORETRY
       *        If memory is not immediately available,
       *        then give up at once.
       *
       * %__GFP_NOWARN
       *        If allocation fails, don't issue any warnings.
       *
       * %__GFP_RETRY_MAYFAIL
 *        Try really hard to satisfy the allocation, but eventually
 *        give up and fail.
       */
      static __always_inline void *kmalloc(size_t size, gfp_t flags)
      {
              if (__builtin_constant_p(size)) {
      #ifndef CONFIG_SLOB
                      unsigned int index;
      #endif
                      if (size > KMALLOC_MAX_CACHE_SIZE)
                              return kmalloc_large(size, flags);
      #ifndef CONFIG_SLOB
                      index = kmalloc_index(size);
      
                      if (!index)
                              return ZERO_SIZE_PTR;
      
                return kmem_cache_alloc_trace(
                                kmalloc_caches[kmalloc_type(flags)][index],
                                      flags, size);
      #endif
              }
        return __kmalloc(size, flags);
      }
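
/*
 * Typical usage (the struct name is illustrative):
 *
 *  struct my_ctx *ctx;
 *
 *  ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 *  if (!ctx)
 *    return -ENOMEM;
 *  ...
 *  kfree(ctx);
 */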
      
      static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
      {
      #ifndef CONFIG_SLOB
              if (__builtin_constant_p(size) &&
                      size <= KMALLOC_MAX_CACHE_SIZE) {
                      unsigned int i = kmalloc_index(size);
      
                      if (!i)
                              return ZERO_SIZE_PTR;
      
                return kmem_cache_alloc_node_trace(
                                      kmalloc_caches[kmalloc_type(flags)][i],
                                                      flags, node, size);
              }
      #endif
        return __kmalloc_node(size, flags, node);
      }
      
      int memcg_update_all_caches(int num_memcgs);
      
      /**
       * kmalloc_array - allocate memory for an array.
       * @n: number of elements.
       * @size: element size.
       * @flags: the type of memory to allocate (see kmalloc).
       */
      static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
      {
              size_t bytes;
      
        if (unlikely(check_mul_overflow(n, size, &bytes)))
                      return NULL;
              if (__builtin_constant_p(n) && __builtin_constant_p(size))
                return kmalloc(bytes, flags);
        return __kmalloc(bytes, flags);
      }
      
      /**
       * kcalloc - allocate memory for an array. The memory is set to zero.
       * @n: number of elements.
       * @size: element size.
       * @flags: the type of memory to allocate (see kmalloc).
       */
      static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
      {
        return kmalloc_array(n, size, flags | __GFP_ZERO);
      }
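
/*
 * Both helpers reject a multiplication overflow (names illustrative):
 *
 *  entries = kmalloc_array(n, sizeof(*entries), GFP_KERNEL);
 *  table   = kcalloc(n, sizeof(*table), GFP_KERNEL);   // zeroed
 *
 * Either returns NULL if n * size overflows, unlike an open-coded
 * kmalloc(n * size, ...).
 */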
      
      /*
 * kmalloc_track_caller is a special version of kmalloc that, for slab leak
 * tracking, records the caller of the routine invoking it rather than the
 * routine itself (confusing, eh?).
       * It's useful when the call to kmalloc comes from a widely-used standard
       * allocator where we care about the real place the memory allocation
       * request comes from.
       */
      extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
      #define kmalloc_track_caller(size, flags) \
              __kmalloc_track_caller(size, flags, _RET_IP_)
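
/*
 * A minimal sketch of the intended use (hypothetical wrapper): leak reports
 * will point at the caller of my_strdup() instead of my_strdup() itself.
 *
 *  char *my_strdup(const char *s, gfp_t gfp)
 *  {
 *    size_t len = strlen(s) + 1;
 *    char *buf = kmalloc_track_caller(len, gfp);
 *
 *    if (buf)
 *      memcpy(buf, s, len);
 *    return buf;
 *  }
 */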
      
      static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
                                             int node)
      {
              size_t bytes;
      
              if (unlikely(check_mul_overflow(n, size, &bytes)))
                      return NULL;
              if (__builtin_constant_p(n) && __builtin_constant_p(size))
                      return kmalloc_node(bytes, flags, node);
              return __kmalloc_node(bytes, flags, node);
      }
      
      static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
      {
              return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
      }
      
      
      #ifdef CONFIG_NUMA
      extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
      #define kmalloc_node_track_caller(size, flags, node) \
              __kmalloc_node_track_caller(size, flags, node, \
                              _RET_IP_)
      
      #else /* CONFIG_NUMA */
      
      #define kmalloc_node_track_caller(size, flags, node) \
              kmalloc_track_caller(size, flags)
      
      #endif /* CONFIG_NUMA */
      
      /*
       * Shortcuts
       */
      static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
      {
        return kmem_cache_alloc(k, flags | __GFP_ZERO);
      }
      
      /**
       * kzalloc - allocate memory. The memory is set to zero.
       * @size: how many bytes of memory are required.
       * @flags: the type of memory to allocate (see kmalloc).
       */
      static inline void *kzalloc(size_t size, gfp_t flags)
      {
        return kmalloc(size, flags | __GFP_ZERO);
      }
      
      /**
       * kzalloc_node - allocate zeroed memory from a particular memory node.
       * @size: how many bytes of memory are required.
       * @flags: the type of memory to allocate (see kmalloc).
       * @node: memory node from which to allocate
       */
      static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
      {
        return kmalloc_node(size, flags | __GFP_ZERO, node);
      }
      
      unsigned int kmem_cache_size(struct kmem_cache *s);
      void __init kmem_cache_init_late(void);
      
      #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
      int slab_prepare_cpu(unsigned int cpu);
      int slab_dead_cpu(unsigned int cpu);
      #else
      #define slab_prepare_cpu        NULL
      #define slab_dead_cpu                NULL
      #endif
      
      #endif        /* _LINUX_SLAB_H */
      // SPDX-License-Identifier: GPL-2.0-or-later
      /* SCTP kernel implementation
       * (C) Copyright Red Hat Inc. 2017
       *
       * This file is part of the SCTP kernel implementation
       *
 * These functions implement SCTP stream message interleaving, mostly
 * the processing of I-DATA and I-FORWARD-TSN chunks.
       *
       * Please send any bug reports or fixes you make to the
 * email address(es):
       *    lksctp developers <linux-sctp@vger.kernel.org>
       *
       * Written or modified by:
       *    Xin Long <lucien.xin@gmail.com>
       */
      
      #include <net/busy_poll.h>
      #include <net/sctp/sctp.h>
      #include <net/sctp/sm.h>
      #include <net/sctp/ulpevent.h>
      #include <linux/sctp.h>
      
      static struct sctp_chunk *sctp_make_idatafrag_empty(
                                              const struct sctp_association *asoc,
                                              const struct sctp_sndrcvinfo *sinfo,
                                              int len, __u8 flags, gfp_t gfp)
      {
              struct sctp_chunk *retval;
              struct sctp_idatahdr dp;
      
              memset(&dp, 0, sizeof(dp));
              dp.stream = htons(sinfo->sinfo_stream);
      
              if (sinfo->sinfo_flags & SCTP_UNORDERED)
                      flags |= SCTP_DATA_UNORDERED;
      
              retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
              if (!retval)
                      return NULL;
      
              retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
              memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
      
              return retval;
      }
      
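/* Assign a Message Identifier (MID) to every fragment of the chunk's
 * message: the first fragment carries the PPID, the others carry a
 * Fragment Sequence Number (FSN), and the stream's MID counter only
 * advances once the last fragment has been seen.
 */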
      static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
      {
              struct sctp_stream *stream;
              struct sctp_chunk *lchunk;
              __u32 cfsn = 0;
              __u16 sid;
      
              if (chunk->has_mid)
                      return;
      
              sid = sctp_chunk_stream_no(chunk);
              stream = &chunk->asoc->stream;
      
              list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
                      struct sctp_idatahdr *hdr;
                      __u32 mid;
      
                      lchunk->has_mid = 1;
      
                      hdr = lchunk->subh.idata_hdr;
      
                      if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
                              hdr->ppid = lchunk->sinfo.sinfo_ppid;
                      else
                              hdr->fsn = htonl(cfsn++);
      
                      if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
                              mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
                                      sctp_mid_uo_next(stream, out, sid) :
                                      sctp_mid_uo_peek(stream, out, sid);
                      } else {
                              mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
                                      sctp_mid_next(stream, out, sid) :
                                      sctp_mid_peek(stream, out, sid);
                      }
                      hdr->mid = htonl(mid);
              }
      }
      
      static bool sctp_validate_data(struct sctp_chunk *chunk)
      {
              struct sctp_stream *stream;
              __u16 sid, ssn;
      
              if (chunk->chunk_hdr->type != SCTP_CID_DATA)
                      return false;
      
              if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
                      return true;
      
              stream = &chunk->asoc->stream;
              sid = sctp_chunk_stream_no(chunk);
              ssn = ntohs(chunk->subh.data_hdr->ssn);
      
              return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
      }
      
      static bool sctp_validate_idata(struct sctp_chunk *chunk)
      {
              struct sctp_stream *stream;
              __u32 mid;
              __u16 sid;
      
              if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
                      return false;
      
              if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
                      return true;
      
              stream = &chunk->asoc->stream;
              sid = sctp_chunk_stream_no(chunk);
              mid = ntohl(chunk->subh.idata_hdr->mid);
      
              return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
      }
      
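/* Insert the event into the reassembly queue, which is kept sorted by
 * stream id, then MID, then FSN (a FIRST_FRAG sorts ahead of the later
 * fragments of the same message).
 */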
      static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
                                        struct sctp_ulpevent *event)
      {
              struct sctp_ulpevent *cevent;
              struct sk_buff *pos, *loc;
      
              pos = skb_peek_tail(&ulpq->reasm);
              if (!pos) {
                      __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                      return;
              }
      
              cevent = sctp_skb2event(pos);
      
              if (event->stream == cevent->stream &&
                  event->mid == cevent->mid &&
                  (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
                   (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
                    event->fsn > cevent->fsn))) {
                      __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                      return;
              }
      
              if ((event->stream == cevent->stream &&
                   MID_lt(cevent->mid, event->mid)) ||
                  event->stream > cevent->stream) {
                      __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                      return;
              }
      
              loc = NULL;
              skb_queue_walk(&ulpq->reasm, pos) {
                      cevent = sctp_skb2event(pos);
      
                      if (event->stream < cevent->stream ||
                          (event->stream == cevent->stream &&
                           MID_lt(event->mid, cevent->mid))) {
                              loc = pos;
                              break;
                      }
                      if (event->stream == cevent->stream &&
                          event->mid == cevent->mid &&
                          !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
                          (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
                           event->fsn < cevent->fsn)) {
                              loc = pos;
                              break;
                      }
              }
      
              if (!loc)
                      __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
              else
                      __skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
      }
      
      static struct sctp_ulpevent *sctp_intl_retrieve_partial(
                                                      struct sctp_ulpq *ulpq,
                                                      struct sctp_ulpevent *event)
      {
              struct sk_buff *first_frag = NULL;
              struct sk_buff *last_frag = NULL;
              struct sctp_ulpevent *retval;
              struct sctp_stream_in *sin;
              struct sk_buff *pos;
              __u32 next_fsn = 0;
              int is_last = 0;
      
              sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
      
              skb_queue_walk(&ulpq->reasm, pos) {
                      struct sctp_ulpevent *cevent = sctp_skb2event(pos);
      
                      if (cevent->stream < event->stream)
                              continue;
      
                      if (cevent->stream > event->stream ||
                          cevent->mid != sin->mid)
                              break;
      
                      switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                      case SCTP_DATA_FIRST_FRAG:
                              goto out;
                      case SCTP_DATA_MIDDLE_FRAG:
                              if (!first_frag) {
                                      if (cevent->fsn == sin->fsn) {
                                              first_frag = pos;
                                              last_frag = pos;
                                              next_fsn = cevent->fsn + 1;
                                      }
                              } else if (cevent->fsn == next_fsn) {
                                      last_frag = pos;
                                      next_fsn++;
                              } else {
                                      goto out;
                              }
                              break;
                      case SCTP_DATA_LAST_FRAG:
                              if (!first_frag) {
                                      if (cevent->fsn == sin->fsn) {
                                              first_frag = pos;
                                              last_frag = pos;
                                              next_fsn = 0;
                                              is_last = 1;
                                      }
                              } else if (cevent->fsn == next_fsn) {
                                      last_frag = pos;
                                      next_fsn = 0;
                                      is_last = 1;
                              }
                              goto out;
                      default:
                              goto out;
                      }
              }
      
      out:
              if (!first_frag)
                      return NULL;
      
              retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
                                                   first_frag, last_frag);
              if (retval) {
                      sin->fsn = next_fsn;
                      if (is_last) {
                              retval->msg_flags |= MSG_EOR;
                              sin->pd_mode = 0;
                      }
              }
      
              return retval;
      }
      
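/* Try to assemble a complete message for the event's stream and MID from
 * the reassembly queue; if it is still incomplete but the accumulated
 * length has reached the partial delivery point, start partial delivery
 * instead.
 */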
      static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
                                                      struct sctp_ulpq *ulpq,
                                                      struct sctp_ulpevent *event)
      {
              struct sctp_association *asoc = ulpq->asoc;
              struct sk_buff *pos, *first_frag = NULL;
              struct sctp_ulpevent *retval = NULL;
              struct sk_buff *pd_first = NULL;
              struct sk_buff *pd_last = NULL;
              struct sctp_stream_in *sin;
              __u32 next_fsn = 0;
              __u32 pd_point = 0;
              __u32 pd_len = 0;
              __u32 mid = 0;
      
              sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
      
              skb_queue_walk(&ulpq->reasm, pos) {
                      struct sctp_ulpevent *cevent = sctp_skb2event(pos);
      
                      if (cevent->stream < event->stream)
                              continue;
                      if (cevent->stream > event->stream)
                              break;
      
                      if (MID_lt(cevent->mid, event->mid))
                              continue;
                      if (MID_lt(event->mid, cevent->mid))
                              break;
      
                      switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                      case SCTP_DATA_FIRST_FRAG:
                              if (cevent->mid == sin->mid) {
                                      pd_first = pos;
                                      pd_last = pos;
                                      pd_len = pos->len;
                              }
      
                              first_frag = pos;
                              next_fsn = 0;
                              mid = cevent->mid;
                              break;
      
                      case SCTP_DATA_MIDDLE_FRAG:
                              if (first_frag && cevent->mid == mid &&
                                  cevent->fsn == next_fsn) {
                                      next_fsn++;
                                      if (pd_first) {
                                              pd_last = pos;
                                              pd_len += pos->len;
                                      }
                              } else {
                                      first_frag = NULL;
                              }
                              break;
      
                      case SCTP_DATA_LAST_FRAG:
                              if (first_frag && cevent->mid == mid &&
                                  cevent->fsn == next_fsn)
                                      goto found;
                              else
                                      first_frag = NULL;
                              break;
                      }
              }
      
              if (!pd_first)
                      goto out;
      
              pd_point = sctp_sk(asoc->base.sk)->pd_point;
              if (pd_point && pd_point <= pd_len) {
                      retval = sctp_make_reassembled_event(asoc->base.net,
                                                           &ulpq->reasm,
                                                           pd_first, pd_last);
                      if (retval) {
                              sin->fsn = next_fsn;
                              sin->pd_mode = 1;
                      }
              }
              goto out;
      
      found:
              retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
                                                   first_frag, pos);
              if (retval)
                      retval->msg_flags |= MSG_EOR;
      
      out:
              return retval;
      }
      
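/* Reassembly entry point for ordered I-DATA: unfragmented events are
 * delivered directly, fragments are queued, and we then try to retrieve
 * either the continuation of an in-progress partial delivery or a fully
 * reassembled message.
 */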
      static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
                                                   struct sctp_ulpevent *event)
      {
              struct sctp_ulpevent *retval = NULL;
              struct sctp_stream_in *sin;
      
              if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                      event->msg_flags |= MSG_EOR;
                      return event;
              }
      
              sctp_intl_store_reasm(ulpq, event);
      
              sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
              if (sin->pd_mode && event->mid == sin->mid &&
                  event->fsn == sin->fsn)
                      retval = sctp_intl_retrieve_partial(ulpq, event);
      
              if (!retval)
                      retval = sctp_intl_retrieve_reassembled(ulpq, event);
      
              return retval;
      }
      
      static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
                                          struct sctp_ulpevent *event)
      {
              struct sctp_ulpevent *cevent;
              struct sk_buff *pos, *loc;
      
              pos = skb_peek_tail(&ulpq->lobby);
              if (!pos) {
                      __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                      return;
              }
      
              cevent = (struct sctp_ulpevent *)pos->cb;
              if (event->stream == cevent->stream &&
                  MID_lt(cevent->mid, event->mid)) {
                      __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                      return;
              }
      
              if (event->stream > cevent->stream) {
                      __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                      return;
              }
      
              loc = NULL;
              skb_queue_walk(&ulpq->lobby, pos) {
                      cevent = (struct sctp_ulpevent *)pos->cb;
      
                      if (cevent->stream > event->stream) {
                              loc = pos;
                              break;
                      }
                      if (cevent->stream == event->stream &&
                          MID_lt(event->mid, cevent->mid)) {
                              loc = pos;
                              break;
                      }
              }
      
              if (!loc)
                      __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
              else
                      __skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
      }
      
      static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
      {
              struct sk_buff_head *event_list;
              struct sctp_stream *stream;
              struct sk_buff *pos, *tmp;
              __u16 sid = event->stream;
      
              stream  = &ulpq->asoc->stream;
              event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
      
              sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                      struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
      
                      if (cevent->stream > sid)
                              break;
      
                      if (cevent->stream < sid)
                              continue;
      
                      if (cevent->mid != sctp_mid_peek(stream, in, sid))
                              break;
      
                      sctp_mid_next(stream, in, sid);
      
                      __skb_unlink(pos, &ulpq->lobby);
      
                      __skb_queue_tail(event_list, pos);
              }
      }
      
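/* Deliver the event if it carries the next expected MID for its stream,
 * pulling any now-in-order events out of the lobby as well; otherwise
 * park it in the lobby.
 */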
      static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
                                                   struct sctp_ulpevent *event)
      {
              struct sctp_stream *stream;
              __u16 sid;
      
              stream  = &ulpq->asoc->stream;
              sid = event->stream;
      
              if (event->mid != sctp_mid_peek(stream, in, sid)) {
                      sctp_intl_store_ordered(ulpq, event);
                      return NULL;
              }
      
              sctp_mid_next(stream, in, sid);
      
              sctp_intl_retrieve_ordered(ulpq, event);
      
              return event;
      }
      
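/* Move the events on skb_list to the socket receive queue and wake the
 * reader, unless the socket is shutting down or the event type is not
 * subscribed, in which case the events are freed.
 */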
      static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
                                    struct sk_buff_head *skb_list)
      {
              struct sock *sk = ulpq->asoc->base.sk;
              struct sctp_sock *sp = sctp_sk(sk);
              struct sctp_ulpevent *event;
              struct sk_buff *skb;
      
              skb = __skb_peek(skb_list);
              event = sctp_skb2event(skb);
      
              if (sk->sk_shutdown & RCV_SHUTDOWN &&
                  (sk->sk_shutdown & SEND_SHUTDOWN ||
                   !sctp_ulpevent_is_notification(event)))
                      goto out_free;
      
              if (!sctp_ulpevent_is_notification(event)) {
                      sk_mark_napi_id(sk, skb);
                      sk_incoming_cpu_update(sk);
              }
      
              if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
                      goto out_free;
      
              if (skb_list)
                      skb_queue_splice_tail_init(skb_list,
                                                 &sk->sk_receive_queue);
              else
                      __skb_queue_tail(&sk->sk_receive_queue, skb);
      
              if (!sp->data_ready_signalled) {
                      sp->data_ready_signalled = 1;
                      sk->sk_data_ready(sk);
              }
      
              return 1;
      
      out_free:
              if (skb_list)
                      sctp_queue_purge_ulpevents(skb_list);
              else
                      sctp_ulpevent_free(event);
      
              return 0;
      }
      
      static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
                                           struct sctp_ulpevent *event)
      {
              struct sctp_ulpevent *cevent;
              struct sk_buff *pos;
      
              pos = skb_peek_tail(&ulpq->reasm_uo);
              if (!pos) {
                      __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
                      return;
              }
      
              cevent = sctp_skb2event(pos);
      
              if (event->stream == cevent->stream &&
                  event->mid == cevent->mid &&
                  (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
                   (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
                    event->fsn > cevent->fsn))) {
                      __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
                      return;
              }
      
              if ((event->stream == cevent->stream &&
                   MID_lt(cevent->mid, event->mid)) ||
                  event->stream > cevent->stream) {
                      __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
                      return;
              }
      
              skb_queue_walk(&ulpq->reasm_uo, pos) {
                      cevent = sctp_skb2event(pos);
      
                      if (event->stream < cevent->stream ||
                          (event->stream == cevent->stream &&
                           MID_lt(event->mid, cevent->mid)))
                              break;
      
                      if (event->stream == cevent->stream &&
                          event->mid == cevent->mid &&
                          !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
                          (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
                           event->fsn < cevent->fsn))
                              break;
              }
      
              __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
      }
      
      static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
                                                      struct sctp_ulpq *ulpq,
                                                      struct sctp_ulpevent *event)
      {
              struct sk_buff *first_frag = NULL;
              struct sk_buff *last_frag = NULL;
              struct sctp_ulpevent *retval;
              struct sctp_stream_in *sin;
              struct sk_buff *pos;
              __u32 next_fsn = 0;
              int is_last = 0;
      
              sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
      
              skb_queue_walk(&ulpq->reasm_uo, pos) {
                      struct sctp_ulpevent *cevent = sctp_skb2event(pos);
      
                      if (cevent->stream < event->stream)
                              continue;
                      if (cevent->stream > event->stream)
                              break;
      
                      if (MID_lt(cevent->mid, sin->mid_uo))
                              continue;
                      if (MID_lt(sin->mid_uo, cevent->mid))
                              break;
      
                      switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                      case SCTP_DATA_FIRST_FRAG:
                              goto out;
                      case SCTP_DATA_MIDDLE_FRAG:
                              if (!first_frag) {
                                      if (cevent->fsn == sin->fsn_uo) {
                                              first_frag = pos;
                                              last_frag = pos;
                                              next_fsn = cevent->fsn + 1;
                                      }
                              } else if (cevent->fsn == next_fsn) {
                                      last_frag = pos;
                                      next_fsn++;
                              } else {
                                      goto out;
                              }
                              break;
                      case SCTP_DATA_LAST_FRAG:
                              if (!first_frag) {
                                      if (cevent->fsn == sin->fsn_uo) {
                                              first_frag = pos;
                                              last_frag = pos;
                                              next_fsn = 0;
                                              is_last = 1;
                                      }
                              } else if (cevent->fsn == next_fsn) {
                                      last_frag = pos;
                                      next_fsn = 0;
                                      is_last = 1;
                              }
                              goto out;
                      default:
                              goto out;
                      }
              }
      
      out:
              if (!first_frag)
                      return NULL;
      
              retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
                                                   &ulpq->reasm_uo, first_frag,
                                                   last_frag);
              if (retval) {
                      sin->fsn_uo = next_fsn;
                      if (is_last) {
                              retval->msg_flags |= MSG_EOR;
                              sin->pd_mode_uo = 0;
                      }
              }
      
              return retval;
      }
      
      static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
                                                      struct sctp_ulpq *ulpq,
                                                      struct sctp_ulpevent *event)
      {
              struct sctp_association *asoc = ulpq->asoc;
              struct sk_buff *pos, *first_frag = NULL;
              struct sctp_ulpevent *retval = NULL;
              struct sk_buff *pd_first = NULL;
              struct sk_buff *pd_last = NULL;
              struct sctp_stream_in *sin;
              __u32 next_fsn = 0;
              __u32 pd_point = 0;
              __u32 pd_len = 0;
              __u32 mid = 0;
      
              sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
      
              skb_queue_walk(&ulpq->reasm_uo, pos) {
                      struct sctp_ulpevent *cevent = sctp_skb2event(pos);
      
                      if (cevent->stream < event->stream)
                              continue;
                      if (cevent->stream > event->stream)
                              break;
      
                      if (MID_lt(cevent->mid, event->mid))
                              continue;
                      if (MID_lt(event->mid, cevent->mid))
                              break;
      
                      switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                      case SCTP_DATA_FIRST_FRAG:
                              if (!sin->pd_mode_uo) {
                                      sin->mid_uo = cevent->mid;
                                      pd_first = pos;
                                      pd_last = pos;
                                      pd_len = pos->len;
                              }
      
                              first_frag = pos;
                              next_fsn = 0;
                              mid = cevent->mid;
                              break;
      
                      case SCTP_DATA_MIDDLE_FRAG:
                              if (first_frag && cevent->mid == mid &&
                                  cevent->fsn == next_fsn) {
                                      next_fsn++;
                                      if (pd_first) {
                                              pd_last = pos;
                                              pd_len += pos->len;
                                      }
                              } else {
                                      first_frag = NULL;
                              }
                              break;
      
                      case SCTP_DATA_LAST_FRAG:
                              if (first_frag && cevent->mid == mid &&
                                  cevent->fsn == next_fsn)
                                      goto found;
                              else
                                      first_frag = NULL;
                              break;
                      }
              }
      
              if (!pd_first)
                      goto out;
      
              pd_point = sctp_sk(asoc->base.sk)->pd_point;
              if (pd_point && pd_point <= pd_len) {
                      retval = sctp_make_reassembled_event(asoc->base.net,
                                                           &ulpq->reasm_uo,
                                                           pd_first, pd_last);
                      if (retval) {
                              sin->fsn_uo = next_fsn;
                              sin->pd_mode_uo = 1;
                      }
              }
              goto out;
      
      found:
              retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
                                                   first_frag, pos);
              if (retval)
                      retval->msg_flags |= MSG_EOR;
      
      out:
              return retval;
      }
      
      static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
                                                      struct sctp_ulpevent *event)
      {
              struct sctp_ulpevent *retval = NULL;
              struct sctp_stream_in *sin;
      
              if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                      event->msg_flags |= MSG_EOR;
                      return event;
              }
      
              sctp_intl_store_reasm_uo(ulpq, event);
      
              sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
              if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
                  event->fsn == sin->fsn_uo)
                      retval = sctp_intl_retrieve_partial_uo(ulpq, event);
      
              if (!retval)
                      retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
      
              return retval;
      }
      
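/* Start partial delivery of unordered data: pick the first stream not
 * already in unordered PD mode, collect the leading in-sequence run of
 * fragments beginning with a FIRST_FRAG, deliver it and flag the stream
 * with pd_mode_uo.
 */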
      static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
      {
              struct sctp_stream_in *csin, *sin = NULL;
              struct sk_buff *first_frag = NULL;
              struct sk_buff *last_frag = NULL;
              struct sctp_ulpevent *retval;
              struct sk_buff *pos;
              __u32 next_fsn = 0;
              __u16 sid = 0;
      
              skb_queue_walk(&ulpq->reasm_uo, pos) {
                      struct sctp_ulpevent *cevent = sctp_skb2event(pos);
      
                      csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
                      if (csin->pd_mode_uo)
                              continue;
      
                      switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                      case SCTP_DATA_FIRST_FRAG:
                              if (first_frag)
                                      goto out;
                              first_frag = pos;
                              last_frag = pos;
                              next_fsn = 0;
                              sin = csin;
                              sid = cevent->stream;
                              sin->mid_uo = cevent->mid;
                              break;
                      case SCTP_DATA_MIDDLE_FRAG:
                              if (!first_frag)
                                      break;
                              if (cevent->stream == sid &&
                                  cevent->mid == sin->mid_uo &&
                                  cevent->fsn == next_fsn) {
                                      next_fsn++;
                                      last_frag = pos;
                              } else {
                                      goto out;
                              }
                              break;
                      case SCTP_DATA_LAST_FRAG:
                              if (first_frag)
                                      goto out;
                              break;
                      default:
                              break;
                      }
              }
      
              if (!first_frag)
                      return NULL;
      
      out:
              retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
                                                   &ulpq->reasm_uo, first_frag,
                                                   last_frag);
              if (retval) {
                      sin->fsn_uo = next_fsn;
                      sin->pd_mode_uo = 1;
              }
      
              return retval;
      }
      
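/* Process one I-DATA chunk: build a ulpevent, fill in the MID plus the
 * PPID (first fragment) or FSN (other fragments) from the I-DATA header,
 * run it through (un)ordered reassembly and ordering, and enqueue any
 * resulting event. Returns 1 if a complete message (MSG_EOR) was
 * delivered, 0 otherwise, or -ENOMEM on allocation failure.
 */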
      static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
                                     struct sctp_chunk *chunk, gfp_t gfp)
      {
              struct sctp_ulpevent *event;
              struct sk_buff_head temp;
              int event_eor = 0;
      
              event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
              if (!event)
                      return -ENOMEM;
      
              event->mid = ntohl(chunk->subh.idata_hdr->mid);
              if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
                      event->ppid = chunk->subh.idata_hdr->ppid;
              else
                      event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
      
              if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
                      event = sctp_intl_reasm(ulpq, event);
                      if (event) {
                              skb_queue_head_init(&temp);
                              __skb_queue_tail(&temp, sctp_event2skb(event));
      
                              if (event->msg_flags & MSG_EOR)
                                      event = sctp_intl_order(ulpq, event);
                      }
              } else {
                      event = sctp_intl_reasm_uo(ulpq, event);
                      if (event) {
                              skb_queue_head_init(&temp);
                              __skb_queue_tail(&temp, sctp_event2skb(event));
                      }
              }
      
              if (event) {
                      event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                      sctp_enqueue_event(ulpq, &temp);
              }
      
              return event_eor;
      }
      
      static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
      {
              struct sctp_stream_in *csin, *sin = NULL;
              struct sk_buff *first_frag = NULL;
              struct sk_buff *last_frag = NULL;
              struct sctp_ulpevent *retval;
              struct sk_buff *pos;
              __u32 next_fsn = 0;
              __u16 sid = 0;
      
              skb_queue_walk(&ulpq->reasm, pos) {
                      struct sctp_ulpevent *cevent = sctp_skb2event(pos);
      
                      csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
                      if (csin->pd_mode)
                              continue;
      
                      switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                      case SCTP_DATA_FIRST_FRAG:
                              if (first_frag)
                                      goto out;
                              if (cevent->mid == csin->mid) {
                                      first_frag = pos;
                                      last_frag = pos;
                                      next_fsn = 0;
                                      sin = csin;
                                      sid = cevent->stream;
                              }
                              break;
                      case SCTP_DATA_MIDDLE_FRAG:
                              if (!first_frag)
                                      break;
                              if (cevent->stream == sid &&
                                  cevent->mid == sin->mid &&
                                  cevent->fsn == next_fsn) {
                                      next_fsn++;
                                      last_frag = pos;
                              } else {
                                      goto out;
                              }
                              break;
                      case SCTP_DATA_LAST_FRAG:
                              if (first_frag)
                                      goto out;
                              break;
                      default:
                              break;
                      }
              }
      
              if (!first_frag)
                      return NULL;
      
      out:
              retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
                                                   &ulpq->reasm, first_frag,
                                                   last_frag);
              if (retval) {
                      sin->fsn = next_fsn;
                      sin->pd_mode = 1;
              }
      
              return retval;
      }
      
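/* Force partial delivery: repeatedly pull the first retrievable run of
 * fragments from the ordered and unordered reassembly queues and hand
 * them to the socket.
 */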
      static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
      {
              struct sctp_ulpevent *event;
              struct sk_buff_head temp;
      
              if (!skb_queue_empty(&ulpq->reasm)) {
                      do {
                              event = sctp_intl_retrieve_first(ulpq);
                              if (event) {
                                      skb_queue_head_init(&temp);
                                      __skb_queue_tail(&temp, sctp_event2skb(event));
                                      sctp_enqueue_event(ulpq, &temp);
                              }
                      } while (event);
              }
      
              if (!skb_queue_empty(&ulpq->reasm_uo)) {
                      do {
                              event = sctp_intl_retrieve_first_uo(ulpq);
                              if (event) {
                                      skb_queue_head_init(&temp);
                                      __skb_queue_tail(&temp, sctp_event2skb(event));
                                      sctp_enqueue_event(ulpq, &temp);
                              }
                      } while (event);
              }
      }
      
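/* Renege previously queued data to make room for a newly arrived chunk:
 * while the receive queue is empty, free at least 'needed' bytes from the
 * lobby and both reassembly queues, then process the chunk and fall back
 * to partial delivery if no complete message was produced.
 */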
      static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                                     gfp_t gfp)
      {
              struct sctp_association *asoc = ulpq->asoc;
              __u32 freed = 0;
              __u16 needed;
      
              needed = ntohs(chunk->chunk_hdr->length) -
                       sizeof(struct sctp_idata_chunk);
      
              if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                      freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
                      if (freed < needed)
                              freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
                                                             needed);
                      if (freed < needed)
                              freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
                                                             needed);
              }
      
              if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
                      sctp_intl_start_pd(ulpq, gfp);
      
              sk_mem_reclaim(asoc->base.sk);
      }
      
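/* Queue an SCTP_PARTIAL_DELIVERY_ABORTED notification for (sid, mid) on
 * the socket receive queue, provided the user subscribed to partial
 * delivery events, and wake up the reader if needed.
 */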
      static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
                                            __u32 mid, __u16 flags, gfp_t gfp)
      {
              struct sock *sk = ulpq->asoc->base.sk;
              struct sctp_ulpevent *ev = NULL;
      
              if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
                                              SCTP_PARTIAL_DELIVERY_EVENT))
                      return;
      
              ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
                                            sid, mid, flags, gfp);
              if (ev) {
                      struct sctp_sock *sp = sctp_sk(sk);
      
                      __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
      
                      if (!sp->data_ready_signalled) {
                              sp->data_ready_signalled = 1;
                              sk->sk_data_ready(sk);
                      }
              }
      }
      
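/* After the expected MID of a stream has been advanced, move all lobby
 * events of that stream which are now deliverable (plus any further
 * in-order events) to the socket.
 */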
      static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
      {
              struct sctp_stream *stream = &ulpq->asoc->stream;
              struct sctp_ulpevent *cevent, *event = NULL;
              struct sk_buff_head *lobby = &ulpq->lobby;
              struct sk_buff *pos, *tmp;
              struct sk_buff_head temp;
              __u16 csid;
              __u32 cmid;
      
              skb_queue_head_init(&temp);
              sctp_skb_for_each(pos, lobby, tmp) {
                      cevent = (struct sctp_ulpevent *)pos->cb;
                      csid = cevent->stream;
                      cmid = cevent->mid;
      
                      if (csid > sid)
                              break;
      
                      if (csid < sid)
                              continue;
      
                      if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
                              break;
      
                      __skb_unlink(pos, lobby);
                      if (!event)
                              event = sctp_skb2event(pos);
      
                      __skb_queue_tail(&temp, pos);
              }
      
              if (!event && pos != (struct sk_buff *)lobby) {
                      cevent = (struct sctp_ulpevent *)pos->cb;
                      csid = cevent->stream;
                      cmid = cevent->mid;
      
                      if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
                              sctp_mid_next(stream, in, csid);
                              __skb_unlink(pos, lobby);
                              __skb_queue_tail(&temp, pos);
                              event = sctp_skb2event(pos);
                      }
              }
      
              if (event) {
                      sctp_intl_retrieve_ordered(ulpq, event);
                      sctp_enqueue_event(ulpq, &temp);
              }
      }
      
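/* Abort partial delivery on every incoming stream, both unordered and
 * ordered, notify the user, skip past the aborted MIDs and flush the
 * remaining queued data.
 */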
      static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
      {
              struct sctp_stream *stream = &ulpq->asoc->stream;
              __u16 sid;
      
              for (sid = 0; sid < stream->incnt; sid++) {
                      struct sctp_stream_in *sin = SCTP_SI(stream, sid);
                      __u32 mid;
      
                      if (sin->pd_mode_uo) {
                              sin->pd_mode_uo = 0;
      
                              mid = sin->mid_uo;
                              sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
                      }
      
                      if (sin->pd_mode) {
                              sin->pd_mode = 0;
      
                              mid = sin->mid;
                              sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
                              sctp_mid_skip(stream, in, sid, mid);
      
                              sctp_intl_reap_ordered(ulpq, sid);
                      }
              }
      
	/* An interleaving PD abort happens only when all data needs to be cleaned up */
              sctp_ulpq_flush(ulpq);
      }
      
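/* Return the index of an existing skip entry matching (stream, flags),
 * or nskips if there is none, so the caller can append a new entry.
 */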
      static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
                                          int nskips, __be16 stream, __u8 flags)
      {
              int i;
      
              for (i = 0; i < nskips; i++)
                      if (skiplist[i].stream == stream &&
                          skiplist[i].flags == flags)
                              return i;
      
              return i;
      }
      
      #define SCTP_FTSN_U_BIT        0x1
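/* Build an I-FORWARD-TSN chunk from the abandoned queue: free chunks
 * already covered by the cumulative TSN, advance adv_peer_ack_point past
 * newly abandoned TSNs and record up to 10 per-stream (sid, flags, mid)
 * skip entries for the receiver.
 */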
      static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
      {
              struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
              struct sctp_association *asoc = q->asoc;
              struct sctp_chunk *ftsn_chunk = NULL;
              struct list_head *lchunk, *temp;
              int nskips = 0, skip_pos;
              struct sctp_chunk *chunk;
              __u32 tsn;
      
              if (!asoc->peer.prsctp_capable)
                      return;
      
              if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
                      asoc->adv_peer_ack_point = ctsn;
      
              list_for_each_safe(lchunk, temp, &q->abandoned) {
                      chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
                      tsn = ntohl(chunk->subh.data_hdr->tsn);
      
                      if (TSN_lte(tsn, ctsn)) {
                              list_del_init(lchunk);
                              sctp_chunk_free(chunk);
                      } else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
                              __be16 sid = chunk->subh.idata_hdr->stream;
                              __be32 mid = chunk->subh.idata_hdr->mid;
                              __u8 flags = 0;
      
                              if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
                                      flags |= SCTP_FTSN_U_BIT;
      
                              asoc->adv_peer_ack_point = tsn;
                              skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
                                                           sid, flags);
                              ftsn_skip_arr[skip_pos].stream = sid;
                              ftsn_skip_arr[skip_pos].reserved = 0;
                              ftsn_skip_arr[skip_pos].flags = flags;
                              ftsn_skip_arr[skip_pos].mid = mid;
                              if (skip_pos == nskips)
                                      nskips++;
                              if (nskips == 10)
                                      break;
                      } else {
                              break;
                      }
              }
      
              if (asoc->adv_peer_ack_point > ctsn)
                      ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
                                                     nskips, &ftsn_skip_arr[0]);
      
              if (ftsn_chunk) {
                      list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
                      SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
              }
      }
      
      #define _sctp_walk_ifwdtsn(pos, chunk, end) \
              for (pos = chunk->subh.ifwdtsn_hdr->skip; \
                   (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
      
      #define sctp_walk_ifwdtsn(pos, ch) \
              _sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
                                              sizeof(struct sctp_ifwdtsn_chunk))
      
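/* The FORWARD-TSN/I-FORWARD-TSN validators check the chunk type and that
 * every skipped stream id is below the number of incoming streams.
 */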
      static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
      {
              struct sctp_fwdtsn_skip *skip;
              __u16 incnt;
      
              if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
                      return false;
      
              incnt = chunk->asoc->stream.incnt;
              sctp_walk_fwdtsn(skip, chunk)
                      if (ntohs(skip->stream) >= incnt)
                              return false;
      
              return true;
      }
      
      static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
      {
              struct sctp_ifwdtsn_skip *skip;
              __u16 incnt;
      
              if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
                      return false;
      
              incnt = chunk->asoc->stream.incnt;
              sctp_walk_ifwdtsn(skip, chunk)
                      if (ntohs(skip->stream) >= incnt)
                              return false;
      
              return true;
      }
      
      static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
      {
	/* Move the Cumulative TSN Ack ahead. */
              sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
              /* purge the fragmentation queue */
              sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
              /* Abort any in progress partial delivery. */
              sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
      }
      
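/* Drop every queued fragment, ordered and unordered, whose TSN is at or
 * below the forwarded cumulative TSN.
 */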
      static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
      {
              struct sk_buff *pos, *tmp;
      
              skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                      struct sctp_ulpevent *event = sctp_skb2event(pos);
                      __u32 tsn = event->tsn;
      
                      if (TSN_lte(tsn, ftsn)) {
                              __skb_unlink(pos, &ulpq->reasm);
                              sctp_ulpevent_free(event);
                      }
              }
      
              skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
                      struct sctp_ulpevent *event = sctp_skb2event(pos);
                      __u32 tsn = event->tsn;
      
                      if (TSN_lte(tsn, ftsn)) {
                              __skb_unlink(pos, &ulpq->reasm_uo);
                              sctp_ulpevent_free(event);
                      }
              }
      }
      
      static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
      {
	/* Move the Cumulative TSN Ack ahead. */
              sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
              /* purge the fragmentation queue */
              sctp_intl_reasm_flushtsn(ulpq, ftsn);
              /* abort only when it's for all data */
              if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
                      sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
      }
      
      static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
      {
              struct sctp_fwdtsn_skip *skip;
      
              /* Walk through all the skipped SSNs */
              sctp_walk_fwdtsn(skip, chunk)
                      sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
      }
      
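/* Handle one I-FORWARD-TSN skip entry: for unordered data just abort a
 * superseded unordered partial delivery; for ordered data abort partial
 * delivery if active, advance the expected MID and reap any lobby events
 * that became deliverable.
 */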
      static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
                                 __u8 flags)
      {
              struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
              struct sctp_stream *stream  = &ulpq->asoc->stream;
      
              if (flags & SCTP_FTSN_U_BIT) {
                      if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
                              sin->pd_mode_uo = 0;
                              sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
                                                        GFP_ATOMIC);
                      }
                      return;
              }
      
              if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
                      return;
      
              if (sin->pd_mode) {
                      sin->pd_mode = 0;
                      sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
              }
      
              sctp_mid_skip(stream, in, sid, mid);
      
              sctp_intl_reap_ordered(ulpq, sid);
      }
      
      static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
      {
              struct sctp_ifwdtsn_skip *skip;
      
              /* Walk through all the skipped MIDs and abort stream pd if possible */
              sctp_walk_ifwdtsn(skip, chunk)
                      sctp_intl_skip(ulpq, ntohs(skip->stream),
                                     ntohl(skip->mid), skip->flags);
      }
      
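/* Adapt the single-event enqueue_event callback to the sk_buff_head
 * based sctp_ulpq_tail_event().
 */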
      static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
      {
              struct sk_buff_head temp;
      
	skb_queue_head_init(&temp);
              __skb_queue_tail(&temp, sctp_event2skb(event));
              return sctp_ulpq_tail_event(ulpq, &temp);
      }
      
      static struct sctp_stream_interleave sctp_stream_interleave_0 = {
              .data_chunk_len                = sizeof(struct sctp_data_chunk),
              .ftsn_chunk_len                = sizeof(struct sctp_fwdtsn_chunk),
              /* DATA process functions */
              .make_datafrag                = sctp_make_datafrag_empty,
              .assign_number                = sctp_chunk_assign_ssn,
              .validate_data                = sctp_validate_data,
              .ulpevent_data                = sctp_ulpq_tail_data,
              .enqueue_event                = do_ulpq_tail_event,
              .renege_events                = sctp_ulpq_renege,
              .start_pd                = sctp_ulpq_partial_delivery,
              .abort_pd                = sctp_ulpq_abort_pd,
              /* FORWARD-TSN process functions */
              .generate_ftsn                = sctp_generate_fwdtsn,
              .validate_ftsn                = sctp_validate_fwdtsn,
              .report_ftsn                = sctp_report_fwdtsn,
              .handle_ftsn                = sctp_handle_fwdtsn,
      };
      
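/* Adapt the single-event enqueue_event callback to the sk_buff_head
 * based sctp_enqueue_event().
 */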
      static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
                                       struct sctp_ulpevent *event)
      {
              struct sk_buff_head temp;
      
              skb_queue_head_init(&temp);
              __skb_queue_tail(&temp, sctp_event2skb(event));
              return sctp_enqueue_event(ulpq, &temp);
      }
      
      static struct sctp_stream_interleave sctp_stream_interleave_1 = {
              .data_chunk_len                = sizeof(struct sctp_idata_chunk),
              .ftsn_chunk_len                = sizeof(struct sctp_ifwdtsn_chunk),
              /* I-DATA process functions */
              .make_datafrag                = sctp_make_idatafrag_empty,
              .assign_number                = sctp_chunk_assign_mid,
              .validate_data                = sctp_validate_idata,
              .ulpevent_data                = sctp_ulpevent_idata,
              .enqueue_event                = do_sctp_enqueue_event,
              .renege_events                = sctp_renege_events,
              .start_pd                = sctp_intl_start_pd,
              .abort_pd                = sctp_intl_abort_pd,
              /* I-FORWARD-TSN process functions */
              .generate_ftsn                = sctp_generate_iftsn,
              .validate_ftsn                = sctp_validate_iftsn,
              .report_ftsn                = sctp_report_iftsn,
              .handle_ftsn                = sctp_handle_iftsn,
      };
      
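/* Select the I-DATA capable operations when the peer negotiated stream
 * interleaving support, the legacy DATA operations otherwise.
 */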
      void sctp_stream_interleave_init(struct sctp_stream *stream)
      {
              struct sctp_association *asoc;
      
              asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
                                                   : &sctp_stream_interleave_0;
      }
      /*
       * net/tipc/node.c: TIPC node management routines
       *
       * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
       * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
       * All rights reserved.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions are met:
       *
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. Neither the names of the copyright holders nor the names of its
       *    contributors may be used to endorse or promote products derived from
       *    this software without specific prior written permission.
       *
       * Alternatively, this software may be distributed under the terms of the
       * GNU General Public License ("GPL") version 2 as published by the Free
       * Software Foundation.
       *
       * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
       * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
       * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
       * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
       * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
       * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
       * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
       * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
       * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
       * POSSIBILITY OF SUCH DAMAGE.
       */
      
      #include "core.h"
      #include "link.h"
      #include "node.h"
      #include "name_distr.h"
      #include "socket.h"
      #include "bcast.h"
      #include "monitor.h"
      #include "discover.h"
      #include "netlink.h"
      #include "trace.h"
      #include "crypto.h"
      
      #define INVALID_NODE_SIG        0x10000
      #define NODE_CLEANUP_AFTER        300000
      
/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
      enum {
              TIPC_NOTIFY_NODE_DOWN                = (1 << 3),
              TIPC_NOTIFY_NODE_UP                = (1 << 4),
              TIPC_NOTIFY_LINK_UP                = (1 << 6),
              TIPC_NOTIFY_LINK_DOWN                = (1 << 7)
      };
      
      struct tipc_link_entry {
              struct tipc_link *link;
              spinlock_t lock; /* per link */
              u32 mtu;
              struct sk_buff_head inputq;
              struct tipc_media_addr maddr;
      };
      
      struct tipc_bclink_entry {
              struct tipc_link *link;
              struct sk_buff_head inputq1;
              struct sk_buff_head arrvq;
              struct sk_buff_head inputq2;
              struct sk_buff_head namedq;
      };
      
      /**
       * struct tipc_node - TIPC node structure
       * @addr: network address of node
       * @ref: reference counter to node object
       * @lock: rwlock governing access to structure
       * @net: the applicable net namespace
       * @hash: links to adjacent nodes in unsorted hash chain
       * @inputq: pointer to input queue containing messages for msg event
       * @namedq: pointer to name table input queue with name table messages
       * @active_links: bearer ids of active links, used as index into links[] array
       * @links: array containing references to all links to node
       * @action_flags: bit mask of different types of node actions
       * @state: connectivity state vs peer node
       * @preliminary: a preliminary node or not
       * @sync_point: sequence number where synch/failover is finished
       * @list: links to adjacent nodes in sorted list of cluster's nodes
       * @working_links: number of working links to node (both active and standby)
       * @link_cnt: number of links to node
       * @capabilities: bitmap, indicating peer node's functional capabilities
       * @signature: node instance identifier
       * @link_id: local and remote bearer ids of changing link, if any
       * @publ_list: list of publications
       * @rcu: rcu struct for tipc_node
       * @delete_at: indicates the time for deleting a down node
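 * @peer_id: 128-bit ID of peer
 * @peer_id_string: ID string of peer
 * @bc_entry: broadcast link entry
 * @conn_sks: list of socket connections towards peer
 * @keepalive_intv: keepalive interval, in milliseconds
 * @timer: node's keepalive timer
 * @peer_net: peer's net namespace, when it resides on the same host
 * @peer_hash_mix: hash value of peer's net namespace
 * @failover_sent: failover msg sent or not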
       * @crypto_rx: RX crypto handler
       */
      struct tipc_node {
              u32 addr;
              struct kref kref;
              rwlock_t lock;
              struct net *net;
              struct hlist_node hash;
              int active_links[2];
              struct tipc_link_entry links[MAX_BEARERS];
              struct tipc_bclink_entry bc_entry;
              int action_flags;
              struct list_head list;
              int state;
              bool preliminary;
              bool failover_sent;
              u16 sync_point;
              int link_cnt;
              u16 working_links;
              u16 capabilities;
              u32 signature;
              u32 link_id;
              u8 peer_id[16];
              char peer_id_string[NODE_ID_STR_LEN];
              struct list_head publ_list;
              struct list_head conn_sks;
              unsigned long keepalive_intv;
              struct timer_list timer;
              struct rcu_head rcu;
              unsigned long delete_at;
              struct net *peer_net;
              u32 peer_hash_mix;
      #ifdef CONFIG_TIPC_CRYPTO
              struct tipc_crypto *crypto_rx;
      #endif
      };
      
      /* Node FSM states and events:
       */
      enum {
              SELF_DOWN_PEER_DOWN    = 0xdd,
              SELF_UP_PEER_UP        = 0xaa,
              SELF_DOWN_PEER_LEAVING = 0xd1,
              SELF_UP_PEER_COMING    = 0xac,
              SELF_COMING_PEER_UP    = 0xca,
              SELF_LEAVING_PEER_DOWN = 0x1d,
              NODE_FAILINGOVER       = 0xf0,
              NODE_SYNCHING          = 0xcc
      };
      
      enum {
              SELF_ESTABL_CONTACT_EVT = 0xece,
              SELF_LOST_CONTACT_EVT   = 0x1ce,
              PEER_ESTABL_CONTACT_EVT = 0x9ece,
              PEER_LOST_CONTACT_EVT   = 0x91ce,
              NODE_FAILOVER_BEGIN_EVT = 0xfbe,
              NODE_FAILOVER_END_EVT   = 0xfee,
              NODE_SYNCH_BEGIN_EVT    = 0xcbe,
              NODE_SYNCH_END_EVT      = 0xcee
      };
      
      static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
                                        struct sk_buff_head *xmitq,
                                        struct tipc_media_addr **maddr);
      static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
                                      bool delete);
      static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
      static void tipc_node_delete(struct tipc_node *node);
      static void tipc_node_timeout(struct timer_list *t);
      static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
      static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
      static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
      static bool node_is_up(struct tipc_node *n);
      static void tipc_node_delete_from_list(struct tipc_node *node);
      
      struct tipc_sock_conn {
              u32 port;
              u32 peer_port;
              u32 peer_node;
              struct list_head list;
      };
      
      static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
      {
              int bearer_id = n->active_links[sel & 1];
      
              if (unlikely(bearer_id == INVALID_BEARER_ID))
                      return NULL;
      
              return n->links[bearer_id].link;
      }
      
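/* tipc_node_get_mtu - return the MTU to use towards 'addr' on the active
 * link selected by 'sel'. Falls back to MAX_MSG_SIZE when the node is
 * unknown or when a connected peer resides in a co-located namespace.
 */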
      int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
      {
              struct tipc_node *n;
              int bearer_id;
              unsigned int mtu = MAX_MSG_SIZE;
      
	n = tipc_node_find(net, addr);
              if (unlikely(!n))
                      return mtu;
      
	/* Allow MAX_MSG_SIZE when building a connection oriented message
	 * if both nodes are in the same core network
	 */
              if (n->peer_net && connected) {
                      tipc_node_put(n);
                      return mtu;
              }
      
              bearer_id = n->active_links[sel & 1];
              if (likely(bearer_id != INVALID_BEARER_ID))
                      mtu = n->links[bearer_id].mtu;
              tipc_node_put(n);
              return mtu;
}
      
      bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
      {
	u8 *own_id = tipc_own_id(net);
              struct tipc_node *n;
      
              if (!own_id)
                      return true;
      
              if (addr == tipc_own_addr(net)) {
                      memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
              }
              n = tipc_node_find(net, addr);
              if (!n)
                      return false;
      
              memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
              tipc_node_put(n);
              return true;
      }
      
      u16 tipc_node_get_capabilities(struct net *net, u32 addr)
      {
              struct tipc_node *n;
              u16 caps;
      
	n = tipc_node_find(net, addr);
              if (unlikely(!n))
                      return TIPC_NODE_CAPABILITIES;
              caps = n->capabilities;
              tipc_node_put(n);
              return caps;
}
      
      u32 tipc_node_get_addr(struct tipc_node *node)
      {
              return (node) ? node->addr : 0;
      }
      
      char *tipc_node_get_id_str(struct tipc_node *node)
      {
              return node->peer_id_string;
      }
      
      #ifdef CONFIG_TIPC_CRYPTO
      /**
       * tipc_node_crypto_rx - Retrieve crypto RX handle from node
       * Note: node ref counter must be held first!
       */
      struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
      {
              return (__n) ? __n->crypto_rx : NULL;
      }
      
      struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
      {
              return container_of(pos, struct tipc_node, list)->crypto_rx;
      }
      #endif
      
      static void tipc_node_free(struct rcu_head *rp)
      {
              struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
      
      #ifdef CONFIG_TIPC_CRYPTO
              tipc_crypto_stop(&n->crypto_rx);
      #endif
              kfree(n);
      }
      
      static void tipc_node_kref_release(struct kref *kref)
      {
              struct tipc_node *n = container_of(kref, struct tipc_node, kref);
      
              kfree(n->bc_entry.link);
              call_rcu(&n->rcu, tipc_node_free);
      }
      
      void tipc_node_put(struct tipc_node *node)
      {
              kref_put(&node->kref, tipc_node_kref_release);
      }
      
      static void tipc_node_get(struct tipc_node *node)
      {
              kref_get(&node->kref);
      }
      
      /*
       * tipc_node_find - locate specified node object, if it exists
       */
      static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
      {
	struct tipc_net *tn = tipc_net(net);
              struct tipc_node *node;
              unsigned int thash = tipc_hashfn(addr);
      
	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
                      if (node->addr != addr || node->preliminary)
                              continue;
                      if (!kref_get_unless_zero(&node->kref))
                              node = NULL;
                      break;
              }
	rcu_read_unlock();
              return node;
      }
      
      /* tipc_node_find_by_id - locate specified node object by its 128-bit id
       * Note: this function is called only when a discovery request failed
       * to find the node by its 32-bit id, and is not time critical
       */
      static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
      {
              struct tipc_net *tn = tipc_net(net);
              struct tipc_node *n;
              bool found = false;
      
              rcu_read_lock();
              list_for_each_entry_rcu(n, &tn->node_list, list) {
                      read_lock_bh(&n->lock);
                      if (!memcmp(id, n->peer_id, 16) &&
                          kref_get_unless_zero(&n->kref))
                              found = true;
                      read_unlock_bh(&n->lock);
                      if (found)
                              break;
              }
              rcu_read_unlock();
              return found ? n : NULL;
      }
      
      static void tipc_node_read_lock(struct tipc_node *n)
      {
              read_lock_bh(&n->lock);
      }
      
      static void tipc_node_read_unlock(struct tipc_node *n)
      {
              read_unlock_bh(&n->lock);
      }
      
      static void tipc_node_write_lock(struct tipc_node *n)
      {
              write_lock_bh(&n->lock);
      }
      
      static void tipc_node_write_unlock_fast(struct tipc_node *n)
      {
              write_unlock_bh(&n->lock);
      }
      
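/* tipc_node_write_unlock - release the node write lock and, outside the
 * lock, deliver any node/link up/down notifications that were recorded
 * in action_flags while it was held.
 */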
      static void tipc_node_write_unlock(struct tipc_node *n)
      {
              struct net *net = n->net;
              u32 addr = 0;
              u32 flags = n->action_flags;
              u32 link_id = 0;
              u32 bearer_id;
              struct list_head *publ_list;
      
              if (likely(!flags)) {
                      write_unlock_bh(&n->lock);
                      return;
              }
      
              addr = n->addr;
              link_id = n->link_id;
              bearer_id = link_id & 0xffff;
              publ_list = &n->publ_list;
      
              n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
                                   TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
      
              write_unlock_bh(&n->lock);
      
              if (flags & TIPC_NOTIFY_NODE_DOWN)
                      tipc_publ_notify(net, publ_list, addr);
      
              if (flags & TIPC_NOTIFY_NODE_UP)
                      tipc_named_node_up(net, addr);
      
              if (flags & TIPC_NOTIFY_LINK_UP) {
                      tipc_mon_peer_up(net, addr, bearer_id);
                      tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
                                           TIPC_NODE_SCOPE, link_id, link_id);
              }
              if (flags & TIPC_NOTIFY_LINK_DOWN) {
                      tipc_mon_peer_down(net, addr, bearer_id);
                      tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
                                            addr, link_id);
              }
      }
      
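/* tipc_node_assign_peer_net - look for a co-located net namespace whose
 * TIPC net id, node id and hash mix match the peer; if found, remember
 * it so the peer is treated as residing on the same host.
 */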
      static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
      {
              int net_id = tipc_netid(n->net);
              struct tipc_net *tn_peer;
              struct net *tmp;
              u32 hash_chk;
      
              if (n->peer_net)
                      return;
      
              for_each_net_rcu(tmp) {
                      tn_peer = tipc_net(tmp);
                      if (!tn_peer)
                              continue;
		/* Check whether the peer node exists in this namespace */
                      if (tn_peer->net_id != net_id)
                              continue;
                      if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
                              continue;
                      hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
                      if (hash_mixes ^ hash_chk)
                              continue;
                      n->peer_net = tmp;
                      n->peer_hash_mix = hash_mixes;
                      break;
              }
      }
      
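/* tipc_node_create - find or create the node object for 'addr'/'peer_id'.
 * An existing preliminary node is promoted to a real one, peer namespace
 * and capabilities are refreshed, and the cluster-wide capability mask is
 * recalculated.
 */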
      struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
                                         u16 capabilities, u32 hash_mixes,
                                         bool preliminary)
      {
              struct tipc_net *tn = net_generic(net, tipc_net_id);
              struct tipc_node *n, *temp_node;
              struct tipc_link *l;
              unsigned long intv;
              int bearer_id;
              int i;
      
              spin_lock_bh(&tn->node_list_lock);
              n = tipc_node_find(net, addr) ?:
                      tipc_node_find_by_id(net, peer_id);
              if (n) {
                      if (!n->preliminary)
                              goto update;
                      if (preliminary)
                              goto exit;
                      /* A preliminary node becomes "real" now, refresh its data */
                      tipc_node_write_lock(n);
                      n->preliminary = false;
                      n->addr = addr;
                      hlist_del_rcu(&n->hash);
                      hlist_add_head_rcu(&n->hash,
                                         &tn->node_htable[tipc_hashfn(addr)]);
                      list_del_rcu(&n->list);
                      list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                              if (n->addr < temp_node->addr)
                                      break;
                      }
                      list_add_tail_rcu(&n->list, &temp_node->list);
                      tipc_node_write_unlock_fast(n);
      
      update:
                      if (n->peer_hash_mix ^ hash_mixes)
                              tipc_node_assign_peer_net(n, hash_mixes);
                      if (n->capabilities == capabilities)
                              goto exit;
                      /* Same node may come back with new capabilities */
                      tipc_node_write_lock(n);
                      n->capabilities = capabilities;
                      for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
                              l = n->links[bearer_id].link;
                              if (l)
                                      tipc_link_update_caps(l, capabilities);
                      }
                      tipc_node_write_unlock_fast(n);
      
                      /* Calculate cluster capabilities */
                      tn->capabilities = TIPC_NODE_CAPABILITIES;
                      list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                              tn->capabilities &= temp_node->capabilities;
                      }
      
                      tipc_bcast_toggle_rcast(net,
                                              (tn->capabilities & TIPC_BCAST_RCAST));
      
                      goto exit;
              }
              n = kzalloc(sizeof(*n), GFP_ATOMIC);
              if (!n) {
                      pr_warn("Node creation failed, no memory\n");
                      goto exit;
              }
              tipc_nodeid2string(n->peer_id_string, peer_id);
      #ifdef CONFIG_TIPC_CRYPTO
              if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
                      pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
                      kfree(n);
                      n = NULL;
                      goto exit;
              }
      #endif
              n->addr = addr;
              n->preliminary = preliminary;
              memcpy(&n->peer_id, peer_id, 16);
              n->net = net;
              n->peer_net = NULL;
              n->peer_hash_mix = 0;
	/* Assign the kernel-local peer namespace, if one exists */
              tipc_node_assign_peer_net(n, hash_mixes);
              n->capabilities = capabilities;
              kref_init(&n->kref);
              rwlock_init(&n->lock);
              INIT_HLIST_NODE(&n->hash);
              INIT_LIST_HEAD(&n->list);
              INIT_LIST_HEAD(&n->publ_list);
              INIT_LIST_HEAD(&n->conn_sks);
              skb_queue_head_init(&n->bc_entry.namedq);
              skb_queue_head_init(&n->bc_entry.inputq1);
              __skb_queue_head_init(&n->bc_entry.arrvq);
              skb_queue_head_init(&n->bc_entry.inputq2);
              for (i = 0; i < MAX_BEARERS; i++)
                      spin_lock_init(&n->links[i].lock);
              n->state = SELF_DOWN_PEER_LEAVING;
              n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
              n->signature = INVALID_NODE_SIG;
              n->active_links[0] = INVALID_BEARER_ID;
              n->active_links[1] = INVALID_BEARER_ID;
              n->bc_entry.link = NULL;
              tipc_node_get(n);
              timer_setup(&n->timer, tipc_node_timeout, 0);
              /* Start a slow timer anyway, crypto needs it */
              n->keepalive_intv = 10000;
              intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
              if (!mod_timer(&n->timer, intv))
                      tipc_node_get(n);
              hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
              list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                      if (n->addr < temp_node->addr)
                              break;
              }
              list_add_tail_rcu(&n->list, &temp_node->list);
              /* Calculate cluster capabilities */
              tn->capabilities = TIPC_NODE_CAPABILITIES;
              list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                      tn->capabilities &= temp_node->capabilities;
              }
              tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
              trace_tipc_node_create(n, true, " ");
      exit:
              spin_unlock_bh(&tn->node_list_lock);
              return n;
      }
      
      static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
      {
              unsigned long tol = tipc_link_tolerance(l);
              unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
      
              /* Link with lowest tolerance determines timer interval */
              if (intv < n->keepalive_intv)
                      n->keepalive_intv = intv;
      
              /* Ensure link's abort limit corresponds to current tolerance */
              tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
      }
      
      static void tipc_node_delete_from_list(struct tipc_node *node)
      {
              list_del_rcu(&node->list);
              hlist_del_rcu(&node->hash);
              tipc_node_put(node);
      }
      
      static void tipc_node_delete(struct tipc_node *node)
      {
              trace_tipc_node_delete(node, true, " ");
              tipc_node_delete_from_list(node);
      
              del_timer_sync(&node->timer);
              tipc_node_put(node);
      }
      
      void tipc_node_stop(struct net *net)
      {
              struct tipc_net *tn = tipc_net(net);
              struct tipc_node *node, *t_node;
      
              spin_lock_bh(&tn->node_list_lock);
              list_for_each_entry_safe(node, t_node, &tn->node_list, list)
                      tipc_node_delete(node);
              spin_unlock_bh(&tn->node_list_lock);
      }
      
      void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
      {
              struct tipc_node *n;
      
              if (in_own_node(net, addr))
                      return;
      
              n = tipc_node_find(net, addr);
              if (!n) {
                      pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
                      return;
              }
              tipc_node_write_lock(n);
              list_add_tail(subscr, &n->publ_list);
              tipc_node_write_unlock_fast(n);
              tipc_node_put(n);
      }
      
      void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
      {
              struct tipc_node *n;
      
              if (in_own_node(net, addr))
                      return;
      
              n = tipc_node_find(net, addr);
              if (!n) {
                      pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
                      return;
              }
              tipc_node_write_lock(n);
              list_del_init(subscr);
              tipc_node_write_unlock_fast(n);
              tipc_node_put(n);
      }
      
      int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
      {
              struct tipc_node *node;
              struct tipc_sock_conn *conn;
              int err = 0;
      
	if (in_own_node(net, dnode))
		return 0;
      
              node = tipc_node_find(net, dnode);
              if (!node) {
                      pr_warn("Connecting sock to node 0x%x failed\n", dnode);
                      return -EHOSTUNREACH;
              }
              conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
              if (!conn) {
                      err = -EHOSTUNREACH;
                      goto exit;
              }
              conn->peer_node = dnode;
              conn->port = port;
              conn->peer_port = peer_port;
      
              tipc_node_write_lock(node);
              list_add_tail(&conn->list, &node->conn_sks);
              tipc_node_write_unlock(node);
      exit:
              tipc_node_put(node);
              return err;
      }
      
      void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
      {
              struct tipc_node *node;
              struct tipc_sock_conn *conn, *safe;
      
	if (in_own_node(net, dnode))
                      return;
      
              node = tipc_node_find(net, dnode);
              if (!node)
                      return;
      
              tipc_node_write_lock(node);
              list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
                      if (port != conn->port)
                              continue;
                      list_del(&conn->list);
                      kfree(conn);
              }
              tipc_node_write_unlock(node);
              tipc_node_put(node);
      }
      
      static void  tipc_node_clear_links(struct tipc_node *node)
      {
              int i;
      
              for (i = 0; i < MAX_BEARERS; i++) {
                      struct tipc_link_entry *le = &node->links[i];
      
                      if (le->link) {
                              kfree(le->link);
                              le->link = NULL;
                              node->link_cnt--;
                      }
              }
      }
      
/* tipc_node_cleanup - delete nodes that have had no active links
 * for NODE_CLEANUP_AFTER time
 */
      static bool tipc_node_cleanup(struct tipc_node *peer)
      {
              struct tipc_node *temp_node;
              struct tipc_net *tn = tipc_net(peer->net);
              bool deleted = false;
      
              /* If lock held by tipc_node_stop() the node will be deleted anyway */
              if (!spin_trylock_bh(&tn->node_list_lock))
                      return false;
      
              tipc_node_write_lock(peer);
      
              if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
                      tipc_node_clear_links(peer);
                      tipc_node_delete_from_list(peer);
                      deleted = true;
              }
              tipc_node_write_unlock(peer);
      
              if (!deleted) {
                      spin_unlock_bh(&tn->node_list_lock);
                      return deleted;
              }
      
              /* Calculate cluster capabilities */
              tn->capabilities = TIPC_NODE_CAPABILITIES;
              list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                      tn->capabilities &= temp_node->capabilities;
              }
              tipc_bcast_toggle_rcast(peer->net,
                                      (tn->capabilities & TIPC_BCAST_RCAST));
              spin_unlock_bh(&tn->node_list_lock);
              return deleted;
      }
      
      /* tipc_node_timeout - handle expiration of node timer
       */
      static void tipc_node_timeout(struct timer_list *t)
      {
              struct tipc_node *n = from_timer(n, t, timer);
              struct tipc_link_entry *le;
              struct sk_buff_head xmitq;
              int remains = n->link_cnt;
              int bearer_id;
              int rc = 0;
      
              trace_tipc_node_timeout(n, false, " ");
              if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Remove the timer's reference */
                      tipc_node_put(n);
                      return;
              }
      
      #ifdef CONFIG_TIPC_CRYPTO
              /* Take any crypto key related actions first */
              tipc_crypto_timeout(n->crypto_rx);
      #endif
              __skb_queue_head_init(&xmitq);
      
	/* Initialize the node interval to a large value (10 seconds); it is
	 * then recalculated from the lowest link tolerance
	 */
              tipc_node_read_lock(n);
              n->keepalive_intv = 10000;
              tipc_node_read_unlock(n);
              for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
                      tipc_node_read_lock(n);
                      le = &n->links[bearer_id];
                      if (le->link) {
                              spin_lock_bh(&le->lock);
                              /* Link tolerance may change asynchronously: */
                              tipc_node_calculate_timer(n, le->link);
                              rc = tipc_link_timeout(le->link, &xmitq);
                              spin_unlock_bh(&le->lock);
                              remains--;
                      }
                      tipc_node_read_unlock(n);
                      tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
                      if (rc & TIPC_LINK_DOWN_EVT)
                              tipc_node_link_down(n, bearer_id, false);
              }
              mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
      }
      
      /**
       * __tipc_node_link_up - handle addition of link
       * Node lock must be held by caller
       * Link becomes active (alone or shared) or standby, depending on its priority.
       */
      static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
                                      struct sk_buff_head *xmitq)
      {
              int *slot0 = &n->active_links[0];
              int *slot1 = &n->active_links[1];
              struct tipc_link *ol = node_active_link(n, 0);
              struct tipc_link *nl = n->links[bearer_id].link;
      
              if (!nl || tipc_link_is_up(nl))
                      return;
      
              tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
              if (!tipc_link_is_up(nl))
                      return;
      
              n->working_links++;
              n->action_flags |= TIPC_NOTIFY_LINK_UP;
              n->link_id = tipc_link_id(nl);
      
              /* Leave room for tunnel header when returning 'mtu' to users: */
              n->links[bearer_id].mtu = tipc_link_mss(nl);
      
              tipc_bearer_add_dest(n->net, bearer_id, n->addr);
              tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
      
              pr_debug("Established link <%s> on network plane %c\n",
                       tipc_link_name(nl), tipc_link_plane(nl));
              trace_tipc_node_link_up(n, true, " ");
      
              /* Ensure that a STATE message goes first */
              tipc_link_build_state_msg(nl, xmitq);
      
              /* First link? => give it both slots */
              if (!ol) {
                      *slot0 = bearer_id;
                      *slot1 = bearer_id;
                      tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
                      n->action_flags |= TIPC_NOTIFY_NODE_UP;
                      tipc_link_set_active(nl, true);
                      tipc_bcast_add_peer(n->net, nl, xmitq);
                      return;
              }
      
              /* Second link => redistribute slots */
              if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
                      pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
                      *slot0 = bearer_id;
                      *slot1 = bearer_id;
                      tipc_link_set_active(nl, true);
                      tipc_link_set_active(ol, false);
              } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
                      tipc_link_set_active(nl, true);
                      *slot1 = bearer_id;
              } else {
                      pr_debug("New link <%s> is standby\n", tipc_link_name(nl));