/* SPDX-License-Identifier: GPL-2.0 */
      #ifndef _LINUX_ICMPV6_H
      #define _LINUX_ICMPV6_H
      
      #include <linux/skbuff.h>
      #include <uapi/linux/icmpv6.h>
      
/* Return the ICMPv6 header of @skb; valid only once the transport header
 * offset has been set on the skb (done by the IPv6 receive path). */
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
{
	return (struct icmp6hdr *)skb_transport_header(skb);
}
      
      #include <linux/netdevice.h>
      
      #if IS_ENABLED(CONFIG_IPV6)
      
/* Signature shared by the built-in icmp6_send() and any modular sender
 * registered through inet6_register_icmp_sender(). */
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
			     const struct in6_addr *force_saddr);
#if IS_BUILTIN(CONFIG_IPV6)
void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
		const struct in6_addr *force_saddr);
/* IPv6 built in: call icmp6_send() directly, letting it pick the source
 * address (force_saddr == NULL). */
static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
{
	icmp6_send(skb, type, code, info, NULL);
}
/* With IPv6 built in the only legal sender is icmp6_send itself;
 * BUILD_BUG_ON() rejects anything else at compile time. */
static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
{
	BUILD_BUG_ON(fn != icmp6_send);
	return 0;
}
static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
{
	BUILD_BUG_ON(fn != icmp6_send);
	return 0;
}
#else
/* IPv6 modular: icmpv6_send() dispatches via the registered sender. */
extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
#endif

int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
			       unsigned int data_len);

#if IS_ENABLED(CONFIG_NF_NAT)
void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
#else
/* Without NAT there is no cross-namespace translation to undo, so the
 * ndo variant falls back to plain icmpv6_send(). */
#define icmpv6_ndo_send icmpv6_send
#endif
      
      #else
      
/* CONFIG_IPV6=n stubs: sending ICMPv6 errors becomes a no-op. */
static inline void icmpv6_send(struct sk_buff *skb,
			       u8 type, u8 code, __u32 info)
{
}

static inline void icmpv6_ndo_send(struct sk_buff *skb,
				   u8 type, u8 code, __u32 info)
{
}
      #endif
      
      extern int                                icmpv6_init(void);
      extern int                                icmpv6_err_convert(u8 type, u8 code,
                                                                 int *err);
      extern void                                icmpv6_cleanup(void);
      extern void                                icmpv6_param_prob(struct sk_buff *skb,
                                                                u8 code, int pos);
      
      struct flowi6;
      struct in6_addr;
      extern void                                icmpv6_flow_init(struct sock *sk,
                                                               struct flowi6 *fl6,
                                                               u8 type,
                                                               const struct in6_addr *saddr,
                                                               const struct in6_addr *daddr,
                                                               int oif);
      
      static inline bool icmpv6_is_err(int type)
      {
              switch (type) {
              case ICMPV6_DEST_UNREACH:
              case ICMPV6_PKT_TOOBIG:
              case ICMPV6_TIME_EXCEED:
              case ICMPV6_PARAMPROB:
                      return true;
              }
      
              return false;
      }
      
      #endif
      /* SPDX-License-Identifier: GPL-2.0 */
      /*
       * Shared glue code for 128bit block ciphers
       */
      
      #ifndef _CRYPTO_GLUE_HELPER_H
      #define _CRYPTO_GLUE_HELPER_H
      
      #include <crypto/internal/skcipher.h>
      #include <linux/kernel.h>
      #include <asm/fpu/api.h>
      #include <crypto/b128ops.h>
      
/* Per-mode assembler entry points: each takes the cipher context plus
 * destination/source buffers; CTR and XTS additionally take the 128-bit
 * counter/tweak. */
typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src,
				       le128 *iv);
typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src,
				       le128 *iv);

/* One implementation variant: a function and how many 128-bit blocks it
 * consumes per call. */
struct common_glue_func_entry {
	unsigned int num_blocks; /* number of blocks that @fn will process */
	union {
		common_glue_func_t ecb;
		common_glue_cbc_func_t cbc;
		common_glue_ctr_func_t ctr;
		common_glue_xts_func_t xts;
	} fn_u;
};

struct common_glue_ctx {
	unsigned int num_funcs; /* entries used in funcs[] */
	int fpu_blocks_limit; /* -1 means fpu not needed at all */

	/*
	 * First funcs entry must have largest num_blocks and last funcs entry
	 * must have num_blocks == 1!
	 */
	struct common_glue_func_entry funcs[];
};
      
/*
 * Decide whether to enter a kernel-FPU section for the current walk chunk.
 *
 * Returns the new fpu_enabled state: false when the implementation never
 * needs the FPU (fpu_blocks_limit < 0) or the chunk is smaller than
 * bsize * fpu_blocks_limit bytes; true once the FPU has been claimed,
 * either by an earlier call (fpu_enabled already set) or by this one.
 * The walk is atomised before kernel_fpu_begin() so it cannot sleep
 * while the FPU is held.
 */
static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
				  struct skcipher_walk *walk,
				  bool fpu_enabled, unsigned int nbytes)
{
	if (likely(fpu_blocks_limit < 0))
		return false;

	if (fpu_enabled)
		return true;

	/*
	 * Vector-registers are only used when chunk to be processed is large
	 * enough, so do not enable FPU until it is necessary.
	 */
	if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
		return false;

	/* prevent sleeping if FPU is in use */
	skcipher_walk_atomise(walk);

	kernel_fpu_begin();
	return true;
}
      
/* Close the kernel-FPU section opened by glue_fpu_begin(), if any. */
static inline void glue_fpu_end(bool fpu_enabled)
{
	if (fpu_enabled)
		kernel_fpu_end();
}
      
      static inline void le128_to_be128(be128 *dst, const le128 *src)
      {
   11         dst->a = cpu_to_be64(le64_to_cpu(src->a));
              dst->b = cpu_to_be64(le64_to_cpu(src->b));
      }
      
      static inline void be128_to_le128(le128 *dst, const be128 *src)
      {
              dst->a = cpu_to_le64(be64_to_cpu(src->a));
              dst->b = cpu_to_le64(be64_to_cpu(src->b));
      }
      
      static inline void le128_inc(le128 *i)
      {
              u64 a = le64_to_cpu(i->a);
              u64 b = le64_to_cpu(i->b);
      
              b++;
              if (!b)
                      a++;
      
   11         i->a = cpu_to_le64(a);
              i->b = cpu_to_le64(b);
      }
      
      extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
                                     struct skcipher_request *req);
      
      extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
                                             struct skcipher_request *req);
      
      extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
                                             struct skcipher_request *req);
      
      extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
                                     struct skcipher_request *req);
      
      extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
                                     struct skcipher_request *req,
                                     common_glue_func_t tweak_fn, void *tweak_ctx,
                                     void *crypt_ctx, bool decrypt);
      
      extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst,
                                            const u8 *src, le128 *iv,
                                            common_glue_func_t fn);
      
      #endif /* _CRYPTO_GLUE_HELPER_H */
      /*
       * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
       * cleaned up code to current version of sparse and added the slicing-by-8
       * algorithm to the closely similar existing slicing-by-4 algorithm.
       *
       * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
       * Nicer crc32 functions/docs submitted by linux@horizon.com.  Thanks!
       * Code was from the public domain, copyright abandoned.  Code was
       * subsequently included in the kernel, thus was re-licensed under the
       * GNU GPL v2.
       *
       * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
       * Same crc32 function was used in 5 other places in the kernel.
       * I made one version, and deleted the others.
       * There are various incantations of crc32().  Some use a seed of 0 or ~0.
       * Some xor at the end with ~0.  The generic crc32() function takes
       * seed as an argument, and doesn't xor at the end.  Then individual
       * users can do whatever they need.
       *   drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
       *   fs/jffs2 uses seed 0, doesn't xor with ~0.
       *   fs/partitions/efi.c uses seed ~0, xor's with ~0.
       *
       * This source code is licensed under the GNU General Public License,
       * Version 2.  See the file COPYING for more details.
       */
      
      /* see: Documentation/crc32.txt for a description of algorithms */
      
      #include <linux/crc32.h>
      #include <linux/crc32poly.h>
      #include <linux/module.h>
      #include <linux/types.h>
      #include <linux/sched.h>
      #include "crc32defs.h"
      
      #if CRC_LE_BITS > 8
      # define tole(x) ((__force u32) cpu_to_le32(x))
      #else
      # define tole(x) (x)
      #endif
      
      #if CRC_BE_BITS > 8
      # define tobe(x) ((__force u32) cpu_to_be32(x))
      #else
      # define tobe(x) (x)
      #endif
      
      #include "crc32table.h"
      
      MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
      MODULE_DESCRIPTION("Various CRC32 calculations");
      MODULE_LICENSE("GPL");
      
      #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
      
      /* implements slicing-by-4 or slicing-by-8 algorithm */
/* implements slicing-by-4 or slicing-by-8 algorithm */
/*
 * Core table-driven CRC loop shared by the LE and BE front ends.
 * @crc must already be in the byte order matching @tab (the callers
 * convert with __cpu_to_le32/__cpu_to_be32 before and after).
 * Strategy: consume unaligned leading bytes one at a time, then whole
 * 4- or 8-byte groups via the sliced tables, then the remaining tail
 * bytes one at a time.
 */
static inline u32 __pure
crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
{
# ifdef __LITTLE_ENDIAN
#  define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)
#  define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
		   t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
#  define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
		   t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
# else
#  define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
#  define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
		   t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
#  define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
		   t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
# endif
	const u32 *b;
	size_t    rem_len;
# ifdef CONFIG_X86
	size_t i;
# endif
	const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
# if CRC_LE_BITS != 32
	/* slicing-by-8 needs the four extra tables */
	const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7];
# endif
	u32 q;

	/* Align it */
	if (unlikely((long)buf & 3 && len)) {
		do {
			DO_CRC(*buf++);
		} while ((--len) && ((long)buf)&3);
	}

# if CRC_LE_BITS == 32
	rem_len = len & 3;
	len = len >> 2;
# else
	rem_len = len & 7;
	len = len >> 3;
# endif

	b = (const u32 *)buf;
# ifdef CONFIG_X86
	/* counted loop form generates better x86 code here */
	--b;
	for (i = 0; i < len; i++) {
# else
	for (--b; len; --len) {
# endif
		q = crc ^ *++b; /* use pre increment for speed */
# if CRC_LE_BITS == 32
		crc = DO_CRC4;
# else
		crc = DO_CRC8;
		q = *++b;
		crc ^= DO_CRC4;
# endif
	}
	len = rem_len;
	/* And the last few bytes */
	if (len) {
		u8 *p = (u8 *)(b + 1) - 1;
# ifdef CONFIG_X86
		for (i = 0; i < len; i++)
			DO_CRC(*++p); /* use pre increment for speed */
# else
		do {
			DO_CRC(*++p); /* use pre increment for speed */
		} while (--len);
# endif
	}
	return crc;
#undef DO_CRC
#undef DO_CRC4
#undef DO_CRC8
}
      #endif
      
      
      /**
       * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
       *                        CRC32/CRC32C
       * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for other
       *         uses, or the previous crc32/crc32c value if computing incrementally.
       * @p: pointer to buffer over which CRC32/CRC32C is run
       * @len: length of buffer @p
       * @tab: little-endian Ethernet table
       * @polynomial: CRC32/CRC32c LE polynomial
       */
static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
					  size_t len, const u32 (*tab)[256],
					  u32 polynomial)
{
#if CRC_LE_BITS == 1
	/* Bit-at-a-time; @tab is unused (callers pass NULL here). */
	int i;
	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
	}
# elif CRC_LE_BITS == 2
	/* Two bits per step via a 4-entry table. */
	while (len--) {
		crc ^= *p++;
		crc = (crc >> 2) ^ tab[0][crc & 3];
		crc = (crc >> 2) ^ tab[0][crc & 3];
		crc = (crc >> 2) ^ tab[0][crc & 3];
		crc = (crc >> 2) ^ tab[0][crc & 3];
	}
# elif CRC_LE_BITS == 4
	/* Nibble at a time via a 16-entry table. */
	while (len--) {
		crc ^= *p++;
		crc = (crc >> 4) ^ tab[0][crc & 15];
		crc = (crc >> 4) ^ tab[0][crc & 15];
	}
# elif CRC_LE_BITS == 8
	/* aka Sarwate algorithm */
	while (len--) {
		crc ^= *p++;
		crc = (crc >> 8) ^ tab[0][crc & 255];
	}
# else
	/* Slicing-by-4/8: crc32_body() works on LE-ordered words. */
	crc = (__force u32) __cpu_to_le32(crc);
	crc = crc32_body(crc, p, len, tab);
	crc = __le32_to_cpu((__force __le32)crc);
#endif
	return crc;
}
      
#if CRC_LE_BITS == 1
/* Bit-serial configuration: no LE tables are built, pass NULL. */
u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
	return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
}
u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
	return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
}
#else
/* Table-driven front ends; __weak so an arch-optimized version can
 * override them at link time. */
u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
	return crc32_le_generic(crc, p, len,
			(const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
}
u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
	return crc32_le_generic(crc, p, len,
			(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
}
#endif
      EXPORT_SYMBOL(crc32_le);
      EXPORT_SYMBOL(__crc32c_le);
      
      u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
      u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
      
      /*
       * This multiplies the polynomials x and y modulo the given modulus.
       * This follows the "little-endian" CRC convention that the lsbit
       * represents the highest power of x, and the msbit represents x^0.
       */
      static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
      {
    2         u32 product = x & 1 ? y : 0;
              int i;
      
    3         for (i = 0; i < 31; i++) {
    2                 product = (product >> 1) ^ (product & 1 ? modulus : 0);
    2                 x >>= 1;
    2                 product ^= x & 1 ? y : 0;
              }
      
              return product;
      }
      
      /**
       * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time
       * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
       * @len: The number of bytes. @crc is multiplied by x^(8*@len)
       * @polynomial: The modulus used to reduce the result to 32 bits.
       *
       * It's possible to parallelize CRC computations by computing a CRC
       * over separate ranges of a buffer, then summing them.
       * This shifts the given CRC by 8*len bits (i.e. produces the same effect
       * as appending len bytes of zero to the data), in time proportional
       * to log(len).
       */
static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
						   u32 polynomial)
{
	u32 power = polynomial;	/* CRC of x^32 */
	int i;

	/* Shift up to 32 bits in the simple linear way */
	for (i = 0; i < 8 * (int)(len & 3); i++)
		crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);

	/* Remaining length is now a multiple of 4 bytes; handle it by
	 * square-and-multiply over GF(2) so the work is O(log len). */
	len >>= 2;
	if (!len)
		return crc;

	for (;;) {
		/* "power" is x^(2^i), modulo the polynomial */
		if (len & 1)
			crc = gf2_multiply(crc, power, polynomial);

		len >>= 1;
		if (!len)
			break;

		/* Square power, advancing to x^(2^(i+1)) */
		power = gf2_multiply(power, power, polynomial);
	}

	return crc;
}
      
/* Shift a CRC32 (Ethernet polynomial) by 8*len zero bits. */
u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
{
	return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}

/* Shift a CRC32C (Castagnoli polynomial) by 8*len zero bits. */
u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
{
	return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
}
EXPORT_SYMBOL(crc32_le_shift);
EXPORT_SYMBOL(__crc32c_le_shift);
      
      /**
       * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
       * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for
       *        other uses, or the previous crc32 value if computing incrementally.
       * @p: pointer to buffer over which CRC32 is run
       * @len: length of buffer @p
       * @tab: big-endian Ethernet table
       * @polynomial: CRC32 BE polynomial
       */
static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
					  size_t len, const u32 (*tab)[256],
					  u32 polynomial)
{
#if CRC_BE_BITS == 1
	/* Bit-at-a-time; @tab is unused (callers pass NULL here). */
	int i;
	while (len--) {
		crc ^= *p++ << 24;
		for (i = 0; i < 8; i++)
			crc =
			    (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
					  0);
	}
# elif CRC_BE_BITS == 2
	/* Two bits per step via a 4-entry table. */
	while (len--) {
		crc ^= *p++ << 24;
		crc = (crc << 2) ^ tab[0][crc >> 30];
		crc = (crc << 2) ^ tab[0][crc >> 30];
		crc = (crc << 2) ^ tab[0][crc >> 30];
		crc = (crc << 2) ^ tab[0][crc >> 30];
	}
# elif CRC_BE_BITS == 4
	/* Nibble at a time via a 16-entry table. */
	while (len--) {
		crc ^= *p++ << 24;
		crc = (crc << 4) ^ tab[0][crc >> 28];
		crc = (crc << 4) ^ tab[0][crc >> 28];
	}
# elif CRC_BE_BITS == 8
	/* Byte at a time (Sarwate), big-endian orientation. */
	while (len--) {
		crc ^= *p++ << 24;
		crc = (crc << 8) ^ tab[0][crc >> 24];
	}
# else
	/* Slicing-by-4/8: crc32_body() works on BE-ordered words. */
	crc = (__force u32) __cpu_to_be32(crc);
	crc = crc32_body(crc, p, len, tab);
	crc = __be32_to_cpu((__force __be32)crc);
# endif
	return crc;
}
      
      #if CRC_LE_BITS == 1
      u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
      {
              return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
      }
      #else
      u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
      {
              return crc32_be_generic(crc, p, len,
                              (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
      }
      #endif
      EXPORT_SYMBOL(crc32_be);
      // SPDX-License-Identifier: GPL-2.0-only
      /*
       * AppArmor security module
       *
       * This file contains AppArmor task related definitions and mediation
       *
       * Copyright 2017 Canonical Ltd.
       *
       * TODO
       * If a task uses change_hat it currently does not return to the old
       * cred or task context but instead creates a new one.  Ideally the task
       * should return to the previous cred if it has not been modified.
       */
      
      #include "include/cred.h"
      #include "include/task.h"
      
      /**
       * aa_get_task_label - Get another task's label
       * @task: task to query  (NOT NULL)
       *
       * Returns: counted reference to @task's label
       */
struct aa_label *aa_get_task_label(struct task_struct *task)
{
	struct aa_label *p;

	/* the task's raw label can be replaced concurrently; hold the RCU
	 * read lock while taking a counted reference to the newest version
	 * so the caller gets a stable, refcounted label */
	rcu_read_lock();
	p = aa_get_newest_label(__aa_task_raw_label(task));
	rcu_read_unlock();

	return p;
}
      
      /**
       * aa_replace_current_label - replace the current tasks label
       * @label: new label  (NOT NULL)
       *
       * Returns: 0 or error on failure
       */
int aa_replace_current_label(struct aa_label *label)
{
	struct aa_label *old = aa_current_raw_label();
	struct aa_task_ctx *ctx = task_ctx(current);
	struct cred *new;

	AA_BUG(!label);

	/* already using @label - nothing to do */
	if (old == label)
		return 0;

	/* the subjective cred is currently overridden (cred != real_cred);
	 * refuse to swap labels mid-override */
	if (current_cred() != current_real_cred())
		return -EBUSY;

	new  = prepare_creds();
	if (!new)
		return -ENOMEM;

	/* refresh a stale no-new-privs label snapshot before switching */
	if (ctx->nnp && label_is_stale(ctx->nnp)) {
		struct aa_label *tmp = ctx->nnp;

		ctx->nnp = aa_get_newest_label(tmp);
		aa_put_label(tmp);
	}
	if (unconfined(label) || (labels_ns(old) != labels_ns(label)))
		/*
		 * if switching to unconfined or a different label namespace
		 * clear out context state
		 */
		aa_clear_task_ctx_trans(task_ctx(current));

	/*
	 * be careful switching cred label, when racing replacement it
	 * is possible that the cred labels's->proxy->label is the reference
	 * keeping @label valid, so make sure to get its reference before
	 * dropping the reference on the cred's label
	 */
	aa_get_label(label);
	aa_put_label(cred_label(new));
	set_cred_label(new, label);

	commit_creds(new);
	return 0;
}
      
      
      /**
       * aa_set_current_onexec - set the tasks change_profile to happen onexec
       * @label: system label to set at exec  (MAYBE NULL to clear value)
       * @stack: whether stacking should be done
       * Returns: 0 or error on failure
       */
int aa_set_current_onexec(struct aa_label *label, bool stack)
{
	struct aa_task_ctx *ctx = task_ctx(current);

	/* swap in the new onexec label, releasing the reference on the
	 * old one; @label may be NULL to clear a pending transition */
	aa_get_label(label);
	aa_put_label(ctx->onexec);
	ctx->onexec = label;
	ctx->token = stack;	/* token field records the stacking flag here */

	return 0;
}
      
      /**
       * aa_set_current_hat - set the current tasks hat
       * @label: label to set as the current hat  (NOT NULL)
       * @token: token value that must be specified to change from the hat
       *
       * Do switch of tasks hat.  If the task is currently in a hat
       * validate the token to match.
       *
       * Returns: 0 or error on failure
       */
int aa_set_current_hat(struct aa_label *label, u64 token)
{
	struct aa_task_ctx *ctx = task_ctx(current);
	struct cred *new;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	AA_BUG(!label);

	if (!ctx->previous) {
		/* entering a hat: save the current cred label (refcount is
		 * transferred to ->previous, not re-taken) and the token
		 * required to leave it again */
		ctx->previous = cred_label(new);
		ctx->token = token;
	} else if (ctx->token == token) {
		/* already in a hat and the token matches: drop the ref on
		 * the label being replaced, keep the saved ->previous */
		aa_put_label(cred_label(new));
	} else {
		/* previous_profile && ctx->token != token */
		abort_creds(new);
		return -EACCES;
	}

	set_cred_label(new, aa_get_newest_label(label));
	/* clear exec on switching context */
	aa_put_label(ctx->onexec);
	ctx->onexec = NULL;

	commit_creds(new);
	return 0;
}
      
      /**
       * aa_restore_previous_label - exit from hat context restoring previous label
       * @token: the token that must be matched to exit hat context
       *
       * Attempt to return out of a hat to the previous label.  The token
       * must match the stored token value.
       *
       * Returns: 0 or error of failure
       */
int aa_restore_previous_label(u64 token)
{
	struct aa_task_ctx *ctx = task_ctx(current);
	struct cred *new;

	/* leaving a hat requires the token set when it was entered */
	if (ctx->token != token)
		return -EACCES;
	/* ignore restores when there is no saved label */
	if (!ctx->previous)
		return 0;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	/* replace the cred label with the newest version of the saved one */
	aa_put_label(cred_label(new));
	set_cred_label(new, aa_get_newest_label(ctx->previous));
	AA_BUG(!cred_label(new));
	/* clear exec && prev information when restoring to previous context */
	aa_clear_task_ctx_trans(ctx);

	commit_creds(new);

	return 0;
}
      // SPDX-License-Identifier: GPL-2.0
      /* MPTCP socket monitoring support
       *
       * Copyright (c) 2019 Red Hat
       *
       * Author: Davide Caratti <dcaratti@redhat.com>
       */
      
      #include <linux/kernel.h>
      #include <linux/net.h>
      #include <linux/inet_diag.h>
      #include <net/netlink.h>
      #include <uapi/linux/mptcp.h>
      #include "protocol.h"
      
/* Fill an INET_ULP_INFO_MPTCP nest with @sk's subflow state.
 * Returns 0 on success (also when no MPTCP ULP data is attached, in
 * which case the empty nest is cancelled) or -EMSGSIZE when @skb runs
 * out of room; the nest is cancelled on any failure. */
static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *sf;
	struct nlattr *start;
	u32 flags = 0;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
	if (!start)
		return -EMSGSIZE;

	/* icsk_ulp_data is RCU protected; hold the read lock across every
	 * access to @sf below */
	rcu_read_lock();
	sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!sf) {
		err = 0;
		goto nla_failure;
	}

	/* fold the subflow's boolean state into one flags attribute */
	if (sf->mp_capable)
		flags |= MPTCP_SUBFLOW_FLAG_MCAP_REM;
	if (sf->request_mptcp)
		flags |= MPTCP_SUBFLOW_FLAG_MCAP_LOC;
	if (sf->mp_join)
		flags |= MPTCP_SUBFLOW_FLAG_JOIN_REM;
	if (sf->request_join)
		flags |= MPTCP_SUBFLOW_FLAG_JOIN_LOC;
	if (sf->backup)
		flags |= MPTCP_SUBFLOW_FLAG_BKUP_REM;
	if (sf->request_bkup)
		flags |= MPTCP_SUBFLOW_FLAG_BKUP_LOC;
	if (sf->fully_established)
		flags |= MPTCP_SUBFLOW_FLAG_FULLY_ESTABLISHED;
	if (sf->conn_finished)
		flags |= MPTCP_SUBFLOW_FLAG_CONNECTED;
	if (sf->map_valid)
		flags |= MPTCP_SUBFLOW_FLAG_MAPVALID;

	if (nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_REM, sf->remote_token) ||
	    nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_LOC, sf->token) ||
	    nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
			sf->rel_write_seq) ||
	    nla_put_u64_64bit(skb, MPTCP_SUBFLOW_ATTR_MAP_SEQ, sf->map_seq,
			      MPTCP_SUBFLOW_ATTR_PAD) ||
	    nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
			sf->map_subflow_seq) ||
	    nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_SSN_OFFSET, sf->ssn_offset) ||
	    nla_put_u16(skb, MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
			sf->map_data_len) ||
	    nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) ||
	    nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) ||
	    nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, sf->local_id)) {
		err = -EMSGSIZE;
		goto nla_failure;
	}

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}
      
      static size_t subflow_get_info_size(const struct sock *sk)
      {
              size_t size = 0;
      
              size += nla_total_size(0) +        /* INET_ULP_INFO_MPTCP */
                      nla_total_size(4) +        /* MPTCP_SUBFLOW_ATTR_TOKEN_REM */
                      nla_total_size(4) +        /* MPTCP_SUBFLOW_ATTR_TOKEN_LOC */
                      nla_total_size(4) +        /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */
                      nla_total_size_64bit(8) +        /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */
                      nla_total_size(4) +        /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */
                      nla_total_size(2) +        /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
                      nla_total_size(2) +        /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */
                      nla_total_size(4) +        /* MPTCP_SUBFLOW_ATTR_FLAGS */
                      nla_total_size(1) +        /* MPTCP_SUBFLOW_ATTR_ID_REM */
                      nla_total_size(1) +        /* MPTCP_SUBFLOW_ATTR_ID_LOC */
                      0;
              return size;
      }
      
      void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops)
      {
              ops->get_info = subflow_get_info;
              ops->get_info_size = subflow_get_info_size;
      }
      // SPDX-License-Identifier: GPL-2.0
      #include <linux/bug.h>
      #include <linux/kernel.h>
      #include <asm/div64.h>
      #include <linux/reciprocal_div.h>
      #include <linux/export.h>
      
      /*
       * For a description of the algorithm please have a look at
       * include/linux/reciprocal_div.h
       */
      
      struct reciprocal_value reciprocal_value(u32 d)
      {
              struct reciprocal_value R;
              u64 m;
              int l;
      
   26         l = fls(d - 1);
              m = ((1ULL << 32) * ((1ULL << l) - d));
              do_div(m, d);
              ++m;
              R.m = (u32)m;
              R.sh1 = min(l, 1);
              R.sh2 = max(l - 1, 0);
      
              return R;
      }
      EXPORT_SYMBOL(reciprocal_value);
      
      struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
      {
              struct reciprocal_value_adv R;
              u32 l, post_shift;
              u64 mhigh, mlow;
      
              /* ceil(log2(d)) */
              l = fls(d - 1);
              /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
               * be handled before calling "reciprocal_value_adv", please see the
               * comment at include/linux/reciprocal_div.h.
               */
              WARN(l == 32,
                   "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
                   d, __func__);
              post_shift = l;
              mlow = 1ULL << (32 + l);
              do_div(mlow, d);
              mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
              do_div(mhigh, d);
      
              for (; post_shift > 0; post_shift--) {
                      u64 lo = mlow >> 1, hi = mhigh >> 1;
      
                      if (lo >= hi)
                              break;
      
                      mlow = lo;
                      mhigh = hi;
              }
      
              R.m = (u32)mhigh;
              R.sh = post_shift;
              R.exp = l;
              R.is_wide_m = mhigh > U32_MAX;
      
              return R;
      }
      EXPORT_SYMBOL(reciprocal_value_adv);
      // SPDX-License-Identifier: GPL-2.0-only
      /*
       *        linux/lib/crc-ccitt.c
       */
      
      #include <linux/types.h>
      #include <linux/module.h>
      #include <linux/crc-ccitt.h>
      
      /*
       * This mysterious table is just the CRC of each possible byte. It can be
       * computed using the standard bit-at-a-time methods. The polynomial can
       * be seen in entry 128, 0x8408. This corresponds to x^0 + x^5 + x^12.
       * Add the implicit x^16, and you have the standard CRC-CCITT.
       */
/* Reflected (lsb-first) byte table: table[i] is the CRC of the single byte i. */
u16 const crc_ccitt_table[256] = {
	0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
	0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
	0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
	0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
	0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
	0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
	0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
	0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
	0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
	0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
	0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
	0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
	0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
	0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
	0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
	0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
	0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
	0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
	0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
	0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
	0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
	0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
	0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
	0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
	0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
	0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
	0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
	0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
	0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
	0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
	0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
	0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
EXPORT_SYMBOL(crc_ccitt_table);
      
/*
 * Similar table to calculate the CRC16 variant known as CRC-CCITT-FALSE.
 * Non-reflected (msb-first) bit order — note entry [1] is the raw polynomial
 * 0x1021 — and it does not augment the final value.
 */
      u16 const crc_ccitt_false_table[256] = {
          0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
          0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
          0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
          0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
          0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
          0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
          0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
          0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
          0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
          0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948,