/* SPDX-License-Identifier: GPL-2.0 */
      #ifndef __ASM_GENERIC_GETORDER_H
      #define __ASM_GENERIC_GETORDER_H
      
      #ifndef __ASSEMBLY__
      
      #include <linux/compiler.h>
      #include <linux/log2.h>
      
      /**
       * get_order - Determine the allocation order of a memory size
       * @size: The size for which to get the order
       *
       * Determine the allocation order of a particular sized block of memory.  This
       * is on a logarithmic scale, where:
       *
       *        0 -> 2^0 * PAGE_SIZE and below
       *        1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
       *        2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
       *        3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
       *        4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
       *        ...
       *
       * The order returned is used to find the smallest allocation granule required
       * to hold an object of the specified size.
       *
       * The result is undefined if the size is 0.
       */
       static inline __attribute_const__ int get_order(unsigned long size)
      {
              if (__builtin_constant_p(size)) {
                      if (!size)
                              return BITS_PER_LONG - PAGE_SHIFT;
      
                      if (size < (1UL << PAGE_SHIFT))
                              return 0;
      
                      return ilog2((size) - 1) - PAGE_SHIFT + 1;
              }
      
               size--;
              size >>= PAGE_SHIFT;
      #if BITS_PER_LONG == 32
              return fls(size);
      #else
              return fls64(size);
      #endif
      }
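       
       /*
        * Illustrative example (not part of this header, and assuming 4 KiB
        * pages): a driver needing a physically contiguous 20 KiB buffer would
        * use get_order(20 * 1024) == 3, i.e. 2^3 = 8 pages (32 KiB), the
        * smallest power-of-two block that covers the request:
        *
        *      unsigned long buf;
        *
        *      buf = __get_free_pages(GFP_KERNEL, get_order(20 * 1024));
        *      if (!buf)
        *              return -ENOMEM;
        *      ...
        *      free_pages(buf, get_order(20 * 1024));
        */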
      
      #endif        /* __ASSEMBLY__ */
      
      #endif        /* __ASM_GENERIC_GETORDER_H */
      /* SPDX-License-Identifier: GPL-2.0 */
      /*
       * linux/mii.h: definitions for MII-compatible transceivers
       * Originally drivers/net/sunhme.h.
       *
       * Copyright (C) 1996, 1999, 2001 David S. Miller (davem@redhat.com)
       */
      #ifndef __LINUX_MII_H__
      #define __LINUX_MII_H__
      
      
      #include <linux/if.h>
      #include <linux/linkmode.h>
      #include <uapi/linux/mii.h>
      
      struct ethtool_cmd;
      
      struct mii_if_info {
              int phy_id;
              int advertising;
              int phy_id_mask;
              int reg_num_mask;
      
              unsigned int full_duplex : 1;        /* is full duplex? */
              unsigned int force_media : 1;        /* is autoneg. disabled? */
              unsigned int supports_gmii : 1; /* are GMII registers supported? */
      
              struct net_device *dev;
              int (*mdio_read) (struct net_device *dev, int phy_id, int location);
              void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
      };
      
      extern int mii_link_ok (struct mii_if_info *mii);
      extern int mii_nway_restart (struct mii_if_info *mii);
      extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
      extern void mii_ethtool_get_link_ksettings(
              struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
      extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
      extern int mii_ethtool_set_link_ksettings(
              struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd);
      extern int mii_check_gmii_support(struct mii_if_info *mii);
      extern void mii_check_link (struct mii_if_info *mii);
      extern unsigned int mii_check_media (struct mii_if_info *mii,
                                           unsigned int ok_to_print,
                                           unsigned int init_media);
      extern int generic_mii_ioctl(struct mii_if_info *mii_if,
                                   struct mii_ioctl_data *mii_data, int cmd,
                                   unsigned int *duplex_changed);
      
      
      static inline struct mii_ioctl_data *if_mii(struct ifreq *rq)
      {
               return (struct mii_ioctl_data *) &rq->ifr_ifru;
      }
      
      /**
       * mii_nway_result
       * @negotiated: value of MII ANAR and'd with ANLPAR
       *
        * Given a set of MII abilities, check each bit and return the
        * currently supported media, in the priority order defined by
        * IEEE 802.3u.  The result uses LPA_xxx constants, but note that
        * @negotiated is the AND of ANAR and ANLPAR, not the raw LPA value.
       *
       * The one exception to IEEE 802.3u is that 100baseT4 is placed
       * between 100T-full and 100T-half.  If your phy does not support
       * 100T4 this is fine.  If your phy places 100T4 elsewhere in the
       * priority order, you will need to roll your own function.
       */
      static inline unsigned int mii_nway_result (unsigned int negotiated)
      {
              unsigned int ret;
      
              if (negotiated & LPA_100FULL)
                      ret = LPA_100FULL;
              else if (negotiated & LPA_100BASE4)
                      ret = LPA_100BASE4;
              else if (negotiated & LPA_100HALF)
                      ret = LPA_100HALF;
              else if (negotiated & LPA_10FULL)
                      ret = LPA_10FULL;
              else
                      ret = LPA_10HALF;
      
              return ret;
      }
      
      /**
       * mii_duplex
       * @duplex_lock: Non-zero if duplex is locked at full
       * @negotiated: value of MII ANAR and'd with ANLPAR
       *
       * A small helper function for a common case.  Returns one
       * if the media is operating or locked at full duplex, and
       * returns zero otherwise.
       */
      static inline unsigned int mii_duplex (unsigned int duplex_lock,
                                             unsigned int negotiated)
      {
              if (duplex_lock)
                      return 1;
              if (mii_nway_result(negotiated) & LPA_DUPLEX)
                      return 1;
              return 0;
      }
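       
       /*
        * Illustrative usage only (the surrounding driver code is hypothetical;
        * MII_ADVERTISE and MII_LPA come from <uapi/linux/mii.h>, included
        * above): a driver resolves the negotiated media by AND'ing its own
        * advertisement (ANAR) with the link partner's (ANLPAR):
        *
        *      int advert = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
        *      int lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
        *      unsigned int media = mii_nway_result(advert & lpa);
        *      unsigned int fdx = mii_duplex(0, advert & lpa);
        */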
      
      /**
       * ethtool_adv_to_mii_adv_t
       * @ethadv: the ethtool advertisement settings
       *
       * A small helper function that translates ethtool advertisement
       * settings to phy autonegotiation advertisements for the
       * MII_ADVERTISE register.
       */
      static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
      {
              u32 result = 0;
      
              if (ethadv & ADVERTISED_10baseT_Half)
                      result |= ADVERTISE_10HALF;
              if (ethadv & ADVERTISED_10baseT_Full)
                      result |= ADVERTISE_10FULL;
              if (ethadv & ADVERTISED_100baseT_Half)
                      result |= ADVERTISE_100HALF;
              if (ethadv & ADVERTISED_100baseT_Full)
                      result |= ADVERTISE_100FULL;
              if (ethadv & ADVERTISED_Pause)
                      result |= ADVERTISE_PAUSE_CAP;
              if (ethadv & ADVERTISED_Asym_Pause)
                      result |= ADVERTISE_PAUSE_ASYM;
      
              return result;
      }
      
      /**
       * linkmode_adv_to_mii_adv_t
       * @advertising: the linkmode advertisement settings
       *
       * A small helper function that translates linkmode advertisement
       * settings to phy autonegotiation advertisements for the
       * MII_ADVERTISE register.
       */
      static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
      {
              u32 result = 0;
      
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising))
                      result |= ADVERTISE_10HALF;
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising))
                      result |= ADVERTISE_10FULL;
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising))
                      result |= ADVERTISE_100HALF;
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising))
                      result |= ADVERTISE_100FULL;
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
                      result |= ADVERTISE_PAUSE_CAP;
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
                      result |= ADVERTISE_PAUSE_ASYM;
      
              return result;
      }
      
      /**
       * mii_adv_to_ethtool_adv_t
       * @adv: value of the MII_ADVERTISE register
       *
       * A small helper function that translates MII_ADVERTISE bits
       * to ethtool advertisement settings.
       */
      static inline u32 mii_adv_to_ethtool_adv_t(u32 adv)
      {
              u32 result = 0;
      
              if (adv & ADVERTISE_10HALF)
                      result |= ADVERTISED_10baseT_Half;
              if (adv & ADVERTISE_10FULL)
                      result |= ADVERTISED_10baseT_Full;
              if (adv & ADVERTISE_100HALF)
                      result |= ADVERTISED_100baseT_Half;
              if (adv & ADVERTISE_100FULL)
                      result |= ADVERTISED_100baseT_Full;
              if (adv & ADVERTISE_PAUSE_CAP)
                      result |= ADVERTISED_Pause;
              if (adv & ADVERTISE_PAUSE_ASYM)
                      result |= ADVERTISED_Asym_Pause;
      
              return result;
      }
      
      /**
       * ethtool_adv_to_mii_ctrl1000_t
       * @ethadv: the ethtool advertisement settings
       *
       * A small helper function that translates ethtool advertisement
       * settings to phy autonegotiation advertisements for the
       * MII_CTRL1000 register when in 1000T mode.
       */
      static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
      {
              u32 result = 0;
      
              if (ethadv & ADVERTISED_1000baseT_Half)
                      result |= ADVERTISE_1000HALF;
              if (ethadv & ADVERTISED_1000baseT_Full)
                      result |= ADVERTISE_1000FULL;
      
              return result;
      }
      
      /**
       * linkmode_adv_to_mii_ctrl1000_t
       * @advertising: the linkmode advertisement settings
       *
       * A small helper function that translates linkmode advertisement
       * settings to phy autonegotiation advertisements for the
       * MII_CTRL1000 register when in 1000T mode.
       */
      static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising)
      {
              u32 result = 0;
      
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
                                    advertising))
                      result |= ADVERTISE_1000HALF;
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                                    advertising))
                      result |= ADVERTISE_1000FULL;
      
              return result;
      }
      
      /**
       * mii_ctrl1000_to_ethtool_adv_t
       * @adv: value of the MII_CTRL1000 register
       *
       * A small helper function that translates MII_CTRL1000
       * bits, when in 1000Base-T mode, to ethtool
       * advertisement settings.
       */
      static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv)
      {
              u32 result = 0;
      
              if (adv & ADVERTISE_1000HALF)
                      result |= ADVERTISED_1000baseT_Half;
              if (adv & ADVERTISE_1000FULL)
                      result |= ADVERTISED_1000baseT_Full;
      
              return result;
      }
      
      /**
       * mii_lpa_to_ethtool_lpa_t
        * @lpa: value of the MII_LPA register
       *
       * A small helper function that translates MII_LPA
       * bits, when in 1000Base-T mode, to ethtool
       * LP advertisement settings.
       */
      static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa)
      {
              u32 result = 0;
      
              if (lpa & LPA_LPACK)
                      result |= ADVERTISED_Autoneg;
      
              return result | mii_adv_to_ethtool_adv_t(lpa);
      }
      
      /**
       * mii_stat1000_to_ethtool_lpa_t
        * @lpa: value of the MII_STAT1000 register
       *
       * A small helper function that translates MII_STAT1000
       * bits, when in 1000Base-T mode, to ethtool
       * advertisement settings.
       */
      static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
      {
              u32 result = 0;
      
              if (lpa & LPA_1000HALF)
                      result |= ADVERTISED_1000baseT_Half;
              if (lpa & LPA_1000FULL)
                      result |= ADVERTISED_1000baseT_Full;
      
              return result;
      }
      
      /**
       * mii_stat1000_mod_linkmode_lpa_t
        * @advertising: pointer to destination link mode
        * @lpa: value of the MII_STAT1000 register
        *
        * A small helper function that translates MII_STAT1000 bits, when in
        * 1000Base-T mode, to linkmode advertisement settings. Other bits in
        * advertising are not changed.
       */
      static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising,
                                                         u32 lpa)
      {
              linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
                               advertising, lpa & LPA_1000HALF);
      
              linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                               advertising, lpa & LPA_1000FULL);
      }
      
      /**
       * ethtool_adv_to_mii_adv_x
       * @ethadv: the ethtool advertisement settings
       *
       * A small helper function that translates ethtool advertisement
       * settings to phy autonegotiation advertisements for the
       * MII_CTRL1000 register when in 1000Base-X mode.
       */
      static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv)
      {
              u32 result = 0;
      
              if (ethadv & ADVERTISED_1000baseT_Half)
                      result |= ADVERTISE_1000XHALF;
              if (ethadv & ADVERTISED_1000baseT_Full)
                      result |= ADVERTISE_1000XFULL;
              if (ethadv & ADVERTISED_Pause)
                      result |= ADVERTISE_1000XPAUSE;
              if (ethadv & ADVERTISED_Asym_Pause)
                      result |= ADVERTISE_1000XPSE_ASYM;
      
              return result;
      }
      
      /**
       * mii_adv_to_ethtool_adv_x
       * @adv: value of the MII_CTRL1000 register
       *
       * A small helper function that translates MII_CTRL1000
       * bits, when in 1000Base-X mode, to ethtool
       * advertisement settings.
       */
      static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
      {
              u32 result = 0;
      
              if (adv & ADVERTISE_1000XHALF)
                      result |= ADVERTISED_1000baseT_Half;
              if (adv & ADVERTISE_1000XFULL)
                      result |= ADVERTISED_1000baseT_Full;
              if (adv & ADVERTISE_1000XPAUSE)
                      result |= ADVERTISED_Pause;
              if (adv & ADVERTISE_1000XPSE_ASYM)
                      result |= ADVERTISED_Asym_Pause;
      
              return result;
      }
      
      /**
       * mii_lpa_to_ethtool_lpa_x
        * @lpa: value of the MII_LPA register
       *
       * A small helper function that translates MII_LPA
       * bits, when in 1000Base-X mode, to ethtool
       * LP advertisement settings.
       */
      static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
      {
              u32 result = 0;
      
              if (lpa & LPA_LPACK)
                      result |= ADVERTISED_Autoneg;
      
              return result | mii_adv_to_ethtool_adv_x(lpa);
      }
      
      /**
       * mii_adv_mod_linkmode_adv_t
        * @advertising: pointer to destination link mode.
       * @adv: value of the MII_ADVERTISE register
       *
       * A small helper function that translates MII_ADVERTISE bits to
       * linkmode advertisement settings. Leaves other bits unchanged.
       */
      static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising,
                                                    u32 adv)
      {
              linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
                               advertising, adv & ADVERTISE_10HALF);
      
              linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
                               advertising, adv & ADVERTISE_10FULL);
      
              linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
                               advertising, adv & ADVERTISE_100HALF);
      
              linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                               advertising, adv & ADVERTISE_100FULL);
      
              linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
                               adv & ADVERTISE_PAUSE_CAP);
      
              linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
                               advertising, adv & ADVERTISE_PAUSE_ASYM);
      }
      
      /**
       * mii_adv_to_linkmode_adv_t
        * @advertising: pointer to destination link mode.
       * @adv: value of the MII_ADVERTISE register
       *
       * A small helper function that translates MII_ADVERTISE bits
       * to linkmode advertisement settings. Clears the old value
       * of advertising.
       */
      static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
                                                   u32 adv)
      {
              linkmode_zero(advertising);
      
              mii_adv_mod_linkmode_adv_t(advertising, adv);
      }
      
      /**
       * mii_lpa_to_linkmode_lpa_t
        * @lp_advertising: pointer to destination link mode.
        * @lpa: value of the MII_LPA register
        *
        * A small helper function that translates MII_LPA bits, when in
        * 1000Base-T mode, to linkmode LP advertisement settings. Clears the
        * old value of @lp_advertising.
       */
      static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising,
                                                   u32 lpa)
      {
              mii_adv_to_linkmode_adv_t(lp_advertising, lpa);
      
              if (lpa & LPA_LPACK)
                      linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
                                       lp_advertising);
      
      }
      
      /**
       * mii_lpa_mod_linkmode_lpa_t
        * @lp_advertising: pointer to destination link mode.
        * @lpa: value of the MII_LPA register
       *
       * A small helper function that translates MII_LPA bits, when in
       * 1000Base-T mode, to linkmode LP advertisement settings. Leaves
       * other bits unchanged.
       */
      static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
                                                    u32 lpa)
      {
              mii_adv_mod_linkmode_adv_t(lp_advertising, lpa);
      
              linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
                               lp_advertising, lpa & LPA_LPACK);
      }
      
      /**
       * linkmode_adv_to_lcl_adv_t
        * @advertising: pointer to linkmode advertising
        *
        * A small helper function that translates the linkmode pause
        * advertisement bits to MII local advertisement (lcl_adv) pause
        * capabilities.
       */
      static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
      {
              u32 lcl_adv = 0;
      
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
                                    advertising))
                      lcl_adv |= ADVERTISE_PAUSE_CAP;
              if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
                                    advertising))
                      lcl_adv |= ADVERTISE_PAUSE_ASYM;
      
              return lcl_adv;
      }
      
      /**
       * mii_advertise_flowctrl - get flow control advertisement flags
       * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
       */
      static inline u16 mii_advertise_flowctrl(int cap)
      {
              u16 adv = 0;
      
              if (cap & FLOW_CTRL_RX)
                      adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
              if (cap & FLOW_CTRL_TX)
                      adv ^= ADVERTISE_PAUSE_ASYM;
      
              return adv;
      }
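       
       /*
        * The XOR above compactly encodes the IEEE 802.3 pause advertisement
        * bits; spelled out (informational table, not additional API):
        *
        *      cap                             returned advertisement
        *      0                               0
        *      FLOW_CTRL_TX                    ADVERTISE_PAUSE_ASYM
        *      FLOW_CTRL_RX                    ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM
        *      FLOW_CTRL_RX | FLOW_CTRL_TX     ADVERTISE_PAUSE_CAP
        */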
      
      /**
       * mii_resolve_flowctrl_fdx
       * @lcladv: value of MII ADVERTISE register
       * @rmtadv: value of MII LPA register
       *
       * Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3
       */
      static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
      {
              u8 cap = 0;
      
              if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
                      cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
              } else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
                      if (lcladv & ADVERTISE_PAUSE_CAP)
                              cap = FLOW_CTRL_RX;
                      else if (rmtadv & ADVERTISE_PAUSE_CAP)
                              cap = FLOW_CTRL_TX;
              }
      
              return cap;
      }
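       
       /*
        * Illustrative use in a driver (the register read is hypothetical; the
        * helpers and FLOW_CTRL_* flags are the ones declared above):
        *
        *      u16 lcladv = mii_advertise_flowctrl(FLOW_CTRL_RX | FLOW_CTRL_TX);
        *      u16 rmtadv = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
        *      u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        *
        *      if (cap & FLOW_CTRL_TX)
        *              ... enable sending of pause frames ...
        *      if (cap & FLOW_CTRL_RX)
        *              ... honour received pause frames ...
        */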
      
      #endif /* __LINUX_MII_H__ */
      // SPDX-License-Identifier: GPL-2.0-only
      /*
       *      crc16.c
       */
      
      #include <linux/types.h>
      #include <linux/module.h>
      #include <linux/crc16.h>
      
      /** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
      u16 const crc16_table[256] = {
              0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
              0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
              0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
              0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
              0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
              0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
              0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
              0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
              0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
              0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
              0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
              0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
              0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
              0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
              0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
              0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
              0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
              0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
              0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
              0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
              0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
              0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
              0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
              0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
              0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
              0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
              0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
              0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
              0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
              0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
              0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
              0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
      };
      EXPORT_SYMBOL(crc16_table);
      
      /**
       * crc16 - compute the CRC-16 for the data buffer
       * @crc:        previous CRC value
       * @buffer:        data pointer
       * @len:        number of bytes in the buffer
       *
       * Returns the updated CRC value.
       */
      u16 crc16(u16 crc, u8 const *buffer, size_t len)
      {
               while (len--)
                       crc = crc16_byte(crc, *buffer++);
               return crc;
      }
      EXPORT_SYMBOL(crc16);
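       
       /*
        * Illustrative usage (the buffer names are placeholders): start from a
        * caller-chosen seed and feed the result back in to checksum data that
        * arrives in several fragments:
        *
        *      u16 csum = crc16(0, frag1, frag1_len);
        *      csum = crc16(csum, frag2, frag2_len);
        */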
      
      MODULE_DESCRIPTION("CRC16 calculations");
      MODULE_LICENSE("GPL");
      
      #include <linux/notifier.h>
      #include <linux/socket.h>
      #include <linux/kernel.h>
      #include <linux/export.h>
      #include <net/net_namespace.h>
      #include <net/fib_notifier.h>
      #include <net/netns/ipv6.h>
      #include <net/ip6_fib.h>
      
      int call_fib6_notifier(struct notifier_block *nb, struct net *net,
                             enum fib_event_type event_type,
                             struct fib_notifier_info *info)
      {
              info->family = AF_INET6;
              return call_fib_notifier(nb, net, event_type, info);
      }
      
      int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
                              struct fib_notifier_info *info)
      {
               info->family = AF_INET6;
              return call_fib_notifiers(net, event_type, info);
      }
      
      static unsigned int fib6_seq_read(struct net *net)
      {
              return fib6_tables_seq_read(net) + fib6_rules_seq_read(net);
      }
      
      static int fib6_dump(struct net *net, struct notifier_block *nb)
      {
              int err;
      
              err = fib6_rules_dump(net, nb);
              if (err)
                      return err;
      
              return fib6_tables_dump(net, nb);
      }
      
      static const struct fib_notifier_ops fib6_notifier_ops_template = {
              .family                = AF_INET6,
              .fib_seq_read        = fib6_seq_read,
              .fib_dump        = fib6_dump,
              .owner                = THIS_MODULE,
      };
      
      int __net_init fib6_notifier_init(struct net *net)
      {
              struct fib_notifier_ops *ops;
      
              ops = fib_notifier_ops_register(&fib6_notifier_ops_template, net);
              if (IS_ERR(ops))
                      return PTR_ERR(ops);
              net->ipv6.notifier_ops = ops;
      
              return 0;
      }
      
      void __net_exit fib6_notifier_exit(struct net *net)
      {
              fib_notifier_ops_unregister(net->ipv6.notifier_ops);
      }
      /* SPDX-License-Identifier: GPL-2.0 */
      /*
       *                INETPEER - A storage for permanent information about peers
       *
       *  Authors:        Andrey V. Savochkin <saw@msu.ru>
       */
      
      #ifndef _NET_INETPEER_H
      #define _NET_INETPEER_H
      
      #include <linux/types.h>
      #include <linux/init.h>
      #include <linux/jiffies.h>
      #include <linux/spinlock.h>
      #include <linux/rtnetlink.h>
      #include <net/ipv6.h>
      #include <linux/atomic.h>
      
      /* IPv4 address key for cache lookups */
      struct ipv4_addr_key {
              __be32        addr;
              int        vif;
      };
      
      #define INETPEER_MAXKEYSZ   (sizeof(struct in6_addr) / sizeof(u32))
      
      struct inetpeer_addr {
              union {
                      struct ipv4_addr_key        a4;
                      struct in6_addr                a6;
                      u32                        key[INETPEER_MAXKEYSZ];
              };
              __u16                                family;
      };
      
      struct inet_peer {
              struct rb_node                rb_node;
              struct inetpeer_addr        daddr;
      
              u32                        metrics[RTAX_MAX];
              u32                        rate_tokens;        /* rate limiting for ICMP */
              u32                        n_redirects;
              unsigned long                rate_last;
              /*
               * Once inet_peer is queued for deletion (refcnt == 0), following field
               * is not available: rid
               * We can share memory with rcu_head to help keep inet_peer small.
               */
              union {
                      struct {
                              atomic_t                        rid;                /* Frag reception counter */
                      };
                      struct rcu_head         rcu;
              };
      
              /* following fields might be frequently dirtied */
              __u32                        dtime;        /* the time of last use of not referenced entries */
              refcount_t                refcnt;
      };
      
      struct inet_peer_base {
              struct rb_root                rb_root;
              seqlock_t                lock;
              int                        total;
      };
      
      void inet_peer_base_init(struct inet_peer_base *);
      
      void inet_initpeers(void) __init;
      
      #define INETPEER_METRICS_NEW        (~(u32) 0)
      
      static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
      {
              iaddr->a4.addr = ip;
              iaddr->a4.vif = 0;
              iaddr->family = AF_INET;
      }
      
      static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
      {
              return iaddr->a4.addr;
      }
      
      static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
                                              struct in6_addr *in6)
      {
              iaddr->a6 = *in6;
              iaddr->family = AF_INET6;
      }
      
      static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
      {
              return &iaddr->a6;
      }
      
      /* can be called with or without local BH being disabled */
      struct inet_peer *inet_getpeer(struct inet_peer_base *base,
                                     const struct inetpeer_addr *daddr,
                                     int create);
      
      static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
                                                      __be32 v4daddr,
                                                      int vif, int create)
      {
              struct inetpeer_addr daddr;
      
              daddr.a4.addr = v4daddr;
              daddr.a4.vif = vif;
              daddr.family = AF_INET;
              return inet_getpeer(base, &daddr, create);
      }
      
      static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
                                                      const struct in6_addr *v6daddr,
                                                      int create)
      {
              struct inetpeer_addr daddr;
      
              daddr.a6 = *v6daddr;
              daddr.family = AF_INET6;
              return inet_getpeer(base, &daddr, create);
      }
      
      static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
                                          const struct inetpeer_addr *b)
      {
              int i, n;
      
               if (a->family == AF_INET)
                      n = sizeof(a->a4) / sizeof(u32);
              else
                      n = sizeof(a->a6) / sizeof(u32);
      
               for (i = 0; i < n; i++) {
                       if (a->key[i] == b->key[i])
                               continue;
                       if (a->key[i] < b->key[i])
                              return -1;
                      return 1;
              }
      
              return 0;
      }
      
      /* can be called from BH context or outside */
      void inet_putpeer(struct inet_peer *p);
      bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
      
      void inetpeer_invalidate_tree(struct inet_peer_base *);
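       
       /*
        * Illustrative lookup (the peer base and rate-limit timeout are
        * placeholders; a typical caller is ICMP rate limiting):
        *
        *      struct inet_peer *peer;
        *      bool allow = true;
        *
        *      peer = inet_getpeer_v4(base, iph->saddr, 0, 1);
        *      if (peer) {
        *              allow = inet_peer_xrlim_allow(peer, timeout);
        *              inet_putpeer(peer);
        *      }
        */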
      
      #endif /* _NET_INETPEER_H */
      /* SPDX-License-Identifier: GPL-2.0-or-later */
      /*
       * INET                An implementation of the TCP/IP protocol suite for the LINUX
       *                operating system.  INET is implemented using the  BSD Socket
       *                interface as the means of communication with the user level.
       *
       *                Global definitions for the ARP (RFC 826) protocol.
       *
       * Version:        @(#)if_arp.h        1.0.1        04/16/93
       *
       * Authors:        Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988
       *                Portions taken from the KA9Q/NOS (v2.00m PA0GRI) source.
       *                Ross Biro
       *                Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
       *                Florian La Roche,
       *                Jonathan Layes <layes@loran.com>
       *                Arnaldo Carvalho de Melo <acme@conectiva.com.br> ARPHRD_HWX25
       */
      #ifndef _LINUX_IF_ARP_H
      #define _LINUX_IF_ARP_H
      
      #include <linux/skbuff.h>
      #include <uapi/linux/if_arp.h>
      
      static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
      {
               return (struct arphdr *)skb_network_header(skb);
      }
      
      static inline unsigned int arp_hdr_len(const struct net_device *dev)
      {
              switch (dev->type) {
      #if IS_ENABLED(CONFIG_FIREWIRE_NET)
              case ARPHRD_IEEE1394:
                      /* ARP header, device address and 2 IP addresses */
                      return sizeof(struct arphdr) + dev->addr_len + sizeof(u32) * 2;
      #endif
              default:
                      /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
                       return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2;
              }
      }
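       
       /*
        * For example, on Ethernet (illustrative arithmetic): sizeof(struct
        * arphdr) is 8 bytes and dev->addr_len is ETH_ALEN (6), so the default
        * case yields 8 + (6 + 4) * 2 = 28 bytes, the classic size of an ARP
        * packet carrying IPv4 addresses.
        */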
      
      static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
      {
              switch (dev->type) {
              case ARPHRD_TUNNEL:
              case ARPHRD_TUNNEL6:
              case ARPHRD_SIT:
              case ARPHRD_IPGRE:
              case ARPHRD_VOID:
              case ARPHRD_NONE:
              case ARPHRD_RAWIP:
                      return false;
              default:
                      return true;
              }
      }
      
      #endif        /* _LINUX_IF_ARP_H */
      /*
       * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
       *
       * This software is available to you under a choice of one of two
       * licenses.  You may choose to be licensed under the terms of the GNU
       * General Public License (GPL) Version 2, available from the file
       * COPYING in the main directory of this source tree, or the
       * OpenIB.org BSD license below:
       *
       *     Redistribution and use in source and binary forms, with or
       *     without modification, are permitted provided that the following
       *     conditions are met:
       *
       *      - Redistributions of source code must retain the above
       *        copyright notice, this list of conditions and the following
       *        disclaimer.
       *
       *      - Redistributions in binary form must reproduce the above
       *        copyright notice, this list of conditions and the following
       *        disclaimer in the documentation and/or other materials
       *        provided with the distribution.
       *
       * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
       * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
       * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
       * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
       * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
       * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
       * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
       * SOFTWARE.
       *
       */
      #include <linux/kernel.h>
      #include <linux/moduleparam.h>
      #include <linux/gfp.h>
      #include <net/sock.h>
      #include <linux/in.h>
      #include <linux/list.h>
      #include <linux/ratelimit.h>
      #include <linux/export.h>
      #include <linux/sizes.h>
      
      #include "rds.h"
      
      /* When transmitting messages in rds_send_xmit, we need to emerge from
        * time to time and briefly release the CPU. Otherwise the softlockup watchdog
       * will kick our shin.
       * Also, it seems fairer to not let one busy connection stall all the
       * others.
       *
       * send_batch_count is the number of times we'll loop in send_xmit. Setting
       * it to 0 will restore the old behavior (where we looped until we had
       * drained the queue).
       */
      static int send_batch_count = SZ_1K;
      module_param(send_batch_count, int, 0444);
      MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
      
      static void rds_send_remove_from_sock(struct list_head *messages, int status);
      
      /*
       * Reset the send state.  Callers must ensure that this doesn't race with
       * rds_send_xmit().
       */
      void rds_send_path_reset(struct rds_conn_path *cp)
      {
              struct rds_message *rm, *tmp;
              unsigned long flags;
      
              if (cp->cp_xmit_rm) {
                      rm = cp->cp_xmit_rm;
                      cp->cp_xmit_rm = NULL;
                      /* Tell the user the RDMA op is no longer mapped by the
                       * transport. This isn't entirely true (it's flushed out
                       * independently) but as the connection is down, there's
                       * no ongoing RDMA to/from that memory */
                      rds_message_unmapped(rm);
                      rds_message_put(rm);
              }
      
              cp->cp_xmit_sg = 0;
              cp->cp_xmit_hdr_off = 0;
              cp->cp_xmit_data_off = 0;
              cp->cp_xmit_atomic_sent = 0;
              cp->cp_xmit_rdma_sent = 0;
              cp->cp_xmit_data_sent = 0;
      
              cp->cp_conn->c_map_queued = 0;
      
              cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
              cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;
      
              /* Mark messages as retransmissions, and move them to the send q */
              spin_lock_irqsave(&cp->cp_lock, flags);
              list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
                      set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
                      set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
              }
              list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
              spin_unlock_irqrestore(&cp->cp_lock, flags);
      }
      EXPORT_SYMBOL_GPL(rds_send_path_reset);
      
      static int acquire_in_xmit(struct rds_conn_path *cp)
      {
              return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
      }
      
      static void release_in_xmit(struct rds_conn_path *cp)
      {
              clear_bit(RDS_IN_XMIT, &cp->cp_flags);
              smp_mb__after_atomic();
              /*
               * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
               * hot path and finding waiters is very rare.  We don't want to walk
               * the system-wide hashed waitqueue buckets in the fast path only to
               * almost never find waiters.
               */
              if (waitqueue_active(&cp->cp_waitq))
                      wake_up_all(&cp->cp_waitq);
      }
      
      /*
       * We're making the conscious trade-off here to only send one message
       * down the connection at a time.
       *   Pro:
        *      - tx queueing is a simple fifo list
        *      - reassembly is optional and easily done by transports per conn
        *      - no per flow rx lookup at all, straight to the socket
        *      - less per-frag memory and wire overhead
       *   Con:
       *      - queued acks can be delayed behind large messages
       *   Depends:
       *      - small message latency is higher behind queued large messages
       *      - large message latency isn't starved by intervening small sends
       */
      int rds_send_xmit(struct rds_conn_path *cp)
      {
              struct rds_connection *conn = cp->cp_conn;
              struct rds_message *rm;
              unsigned long flags;
              unsigned int tmp;
              struct scatterlist *sg;
              int ret = 0;
              LIST_HEAD(to_be_dropped);
              int batch_count;
              unsigned long send_gen = 0;
              int same_rm = 0;
      
      restart:
              batch_count = 0;
      
              /*
               * sendmsg calls here after having queued its message on the send
               * queue.  We only have one task feeding the connection at a time.  If
               * another thread is already feeding the queue then we back off.  This
               * avoids blocking the caller and trading per-connection data between
               * caches per message.
               */
              if (!acquire_in_xmit(cp)) {
                      rds_stats_inc(s_send_lock_contention);
                      ret = -ENOMEM;
                      goto out;
              }
      
              if (rds_destroy_pending(cp->cp_conn)) {
                      release_in_xmit(cp);
               ret = -ENETUNREACH; /* don't requeue send work */
                      goto out;
              }
      
              /*
               * we record the send generation after doing the xmit acquire.
               * if someone else manages to jump in and do some work, we'll use
               * this to avoid a goto restart farther down.
               *
               * The acquire_in_xmit() check above ensures that only one
               * caller can increment c_send_gen at any time.
               */
              send_gen = READ_ONCE(cp->cp_send_gen) + 1;
              WRITE_ONCE(cp->cp_send_gen, send_gen);
      
              /*
               * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
               * we do the opposite to avoid races.
               */
              if (!rds_conn_path_up(cp)) {
                      release_in_xmit(cp);
                      ret = 0;
                      goto out;
              }
      
              if (conn->c_trans->xmit_path_prepare)
                      conn->c_trans->xmit_path_prepare(cp);
      
              /*
               * spin trying to push headers and data down the connection until
               * the connection doesn't make forward progress.
               */
              while (1) {
      
                      rm = cp->cp_xmit_rm;
      
                      if (!rm) {
                              same_rm = 0;
                      } else {
                              same_rm++;
                              if (same_rm >= 4096) {
                                      rds_stats_inc(s_send_stuck_rm);
                                      ret = -EAGAIN;
                                      break;
                              }
                      }
      
                      /*
                       * If between sending messages, we can send a pending congestion
                       * map update.
                       */
                      if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
                              rm = rds_cong_update_alloc(conn);
                              if (IS_ERR(rm)) {
                                      ret = PTR_ERR(rm);
                                      break;
                              }
                              rm->data.op_active = 1;
                              rm->m_inc.i_conn_path = cp;
                              rm->m_inc.i_conn = cp->cp_conn;
      
                              cp->cp_xmit_rm = rm;
                      }
      
                      /*
                       * If not already working on one, grab the next message.
                       *
                       * cp_xmit_rm holds a ref while we're sending this message down
                        * the connection.  We can use this ref while holding the
                        * send_sem; rds_send_reset() is serialized with it.
                       */
                      if (!rm) {
                              unsigned int len;
      
                              batch_count++;
      
                              /* we want to process as big a batch as we can, but
                               * we also want to avoid softlockups.  If we've been
                               * through a lot of messages, lets back off and see
                               * if anyone else jumps in
                               */
                              if (batch_count >= send_batch_count)
                                      goto over_batch;
      
                              spin_lock_irqsave(&cp->cp_lock, flags);
      
                              if (!list_empty(&cp->cp_send_queue)) {
                                      rm = list_entry(cp->cp_send_queue.next,
                                                      struct rds_message,
                                                      m_conn_item);
                                      rds_message_addref(rm);
      
                                      /*
                                       * Move the message from the send queue to the retransmit
                                       * list right away.
                                       */
                                      list_move_tail(&rm->m_conn_item,
                                                     &cp->cp_retrans);
                              }
      
                              spin_unlock_irqrestore(&cp->cp_lock, flags);
      
                              if (!rm)
                                      break;
      
                              /* Unfortunately, the way Infiniband deals with
                               * RDMA to a bad MR key is by moving the entire
                                * queue pair to error state. We could possibly
                               * recover from that, but right now we drop the
                               * connection.
                               * Therefore, we never retransmit messages with RDMA ops.
                               */
                              if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
                                  (rm->rdma.op_active &&
                                  test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
                                      spin_lock_irqsave(&cp->cp_lock, flags);
                                      if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
                                              list_move(&rm->m_conn_item, &to_be_dropped);
                                      spin_unlock_irqrestore(&cp->cp_lock, flags);
                                      continue;
                              }
      
                              /* Require an ACK every once in a while */
                              len = ntohl(rm->m_inc.i_hdr.h_len);
                              if (cp->cp_unacked_packets == 0 ||
                                  cp->cp_unacked_bytes < len) {
                                      set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
      
                                      cp->cp_unacked_packets =
                                              rds_sysctl_max_unacked_packets;
                                      cp->cp_unacked_bytes =
                                              rds_sysctl_max_unacked_bytes;
                                      rds_stats_inc(s_send_ack_required);
                              } else {
                                      cp->cp_unacked_bytes -= len;
                                      cp->cp_unacked_packets--;
                              }
      
                              cp->cp_xmit_rm = rm;
                      }
      
                      /* The transport either sends the whole rdma or none of it */
                      if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
                              rm->m_final_op = &rm->rdma;
                              /* The transport owns the mapped memory for now.
                               * You can't unmap it while it's on the send queue
                               */
                              set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                              ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
                              if (ret) {
                                      clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                      wake_up_interruptible(&rm->m_flush_wait);
                                      break;
                              }
                              cp->cp_xmit_rdma_sent = 1;
      
                      }
      
                      if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
                              rm->m_final_op = &rm->atomic;
                              /* The transport owns the mapped memory for now.
                               * You can't unmap it while it's on the send queue
                               */
                              set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                              ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
                              if (ret) {
                                      clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
                                      wake_up_interruptible(&rm->m_flush_wait);
                                      break;
                              }
                              cp->cp_xmit_atomic_sent = 1;
      
                      }
      
                      /*
                       * A number of cases require an RDS header to be sent
                       * even if there is no data.
                       * We permit 0-byte sends; rds-ping depends on this.
                       * However, if there are exclusively attached silent ops,
                       * we skip the hdr/data send, to enable silent operation.
                       */
                      if (rm->data.op_nents == 0) {
                              int ops_present;
                              int all_ops_are_silent = 1;
      
                              ops_present = (rm->atomic.op_active || rm->rdma.op_active);
                              if (rm->atomic.op_active && !rm->atomic.op_silent)
                                      all_ops_are_silent = 0;
                              if (rm->rdma.op_active && !rm->rdma.op_silent)
                                      all_ops_are_silent = 0;
      
                              if (ops_present && all_ops_are_silent
                                  && !rm->m_rdma_cookie)
                                      rm->data.op_active = 0;
                      }
      
                      if (rm->data.op_active && !cp->cp_xmit_data_sent) {
                              rm->m_final_op = &rm->data;
      
                              ret = conn->c_trans->xmit(conn, rm,
                                                        cp->cp_xmit_hdr_off,
                                                        cp->cp_xmit_sg,
                                                        cp->cp_xmit_data_off);
                              if (ret <= 0)
                                      break;
      
                              if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
                                      tmp = min_t(int, ret,
                                                  sizeof(struct rds_header) -
                                                  cp->cp_xmit_hdr_off);
                                      cp->cp_xmit_hdr_off += tmp;
                                      ret -= tmp;
                              }
      
                              sg = &rm->data.op_sg[cp->cp_xmit_sg];
                              while (ret) {
                                      tmp = min_t(int, ret, sg->length -
                                                            cp->cp_xmit_data_off);
                                      cp->cp_xmit_data_off += tmp;
                                      ret -= tmp;
                                      if (cp->cp_xmit_data_off == sg->length) {
                                              cp->cp_xmit_data_off = 0;
                                              sg++;
                                              cp->cp_xmit_sg++;
                                              BUG_ON(ret != 0 && cp->cp_xmit_sg ==
                                                     rm->data.op_nents);
                                      }
                              }
      
                              if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
                                  (cp->cp_xmit_sg == rm->data.op_nents))
                                      cp->cp_xmit_data_sent = 1;
                      }
      
                      /*
                       * A rm will only take multiple times through this loop
                       * if there is a data op. Thus, if the data is sent (or there was
                       * none), then we're done with the rm.
                       */
                      if (!rm->data.op_active || cp->cp_xmit_data_sent) {
                              cp->cp_xmit_rm = NULL;
                              cp->cp_xmit_sg = 0;
                              cp->cp_xmit_hdr_off = 0;
                              cp->cp_xmit_data_off = 0;
                              cp->cp_xmit_rdma_sent = 0;
                              cp->cp_xmit_atomic_sent = 0;
                              cp->cp_xmit_data_sent = 0;
      
                              rds_message_put(rm);
                      }
              }
      
      over_batch:
              if (conn->c_trans->xmit_path_complete)
                      conn->c_trans->xmit_path_complete(cp);
              release_in_xmit(cp);
      
              /* Nuke any messages we decided not to retransmit. */
              if (!list_empty(&to_be_dropped)) {
                      /* irqs on here, so we can put(), unlike above */
                      list_for_each_entry(rm, &to_be_dropped, m_conn_item)
                              rds_message_put(rm);
                      rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
              }
      
              /*
               * Other senders can queue a message after we last test the send queue
               * but before we clear RDS_IN_XMIT.  In that case they'd back off and
               * not try and send their newly queued message.  We need to check the
               * send queue after having cleared RDS_IN_XMIT so that their message
               * doesn't get stuck on the send queue.
               *
                * If the transport cannot continue (i.e. ret != 0), then it must
               * call us when more room is available, such as from the tx
               * completion handler.
               *
               * We have an extra generation check here so that if someone manages
               * to jump in after our release_in_xmit, we'll see that they have done
               * some work and we will skip our goto
               */
              if (ret == 0) {
                      bool raced;
      
                      smp_mb();
                      raced = send_gen != READ_ONCE(cp->cp_send_gen);
      
                      if ((test_bit(0, &conn->c_map_queued) ||
                          !list_empty(&cp->cp_send_queue)) && !raced) {
                              if (batch_count < send_batch_count)
                                      goto restart;
                              rcu_read_lock();
                              if (rds_destroy_pending(cp->cp_conn))
                                      ret = -ENETUNREACH;
                              else
                                      queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
                              rcu_read_unlock();
                      } else if (raced) {
                              rds_stats_inc(s_send_lock_queue_raced);
                      }
              }
      out:
              return ret;
      }
      EXPORT_SYMBOL_GPL(rds_send_xmit);
      
      static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
      {
              u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
      
              assert_spin_locked(&rs->rs_lock);
      
              BUG_ON(rs->rs_snd_bytes < len);
              rs->rs_snd_bytes -= len;
      
              if (rs->rs_snd_bytes == 0)
                      rds_stats_inc(s_send_queue_empty);
      }
      
      static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
                                          is_acked_func is_acked)
      {
              if (is_acked)
                      return is_acked(rm, ack);
              return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
      }
      
      /*
       * This is pretty similar to what happens below in the ACK
       * handling code - except that we call here as soon as we get
       * the IB send completion on the RDMA op and the accompanying
       * message.
       */
      void rds_rdma_send_complete(struct rds_message *rm, int status)
      {
              struct rds_sock *rs = NULL;
              struct rm_rdma_op *ro;
              struct rds_notifier *notifier;
              unsigned long flags;
      
              spin_lock_irqsave(&rm->m_rs_lock, flags);
      
              ro = &rm->rdma;
              if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
                  ro->op_active && ro->op_notify && ro->op_notifier) {
                      notifier = ro->op_notifier;
                      rs = rm->m_rs;
                      sock_hold(rds_rs_to_sk(rs));
      
                      notifier->n_status = status;
                      spin_lock(&rs->rs_lock);
                      list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                      spin_unlock(&rs->rs_lock);
      
                      ro->op_notifier = NULL;
              }
      
              spin_unlock_irqrestore(&rm->m_rs_lock, flags);
      
              if (rs) {
                      rds_wake_sk_sleep(rs);
                      sock_put(rds_rs_to_sk(rs));
              }
      }
      EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
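
/*
 * Illustrative sketch only: a transport typically calls
 * rds_rdma_send_complete() from its send completion handler once the RDMA
 * op's work request has finished, roughly:
 *
 *	rds_rdma_send_complete(rm, wc_ok ? RDS_RDMA_SUCCESS : err_status);
 *
 * where wc_ok and err_status are hypothetical names for the completion
 * result and the transport-mapped RDS_RDMA_* error status.
 */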
      
/*
 * Just like above, except it looks at the atomic op.
 */
      void rds_atomic_send_complete(struct rds_message *rm, int status)
      {
              struct rds_sock *rs = NULL;
              struct rm_atomic_op *ao;
              struct rds_notifier *notifier;
              unsigned long flags;
      
              spin_lock_irqsave(&rm->m_rs_lock, flags);
      
              ao = &rm->atomic;
              if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
                  && ao->op_active && ao->op_notify && ao->op_notifier) {
                      notifier = ao->op_notifier;
                      rs = rm->m_rs;
                      sock_hold(rds_rs_to_sk(rs));
      
                      notifier->n_status = status;
                      spin_lock(&rs->rs_lock);
                      list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                      spin_unlock(&rs->rs_lock);
      
                      ao->op_notifier = NULL;
              }
      
              spin_unlock_irqrestore(&rm->m_rs_lock, flags);
      
              if (rs) {
                      rds_wake_sk_sleep(rs);
                      sock_put(rds_rs_to_sk(rs));
              }
      }
      EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
      
      /*
       * This is the same as rds_rdma_send_complete except we
       * don't do any locking - we have all the ingredients (message,
       * socket, socket lock) and can just move the notifier.
       */
      static inline void
      __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
      {
              struct rm_rdma_op *ro;
              struct rm_atomic_op *ao;
      
              ro = &rm->rdma;
              if (ro->op_active && ro->op_notify && ro->op_notifier) {
                      ro->op_notifier->n_status = status;
                      list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
                      ro->op_notifier = NULL;
              }
      
              ao = &rm->atomic;
              if (ao->op_active && ao->op_notify && ao->op_notifier) {
                      ao->op_notifier->n_status = status;
                      list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
                      ao->op_notifier = NULL;
              }
      
              /* No need to wake the app - caller does this */
      }
      
      /*
       * This removes messages from the socket's list if they're on it.  The list
       * argument must be private to the caller, we must be able to modify it
       * without locks.  The messages must have a reference held for their
       * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list.
       */
      static void rds_send_remove_from_sock(struct list_head *messages, int status)
      {
              unsigned long flags;
              struct rds_sock *rs = NULL;
              struct rds_message *rm;
      
              while (!list_empty(messages)) {
                      int was_on_sock = 0;
      
                      rm = list_entry(messages->next, struct rds_message,
                                      m_conn_item);
                      list_del_init(&rm->m_conn_item);
      
                      /*
                       * If we see this flag cleared then we're *sure* that someone
                       * else beat us to removing it from the sock.  If we race
                       * with their flag update we'll get the lock and then really
                       * see that the flag has been cleared.
                       *
                       * The message spinlock makes sure nobody clears rm->m_rs
                       * while we're messing with it. It does not prevent the
                       * message from being removed from the socket, though.
                       */
                      spin_lock_irqsave(&rm->m_rs_lock, flags);
                      if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
                              goto unlock_and_drop;
      
                      if (rs != rm->m_rs) {
                              if (rs) {
                                      rds_wake_sk_sleep(rs);
                                      sock_put(rds_rs_to_sk(rs));
                              }
                              rs = rm->m_rs;
                              if (rs)
                                      sock_hold(rds_rs_to_sk(rs));
                      }
                      if (!rs)
                              goto unlock_and_drop;
                      spin_lock(&rs->rs_lock);
      
                      if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
                              struct rm_rdma_op *ro = &rm->rdma;
                              struct rds_notifier *notifier;
      
                              list_del_init(&rm->m_sock_item);
                              rds_send_sndbuf_remove(rs, rm);
      
                              if (ro->op_active && ro->op_notifier &&
                                     (ro->op_notify || (ro->op_recverr && status))) {
                                      notifier = ro->op_notifier;
                                      list_add_tail(&notifier->n_list,
                                                      &rs->rs_notify_queue);
                                      if (!notifier->n_status)
                                              notifier->n_status = status;
                                      rm->rdma.op_notifier = NULL;
                              }
                              was_on_sock = 1;
                      }
                      spin_unlock(&rs->rs_lock);
      
      unlock_and_drop:
                      spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                      rds_message_put(rm);
                      if (was_on_sock)
                              rds_message_put(rm);
              }
      
              if (rs) {
                      rds_wake_sk_sleep(rs);
                      sock_put(rds_rs_to_sk(rs));
              }
      }
      
      /*
       * Transports call here when they've determined that the receiver queued
       * messages up to, and including, the given sequence number.  Messages are
       * moved to the retrans queue when rds_send_xmit picks them off the send
       * queue. This means that in the TCP case, the message may not have been
       * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
       * checks the RDS_MSG_HAS_ACK_SEQ bit.
       */
      void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
                                    is_acked_func is_acked)
      {
              struct rds_message *rm, *tmp;
              unsigned long flags;
              LIST_HEAD(list);
      
              spin_lock_irqsave(&cp->cp_lock, flags);
      
              list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
                      if (!rds_send_is_acked(rm, ack, is_acked))
                              break;
      
                      list_move(&rm->m_conn_item, &list);
                      clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
              }
      
              /* order flag updates with spin locks */
              if (!list_empty(&list))
                      smp_mb__after_atomic();
      
              spin_unlock_irqrestore(&cp->cp_lock, flags);
      
              /* now remove the messages from the sock list as needed */
              rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
      }
      EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);
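
/*
 * Illustrative sketch only (assumed transport usage): the is_acked callback
 * is expected to look roughly like the following for a transport that stamps
 * rm->m_ack_seq and sets RDS_MSG_HAS_ACK_SEQ once a message has actually been
 * handed to the wire; example_trans_is_acked and acked_seq are hypothetical
 * names.
 *
 *	static int example_trans_is_acked(struct rds_message *rm, u64 ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return rm->m_ack_seq <= ack;
 *	}
 *
 *	rds_send_path_drop_acked(cp, acked_seq, example_trans_is_acked);
 */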
      
      void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
                               is_acked_func is_acked)
      {
              WARN_ON(conn->c_trans->t_mp_capable);
              rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
      }
      EXPORT_SYMBOL_GPL(rds_send_drop_acked);
      
      void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest)
      {
              struct rds_message *rm, *tmp;
              struct rds_connection *conn;
              struct rds_conn_path *cp;
              unsigned long flags;
	LIST_HEAD(list);
      
              /* get all the messages we're dropping under the rs lock */
              spin_lock_irqsave(&rs->rs_lock, flags);
      
              list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
                      if (dest &&
                          (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) ||
                           dest->sin6_port != rm->m_inc.i_hdr.h_dport))
                              continue;
      
                      list_move(&rm->m_sock_item, &list);
                      rds_send_sndbuf_remove(rs, rm);
                      clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
              }
      
              /* order flag updates with the rs lock */
              smp_mb__after_atomic();
      
	spin_unlock_irqrestore(&rs->rs_lock, flags);
      
              if (list_empty(&list))
		return;
      
              /* Remove the messages from the conn */
              list_for_each_entry(rm, &list, m_sock_item) {
      
                      conn = rm->m_inc.i_conn;
                      if (conn->c_trans->t_mp_capable)
                              cp = rm->m_inc.i_conn_path;
                      else
                              cp = &conn->c_path[0];
      
                      spin_lock_irqsave(&cp->cp_lock, flags);
                      /*
                       * Maybe someone else beat us to removing rm from the conn.
                       * If we race with their flag update we'll get the lock and
                       * then really see that the flag has been cleared.
                       */
                      if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
                              spin_unlock_irqrestore(&cp->cp_lock, flags);
                              continue;
                      }
                      list_del_init(&rm->m_conn_item);
                      spin_unlock_irqrestore(&cp->cp_lock, flags);
      
                      /*
                       * Couldn't grab m_rs_lock in top loop (lock ordering),
                       * but we can now.
                       */
                      spin_lock_irqsave(&rm->m_rs_lock, flags);
      
                      spin_lock(&rs->rs_lock);
                      __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                      spin_unlock(&rs->rs_lock);
      
                      spin_unlock_irqrestore(&rm->m_rs_lock, flags);
      
                      rds_message_put(rm);
              }
      
              rds_wake_sk_sleep(rs);
      
              while (!list_empty(&list)) {
                      rm = list_entry(list.next, struct rds_message, m_sock_item);
                      list_del_init(&rm->m_sock_item);
                      rds_message_wait(rm);
      
		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here.
		 * Taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
                      spin_lock_irqsave(&rm->m_rs_lock, flags);
      
                      spin_lock(&rs->rs_lock);
                      __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                      spin_unlock(&rs->rs_lock);
      
                      spin_unlock_irqrestore(&rm->m_rs_lock, flags);
      
                      rds_message_put(rm);
              }
      }
      
      /*
 * we only want this to fire once so we use the caller's 'queued'.  It's
       * possible that another thread can race with us and remove the
       * message from the flow with RDS_CANCEL_SENT_TO.
       */
      static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
                                   struct rds_conn_path *cp,
                                   struct rds_message *rm, __be16 sport,
                                   __be16 dport, int *queued)
      {
              unsigned long flags;
              u32 len;
      
              if (*queued)
                      goto out;
      
              len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
      
              /* this is the only place which holds both the socket's rs_lock
               * and the connection's c_lock */
              spin_lock_irqsave(&rs->rs_lock, flags);
      
              /*
               * If there is a little space in sndbuf, we don't queue anything,
               * and userspace gets -EAGAIN. But poll() indicates there's send
               * room. This can lead to bad behavior (spinning) if snd_bytes isn't
               * freed up by incoming acks. So we check the *old* value of
               * rs_snd_bytes here to allow the last msg to exceed the buffer,
               * and poll() now knows no more data can be sent.
               */
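	/*
	 * Worked example (illustrative numbers only): with a 64KB sndbuf and
	 * rs_snd_bytes already at 60KB, a 16KB message is still queued because
	 * the *old* value (60KB) is under the limit; rs_snd_bytes then becomes
	 * 76KB, so poll() stops reporting send space until acks free it up.
	 */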
              if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
                      rs->rs_snd_bytes += len;
      
                      /* let recv side know we are close to send space exhaustion.
                       * This is probably not the optimal way to do it, as this
                       * means we set the flag on *all* messages as soon as our
                       * throughput hits a certain threshold.
                       */
                      if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
                              set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
      
                      list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
                      set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
                      rds_message_addref(rm);
                      sock_hold(rds_rs_to_sk(rs));
                      rm->m_rs = rs;
      
		/* The code ordering is a little weird, but we're
		 * trying to minimize the time we hold c_lock
		 */
                      rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
                      rm->m_inc.i_conn = conn;
                      rm->m_inc.i_conn_path = cp;
                      rds_message_addref(rm);
      
                      spin_lock(&cp->cp_lock);
                      rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
                      list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
                      set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
                      spin_unlock(&cp->cp_lock);
      
                      rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
                               rm, len, rs, rs->rs_snd_bytes,
                               (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
      
                      *queued = 1;
              }
      
              spin_unlock_irqrestore(&rs->rs_lock, flags);
      out:
              return *queued;
      }
      
      /*
       * rds_message is getting to be quite complicated, and we'd like to allocate
       * it all in one go. This figures out how big it needs to be up front.
       */
      static int rds_rm_size(struct msghdr *msg, int num_sgs,
                             struct rds_iov_vector_arr *vct)
      {
              struct cmsghdr *cmsg;
              int size = 0;
              int cmsg_groups = 0;
              int retval;
              bool zcopy_cookie = false;
              struct rds_iov_vector *iov, *tmp_iov;
      
              if (num_sgs < 0)
                      return -EINVAL;
      
              for_each_cmsghdr(cmsg, msg) {
                      if (!CMSG_OK(msg, cmsg))
                              return -EINVAL;
      
                      if (cmsg->cmsg_level != SOL_RDS)
                              continue;
      
                      switch (cmsg->cmsg_type) {
                      case RDS_CMSG_RDMA_ARGS:
                              if (vct->indx >= vct->len) {
                                      vct->len += vct->incr;
                                      tmp_iov =
                                              krealloc(vct->vec,
                                                       vct->len *
                                                       sizeof(struct rds_iov_vector),
                                                       GFP_KERNEL);
                                      if (!tmp_iov) {
                                              vct->len -= vct->incr;
                                              return -ENOMEM;
                                      }
                                      vct->vec = tmp_iov;
                              }
                              iov = &vct->vec[vct->indx];
                              memset(iov, 0, sizeof(struct rds_iov_vector));
                              vct->indx++;
                              cmsg_groups |= 1;
                              retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
                              if (retval < 0)
                                      return retval;
                              size += retval;
      
                              break;
      
                      case RDS_CMSG_ZCOPY_COOKIE:
                              zcopy_cookie = true;
                              /* fall through */
      
                      case RDS_CMSG_RDMA_DEST:
                      case RDS_CMSG_RDMA_MAP:
                              cmsg_groups |= 2;
			/* these are valid but do not add any size */
                              break;
      
                      case RDS_CMSG_ATOMIC_CSWP:
                      case RDS_CMSG_ATOMIC_FADD:
                      case RDS_CMSG_MASKED_ATOMIC_CSWP:
                      case RDS_CMSG_MASKED_ATOMIC_FADD:
                              cmsg_groups |= 1;
                              size += sizeof(struct scatterlist);
                              break;
      
                      default:
                              return -EINVAL;
                      }
      
              }
      
              if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie)
                      return -EINVAL;
      
              size += num_sgs * sizeof(struct scatterlist);
      
              /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
              if (cmsg_groups == 3)
                      return -EINVAL;
      
              return size;
      }
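
/*
 * Illustrative sizing example (follows directly from the accounting above):
 * a 3-page data payload with no rdma/atomic cmsgs sizes out to
 * 3 * sizeof(struct scatterlist); each atomic cmsg adds one more scatterlist,
 * and an RDS_CMSG_RDMA_ARGS cmsg adds whatever rds_rdma_extra_size() reports
 * for its iovec vector.
 */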
      
      static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
                                struct cmsghdr *cmsg)
      {
              u32 *cookie;
      
              if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) ||
                  !rm->data.op_mmp_znotifier)
                      return -EINVAL;
              cookie = CMSG_DATA(cmsg);
              rm->data.op_mmp_znotifier->z_cookie = *cookie;
              return 0;
      }
      
      static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                               struct msghdr *msg, int *allocated_mr,
                               struct rds_iov_vector_arr *vct)
      {
              struct cmsghdr *cmsg;
              int ret = 0, ind = 0;
      
              for_each_cmsghdr(cmsg, msg) {
                      if (!CMSG_OK(msg, cmsg))
                              return -EINVAL;
      
                      if (cmsg->cmsg_level != SOL_RDS)
                              continue;
      
                      /* As a side effect, RDMA_DEST and RDMA_MAP will set
                       * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
                       */
                      switch (cmsg->cmsg_type) {
                      case RDS_CMSG_RDMA_ARGS:
                              if (ind >= vct->indx)
                                      return -ENOMEM;
                              ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
                              ind++;
                              break;
      
                      case RDS_CMSG_RDMA_DEST:
                              ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
                              break;
      
                      case RDS_CMSG_RDMA_MAP:
                              ret = rds_cmsg_rdma_map(rs, rm, cmsg);
                              if (!ret)
                                      *allocated_mr = 1;
                              else if (ret == -ENODEV)
                                      /* Accommodate the get_mr() case which can fail
                                       * if connection isn't established yet.
                                       */
                                      ret = -EAGAIN;
                              break;
                      case RDS_CMSG_ATOMIC_CSWP:
                      case RDS_CMSG_ATOMIC_FADD:
                      case RDS_CMSG_MASKED_ATOMIC_CSWP:
                      case RDS_CMSG_MASKED_ATOMIC_FADD:
                              ret = rds_cmsg_atomic(rs, rm, cmsg);
                              break;
      
                      case RDS_CMSG_ZCOPY_COOKIE:
                              ret = rds_cmsg_zcopy(rs, rm, cmsg);
                              break;
      
                      default:
                              return -EINVAL;
                      }
      
                      if (ret)
                              break;
              }
      
              return ret;
      }
      
      static int rds_send_mprds_hash(struct rds_sock *rs,
                                     struct rds_connection *conn, int nonblock)
      {
              int hash;
      
              if (conn->c_npaths == 0)
                      hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
              else
                      hash = RDS_MPATH_HASH(rs, conn->c_npaths);
              if (conn->c_npaths == 0 && hash != 0) {
                      rds_send_ping(conn, 0);
      
                      /* The underlying connection is not up yet.  Need to wait
                       * until it is up to be sure that the non-zero c_path can be
                       * used.  But if we are interrupted, we have to use the zero
                       * c_path in case the connection ends up being non-MP capable.
                       */
                      if (conn->c_npaths == 0) {
			/* Cannot wait for the connection to be made, so just use
                               * the base c_path.
                               */
                              if (nonblock)
                                      return 0;
                              if (wait_event_interruptible(conn->c_hs_waitq,
                                                           conn->c_npaths != 0))
                                      hash = 0;
                      }
                      if (conn->c_npaths == 1)
                              hash = 0;
              }
              return hash;
      }
      
      static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
      {
              struct rds_rdma_args *args;
              struct cmsghdr *cmsg;
      
              for_each_cmsghdr(cmsg, msg) {
                      if (!CMSG_OK(msg, cmsg))
                              return -EINVAL;
      
                      if (cmsg->cmsg_level != SOL_RDS)
                              continue;
      
                      if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
                              if (cmsg->cmsg_len <
                                  CMSG_LEN(sizeof(struct rds_rdma_args)))
                                      return -EINVAL;
                              args = CMSG_DATA(cmsg);
                              *rdma_bytes += args->remote_vec.bytes;
                      }
              }
              return 0;
      }
      
      int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
      {
              struct sock *sk = sock->sk;
              struct rds_sock *rs = rds_sk_to_rs(sk);
              DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
              DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
              __be16 dport;
              struct rds_message *rm = NULL;
              struct rds_connection *conn;
              int ret = 0;
              int queued = 0, allocated_mr = 0;
              int nonblock = msg->msg_flags & MSG_DONTWAIT;
              long timeo = sock_sndtimeo(sk, nonblock);
              struct rds_conn_path *cpath;
              struct in6_addr daddr;
              __u32 scope_id = 0;
              size_t total_payload_len = payload_len, rdma_payload_len = 0;
              bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
                            sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
              int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
              int namelen;
              struct rds_iov_vector_arr vct;
              int ind;
      
              memset(&vct, 0, sizeof(vct));
      
	/* Expect 1 RDMA CMSG per rds_sendmsg.  Can still grow if more are needed. */
              vct.incr = 1;
      
	/* Mirror Linux UDP's handling of BSD error message compatibility */
              /* XXX: Perhaps MSG_MORE someday */
              if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) {
                      ret = -EOPNOTSUPP;
                      goto out;
              }
      
              namelen = msg->msg_namelen;
              if (namelen != 0) {
                      if (namelen < sizeof(*usin)) {
                              ret = -EINVAL;
                              goto out;
                      }
                      switch (usin->sin_family) {
                      case AF_INET:
                              if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
                                  usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
                                  ipv4_is_multicast(usin->sin_addr.s_addr)) {
                                      ret = -EINVAL;
                                      goto out;
                              }
                              ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr);
                              dport = usin->sin_port;
                              break;
      
      #if IS_ENABLED(CONFIG_IPV6)
                      case AF_INET6: {
                              int addr_type;
      
                              if (namelen < sizeof(*sin6)) {
                                      ret = -EINVAL;
                                      goto out;
                              }
                              addr_type = ipv6_addr_type(&sin6->sin6_addr);
                              if (!(addr_type & IPV6_ADDR_UNICAST)) {
                                      __be32 addr4;
      
                                      if (!(addr_type & IPV6_ADDR_MAPPED)) {
                                              ret = -EINVAL;
                                              goto out;
                                      }
      
                                      /* It is a mapped address.  Need to do some
                                       * sanity checks.
                                       */
                                      addr4 = sin6->sin6_addr.s6_addr32[3];
                                      if (addr4 == htonl(INADDR_ANY) ||
                                          addr4 == htonl(INADDR_BROADCAST) ||
                                          ipv4_is_multicast(addr4)) {
                                              ret = -EINVAL;
                                              goto out;
                                      }
                              }
                              if (addr_type & IPV6_ADDR_LINKLOCAL) {
                                      if (sin6->sin6_scope_id == 0) {
                                              ret = -EINVAL;
                                              goto out;
                                      }
                                      scope_id = sin6->sin6_scope_id;
                              }
      
                              daddr = sin6->sin6_addr;
                              dport = sin6->sin6_port;
                              break;
                      }
      #endif
      
                      default:
                              ret = -EINVAL;
                              goto out;
                      }
              } else {
                      /* We only care about consistency with ->connect() */
                      lock_sock(sk);
                      daddr = rs->rs_conn_addr;
                      dport = rs->rs_conn_port;
                      scope_id = rs->rs_bound_scope_id;
                      release_sock(sk);
              }
      
              lock_sock(sk);
              if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) {
                      release_sock(sk);
                      ret = -ENOTCONN;
                      goto out;
              } else if (namelen != 0) {
                      /* Cannot send to an IPv4 address using an IPv6 source
                       * address and cannot send to an IPv6 address using an
                       * IPv4 source address.
                       */
                      if (ipv6_addr_v4mapped(&daddr) ^
                          ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
                              release_sock(sk);
                              ret = -EOPNOTSUPP;
                              goto out;
                      }
                      /* If the socket is already bound to a link local address,
                       * it can only send to peers on the same link.  But allow
		 * communicating between link local and non-link local addresses.
                       */
                      if (scope_id != rs->rs_bound_scope_id) {
                              if (!scope_id) {
                                      scope_id = rs->rs_bound_scope_id;
                              } else if (rs->rs_bound_scope_id) {
                                      release_sock(sk);
                                      ret = -EINVAL;
                                      goto out;
                              }
                      }
              }
              release_sock(sk);
      
              ret = rds_rdma_bytes(msg, &rdma_payload_len);
              if (ret)
                      goto out;
      
              total_payload_len += rdma_payload_len;
              if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
                      ret = -EMSGSIZE;
                      goto out;
              }
      
              if (payload_len > rds_sk_sndbuf(rs)) {
                      ret = -EMSGSIZE;
                      goto out;
              }
      
              if (zcopy) {
                      if (rs->rs_transport->t_type != RDS_TRANS_TCP) {
                              ret = -EOPNOTSUPP;
                              goto out;
                      }
                      num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
              }
              /* size of rm including all sgs */
              ret = rds_rm_size(msg, num_sgs, &vct);
              if (ret < 0)
                      goto out;
      
              rm = rds_message_alloc(ret, GFP_KERNEL);
              if (!rm) {
                      ret = -ENOMEM;
                      goto out;
              }
      
              /* Attach data to the rm */
              if (payload_len) {
                      rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
                      if (!rm->data.op_sg)
                              goto out;
                      ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
                      if (ret)
                              goto out;
              }
              rm->data.op_active = 1;
      
              rm->m_daddr = daddr;
      
              /* rds_conn_create has a spinlock that runs with IRQ off.
               * Caching the conn in the socket helps a lot. */
              if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
                  rs->rs_tos == rs->rs_conn->c_tos) {
                      conn = rs->rs_conn;
              } else {
                      conn = rds_conn_create_outgoing(sock_net(sock->sk),
                                                      &rs->rs_bound_addr, &daddr,
                                                      rs->rs_transport, rs->rs_tos,
                                                      sock->sk->sk_allocation,
                                                      scope_id);
                      if (IS_ERR(conn)) {
                              ret = PTR_ERR(conn);
                              goto out;
                      }
                      rs->rs_conn = conn;
              }
      
              if (conn->c_trans->t_mp_capable)
                      cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
              else
                      cpath = &conn->c_path[0];
      
              rm->m_conn_path = cpath;
      
              /* Parse any control messages the user may have included. */
              ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
              if (ret) {
		/* Trigger connection so that it's ready for the next retry */
                      if (ret ==  -EAGAIN)
                              rds_conn_connect_if_down(conn);
                      goto out;
              }
      
              if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
                      printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
                                     &rm->rdma, conn->c_trans->xmit_rdma);
                      ret = -EOPNOTSUPP;
                      goto out;
              }
      
              if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
                      printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
                                     &rm->atomic, conn->c_trans->xmit_atomic);
                      ret = -EOPNOTSUPP;
                      goto out;
              }
      
              if (rds_destroy_pending(conn)) {
                      ret = -EAGAIN;
                      goto out;
              }
      
              rds_conn_path_connect_if_down(cpath);
      
              ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
              if (ret) {
                      rs->rs_seen_congestion = 1;
                      goto out;
              }
              while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
                                        dport, &queued)) {
                      rds_stats_inc(s_send_queue_full);
      
                      if (nonblock) {
                              ret = -EAGAIN;
                              goto out;
                      }
      
                      timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                                              rds_send_queue_rm(rs, conn, cpath, rm,
                                                                rs->rs_bound_port,
                                                                dport,
                                                                &queued),
                                              timeo);
                      rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
                      if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
                              continue;
      
                      ret = timeo;
                      if (ret == 0)
                              ret = -ETIMEDOUT;
                      goto out;
              }
      
              /*
               * By now we've committed to the send.  We reuse rds_send_worker()
               * to retry sends in the rds thread if the transport asks us to.
               */
              rds_stats_inc(s_send_queued);
      
              ret = rds_send_xmit(cpath);
              if (ret == -ENOMEM || ret == -EAGAIN) {
                      ret = 0;
                      rcu_read_lock();
                      if (rds_destroy_pending(cpath->cp_conn))
                              ret = -ENETUNREACH;
                      else
                              queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
                      rcu_read_unlock();
              }
              if (ret)
                      goto out;
              rds_message_put(rm);
      
              for (ind = 0; ind < vct.indx; ind++)
                      kfree(vct.vec[ind].iov);
              kfree(vct.vec);
      
              return payload_len;
      
      out:
              for (ind = 0; ind < vct.indx; ind++)
                      kfree(vct.vec[ind].iov);
              kfree(vct.vec);
      
	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again. */
              if (allocated_mr)
                      rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
      
              if (rm)
                      rds_message_put(rm);
              return ret;
      }
      
      /*
       * send out a probe. Can be shared by rds_send_ping,
       * rds_send_pong, rds_send_hb.
       * rds_send_hb should use h_flags
       *   RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
       * or
       *   RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
       */
      static int
      rds_send_probe(struct rds_conn_path *cp, __be16 sport,
                     __be16 dport, u8 h_flags)
      {
              struct rds_message *rm;
              unsigned long flags;
              int ret = 0;
      
              rm = rds_message_alloc(0, GFP_ATOMIC);
              if (!rm) {
                      ret = -ENOMEM;
                      goto out;
              }
      
              rm->m_daddr = cp->cp_conn->c_faddr;
              rm->data.op_active = 1;
      
              rds_conn_path_connect_if_down(cp);
      
              ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
              if (ret)
                      goto out;
      
              spin_lock_irqsave(&cp->cp_lock, flags);
              list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
              set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
              rds_message_addref(rm);
              rm->m_inc.i_conn = cp->cp_conn;
              rm->m_inc.i_conn_path = cp;
      
              rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
                                          cp->cp_next_tx_seq);
              rm->m_inc.i_hdr.h_flags |= h_flags;
              cp->cp_next_tx_seq++;
      
              if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
                  cp->cp_conn->c_trans->t_mp_capable) {
                      u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
                      u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);
      
                      rds_message_add_extension(&rm->m_inc.i_hdr,
                                                RDS_EXTHDR_NPATHS, &npaths,
                                                sizeof(npaths));
                      rds_message_add_extension(&rm->m_inc.i_hdr,
                                                RDS_EXTHDR_GEN_NUM,
                                                &my_gen_num,
                                                sizeof(u32));
              }
              spin_unlock_irqrestore(&cp->cp_lock, flags);
      
              rds_stats_inc(s_send_queued);
              rds_stats_inc(s_send_pong);
      
              /* schedule the send work on rds_wq */
              rcu_read_lock();
              if (!rds_destroy_pending(cp->cp_conn))
                      queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
              rcu_read_unlock();
      
              rds_message_put(rm);
              return 0;
      
      out:
              if (rm)
                      rds_message_put(rm);
              return ret;
      }
      
      int
      rds_send_pong(struct rds_conn_path *cp, __be16 dport)
      {
              return rds_send_probe(cp, 0, dport, 0);
      }
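
/*
 * Illustrative usage from a receive path (assumed, not shown in this file):
 * a ping arrives as a message addressed to dport 0, and the pong echoes the
 * sender's source port back on the same path, roughly:
 *
 *	if (!inc->i_hdr.h_dport)
 *		rds_send_pong(cp, inc->i_hdr.h_sport);
 */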
      
      void
      rds_send_ping(struct rds_connection *conn, int cp_index)
      {
              unsigned long flags;
              struct rds_conn_path *cp = &conn->c_path[cp_index];
      
              spin_lock_irqsave(&cp->cp_lock, flags);
              if (conn->c_ping_triggered) {
                      spin_unlock_irqrestore(&cp->cp_lock, flags);
                      return;
              }
              conn->c_ping_triggered = 1;
              spin_unlock_irqrestore(&cp->cp_lock, flags);
              rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
      }
      EXPORT_SYMBOL_GPL(rds_send_ping);
      // SPDX-License-Identifier: GPL-2.0-only
      /*
       * Copyright (C) 2013 Politecnico di Torino, Italy
       *                    TORSEC group -- http://security.polito.it
       *
       * Author: Roberto Sassu <roberto.sassu@polito.it>
       *
       * File: ima_template.c
       *      Helpers to manage template descriptors.
       */
      
      #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      
      #include <linux/rculist.h>
      #include "ima.h"
      #include "ima_template_lib.h"
      
      enum header_fields { HDR_PCR, HDR_DIGEST, HDR_TEMPLATE_NAME,
                           HDR_TEMPLATE_DATA, HDR__LAST };
      
      static struct ima_template_desc builtin_templates[] = {
              {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
              {.name = "ima-ng", .fmt = "d-ng|n-ng"},
              {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
              {.name = "ima-buf", .fmt = "d-ng|n-ng|buf"},
              {.name = "", .fmt = ""},        /* placeholder for a custom format */
      };
      
      static LIST_HEAD(defined_templates);
      static DEFINE_SPINLOCK(template_list);
      
      static const struct ima_template_field supported_fields[] = {
              {.field_id = "d", .field_init = ima_eventdigest_init,
               .field_show = ima_show_template_digest},
              {.field_id = "n", .field_init = ima_eventname_init,
               .field_show = ima_show_template_string},
              {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init,
               .field_show = ima_show_template_digest_ng},
              {.field_id = "n-ng", .field_init = ima_eventname_ng_init,
               .field_show = ima_show_template_string},
              {.field_id = "sig", .field_init = ima_eventsig_init,
               .field_show = ima_show_template_sig},
              {.field_id = "buf", .field_init = ima_eventbuf_init,
               .field_show = ima_show_template_buf},
      };
      
      /*
       * Used when restoring measurements carried over from a kexec. 'd' and 'n' don't
       * need to be accounted for since they shouldn't be defined in the same template
       * description as 'd-ng' and 'n-ng' respectively.
       */
      #define MAX_TEMPLATE_NAME_LEN sizeof("d-ng|n-ng|sig|buf")
      
      static struct ima_template_desc *ima_template;
      
      static int __init ima_template_setup(char *str)
      {
              struct ima_template_desc *template_desc;
              int template_len = strlen(str);
      
              if (ima_template)
                      return 1;
      
              ima_init_template_list();
      
              /*
               * Verify that a template with the supplied name exists.
               * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
               */
              template_desc = lookup_template_desc(str);
              if (!template_desc) {
                      pr_err("template %s not found, using %s\n",
                             str, CONFIG_IMA_DEFAULT_TEMPLATE);
                      return 1;
              }
      
              /*
               * Verify whether the current hash algorithm is supported
               * by the 'ima' template.
               */
              if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
                  ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
                      pr_err("template does not support hash alg\n");
                      return 1;
              }
      
              ima_template = template_desc;
              return 1;
      }
      __setup("ima_template=", ima_template_setup);
      
      static int __init ima_template_fmt_setup(char *str)
      {
              int num_templates = ARRAY_SIZE(builtin_templates);
      
              if (ima_template)
                      return 1;
      
              if (template_desc_init_fields(str, NULL, NULL) < 0) {
                      pr_err("format string '%s' not valid, using template %s\n",
                             str, CONFIG_IMA_DEFAULT_TEMPLATE);
                      return 1;
              }
      
              builtin_templates[num_templates - 1].fmt = str;
              ima_template = builtin_templates + num_templates - 1;
      
              return 1;
      }
      __setup("ima_template_fmt=", ima_template_fmt_setup);
      
      struct ima_template_desc *lookup_template_desc(const char *name)
      {
              struct ima_template_desc *template_desc;
              int found = 0;
      
              rcu_read_lock();
              list_for_each_entry_rcu(template_desc, &defined_templates, list) {
                      if ((strcmp(template_desc->name, name) == 0) ||
                          (strcmp(template_desc->fmt, name) == 0)) {
                              found = 1;
                              break;
                      }
              }
              rcu_read_unlock();
              return found ? template_desc : NULL;
      }
      
      static const struct ima_template_field *
      lookup_template_field(const char *field_id)
      {
              int i;
      
              for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
                      if (strncmp(supported_fields[i].field_id, field_id,
                                  IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
                              return &supported_fields[i];
              return NULL;
      }
      
      static int template_fmt_size(const char *template_fmt)
      {
              char c;
              int template_fmt_len = strlen(template_fmt);
              int i = 0, j = 0;
      
              while (i < template_fmt_len) {
                      c = template_fmt[i];
                      if (c == '|')
                              j++;
                      i++;
              }
      
              return j + 1;
      }
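
/*
 * For example, template_fmt_size("d-ng|n-ng|sig") returns 3: two '|'
 * separators plus one.
 */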
      
      int template_desc_init_fields(const char *template_fmt,
                                    const struct ima_template_field ***fields,
                                    int *num_fields)
      {
              const char *template_fmt_ptr;
              const struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
              int template_num_fields;
              int i, len;
      
              if (num_fields && *num_fields > 0) /* already initialized? */
                      return 0;
      
              template_num_fields = template_fmt_size(template_fmt);
      
              if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
                      pr_err("format string '%s' contains too many fields\n",
                             template_fmt);
                      return -EINVAL;
              }
      
              for (i = 0, template_fmt_ptr = template_fmt; i < template_num_fields;
                   i++, template_fmt_ptr += len + 1) {
                      char tmp_field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN + 1];
      
                      len = strchrnul(template_fmt_ptr, '|') - template_fmt_ptr;
                      if (len == 0 || len > IMA_TEMPLATE_FIELD_ID_MAX_LEN) {
                              pr_err("Invalid field with length %d\n", len);
                              return -EINVAL;
                      }
      
                      memcpy(tmp_field_id, template_fmt_ptr, len);
                      tmp_field_id[len] = '\0';
                      found_fields[i] = lookup_template_field(tmp_field_id);
                      if (!found_fields[i]) {
                              pr_err("field '%s' not found\n", tmp_field_id);
                              return -ENOENT;
                      }
              }
      
              if (fields && num_fields) {
                      *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL);
                      if (*fields == NULL)
                              return -ENOMEM;
      
                      memcpy(*fields, found_fields, i * sizeof(*fields));
                      *num_fields = i;
              }
      
              return 0;
      }
      
      void ima_init_template_list(void)
      {
              int i;
      
              if (!list_empty(&defined_templates))
                      return;
      
              spin_lock(&template_list);
              for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) {
                      list_add_tail_rcu(&builtin_templates[i].list,
                                        &defined_templates);
              }
              spin_unlock(&template_list);
      }
      
      struct ima_template_desc *ima_template_desc_current(void)
      {
	if (!ima_template) {
                      ima_init_template_list();
                      ima_template =
                          lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
              }
	return ima_template;
      }
      
      int __init ima_init_template(void)
      {
              struct ima_template_desc *template = ima_template_desc_current();
              int result;
      
              result = template_desc_init_fields(template->fmt,
                                                 &(template->fields),
                                                 &(template->num_fields));
              if (result < 0)
                      pr_err("template %s init failed, result: %d\n",
                             (strlen(template->name) ?
                             template->name : template->fmt), result);
      
              return result;
      }
      
      static struct ima_template_desc *restore_template_fmt(char *template_name)
      {
              struct ima_template_desc *template_desc = NULL;
              int ret;
      
              ret = template_desc_init_fields(template_name, NULL, NULL);
              if (ret < 0) {
                      pr_err("attempting to initialize the template \"%s\" failed\n",
                              template_name);
                      goto out;
              }
      
              template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL);
              if (!template_desc)
                      goto out;
      
              template_desc->name = "";
              template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
              if (!template_desc->fmt)
                      goto out;
      
              spin_lock(&template_list);
              list_add_tail_rcu(&template_desc->list, &defined_templates);
              spin_unlock(&template_list);
      out:
              return template_desc;
      }
      
      static int ima_restore_template_data(struct ima_template_desc *template_desc,
                                           void *template_data,
                                           int template_data_size,
                                           struct ima_template_entry **entry)
      {
              int ret = 0;
              int i;
      
              *entry = kzalloc(sizeof(**entry) +
                          template_desc->num_fields * sizeof(struct ima_field_data),
                          GFP_NOFS);
              if (!*entry)
                      return -ENOMEM;
      
              ret = ima_parse_buf(template_data, template_data + template_data_size,
                                  NULL, template_desc->num_fields,
                                  (*entry)->template_data, NULL, NULL,
                                  ENFORCE_FIELDS | ENFORCE_BUFEND, "template data");
              if (ret < 0) {
                      kfree(*entry);
                      return ret;
              }
      
              (*entry)->template_desc = template_desc;
              for (i = 0; i < template_desc->num_fields; i++) {
                      struct ima_field_data *field_data = &(*entry)->template_data[i];
                      u8 *data = field_data->data;
      
                      (*entry)->template_data[i].data =
                              kzalloc(field_data->len + 1, GFP_KERNEL);
                      if (!(*entry)->template_data[i].data) {
                              ret = -ENOMEM;
                              break;
                      }
                      memcpy((*entry)->template_data[i].data, data, field_data->len);
                      (*entry)->template_data_len += sizeof(field_data->len);
                      (*entry)->template_data_len += field_data->len;
              }
      
              if (ret < 0) {
                      ima_free_template_entry(*entry);
                      *entry = NULL;
              }
      
              return ret;
      }
      
      /* Restore the serialized binary measurement list without extending PCRs. */
      int ima_restore_measurement_list(loff_t size, void *buf)
      {
              char template_name[MAX_TEMPLATE_NAME_LEN];
      
              struct ima_kexec_hdr *khdr = buf;
              struct ima_field_data hdr[HDR__LAST] = {
                      [HDR_PCR] = {.len = sizeof(u32)},
                      [HDR_DIGEST] = {.len = TPM_DIGEST_SIZE},
              };
      
              void *bufp = buf + sizeof(*khdr);
              void *bufendp;
              struct ima_template_entry *entry;
              struct ima_template_desc *template_desc;
              DECLARE_BITMAP(hdr_mask, HDR__LAST);
              unsigned long count = 0;
              int ret = 0;
      
              if (!buf || size < sizeof(*khdr))
                      return 0;
      
              if (ima_canonical_fmt) {
                      khdr->version = le16_to_cpu(khdr->version);
                      khdr->count = le64_to_cpu(khdr->count);
                      khdr->buffer_size = le64_to_cpu(khdr->buffer_size);
              }
      
              if (khdr->version != 1) {
                      pr_err("attempting to restore a incompatible measurement list");
                      return -EINVAL;
              }
      
              if (khdr->count > ULONG_MAX - 1) {
                      pr_err("attempting to restore too many measurements");
                      return -EINVAL;
              }
      
              bitmap_zero(hdr_mask, HDR__LAST);
              bitmap_set(hdr_mask, HDR_PCR, 1);
              bitmap_set(hdr_mask, HDR_DIGEST, 1);
      
              /*
               * ima kexec buffer prefix: version, buffer size, count
               * v1 format: pcr, digest, template-name-len, template-name,
               *              template-data-size, template-data
               */
              bufendp = buf + khdr->buffer_size;
              while ((bufp < bufendp) && (count++ < khdr->count)) {
                      int enforce_mask = ENFORCE_FIELDS;
      
                      enforce_mask |= (count == khdr->count) ? ENFORCE_BUFEND : 0;
                      ret = ima_parse_buf(bufp, bufendp, &bufp, HDR__LAST, hdr, NULL,
                                          hdr_mask, enforce_mask, "entry header");
                      if (ret < 0)
                              break;
      
                      if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) {
                              pr_err("attempting to restore a template name that is too long\n");
                              ret = -EINVAL;
                              break;
                      }
      
                      /* template name is not null terminated */
                      memcpy(template_name, hdr[HDR_TEMPLATE_NAME].data,
                             hdr[HDR_TEMPLATE_NAME].len);
                      template_name[hdr[HDR_TEMPLATE_NAME].len] = 0;
      
                      if (strcmp(template_name, "ima") == 0) {
                              pr_err("attempting to restore an unsupported template \"%s\" failed\n",
                                     template_name);
                              ret = -EINVAL;
                              break;
                      }
      
                      template_desc = lookup_template_desc(template_name);
                      if (!template_desc) {
                              template_desc = restore_template_fmt(template_name);
                              if (!template_desc)
                                      break;
                      }
      
                      /*
                       * Only the running system's template format is initialized
                       * on boot.  As needed, initialize the other template formats.
                       */
                      ret = template_desc_init_fields(template_desc->fmt,
                                                      &(template_desc->fields),
                                                      &(template_desc->num_fields));
                      if (ret < 0) {
                              pr_err("attempting to restore the template fmt \"%s\" failed\n",
                                     template_desc->fmt);
                              ret = -EINVAL;
                              break;
                      }
      
                      ret = ima_restore_template_data(template_desc,
                                                      hdr[HDR_TEMPLATE_DATA].data,
                                                      hdr[HDR_TEMPLATE_DATA].len,
                                                      &entry);
                      if (ret < 0)
                              break;
      
                      memcpy(entry->digest, hdr[HDR_DIGEST].data,
                             hdr[HDR_DIGEST].len);
                entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
                             le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
                      ret = ima_restore_measurement_entry(entry);
                      if (ret < 0)
                              break;
      
              }
              return ret;
      }
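/*
 * Editor's sketch of the buffer layout consumed above, inferred from the
 * header comment and the preset/masked field lengths (not a normative
 * definition):
 *
 *	struct ima_kexec_hdr		version, buffer_size, count
 *	repeated khdr->count times:
 *		pcr			fixed size (sizeof(u32), masked)
 *		digest			fixed size (TPM_DIGEST_SIZE, masked)
 *		template name		u32 length prefix + bytes, no NUL
 *		template data		u32 length prefix + bytes
 *
 * Multi-byte values are little-endian when ima_canonical_fmt is set and
 * native-endian otherwise.
 */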
      /* SPDX-License-Identifier: GPL-2.0 */
      #ifndef _LINUX_MM_PAGE_IDLE_H
      #define _LINUX_MM_PAGE_IDLE_H
      
      #include <linux/bitops.h>
      #include <linux/page-flags.h>
      #include <linux/page_ext.h>
      
      #ifdef CONFIG_IDLE_PAGE_TRACKING
      
      #ifdef CONFIG_64BIT
      static inline bool page_is_young(struct page *page)
      {
              return PageYoung(page);
      }
      
      static inline void set_page_young(struct page *page)
      {
              SetPageYoung(page);
      }
      
      static inline bool test_and_clear_page_young(struct page *page)
      {
              return TestClearPageYoung(page);
      }
      
      static inline bool page_is_idle(struct page *page)
      {
        return PageIdle(page);
      }
      
      static inline void set_page_idle(struct page *page)
      {
              SetPageIdle(page);
      }
      
      static inline void clear_page_idle(struct page *page)
      {
              ClearPageIdle(page);
      }
      #else /* !CONFIG_64BIT */
      /*
       * If there is not enough space to store Idle and Young bits in page flags, use
       * page ext flags instead.
       */
      extern struct page_ext_operations page_idle_ops;
      
      static inline bool page_is_young(struct page *page)
      {
              struct page_ext *page_ext = lookup_page_ext(page);
      
              if (unlikely(!page_ext))
                      return false;
      
              return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
      }
      
      static inline void set_page_young(struct page *page)
      {
              struct page_ext *page_ext = lookup_page_ext(page);
      
              if (unlikely(!page_ext))
                      return;
      
              set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
      }
      
      static inline bool test_and_clear_page_young(struct page *page)
      {
              struct page_ext *page_ext = lookup_page_ext(page);
      
              if (unlikely(!page_ext))
                      return false;
      
              return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
      }
      
      static inline bool page_is_idle(struct page *page)
      {
              struct page_ext *page_ext = lookup_page_ext(page);
      
              if (unlikely(!page_ext))
                      return false;
      
              return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
      }
      
      static inline void set_page_idle(struct page *page)
      {
              struct page_ext *page_ext = lookup_page_ext(page);
      
              if (unlikely(!page_ext))
                      return;
      
              set_bit(PAGE_EXT_IDLE, &page_ext->flags);
      }
      
      static inline void clear_page_idle(struct page *page)
      {
              struct page_ext *page_ext = lookup_page_ext(page);
      
              if (unlikely(!page_ext))
                      return;
      
              clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
      }
      #endif /* CONFIG_64BIT */
      
      #else /* !CONFIG_IDLE_PAGE_TRACKING */
      
      static inline bool page_is_young(struct page *page)
      {
              return false;
      }
      
      static inline void set_page_young(struct page *page)
      {
      }
      
      static inline bool test_and_clear_page_young(struct page *page)
      {
              return false;
      }
      
      static inline bool page_is_idle(struct page *page)
      {
              return false;
      }
      
      static inline void set_page_idle(struct page *page)
      {
      }
      
      static inline void clear_page_idle(struct page *page)
      {
      }
      
      #endif /* CONFIG_IDLE_PAGE_TRACKING */
      
      #endif /* _LINUX_MM_PAGE_IDLE_H */
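/*
 * Editor's sketch (not part of this header): a minimal idle-page scan using
 * the helpers above would mark a page idle on one pass and treat a still-set
 * idle bit, with no young bit, as "not accessed since":
 *
 *	static void example_start_tracking(struct page *page)
 *	{
 *		set_page_idle(page);
 *	}
 *
 *	static bool example_was_accessed(struct page *page)
 *	{
 *		return !page_is_idle(page) || test_and_clear_page_young(page);
 *	}
 *
 * The real sysfs interface in mm/page_idle.c also folds in PTE reference
 * bits before trusting the idle bit; this shows only the flag-level idea.
 */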
      // SPDX-License-Identifier: GPL-2.0-or-later
      /* Basic authentication token and access key management
       *
       * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
       * Written by David Howells (dhowells@redhat.com)
       */
      
      #include <linux/export.h>
      #include <linux/init.h>
      #include <linux/poison.h>
      #include <linux/sched.h>
      #include <linux/slab.h>
      #include <linux/security.h>
      #include <linux/workqueue.h>
      #include <linux/random.h>
      #include <linux/err.h>
      #include "internal.h"
      
      struct kmem_cache *key_jar;
      struct rb_root                key_serial_tree; /* tree of keys indexed by serial */
      DEFINE_SPINLOCK(key_serial_lock);
      
      struct rb_root        key_user_tree; /* tree of quota records indexed by UID */
      DEFINE_SPINLOCK(key_user_lock);
      
      unsigned int key_quota_root_maxkeys = 1000000;        /* root's key count quota */
      unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
      unsigned int key_quota_maxkeys = 200;                /* general key count quota */
      unsigned int key_quota_maxbytes = 20000;        /* general key space quota */
      
      static LIST_HEAD(key_types_list);
      static DECLARE_RWSEM(key_types_sem);
      
      /* We serialise key instantiation and link */
      DEFINE_MUTEX(key_construction_mutex);
      
      #ifdef KEY_DEBUGGING
      void __key_check(const struct key *key)
      {
              printk("__key_check: key %p {%08x} should be {%08x}\n",
                     key, key->magic, KEY_DEBUG_MAGIC);
              BUG();
      }
      #endif
      
      /*
       * Get the key quota record for a user, allocating a new record if one doesn't
       * already exist.
       */
      struct key_user *key_user_lookup(kuid_t uid)
      {
              struct key_user *candidate = NULL, *user;
              struct rb_node *parent, **p;
      
      try_again:
              parent = NULL;
              p = &key_user_tree.rb_node;
              spin_lock(&key_user_lock);
      
              /* search the tree for a user record with a matching UID */
              while (*p) {
                      parent = *p;
                      user = rb_entry(parent, struct key_user, node);
      
                      if (uid_lt(uid, user->uid))
                              p = &(*p)->rb_left;
                      else if (uid_gt(uid, user->uid))
                              p = &(*p)->rb_right;
                      else
                              goto found;
              }
      
              /* if we get here, we failed to find a match in the tree */
              if (!candidate) {
                      /* allocate a candidate user record if we don't already have
                       * one */
                      spin_unlock(&key_user_lock);
      
                      user = NULL;
                      candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
                      if (unlikely(!candidate))
                              goto out;
      
                      /* the allocation may have scheduled, so we need to repeat the
                       * search lest someone else added the record whilst we were
                       * asleep */
                      goto try_again;
              }
      
              /* if we get here, then the user record still hadn't appeared on the
               * second pass - so we use the candidate record */
              refcount_set(&candidate->usage, 1);
              atomic_set(&candidate->nkeys, 0);
              atomic_set(&candidate->nikeys, 0);
              candidate->uid = uid;
              candidate->qnkeys = 0;
              candidate->qnbytes = 0;
              spin_lock_init(&candidate->lock);
              mutex_init(&candidate->cons_lock);
      
              rb_link_node(&candidate->node, parent, p);
              rb_insert_color(&candidate->node, &key_user_tree);
              spin_unlock(&key_user_lock);
              user = candidate;
              goto out;
      
              /* okay - we found a user record for this UID */
      found:
              refcount_inc(&user->usage);
              spin_unlock(&key_user_lock);
              kfree(candidate);
      out:
              return user;
      }
      
      /*
       * Dispose of a user structure
       */
      void key_user_put(struct key_user *user)
      {
              if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
                      rb_erase(&user->node, &key_user_tree);
                      spin_unlock(&key_user_lock);
      
                      kfree(user);
              }
      }
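/*
 * Editor's note (illustrative): a reference taken with key_user_lookup()
 * must either be dropped with key_user_put() or handed on, as key_alloc()
 * below does by storing it in key->user.  A simple charge/uncharge caller
 * would look roughly like:
 *
 *	struct key_user *user;
 *
 *	user = key_user_lookup(uid);
 *	if (!user)
 *		return -ENOMEM;
 *	spin_lock(&user->lock);
 *	... adjust user->qnkeys / user->qnbytes ...
 *	spin_unlock(&user->lock);
 *	key_user_put(user);
 */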
      
      /*
       * Allocate a serial number for a key.  These are assigned randomly to avoid
       * security issues through covert channel problems.
       */
      static inline void key_alloc_serial(struct key *key)
      {
              struct rb_node *parent, **p;
              struct key *xkey;
      
              /* propose a random serial number and look for a hole for it in the
               * serial number tree */
              do {
                      get_random_bytes(&key->serial, sizeof(key->serial));
      
                      key->serial >>= 1; /* negative numbers are not permitted */
              } while (key->serial < 3);
      
              spin_lock(&key_serial_lock);
      
      attempt_insertion:
              parent = NULL;
              p = &key_serial_tree.rb_node;
      
              while (*p) {
                      parent = *p;
                      xkey = rb_entry(parent, struct key, serial_node);
      
                      if (key->serial < xkey->serial)
                              p = &(*p)->rb_left;
                      else if (key->serial > xkey->serial)
                              p = &(*p)->rb_right;
                      else
                              goto serial_exists;
              }
      
              /* we've found a suitable hole - arrange for this key to occupy it */
              rb_link_node(&key->serial_node, parent, p);
              rb_insert_color(&key->serial_node, &key_serial_tree);
      
              spin_unlock(&key_serial_lock);
              return;
      
              /* we found a key with the proposed serial number - walk the tree from
               * that point looking for the next unused serial number */
      serial_exists:
              for (;;) {
                      key->serial++;
                      if (key->serial < 3) {
                              key->serial = 3;
                              goto attempt_insertion;
                      }
      
                      parent = rb_next(parent);
                      if (!parent)
                              goto attempt_insertion;
      
                      xkey = rb_entry(parent, struct key, serial_node);
                      if (key->serial < xkey->serial)
                              goto attempt_insertion;
              }
      }
      
      /**
       * key_alloc - Allocate a key of the specified type.
       * @type: The type of key to allocate.
       * @desc: The key description to allow the key to be searched out.
       * @uid: The owner of the new key.
       * @gid: The group ID for the new key's group permissions.
       * @cred: The credentials specifying UID namespace.
       * @perm: The permissions mask of the new key.
       * @flags: Flags specifying quota properties.
       * @restrict_link: Optional link restriction for new keyrings.
       *
       * Allocate a key of the specified type with the attributes given.  The key is
       * returned in an uninstantiated state and the caller needs to instantiate the
       * key before returning.
       *
       * The restrict_link structure (if not NULL) will be freed when the
       * keyring is destroyed, so it must be dynamically allocated.
       *
       * The user's key count quota is updated to reflect the creation of the key and
       * the user's key data quota has the default for the key type reserved.  The
       * instantiation function should amend this as necessary.  If insufficient
       * quota is available, -EDQUOT will be returned.
       *
       * The LSM security modules can prevent a key being created, in which case
       * -EACCES will be returned.
       *
       * Returns a pointer to the new key if successful and an error code otherwise.
       *
       * Note that the caller needs to ensure the key type isn't uninstantiated.
       * Internally this can be done by locking key_types_sem.  Externally, this can
       * be done by either never unregistering the key type, or making sure
       * key_alloc() calls don't race with module unloading.
       */
      struct key *key_alloc(struct key_type *type, const char *desc,
                            kuid_t uid, kgid_t gid, const struct cred *cred,
                            key_perm_t perm, unsigned long flags,
                            struct key_restriction *restrict_link)
      {
              struct key_user *user = NULL;
              struct key *key;
              size_t desclen, quotalen;
              int ret;
      
              key = ERR_PTR(-EINVAL);
              if (!desc || !*desc)
                      goto error;
      
              if (type->vet_description) {
                      ret = type->vet_description(desc);
                      if (ret < 0) {
                              key = ERR_PTR(ret);
                              goto error;
                      }
              }
      
              desclen = strlen(desc);
              quotalen = desclen + 1 + type->def_datalen;
      
              /* get hold of the key tracking for this user */
              user = key_user_lookup(uid);
              if (!user)
                      goto no_memory_1;
      
              /* check that the user's quota permits allocation of another key and
               * its description */
              if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                      unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
                              key_quota_root_maxkeys : key_quota_maxkeys;
                      unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
                              key_quota_root_maxbytes : key_quota_maxbytes;
      
                      spin_lock(&user->lock);
                      if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
                              if (user->qnkeys + 1 > maxkeys ||
                                  user->qnbytes + quotalen > maxbytes ||
                                  user->qnbytes + quotalen < user->qnbytes)
                                      goto no_quota;
                      }
      
                      user->qnkeys++;
                      user->qnbytes += quotalen;
                      spin_unlock(&user->lock);
              }
      
              /* allocate and initialise the key and its description */
              key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
              if (!key)
                      goto no_memory_2;
      
              key->index_key.desc_len = desclen;
              key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
              if (!key->index_key.description)
                      goto no_memory_3;
              key->index_key.type = type;
              key_set_index_key(&key->index_key);
      
              refcount_set(&key->usage, 1);
              init_rwsem(&key->sem);
              lockdep_set_class(&key->sem, &type->lock_class);
              key->user = user;
              key->quotalen = quotalen;
              key->datalen = type->def_datalen;
              key->uid = uid;
              key->gid = gid;
              key->perm = perm;
              key->restrict_link = restrict_link;
              key->last_used_at = ktime_get_real_seconds();
      
              if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
                      key->flags |= 1 << KEY_FLAG_IN_QUOTA;
              if (flags & KEY_ALLOC_BUILT_IN)
                      key->flags |= 1 << KEY_FLAG_BUILTIN;
              if (flags & KEY_ALLOC_UID_KEYRING)
                      key->flags |= 1 << KEY_FLAG_UID_KEYRING;
      
      #ifdef KEY_DEBUGGING
              key->magic = KEY_DEBUG_MAGIC;
      #endif
      
              /* let the security module know about the key */
              ret = security_key_alloc(key, cred, flags);
              if (ret < 0)
                      goto security_error;
      
              /* publish the key by giving it a serial number */
              refcount_inc(&key->domain_tag->usage);
              atomic_inc(&user->nkeys);
              key_alloc_serial(key);
      
      error:
              return key;
      
      security_error:
              kfree(key->description);
              kmem_cache_free(key_jar, key);
              if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                      spin_lock(&user->lock);
                      user->qnkeys--;
                      user->qnbytes -= quotalen;
                      spin_unlock(&user->lock);
              }
              key_user_put(user);
              key = ERR_PTR(ret);
              goto error;
      
      no_memory_3:
              kmem_cache_free(key_jar, key);
      no_memory_2:
              if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                      spin_lock(&user->lock);
                      user->qnkeys--;
                      user->qnbytes -= quotalen;
                      spin_unlock(&user->lock);
              }
              key_user_put(user);
      no_memory_1:
              key = ERR_PTR(-ENOMEM);
              goto error;
      
      no_quota:
              spin_unlock(&user->lock);
              key_user_put(user);
              key = ERR_PTR(-EDQUOT);
              goto error;
      }
      EXPORT_SYMBOL(key_alloc);
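/*
 * Editor's sketch of a typical call (the key type and description are
 * placeholders, not defined here):
 *
 *	struct key *key;
 *
 *	key = key_alloc(&example_key_type, "example:desc",
 *			GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
 *			(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
 *			KEY_ALLOC_NOT_IN_QUOTA, NULL);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *
 * The result is uninstantiated; see key_instantiate_and_link() below.
 */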
      
      /**
       * key_payload_reserve - Adjust data quota reservation for the key's payload
       * @key: The key to make the reservation for.
       * @datalen: The amount of data payload the caller now wants.
       *
       * Adjust the amount of the owning user's key data quota that a key reserves.
       * If the amount is increased, then -EDQUOT may be returned if there isn't
       * enough free quota available.
       *
       * If successful, 0 is returned.
       */
      int key_payload_reserve(struct key *key, size_t datalen)
      {
              int delta = (int)datalen - key->datalen;
              int ret = 0;
      
              key_check(key);
      
              /* contemplate the quota adjustment */
              if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                      unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
                              key_quota_root_maxbytes : key_quota_maxbytes;
      
                      spin_lock(&key->user->lock);
      
                      if (delta > 0 &&
                          (key->user->qnbytes + delta >= maxbytes ||
                           key->user->qnbytes + delta < key->user->qnbytes)) {
                              ret = -EDQUOT;
                } else {
                              key->user->qnbytes += delta;
                              key->quotalen += delta;
                      }
                      spin_unlock(&key->user->lock);
              }
      
              /* change the recorded data length if that didn't generate an error */
              if (ret == 0)
                      key->datalen = datalen;
      
              return ret;
      }
      EXPORT_SYMBOL(key_payload_reserve);
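/*
 * Editor's note: the usual caller is a key type's ->instantiate() op, which
 * re-reserves quota for the real payload before storing it, e.g.:
 *
 *	ret = key_payload_reserve(key, prep->datalen);
 *	if (ret < 0)
 *		return ret;
 *
 * since key_alloc() only reserved type->def_datalen up front.
 */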
      
      /*
       * Change the key state to being instantiated.
       */
      static void mark_key_instantiated(struct key *key, int reject_error)
      {
              /* Commit the payload before setting the state; barrier versus
               * key_read_state().
               */
              smp_store_release(&key->state,
                                (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
      }
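/*
 * Editor's note: the release store above pairs with an acquire load on the
 * reader side; key_read_state() (in <linux/key.h>) is essentially:
 *
 *	static inline int key_read_state(const struct key *key)
 *	{
 *		return smp_load_acquire(&key->state);
 *	}
 *
 * so a reader that sees KEY_IS_POSITIVE also sees the payload written
 * before the smp_store_release().
 */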
      
      /*
       * Instantiate a key and link it into the target keyring atomically.  Must be
       * called with the target keyring's semaphore writelocked.  The target key's
       * semaphore need not be locked as instantiation is serialised by
       * key_construction_mutex.
       */
      static int __key_instantiate_and_link(struct key *key,
                                            struct key_preparsed_payload *prep,
                                            struct key *keyring,
                                            struct key *authkey,
                                            struct assoc_array_edit **_edit)
      {
              int ret, awaken;
      
              key_check(key);
              key_check(keyring);
      
              awaken = 0;
              ret = -EBUSY;
      
              mutex_lock(&key_construction_mutex);
      
              /* can't instantiate twice */
              if (key->state == KEY_IS_UNINSTANTIATED) {
                      /* instantiate the key */
                      ret = key->type->instantiate(key, prep);
      
                      if (ret == 0) {
                              /* mark the key as being instantiated */
                              atomic_inc(&key->user->nikeys);
                              mark_key_instantiated(key, 0);
      
                              if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                      awaken = 1;
      
                              /* and link it into the destination keyring */
                              if (keyring) {
                                      if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
                                              set_bit(KEY_FLAG_KEEP, &key->flags);
      
                                      __key_link(key, _edit);
                              }
      
                              /* disable the authorisation key */
                              if (authkey)
                                      key_invalidate(authkey);
      
                              if (prep->expiry != TIME64_MAX) {
                                      key->expiry = prep->expiry;
                                      key_schedule_gc(prep->expiry + key_gc_delay);
                              }
                      }
              }
      
              mutex_unlock(&key_construction_mutex);
      
              /* wake up anyone waiting for a key to be constructed */
              if (awaken)
                      wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
      
              return ret;
      }
      
      /**
       * key_instantiate_and_link - Instantiate a key and link it into the keyring.
       * @key: The key to instantiate.
       * @data: The data to use to instantiate the keyring.
       * @datalen: The length of @data.
       * @keyring: Keyring to create a link in on success (or NULL).
       * @authkey: The authorisation token permitting instantiation.
       *
       * Instantiate a key that's in the uninstantiated state using the provided data
       * and, if successful, link it in to the destination keyring if one is
       * supplied.
       *
       * If successful, 0 is returned, the authorisation token is revoked and anyone
       * waiting for the key is woken up.  If the key was already instantiated,
       * -EBUSY will be returned.
       */
      int key_instantiate_and_link(struct key *key,
                                   const void *data,
                                   size_t datalen,
                                   struct key *keyring,
                                   struct key *authkey)
      {
              struct key_preparsed_payload prep;
              struct assoc_array_edit *edit = NULL;
              int ret;
      
              memset(&prep, 0, sizeof(prep));
              prep.data = data;
              prep.datalen = datalen;
              prep.quotalen = key->type->def_datalen;
              prep.expiry = TIME64_MAX;
              if (key->type->preparse) {
                      ret = key->type->preparse(&prep);
                      if (ret < 0)
                              goto error;
              }
      
              if (keyring) {
                      ret = __key_link_lock(keyring, &key->index_key);
                      if (ret < 0)
                              goto error;
      
                      ret = __key_link_begin(keyring, &key->index_key, &edit);
                      if (ret < 0)
                              goto error_link_end;
      
                      if (keyring->restrict_link && keyring->restrict_link->check) {
                              struct key_restriction *keyres = keyring->restrict_link;
      
                              ret = keyres->check(keyring, key->type, &prep.payload,
                                                  keyres->key);
                              if (ret < 0)
                                      goto error_link_end;
                      }
              }
      
              ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
      
      error_link_end:
              if (keyring)
                      __key_link_end(keyring, &key->index_key, edit);
      
      error:
              if (key->type->preparse)
                      key->type->free_preparse(&prep);
              return ret;
      }
      
      EXPORT_SYMBOL(key_instantiate_and_link);
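/*
 * Editor's sketch of a typical call, after key_alloc() has produced an
 * uninstantiated key ("payload", "plen" and "dest_keyring" are the caller's
 * own):
 *
 *	ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);
 *	if (ret < 0)
 *		key_put(key);
 *
 * Passing a NULL keyring skips the linking step; passing a NULL authkey
 * skips revoking an authorisation token.
 */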
      
      /**
       * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
       * @key: The key to instantiate.
       * @timeout: The timeout on the negative key.
       * @error: The error to return when the key is hit.
       * @keyring: Keyring to create a link in on success (or NULL).
       * @authkey: The authorisation token permitting instantiation.
       *
       * Negatively instantiate a key that's in the uninstantiated state and, if
       * successful, set its timeout and stored error and link it in to the
       * destination keyring if one is supplied.  The key and any links to the key
       * will be automatically garbage collected after the timeout expires.
       *
       * Negative keys are used to rate limit repeated request_key() calls by causing
       * them to return the stored error code (typically ENOKEY) until the negative
       * key expires.
       *
       * If successful, 0 is returned, the authorisation token is revoked and anyone
       * waiting for the key is woken up.  If the key was already instantiated,
       * -EBUSY will be returned.
       */
      int key_reject_and_link(struct key *key,
                              unsigned timeout,
                              unsigned error,
                              struct key *keyring,
                              struct key *authkey)
      {
              struct assoc_array_edit *edit = NULL;
              int ret, awaken, link_ret = 0;
      
              key_check(key);
              key_check(keyring);
      
              awaken = 0;
              ret = -EBUSY;
      
              if (keyring) {
                      if (keyring->restrict_link)
                              return -EPERM;
      
                      link_ret = __key_link_lock(keyring, &key->index_key);
                      if (link_ret == 0) {
                              link_ret = __key_link_begin(keyring, &key->index_key, &edit);
                              if (link_ret < 0)
                                      __key_link_end(keyring, &key->index_key, edit);
                      }
              }
      
              mutex_lock(&key_construction_mutex);
      
              /* can't instantiate twice */
              if (key->state == KEY_IS_UNINSTANTIATED) {
                      /* mark the key as being negatively instantiated */
                      atomic_inc(&key->user->nikeys);
                      mark_key_instantiated(key, -error);
                      key->expiry = ktime_get_real_seconds() + timeout;
                      key_schedule_gc(key->expiry + key_gc_delay);
      
                      if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                              awaken = 1;
      
                      ret = 0;
      
                      /* and link it into the destination keyring */
                      if (keyring && link_ret == 0)
                              __key_link(key, &edit);
      
                      /* disable the authorisation key */
                      if (authkey)
                              key_invalidate(authkey);
              }
      
              mutex_unlock(&key_construction_mutex);
      
              if (keyring && link_ret == 0)
                      __key_link_end(keyring, &key->index_key, edit);
      
              /* wake up anyone waiting for a key to be constructed */
              if (awaken)
                      wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
      
              return ret == 0 ? link_ret : ret;
      }
      EXPORT_SYMBOL(key_reject_and_link);
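/*
 * Editor's sketch: an upcall path that cannot supply key material might
 * negatively instantiate the key for a minute so that repeated lookups fail
 * fast:
 *
 *	ret = key_reject_and_link(key, 60, ENOKEY, dest_keyring, authkey);
 *
 * Note that @error is passed positive and stored negated by
 * mark_key_instantiated() above.
 */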