// SPDX-License-Identifier: GPL-2.0
      #include <linux/export.h>
      #include <linux/spinlock.h>
      #include <linux/atomic.h>
      
      /*
       * This is an implementation of the notion of "decrement a
       * reference count, and return locked if it decremented to zero".
       *
       * NOTE NOTE NOTE! This is _not_ equivalent to
       *
       *        if (atomic_dec_and_test(&atomic)) {
       *                spin_lock(&lock);
       *                return 1;
       *        }
       *        return 0;
       *
       * because the spin-lock and the decrement must be
       * "atomic".
       */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}
      
      EXPORT_SYMBOL(_atomic_dec_and_lock);
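
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): the
 * common pattern is dropping the last reference and tearing the object down
 * while still holding the lock that protects the lookup structure.
 *
 *	if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */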
      
      int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                       unsigned long *flags)
      {
              /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
              if (atomic_add_unless(atomic, -1, 1))
                      return 0;
      
              /* Otherwise do it the slow way */
              spin_lock_irqsave(lock, *flags);
              if (atomic_dec_and_test(atomic))
                      return 1;
              spin_unlock_irqrestore(lock, *flags);
              return 0;
      }
      EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
      // SPDX-License-Identifier: GPL-2.0-or-later
      /*
       * net/core/dev_addr_lists.c - Functions for handling net device lists
       * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com>
       *
       * This file contains functions for working with unicast, multicast and device
 * address lists.
       */
      
      #include <linux/netdevice.h>
      #include <linux/rtnetlink.h>
      #include <linux/export.h>
      #include <linux/list.h>
      
      /*
       * General list handling functions
       */
      
      static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
                                     const unsigned char *addr, int addr_len,
                                     unsigned char addr_type, bool global,
                                     bool sync)
      {
              struct netdev_hw_addr *ha;
              int alloc_size;
      
              alloc_size = sizeof(*ha);
              if (alloc_size < L1_CACHE_BYTES)
                      alloc_size = L1_CACHE_BYTES;
	ha = kmalloc(alloc_size, GFP_ATOMIC);
	if (!ha)
		return -ENOMEM;
	memcpy(ha->addr, addr, addr_len);
	ha->type = addr_type;
	ha->refcount = 1;
	ha->global_use = global;
	ha->synced = sync ? 1 : 0;
	ha->sync_cnt = 0;
	list_add_tail_rcu(&ha->list, &list->list);
	list->count++;

	return 0;
      }
      
      static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
                                  const unsigned char *addr, int addr_len,
                                  unsigned char addr_type, bool global, bool sync,
                                  int sync_count)
      {
              struct netdev_hw_addr *ha;
      
	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	list_for_each_entry(ha, &list->list, list) {
		if (ha->type == addr_type &&
		    !memcmp(ha->addr, addr, addr_len)) {
			if (global) {
				/* check if addr is already used as global */
				if (ha->global_use)
					return 0;
				else
					ha->global_use = true;
			}
			if (sync) {
				if (ha->synced && sync_count)
					return -EEXIST;
				else
					ha->synced++;
			}
			ha->refcount++;
			return 0;
		}
	}

	return __hw_addr_create_ex(list, addr, addr_len, addr_type, global,
				   sync);
      }
      
      static int __hw_addr_add(struct netdev_hw_addr_list *list,
                               const unsigned char *addr, int addr_len,
                               unsigned char addr_type)
      {
              return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
                                      0);
      }
      
static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
			       struct netdev_hw_addr *ha, bool global,
			       bool sync)
{
	if (global && !ha->global_use)
		return -ENOENT;

	if (sync && !ha->synced)
		return -ENOENT;

	if (global)
		ha->global_use = false;

	if (sync)
		ha->synced--;

	if (--ha->refcount)
		return 0;
	list_del_rcu(&ha->list);
	kfree_rcu(ha, rcu_head);
	list->count--;
	return 0;
}
      
      static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
                                  const unsigned char *addr, int addr_len,
                                  unsigned char addr_type, bool global, bool sync)
      {
              struct netdev_hw_addr *ha;
      
	list_for_each_entry(ha, &list->list, list) {
		if (!memcmp(ha->addr, addr, addr_len) &&
		    (ha->type == addr_type || !addr_type))
			return __hw_addr_del_entry(list, ha, global, sync);
	}
	return -ENOENT;
      }
      
      static int __hw_addr_del(struct netdev_hw_addr_list *list,
                               const unsigned char *addr, int addr_len,
                               unsigned char addr_type)
      {
              return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
      }
      
static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
			      struct netdev_hw_addr *ha,
			      int addr_len)
{
	int err;

	err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true, ha->sync_cnt);
	if (err && err != -EEXIST)
		return err;

	if (!err) {
		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
      
static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
				 struct netdev_hw_addr_list *from_list,
				 struct netdev_hw_addr *ha,
				 int addr_len)
{
	int err;

	err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true);
	if (err)
		return;
	ha->sync_cnt--;
	/* address on from list is not marked synced */
	__hw_addr_del_entry(from_list, ha, false, false);
}
      
      static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
                                         struct netdev_hw_addr_list *from_list,
                                         int addr_len)
      {
              int err = 0;
              struct netdev_hw_addr *ha, *tmp;
      
	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt == ha->refcount) {
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
		} else {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		}
	}
	return err;
      
/* This function only works where there is a strict 1-1 relationship
 * between the source and destination of the sync. If you ever need to
 * sync addresses to more than one destination, you need to use
 * __hw_addr_sync_multiple().
       */
      int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
                         struct netdev_hw_addr_list *from_list,
                         int addr_len)
      {
              int err = 0;
              struct netdev_hw_addr *ha, *tmp;
      
              list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
                      if (!ha->sync_cnt) {
                              err = __hw_addr_sync_one(to_list, ha, addr_len);
                              if (err)
                                      break;
                      } else if (ha->refcount == 1)
                              __hw_addr_unsync_one(to_list, from_list, ha, addr_len);
              }
              return err;
      }
      EXPORT_SYMBOL(__hw_addr_sync);
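
/*
 * A minimal usage sketch (assumes the caller already holds the source
 * list's address lock, as dev_uc_sync()/dev_mc_sync() below do for the
 * source device; "to" and "from" are hypothetical net_devices):
 *
 *	netif_addr_lock_nested(to);
 *	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 *	if (!err)
 *		__dev_set_rx_mode(to);
 *	netif_addr_unlock(to);
 */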
      
      void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
                            struct netdev_hw_addr_list *from_list,
                            int addr_len)
      {
              struct netdev_hw_addr *ha, *tmp;
      
	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
}
      EXPORT_SYMBOL(__hw_addr_unsync);
      
      /**
 *  __hw_addr_sync_dev - Synchronize device's multicast list
 *  @list: address list to synchronize
       *  @dev:  device to sync
       *  @sync: function to call if address should be added
       *  @unsync: function to call if address should be removed
       *
 *  This function is intended to be called from the ndo_set_rx_mode
       *  function of devices that require explicit address add/remove
       *  notifications.  The unsync function may be NULL in which case
       *  the addresses requiring removal will simply be removed without
       *  any notification to the device.
       **/
      int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
                             struct net_device *dev,
                             int (*sync)(struct net_device *, const unsigned char *),
                             int (*unsync)(struct net_device *,
                                           const unsigned char *))
      {
              struct netdev_hw_addr *ha, *tmp;
              int err;
      
              /* first go through and flush out any stale entries */
              list_for_each_entry_safe(ha, tmp, &list->list, list) {
                      if (!ha->sync_cnt || ha->refcount != 1)
                              continue;
      
                      /* if unsync is defined and fails defer unsyncing address */
                      if (unsync && unsync(dev, ha->addr))
                              continue;
      
                      ha->sync_cnt--;
                      __hw_addr_del_entry(list, ha, false, false);
              }
      
              /* go through and sync new entries to the list */
              list_for_each_entry_safe(ha, tmp, &list->list, list) {
                      if (ha->sync_cnt)
                              continue;
      
                      err = sync(dev, ha->addr);
                      if (err)
                              return err;
      
                      ha->sync_cnt++;
                      ha->refcount++;
              }
      
              return 0;
      }
      EXPORT_SYMBOL(__hw_addr_sync_dev);
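
/*
 * Example shape of a driver using this helper from its ndo_set_rx_mode
 * (hypothetical driver; foo_uc_sync()/foo_uc_unsync() and the mc variants
 * are assumed callbacks that program or unprogram one address in hardware
 * and return 0 on success):
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__hw_addr_sync_dev(&dev->uc, dev, foo_uc_sync, foo_uc_unsync);
 *		__hw_addr_sync_dev(&dev->mc, dev, foo_mc_sync, foo_mc_unsync);
 *	}
 */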
      
      /**
       *  __hw_addr_ref_sync_dev - Synchronize device's multicast address list taking
       *  into account references
       *  @list: address list to synchronize
       *  @dev:  device to sync
       *  @sync: function to call if address or reference on it should be added
 *  @unsync: function to call if address or some reference on it should be
 *  removed
 *
 *  This function is intended to be called from the ndo_set_rx_mode
 *  function of devices that require explicit notifications when addresses or
 *  references on them are added or removed. The unsync function may be NULL,
 *  in which case addresses or references requiring removal will simply be
 *  removed without any notification to the device. It is the responsibility
 *  of the driver to identify and distribute addresses or references on them
 *  between internal address tables.
       **/
      int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
                                 struct net_device *dev,
                                 int (*sync)(struct net_device *,
                                             const unsigned char *, int),
                                 int (*unsync)(struct net_device *,
                                               const unsigned char *, int))
      {
              struct netdev_hw_addr *ha, *tmp;
              int err, ref_cnt;
      
              /* first go through and flush out any unsynced/stale entries */
              list_for_each_entry_safe(ha, tmp, &list->list, list) {
                      /* sync if address is not used */
                      if ((ha->sync_cnt << 1) <= ha->refcount)
                              continue;
      
                      /* if fails defer unsyncing address */
                      ref_cnt = ha->refcount - ha->sync_cnt;
                      if (unsync && unsync(dev, ha->addr, ref_cnt))
                              continue;
      
                      ha->refcount = (ref_cnt << 1) + 1;
                      ha->sync_cnt = ref_cnt;
                      __hw_addr_del_entry(list, ha, false, false);
              }
      
              /* go through and sync updated/new entries to the list */
              list_for_each_entry_safe(ha, tmp, &list->list, list) {
                      /* sync if address added or reused */
                      if ((ha->sync_cnt << 1) >= ha->refcount)
                              continue;
      
                      ref_cnt = ha->refcount - ha->sync_cnt;
                      err = sync(dev, ha->addr, ref_cnt);
                      if (err)
                              return err;
      
                      ha->refcount = ref_cnt << 1;
                      ha->sync_cnt = ref_cnt;
              }
      
              return 0;
      }
      EXPORT_SYMBOL(__hw_addr_ref_sync_dev);
      
      /**
 *  __hw_addr_ref_unsync_dev - Remove synchronized addresses and references
 *  on them from device
 *  @list: address list to remove synchronized addresses (references on them) from
       *  @dev:  device to sync
       *  @unsync: function to call if address and references on it should be removed
       *
       *  Remove all addresses that were added to the device by
       *  __hw_addr_ref_sync_dev(). This function is intended to be called from the
       *  ndo_stop or ndo_open functions on devices that require explicit address (or
       *  references on it) add/remove notifications. If the unsync function pointer
       *  is NULL then this function can be used to just reset the sync_cnt for the
       *  addresses in the list.
       **/
      void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
                                    struct net_device *dev,
                                    int (*unsync)(struct net_device *,
                                                  const unsigned char *, int))
      {
              struct netdev_hw_addr *ha, *tmp;
      
              list_for_each_entry_safe(ha, tmp, &list->list, list) {
                      if (!ha->sync_cnt)
                              continue;
      
                      /* if fails defer unsyncing address */
                      if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
                              continue;
      
                      ha->refcount -= ha->sync_cnt - 1;
                      ha->sync_cnt = 0;
                      __hw_addr_del_entry(list, ha, false, false);
              }
      }
      EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);
      
      /**
       *  __hw_addr_unsync_dev - Remove synchronized addresses from device
       *  @list: address list to remove synchronized addresses from
       *  @dev:  device to sync
       *  @unsync: function to call if address should be removed
       *
       *  Remove all addresses that were added to the device by __hw_addr_sync_dev().
       *  This function is intended to be called from the ndo_stop or ndo_open
       *  functions on devices that require explicit address add/remove
       *  notifications.  If the unsync function pointer is NULL then this function
       *  can be used to just reset the sync_cnt for the addresses in the list.
       **/
      void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
                                struct net_device *dev,
                                int (*unsync)(struct net_device *,
                                              const unsigned char *))
      {
              struct netdev_hw_addr *ha, *tmp;
      
              list_for_each_entry_safe(ha, tmp, &list->list, list) {
                      if (!ha->sync_cnt)
                              continue;
      
                      /* if unsync is defined and fails defer unsyncing address */
                      if (unsync && unsync(dev, ha->addr))
                              continue;
      
                      ha->sync_cnt--;
                      __hw_addr_del_entry(list, ha, false, false);
              }
      }
      EXPORT_SYMBOL(__hw_addr_unsync_dev);
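
/*
 * Teardown counterpart sketch (hypothetical driver): called from ndo_stop
 * to drop every address previously pushed by __hw_addr_sync_dev().
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		__hw_addr_unsync_dev(&dev->uc, dev, foo_uc_unsync);
 *		__hw_addr_unsync_dev(&dev->mc, dev, foo_mc_unsync);
 *		return 0;
 *	}
 */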
      
      static void __hw_addr_flush(struct netdev_hw_addr_list *list)
      {
              struct netdev_hw_addr *ha, *tmp;
      
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		list_del_rcu(&ha->list);
		kfree_rcu(ha, rcu_head);
	}
	list->count = 0;
}
      
      void __hw_addr_init(struct netdev_hw_addr_list *list)
      {
	INIT_LIST_HEAD(&list->list);
              list->count = 0;
      }
      EXPORT_SYMBOL(__hw_addr_init);
      
      /*
       * Device addresses handling functions
       */
      
      /**
       *        dev_addr_flush - Flush device address list
       *        @dev: device
       *
       *        Flush device address list and reset ->dev_addr.
       *
       *        The caller must hold the rtnl_mutex.
       */
      void dev_addr_flush(struct net_device *dev)
      {
              /* rtnl_mutex must be held here */
      
	__hw_addr_flush(&dev->dev_addrs);
              dev->dev_addr = NULL;
      }
      EXPORT_SYMBOL(dev_addr_flush);
      
      /**
       *        dev_addr_init - Init device address list
       *        @dev: device
       *
       *        Init device address list and create the first element,
       *        used by ->dev_addr.
       *
       *        The caller must hold the rtnl_mutex.
       */
      int dev_addr_init(struct net_device *dev)
      {
              unsigned char addr[MAX_ADDR_LEN];
              struct netdev_hw_addr *ha;
              int err;
      
              /* rtnl_mutex must be held here */
      
	__hw_addr_init(&dev->dev_addrs);
              memset(addr, 0, sizeof(addr));
              err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
                                  NETDEV_HW_ADDR_T_LAN);
              if (!err) {
                      /*
                       * Get the first (previously created) address from the list
                       * and set dev_addr pointer to this location.
                       */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
      }
      EXPORT_SYMBOL(dev_addr_init);
      
      /**
       *        dev_addr_add - Add a device address
       *        @dev: device
       *        @addr: address to add
       *        @addr_type: address type
       *
       *        Add a device address to the device or increase the reference count if
       *        it already exists.
       *
       *        The caller must hold the rtnl_mutex.
       */
      int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                       unsigned char addr_type)
      {
              int err;
      
              ASSERT_RTNL();
      
              err = dev_pre_changeaddr_notify(dev, addr, NULL);
              if (err)
                      return err;
              err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
              if (!err)
                      call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
              return err;
      }
      EXPORT_SYMBOL(dev_addr_add);
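
/*
 * Minimal caller sketch (hypothetical): RTNL must be held around the call,
 * e.g. when adding a secondary hardware address for a LAN port.
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */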
      
      /**
       *        dev_addr_del - Release a device address.
       *        @dev: device
       *        @addr: address to delete
       *        @addr_type: address type
       *
       *        Release reference to a device address and remove it from the device
       *        if the reference count drops to zero.
       *
       *        The caller must hold the rtnl_mutex.
       */
      int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                       unsigned char addr_type)
      {
              int err;
              struct netdev_hw_addr *ha;
      
              ASSERT_RTNL();
      
              /*
               * We can not remove the first address from the list because
               * dev->dev_addr points to that.
               */
              ha = list_first_entry(&dev->dev_addrs.list,
                                    struct netdev_hw_addr, list);
              if (!memcmp(ha->addr, addr, dev->addr_len) &&
                  ha->type == addr_type && ha->refcount == 1)
                      return -ENOENT;
      
              err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
                                  addr_type);
              if (!err)
                      call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
              return err;
      }
      EXPORT_SYMBOL(dev_addr_del);
      
      /*
       * Unicast list handling functions
       */
      
      /**
       *        dev_uc_add_excl - Add a global secondary unicast address
       *        @dev: device
       *        @addr: address to add
       */
      int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
      {
              struct netdev_hw_addr *ha;
              int err;
      
	netif_addr_lock_bh(dev);
	list_for_each_entry(ha, &dev->uc.list, list) {
		if (!memcmp(ha->addr, addr, dev->addr_len) &&
		    ha->type == NETDEV_HW_ADDR_T_UNICAST) {
                              err = -EEXIST;
                              goto out;
                      }
              }
              err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len,
                                        NETDEV_HW_ADDR_T_UNICAST, true, false);
              if (!err)
                      __dev_set_rx_mode(dev);
      out:
	netif_addr_unlock_bh(dev);
              return err;
      }
      EXPORT_SYMBOL(dev_uc_add_excl);
      
      /**
       *        dev_uc_add - Add a secondary unicast address
       *        @dev: device
       *        @addr: address to add
       *
       *        Add a secondary unicast address to the device or increase
       *        the reference count if it already exists.
       */
      int dev_uc_add(struct net_device *dev, const unsigned char *addr)
      {
              int err;
      
	netif_addr_lock_bh(dev);
	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
              return err;
      }
      EXPORT_SYMBOL(dev_uc_add);
      
      /**
       *        dev_uc_del - Release secondary unicast address.
       *        @dev: device
       *        @addr: address to delete
       *
       *        Release reference to a secondary unicast address and remove it
       *        from the device if the reference count drops to zero.
       */
      int dev_uc_del(struct net_device *dev, const unsigned char *addr)
      {
              int err;
      
	netif_addr_lock_bh(dev);
	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
              return err;
      }
      EXPORT_SYMBOL(dev_uc_del);
      
      /**
       *        dev_uc_sync - Synchronize device's unicast list to another device
       *        @to: destination device
       *        @from: source device
       *
       *        Add newly added addresses to the destination device and release
       *        addresses that have no users left. The source device must be
       *        locked by netif_addr_lock_bh.
       *
       *        This function is intended to be called from the dev->set_rx_mode
 *        function of layered software devices.  This function assumes that
 *        addresses will only ever be synced to the @to device and no other.
       */
      int dev_uc_sync(struct net_device *to, struct net_device *from)
      {
              int err = 0;
      
              if (to->addr_len != from->addr_len)
                      return -EINVAL;
      
              netif_addr_lock_nested(to);
              err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
              if (!err)
                      __dev_set_rx_mode(to);
              netif_addr_unlock(to);
              return err;
      }
      EXPORT_SYMBOL(dev_uc_sync);
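
/*
 * Typical caller sketch (hypothetical upper device, e.g. a VLAN-like
 * driver): propagate the upper device's address lists to the lower device
 * from ndo_set_rx_mode, with the upper device's address lock already held
 * by the core. upper_get_lowerdev() is an assumed helper.
 *
 *	static void upper_set_rx_mode(struct net_device *upper)
 *	{
 *		struct net_device *lower = upper_get_lowerdev(upper);
 *
 *		dev_uc_sync(lower, upper);
 *		dev_mc_sync(lower, upper);
 *	}
 */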
      
      /**
       *        dev_uc_sync_multiple - Synchronize device's unicast list to another
       *        device, but allow for multiple calls to sync to multiple devices.
       *        @to: destination device
       *        @from: source device
       *
       *        Add newly added addresses to the destination device and release
       *        addresses that have been deleted from the source. The source device
       *        must be locked by netif_addr_lock_bh.
       *
       *        This function is intended to be called from the dev->set_rx_mode
       *        function of layered software devices.  It allows for a single source
       *        device to be synced to multiple destination devices.
       */
      int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
      {
              int err = 0;
      
	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
      }
      EXPORT_SYMBOL(dev_uc_sync_multiple);
      
      /**
       *        dev_uc_unsync - Remove synchronized addresses from the destination device
       *        @to: destination device
       *        @from: source device
       *
       *        Remove all addresses that were added to the destination device by
       *        dev_uc_sync(). This function is intended to be called from the
       *        dev->stop function of layered software devices.
       */
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	netif_addr_lock_bh(from);
	netif_addr_lock_nested(to);
              __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
              __dev_set_rx_mode(to);
              netif_addr_unlock(to);
              netif_addr_unlock_bh(from);
      }
      EXPORT_SYMBOL(dev_uc_unsync);
      
      /**
       *        dev_uc_flush - Flush unicast addresses
       *        @dev: device
       *
       *        Flush unicast addresses.
       */
      void dev_uc_flush(struct net_device *dev)
      {
	netif_addr_lock_bh(dev);
              __hw_addr_flush(&dev->uc);
              netif_addr_unlock_bh(dev);
      }
      EXPORT_SYMBOL(dev_uc_flush);
      
      /**
 *        dev_uc_init - Init unicast address list
       *        @dev: device
       *
       *        Init unicast address list.
       */
      void dev_uc_init(struct net_device *dev)
      {
	__hw_addr_init(&dev->uc);
      }
      EXPORT_SYMBOL(dev_uc_init);
      
      /*
       * Multicast list handling functions
       */
      
      /**
       *        dev_mc_add_excl - Add a global secondary multicast address
       *        @dev: device
       *        @addr: address to add
       */
      int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
      {
              struct netdev_hw_addr *ha;
              int err;
      
	netif_addr_lock_bh(dev);
	list_for_each_entry(ha, &dev->mc.list, list) {
		if (!memcmp(ha->addr, addr, dev->addr_len) &&
		    ha->type == NETDEV_HW_ADDR_T_MULTICAST) {
			err = -EEXIST;
			goto out;
		}
	}
	err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len,
				  NETDEV_HW_ADDR_T_MULTICAST, true, false);
	if (!err)
		__dev_set_rx_mode(dev);
out:
	netif_addr_unlock_bh(dev);
              return err;
      }
      EXPORT_SYMBOL(dev_mc_add_excl);
      
      static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
                              bool global)
      {
              int err;
      
	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false, 0);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}

      /**
       *        dev_mc_add - Add a multicast address
       *        @dev: device
       *        @addr: address to add
       *
       *        Add a multicast address to the device or increase
       *        the reference count if it already exists.
       */
      int dev_mc_add(struct net_device *dev, const unsigned char *addr)
      {
	return __dev_mc_add(dev, addr, false);
      }
      EXPORT_SYMBOL(dev_mc_add);
      
      /**
       *        dev_mc_add_global - Add a global multicast address
       *        @dev: device
       *        @addr: address to add
       *
       *        Add a global multicast address to the device.
       */
      int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
      {
	return __dev_mc_add(dev, addr, true);
      }
      EXPORT_SYMBOL(dev_mc_add_global);
      
      static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
                              bool global)
      {
              int err;
      
	netif_addr_lock_bh(dev);
	err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
              return err;
      }
      
      /**
       *        dev_mc_del - Delete a multicast address.
       *        @dev: device
       *        @addr: address to delete
       *
       *        Release reference to a multicast address and remove it
       *        from the device if the reference count drops to zero.
       */
      int dev_mc_del(struct net_device *dev, const unsigned char *addr)
      {
	return __dev_mc_del(dev, addr, false);
      }
      EXPORT_SYMBOL(dev_mc_del);
      
      /**
       *        dev_mc_del_global - Delete a global multicast address.
       *        @dev: device
       *        @addr: address to delete
       *
       *        Release reference to a multicast address and remove it
       *        from the device if the reference count drops to zero.
       */
      int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
      {
	return __dev_mc_del(dev, addr, true);
      }
      EXPORT_SYMBOL(dev_mc_del_global);
      
      /**
       *        dev_mc_sync - Synchronize device's multicast list to another device
       *        @to: destination device
       *        @from: source device
       *
       *        Add newly added addresses to the destination device and release
       *        addresses that have no users left. The source device must be
       *        locked by netif_addr_lock_bh.
       *
       *        This function is intended to be called from the ndo_set_rx_mode
       *        function of layered software devices.
       */
      int dev_mc_sync(struct net_device *to, struct net_device *from)
      {
              int err = 0;
      
              if (to->addr_len != from->addr_len)
                      return -EINVAL;
      
              netif_addr_lock_nested(to);
              err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
              if (!err)
                      __dev_set_rx_mode(to);
              netif_addr_unlock(to);
              return err;
      }
      EXPORT_SYMBOL(dev_mc_sync);
      
      /**
       *        dev_mc_sync_multiple - Synchronize device's multicast list to another
       *        device, but allow for multiple calls to sync to multiple devices.
       *        @to: destination device
       *        @from: source device
       *
       *        Add newly added addresses to the destination device and release
       *        addresses that have no users left. The source device must be
       *        locked by netif_addr_lock_bh.
       *
       *        This function is intended to be called from the ndo_set_rx_mode
       *        function of layered software devices.  It allows for a single
       *        source device to be synced to multiple destination devices.
       */
      int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
      {
              int err = 0;
      
	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
      }
      EXPORT_SYMBOL(dev_mc_sync_multiple);
      
      /**
       *        dev_mc_unsync - Remove synchronized addresses from the destination device
       *        @to: destination device
       *        @from: source device
       *
       *        Remove all addresses that were added to the destination device by
       *        dev_mc_sync(). This function is intended to be called from the
       *        dev->stop function of layered software devices.
       */
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	netif_addr_lock_bh(from);
	netif_addr_lock_nested(to);
              __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
              __dev_set_rx_mode(to);
              netif_addr_unlock(to);
              netif_addr_unlock_bh(from);
      }
      EXPORT_SYMBOL(dev_mc_unsync);
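
/*
 * Teardown counterpart sketch (hypothetical upper device): called from
 * ndo_stop so the lower device no longer carries addresses that were only
 * there on behalf of the upper device. upper_get_lowerdev() is an assumed
 * helper.
 *
 *	static int upper_stop(struct net_device *upper)
 *	{
 *		struct net_device *lower = upper_get_lowerdev(upper);
 *
 *		dev_uc_unsync(lower, upper);
 *		dev_mc_unsync(lower, upper);
 *		return 0;
 *	}
 */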
      
      /**
       *        dev_mc_flush - Flush multicast addresses
       *        @dev: device
       *
       *        Flush multicast addresses.
       */
      void dev_mc_flush(struct net_device *dev)
      {
	netif_addr_lock_bh(dev);
              __hw_addr_flush(&dev->mc);
              netif_addr_unlock_bh(dev);
      }
      EXPORT_SYMBOL(dev_mc_flush);
      
      /**
       *        dev_mc_init - Init multicast address list
       *        @dev: device
       *
       *        Init multicast address list.
       */
      void dev_mc_init(struct net_device *dev)
      {
	__hw_addr_init(&dev->mc);
      }
      EXPORT_SYMBOL(dev_mc_init);
      // SPDX-License-Identifier: GPL-2.0-or-later
      /* xfrm4_protocol.c - Generic xfrm protocol multiplexer.
       *
       * Copyright (C) 2013 secunet Security Networks AG
       *
       * Author:
       * Steffen Klassert <steffen.klassert@secunet.com>
       *
       * Based on:
       * net/ipv4/tunnel4.c
       */
      
      #include <linux/init.h>
      #include <linux/mutex.h>
      #include <linux/skbuff.h>
      #include <net/icmp.h>
      #include <net/ip.h>
      #include <net/protocol.h>
      #include <net/xfrm.h>
      
      static struct xfrm4_protocol __rcu *esp4_handlers __read_mostly;
      static struct xfrm4_protocol __rcu *ah4_handlers __read_mostly;
      static struct xfrm4_protocol __rcu *ipcomp4_handlers __read_mostly;
      static DEFINE_MUTEX(xfrm4_protocol_mutex);
      
      static inline struct xfrm4_protocol __rcu **proto_handlers(u8 protocol)
      {
	switch (protocol) {
	case IPPROTO_ESP:
		return &esp4_handlers;
	case IPPROTO_AH:
		return &ah4_handlers;
	case IPPROTO_COMP:
		return &ipcomp4_handlers;
              }
      
              return NULL;
      }
      
      #define for_each_protocol_rcu(head, handler)                \
              for (handler = rcu_dereference(head);                \
                   handler != NULL;                                \
                   handler = rcu_dereference(handler->next))        \
      
      static int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
      {
              int ret;
              struct xfrm4_protocol *handler;
	struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
      
              if (!head)
                      return 0;
      
	for_each_protocol_rcu(*head, handler)
		if ((ret = handler->cb_handler(skb, err)) <= 0)
                              return ret;
      
              return 0;
      }
      
      int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
                          int encap_type)
      {
              int ret;
              struct xfrm4_protocol *handler;
	struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
              XFRM_SPI_SKB_CB(skb)->family = AF_INET;
              XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
      
              if (!head)
                      goto out;
      
	for_each_protocol_rcu(*head, handler)
		if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
                              return ret;
      
      out:
              icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
      
              kfree_skb(skb);
	return 0;
      }
      EXPORT_SYMBOL(xfrm4_rcv_encap);
      
      static int xfrm4_esp_rcv(struct sk_buff *skb)
      {
              int ret;
              struct xfrm4_protocol *handler;
      
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;

	for_each_protocol_rcu(esp4_handlers, handler)
		if ((ret = handler->handler(skb)) != -EINVAL)
                              return ret;
      
              icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
      
              kfree_skb(skb);
	return 0;
      }
      
      static int xfrm4_esp_err(struct sk_buff *skb, u32 info)
      {
              struct xfrm4_protocol *handler;
      
	for_each_protocol_rcu(esp4_handlers, handler)
		if (!handler->err_handler(skb, info))
			return 0;

	return -ENOENT;
      }
      
      static int xfrm4_ah_rcv(struct sk_buff *skb)
      {
              int ret;
              struct xfrm4_protocol *handler;
      
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;

	for_each_protocol_rcu(ah4_handlers, handler)
		if ((ret = handler->handler(skb)) != -EINVAL)
                              return ret;
      
              icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
      
              kfree_skb(skb);
	return 0;
      }
      
      static int xfrm4_ah_err(struct sk_buff *skb, u32 info)
      {
              struct xfrm4_protocol *handler;
      
	for_each_protocol_rcu(ah4_handlers, handler)
		if (!handler->err_handler(skb, info))
			return 0;

	return -ENOENT;
      }
      
      static int xfrm4_ipcomp_rcv(struct sk_buff *skb)
      {
              int ret;
              struct xfrm4_protocol *handler;
      
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;

	for_each_protocol_rcu(ipcomp4_handlers, handler)
		if ((ret = handler->handler(skb)) != -EINVAL)
                              return ret;
      
              icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
      
              kfree_skb(skb);
	return 0;
      }
      
      static int xfrm4_ipcomp_err(struct sk_buff *skb, u32 info)
      {
              struct xfrm4_protocol *handler;
      
	for_each_protocol_rcu(ipcomp4_handlers, handler)
		if (!handler->err_handler(skb, info))
			return 0;

	return -ENOENT;
      }
      
      static const struct net_protocol esp4_protocol = {
              .handler        =        xfrm4_esp_rcv,
              .err_handler        =        xfrm4_esp_err,
              .no_policy        =        1,
              .netns_ok        =        1,
      };
      
      static const struct net_protocol ah4_protocol = {
              .handler        =        xfrm4_ah_rcv,
              .err_handler        =        xfrm4_ah_err,
              .no_policy        =        1,
              .netns_ok        =        1,
      };
      
      static const struct net_protocol ipcomp4_protocol = {
              .handler        =        xfrm4_ipcomp_rcv,
              .err_handler        =        xfrm4_ipcomp_err,
              .no_policy        =        1,
              .netns_ok        =        1,
      };
      
      static const struct xfrm_input_afinfo xfrm4_input_afinfo = {
              .family                =        AF_INET,
              .callback        =        xfrm4_rcv_cb,
      };
      
      static inline const struct net_protocol *netproto(unsigned char protocol)
      {
              switch (protocol) {
              case IPPROTO_ESP:
                      return &esp4_protocol;
              case IPPROTO_AH:
                      return &ah4_protocol;
              case IPPROTO_COMP:
                      return &ipcomp4_protocol;
              }
      
              return NULL;
      }
      
      int xfrm4_protocol_register(struct xfrm4_protocol *handler,
                                  unsigned char protocol)
      {
              struct xfrm4_protocol __rcu **pprev;
              struct xfrm4_protocol *t;
              bool add_netproto = false;
              int ret = -EEXIST;
              int priority = handler->priority;
      
              if (!proto_handlers(protocol) || !netproto(protocol))
                      return -EINVAL;
      
              mutex_lock(&xfrm4_protocol_mutex);
      
              if (!rcu_dereference_protected(*proto_handlers(protocol),
                                             lockdep_is_held(&xfrm4_protocol_mutex)))
                      add_netproto = true;
      
              for (pprev = proto_handlers(protocol);
                   (t = rcu_dereference_protected(*pprev,
                              lockdep_is_held(&xfrm4_protocol_mutex))) != NULL;
                   pprev = &t->next) {
                      if (t->priority < priority)
                              break;
                      if (t->priority == priority)
                              goto err;
              }
      
              handler->next = *pprev;
              rcu_assign_pointer(*pprev, handler);
      
              ret = 0;
      
      err:
              mutex_unlock(&xfrm4_protocol_mutex);
      
              if (add_netproto) {
                      if (inet_add_protocol(netproto(protocol), protocol)) {
                              pr_err("%s: can't add protocol\n", __func__);
                              ret = -EAGAIN;
                      }
              }
      
              return ret;
      }
      EXPORT_SYMBOL(xfrm4_protocol_register);
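
/*
 * Registration sketch (hypothetical handler): a protocol module fills in a
 * struct xfrm4_protocol with its handlers and a priority, then registers it
 * for one of the supported protocol numbers. Handlers with a higher priority
 * value are tried first; registering two handlers with the same priority
 * fails with -EEXIST.
 *
 *	static struct xfrm4_protocol foo_esp4_protocol = {
 *		.handler	= foo_esp_rcv,
 *		.input_handler	= foo_esp_input,
 *		.cb_handler	= foo_esp_cb,
 *		.err_handler	= foo_esp_err,
 *		.priority	= 10,
 *	};
 *
 *	err = xfrm4_protocol_register(&foo_esp4_protocol, IPPROTO_ESP);
 */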
      
      int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
                                    unsigned char protocol)
      {
              struct xfrm4_protocol __rcu **pprev;
              struct xfrm4_protocol *t;
              int ret = -ENOENT;
      
              if (!proto_handlers(protocol) || !netproto(protocol))
                      return -EINVAL;
      
              mutex_lock(&xfrm4_protocol_mutex);
      
              for (pprev = proto_handlers(protocol);
                   (t = rcu_dereference_protected(*pprev,
                              lockdep_is_held(&xfrm4_protocol_mutex))) != NULL;
                   pprev = &t->next) {
                      if (t == handler) {
                              *pprev = handler->next;
                              ret = 0;
                              break;
                      }
              }
      
              if (!rcu_dereference_protected(*proto_handlers(protocol),
                                             lockdep_is_held(&xfrm4_protocol_mutex))) {
                      if (inet_del_protocol(netproto(protocol), protocol) < 0) {
                              pr_err("%s: can't remove protocol\n", __func__);
                              ret = -EAGAIN;
                      }
              }
      
              mutex_unlock(&xfrm4_protocol_mutex);
      
              synchronize_net();
      
              return ret;
      }
      EXPORT_SYMBOL(xfrm4_protocol_deregister);
      
      void __init xfrm4_protocol_init(void)
      {
              xfrm_input_register_afinfo(&xfrm4_input_afinfo);
      }
      EXPORT_SYMBOL(xfrm4_protocol_init);
      // SPDX-License-Identifier: GPL-2.0-or-later
      /*
       *        X.25 Packet Layer release 002
       *
       *        This is ALPHA test software. This code may break your machine,
       *        randomly fail to work with new releases, misbehave and/or generally
       *        screw up. It might even work.
       *
       *        This code REQUIRES 2.1.15 or higher
       *
       *        History
       *        X.25 001        Jonathan Naylor        Started coding.
       */
      
      #include <linux/if_arp.h>
      #include <linux/init.h>
      #include <linux/slab.h>
      #include <net/x25.h>
      
      LIST_HEAD(x25_route_list);
      DEFINE_RWLOCK(x25_route_list_lock);
      
      /*
       *        Add a new route.
       */
      static int x25_add_route(struct x25_address *address, unsigned int sigdigits,
                               struct net_device *dev)
      {
              struct x25_route *rt;
              struct list_head *entry;
              int rc = -EINVAL;
      
              write_lock_bh(&x25_route_list_lock);
      
              list_for_each(entry, &x25_route_list) {
                      rt = list_entry(entry, struct x25_route, node);
      
                      if (!memcmp(&rt->address, address, sigdigits) &&
                          rt->sigdigits == sigdigits)
                              goto out;
              }
      
              rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
              rc = -ENOMEM;
              if (!rt)
                      goto out;
      
              strcpy(rt->address.x25_addr, "000000000000000");
              memcpy(rt->address.x25_addr, address->x25_addr, sigdigits);
      
              rt->sigdigits = sigdigits;
              rt->dev       = dev;
              refcount_set(&rt->refcnt, 1);
      
              list_add(&rt->node, &x25_route_list);
              rc = 0;
      out:
              write_unlock_bh(&x25_route_list_lock);
              return rc;
      }
      
      /**
       * __x25_remove_route - remove route from x25_route_list
       * @rt: route to remove
       *
 * Remove the route from x25_route_list if it is there.
       * Caller must hold x25_route_list_lock.
       */
      static void __x25_remove_route(struct x25_route *rt)
      {
              if (rt->node.next) {
                      list_del(&rt->node);
                      x25_route_put(rt);
              }
      }
      
      static int x25_del_route(struct x25_address *address, unsigned int sigdigits,
                               struct net_device *dev)
      {
              struct x25_route *rt;
              struct list_head *entry;
              int rc = -EINVAL;
      
              write_lock_bh(&x25_route_list_lock);
      
              list_for_each(entry, &x25_route_list) {
                      rt = list_entry(entry, struct x25_route, node);
      
                      if (!memcmp(&rt->address, address, sigdigits) &&
                          rt->sigdigits == sigdigits && rt->dev == dev) {
                              __x25_remove_route(rt);
                              rc = 0;
                              break;
                      }
              }
      
              write_unlock_bh(&x25_route_list_lock);
              return rc;
      }
      
      /*
       *        A device has been removed, remove its routes.
       */
      void x25_route_device_down(struct net_device *dev)
      {
              struct x25_route *rt;
              struct list_head *entry, *tmp;
      
              write_lock_bh(&x25_route_list_lock);
      
              list_for_each_safe(entry, tmp, &x25_route_list) {
                      rt = list_entry(entry, struct x25_route, node);
      
                      if (rt->dev == dev)
                              __x25_remove_route(rt);
              }
              write_unlock_bh(&x25_route_list_lock);
      
              /* Remove any related forwarding */
              x25_clear_forward_by_dev(dev);
      }
      
      /*
       *        Check that the device given is a valid X.25 interface that is "up".
       */
struct net_device *x25_dev_get(char *devname)
{
	struct net_device *dev = dev_get_by_name(&init_net, devname);

	if (dev &&
	    (!(dev->flags & IFF_UP) || (dev->type != ARPHRD_X25
#if IS_ENABLED(CONFIG_LLC)
					&& dev->type != ARPHRD_ETHER
#endif
					))) {
		dev_put(dev);
		dev = NULL;
	}

	return dev;
}
      
      /**
 *	x25_get_route -	Find a route given an X.25 address.
 *	@addr: address to find a route for
       *
       *         Find a route given an X.25 address.
       */
      struct x25_route *x25_get_route(struct x25_address *addr)
      {
              struct x25_route *rt, *use = NULL;
              struct list_head *entry;
      
	read_lock_bh(&x25_route_list_lock);
      
              list_for_each(entry, &x25_route_list) {
                      rt = list_entry(entry, struct x25_route, node);
      
                      if (!memcmp(&rt->address, addr, rt->sigdigits)) {
                              if (!use)
                                      use = rt;
                              else if (rt->sigdigits > use->sigdigits)
                                      use = rt;
                      }
              }
      
              if (use)
                      x25_route_hold(use);
      
	read_unlock_bh(&x25_route_list_lock);
              return use;
      }
      
      /*
       *        Handle the ioctls that control the routing functions.
       */
      int x25_route_ioctl(unsigned int cmd, void __user *arg)
      {
              struct x25_route_struct rt;
              struct net_device *dev;
              int rc = -EINVAL;
      
	if (cmd != SIOCADDRT && cmd != SIOCDELRT)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&rt, arg, sizeof(rt)))
		goto out;

	rc = -EINVAL;
	if (rt.sigdigits > 15)
		goto out;

	dev = x25_dev_get(rt.device);
              if (!dev)
                      goto out;
      
              if (cmd == SIOCADDRT)
                      rc = x25_add_route(&rt.address, rt.sigdigits, dev);
              else
                      rc = x25_del_route(&rt.address, rt.sigdigits, dev);
              dev_put(dev);
      out:
	return rc;
      }
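
/*
 * User-space sketch (hypothetical program): routes are added and removed
 * with SIOCADDRT/SIOCDELRT on an X.25 socket, passing a
 * struct x25_route_struct (address, number of significant digits, device
 * name). The interface name "hdlc0" is only an example.
 *
 *	struct x25_route_struct rt = {
 *		.sigdigits = 4,
 *	};
 *
 *	strcpy(rt.address.x25_addr, "2342");
 *	strcpy(rt.device, "hdlc0");
 *	if (ioctl(x25_sock_fd, SIOCADDRT, &rt) < 0)
 *		perror("SIOCADDRT");
 */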
      
      /*
       *        Release all memory associated with X.25 routing structures.
       */
      void __exit x25_route_free(void)
      {
              struct x25_route *rt;
              struct list_head *entry, *tmp;
      
              write_lock_bh(&x25_route_list_lock);
              list_for_each_safe(entry, tmp, &x25_route_list) {
                      rt = list_entry(entry, struct x25_route, node);
                      __x25_remove_route(rt);
              }
              write_unlock_bh(&x25_route_list_lock);
      }
      /*
       * algif_rng: User-space interface for random number generators
       *
       * This file provides the user-space API for random number generators.
       *
       * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, and the entire permission notice in its entirety,
       *    including the disclaimer of warranties.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. The name of the author may not be used to endorse or promote
       *    products derived from this software without specific prior
       *    written permission.
       *
       * ALTERNATIVELY, this product may be distributed under the terms of
       * the GNU General Public License, in which case the provisions of the GPL2
       * are required INSTEAD OF the above restrictions.  (This clause is
       * necessary due to a potential bad interaction between the GPL and
       * the restrictions contained in a BSD-style copyright.)
       *
       * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
       * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
       * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
       * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
       * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
       * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
       * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
       * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
       * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
       * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
       * DAMAGE.
       */
      
      #include <linux/module.h>
      #include <crypto/rng.h>
      #include <linux/random.h>
      #include <crypto/if_alg.h>
      #include <linux/net.h>
      #include <net/sock.h>
      
      MODULE_LICENSE("GPL");
      MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
      MODULE_DESCRIPTION("User-space interface for random number generators");
      
      struct rng_ctx {
      #define MAXSIZE 128
              unsigned int len;
              struct crypto_rng *drng;
      };
      
      static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                             int flags)
      {
   18         struct sock *sk = sock->sk;
              struct alg_sock *ask = alg_sk(sk);
              struct rng_ctx *ctx = ask->private;
              int err = -EFAULT;
              int genlen = 0;
              u8 result[MAXSIZE];
      
   18         if (len == 0)
                      return 0;
              if (len > MAXSIZE)
                      len = MAXSIZE;
      
              /*
               * although not strictly needed, this is a precaution against coding
               * errors
               */
              memset(result, 0, len);
      
               /*
                * Enforcing proper seeding of an RNG is the job of the RNG
                * implementation. Some RNGs (DRBG, krng) need no explicit
                * seeding as they seed themselves automatically; the X9.31
                * DRNG returns an error if it was not seeded properly.
                */
              genlen = crypto_rng_get_bytes(ctx->drng, result, len);
              if (genlen < 0)
                      return genlen;
      
   13         err = memcpy_to_msg(msg, result, len);
   12         memzero_explicit(result, len);
      
   14         return err ? err : len;
      }
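
       /*
        * Illustrative user-space sketch (not part of this file): obtaining
        * random bytes through the "rng" AF_ALG type served by rng_recvmsg()
        * above.  "stdrng" is the generic RNG algorithm name; error handling
        * is omitted for brevity.
        */
       #if 0	/* user-space example, does not build as part of the kernel */
       #include <unistd.h>
       #include <sys/socket.h>
       #include <linux/if_alg.h>

       #ifndef AF_ALG
       #define AF_ALG 38
       #endif

       static ssize_t read_random(unsigned char *buf, size_t len)
       {
               struct sockaddr_alg sa = {
                       .salg_family = AF_ALG,
                       .salg_type   = "rng",
                       .salg_name   = "stdrng",
               };
               int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
               int op;
               ssize_t n;

               bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
               op = accept(tfm, NULL, 0);	/* one FD per RNG instance */
               n = read(op, buf, len);	/* lands in rng_recvmsg(), capped at MAXSIZE */
               close(op);
               close(tfm);
               return n;
       }
       #endif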
      
      static struct proto_ops algif_rng_ops = {
              .family                =        PF_ALG,
      
              .connect        =        sock_no_connect,
              .socketpair        =        sock_no_socketpair,
              .getname        =        sock_no_getname,
              .ioctl                =        sock_no_ioctl,
              .listen                =        sock_no_listen,
              .shutdown        =        sock_no_shutdown,
              .getsockopt        =        sock_no_getsockopt,
              .mmap                =        sock_no_mmap,
              .bind                =        sock_no_bind,
              .accept                =        sock_no_accept,
              .setsockopt        =        sock_no_setsockopt,
              .sendmsg        =        sock_no_sendmsg,
              .sendpage        =        sock_no_sendpage,
      
              .release        =        af_alg_release,
              .recvmsg        =        rng_recvmsg,
      };
      
      static void *rng_bind(const char *name, u32 type, u32 mask)
      {
   14         return crypto_alloc_rng(name, type, mask);
      }
      
      static void rng_release(void *private)
      {
   10         crypto_free_rng(private);
      }
      
      static void rng_sock_destruct(struct sock *sk)
      {
              struct alg_sock *ask = alg_sk(sk);
    2         struct rng_ctx *ctx = ask->private;
      
              sock_kfree_s(sk, ctx, ctx->len);
              af_alg_release_parent(sk);
      }
      
      static int rng_accept_parent(void *private, struct sock *sk)
      {
              struct rng_ctx *ctx;
              struct alg_sock *ask = alg_sk(sk);
              unsigned int len = sizeof(*ctx);
      
    3         ctx = sock_kmalloc(sk, len, GFP_KERNEL);
              if (!ctx)
                      return -ENOMEM;
      
    2         ctx->len = len;
      
               /*
                * No seeding is done at this point -- if multiple accept(2)
                * calls are made on one RNG instance, each resulting FD points
                * to the same RNG state.
                */
      
              ctx->drng = private;
              ask->private = ctx;
              sk->sk_destruct = rng_sock_destruct;
      
    2         return 0;
      }
      
      static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen)
      {
               /*
                * Whether seedlen is of sufficient size is checked by the
                * RNG implementation.
                */
   21         return crypto_rng_reset(private, seed, seedlen);
      }
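
       /*
        * Illustrative user-space sketch (not part of this file): seeding the
        * RNG through the setkey path above.  The seed travels as an AF_ALG
        * "key" on the bound tfm socket before accept(2); the required length
        * depends on the particular RNG implementation.
        */
       #if 0	/* user-space example, does not build as part of the kernel */
       #include <sys/socket.h>
       #include <linux/if_alg.h>

       #ifndef SOL_ALG
       #define SOL_ALG 279
       #endif

       static int seed_rng(int tfm_sock, const unsigned char *seed,
                           unsigned int seedlen)
       {
               /* ends up in rng_setkey() -> crypto_rng_reset() */
               return setsockopt(tfm_sock, SOL_ALG, ALG_SET_KEY, seed, seedlen);
       }
       #endif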
      
      static const struct af_alg_type algif_type_rng = {
              .bind                =        rng_bind,
              .release        =        rng_release,
              .accept                =        rng_accept_parent,
              .setkey                =        rng_setkey,
              .ops                =        &algif_rng_ops,
              .name                =        "rng",
              .owner                =        THIS_MODULE
      };
      
      static int __init rng_init(void)
      {
              return af_alg_register_type(&algif_type_rng);
      }
      
      static void __exit rng_exit(void)
      {
              int err = af_alg_unregister_type(&algif_type_rng);
              BUG_ON(err);
      }
      
      module_init(rng_init);
      module_exit(rng_exit);
      // SPDX-License-Identifier: GPL-2.0-only
      /* iptables module to match on related connections */
      /*
       * (C) 2001 Martin Josefsson <gandalf@wlug.westbo.se>
       */
      #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      #include <linux/module.h>
      #include <linux/skbuff.h>
      #include <linux/netfilter.h>
      #include <net/netfilter/nf_conntrack.h>
      #include <net/netfilter/nf_conntrack_core.h>
      #include <net/netfilter/nf_conntrack_helper.h>
      #include <linux/netfilter/x_tables.h>
      #include <linux/netfilter/xt_helper.h>
      
      MODULE_LICENSE("GPL");
      MODULE_AUTHOR("Martin Josefsson <gandalf@netfilter.org>");
      MODULE_DESCRIPTION("Xtables: Related connection matching");
      MODULE_ALIAS("ipt_helper");
      MODULE_ALIAS("ip6t_helper");
      
      
      static bool
      helper_mt(const struct sk_buff *skb, struct xt_action_param *par)
      {
              const struct xt_helper_info *info = par->matchinfo;
              const struct nf_conn *ct;
              const struct nf_conn_help *master_help;
              const struct nf_conntrack_helper *helper;
              enum ip_conntrack_info ctinfo;
              bool ret = info->invert;
      
              ct = nf_ct_get(skb, &ctinfo);
              if (!ct || !ct->master)
                      return ret;
      
              master_help = nfct_help(ct->master);
              if (!master_help)
                      return ret;
      
              /* rcu_read_lock()ed by nf_hook_thresh */
              helper = rcu_dereference(master_help->helper);
              if (!helper)
                      return ret;
      
              if (info->name[0] == '\0')
                      ret = !ret;
              else
                      ret ^= !strncmp(helper->name, info->name,
                                      strlen(helper->name));
              return ret;
      }
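
       /*
        * Example (illustrative): a rule such as
        *   iptables -A FORWARD -m helper --helper ftp -j ACCEPT
        * matches packets whose conntrack master connection was set up by the
        * "ftp" helper.  An empty helper name in info->name matches any
        * helper, and info->invert flips the result via the XOR logic above.
        */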
      
      static int helper_mt_check(const struct xt_mtchk_param *par)
      {
    2         struct xt_helper_info *info = par->matchinfo;
              int ret;
      
              ret = nf_ct_netns_get(par->net, par->family);
              if (ret < 0) {
                      pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
                                          par->family);
                      return ret;
              }
    1         info->name[sizeof(info->name) - 1] = '\0';
    1         return 0;
      }
      
      static void helper_mt_destroy(const struct xt_mtdtor_param *par)
      {
    1         nf_ct_netns_put(par->net, par->family);
      }
      
      static struct xt_match helper_mt_reg __read_mostly = {
              .name       = "helper",
              .revision   = 0,
              .family     = NFPROTO_UNSPEC,
              .checkentry = helper_mt_check,
              .match      = helper_mt,
              .destroy    = helper_mt_destroy,
              .matchsize  = sizeof(struct xt_helper_info),
              .me         = THIS_MODULE,
      };
      
      static int __init helper_mt_init(void)
      {
              return xt_register_match(&helper_mt_reg);
      }
      
      static void __exit helper_mt_exit(void)
      {
              xt_unregister_match(&helper_mt_reg);
      }
      
      module_init(helper_mt_init);
      module_exit(helper_mt_exit);
      /* SPDX-License-Identifier: GPL-2.0 */
      #ifndef _LINUX_PAGEMAP_H
      #define _LINUX_PAGEMAP_H
      
      /*
       * Copyright 1995 Linus Torvalds
       */
      #include <linux/mm.h>
      #include <linux/fs.h>
      #include <linux/list.h>
      #include <linux/highmem.h>
      #include <linux/compiler.h>
      #include <linux/uaccess.h>
      #include <linux/gfp.h>
      #include <linux/bitops.h>
      #include <linux/hardirq.h> /* for in_interrupt() */
      #include <linux/hugetlb_inline.h>
      
      struct pagevec;
      
      /*
       * Bits in mapping->flags.
       */
      enum mapping_flags {
              AS_EIO                = 0,        /* IO error on async write */
              AS_ENOSPC        = 1,        /* ENOSPC on async write */
              AS_MM_ALL_LOCKS        = 2,        /* under mm_take_all_locks() */
              AS_UNEVICTABLE        = 3,        /* e.g., ramdisk, SHM_LOCK */
              AS_EXITING        = 4,         /* final truncate in progress */
              /* writeback related tags are not used */
              AS_NO_WRITEBACK_TAGS = 5,
      };
      
      /**
       * mapping_set_error - record a writeback error in the address_space
        * @mapping: the mapping in which an error should be set
        * @error: the error to set in the mapping
       *
       * When writeback fails in some way, we must record that error so that
       * userspace can be informed when fsync and the like are called.  We endeavor
       * to report errors on any file that was open at the time of the error.  Some
       * internal callers also need to know when writeback errors have occurred.
       *
       * When a writeback error occurs, most filesystems will want to call
       * mapping_set_error to record the error in the mapping so that it can be
       * reported when the application calls fsync(2).
       */
      static inline void mapping_set_error(struct address_space *mapping, int error)
      {
              if (likely(!error))
                      return;
      
              /* Record in wb_err for checkers using errseq_t based tracking */
              filemap_set_wb_err(mapping, error);
      
              /* Record it in flags for now, for legacy callers */
              if (error == -ENOSPC)
                      set_bit(AS_ENOSPC, &mapping->flags);
              else
                      set_bit(AS_EIO, &mapping->flags);
      }
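
       /*
        * Minimal sketch (illustrative, not part of this header): a write-back
        * completion path recording an I/O error so that a later fsync(2) can
        * report it.  "example_write_end_io" and its callers are hypothetical.
        */
       static inline void example_write_end_io(struct page *page, int err)
       {
               if (err) {
                       SetPageError(page);
                       mapping_set_error(page->mapping, err);	/* fsync() will now see it */
               }
       }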
      
      static inline void mapping_set_unevictable(struct address_space *mapping)
      {
              set_bit(AS_UNEVICTABLE, &mapping->flags);
      }
      
      static inline void mapping_clear_unevictable(struct address_space *mapping)
      {
              clear_bit(AS_UNEVICTABLE, &mapping->flags);
      }
      
      static inline int mapping_unevictable(struct address_space *mapping)
      {
              if (mapping)
 1523                 return test_bit(AS_UNEVICTABLE, &mapping->flags);
              return !!mapping;
      }
      
      static inline void mapping_set_exiting(struct address_space *mapping)
      {
 1061         set_bit(AS_EXITING, &mapping->flags);
      }
      
      static inline int mapping_exiting(struct address_space *mapping)
      {
  214         return test_bit(AS_EXITING, &mapping->flags);
      }
      
      static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
      {
              set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
      }
      
      static inline int mapping_use_writeback_tags(struct address_space *mapping)
      {
  379         return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
      }
      
      static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
      {
              return mapping->gfp_mask;
      }
      
      /* Restricts the given gfp_mask to what the mapping allows. */
      static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                      gfp_t gfp_mask)
      {
  134         return mapping_gfp_mask(mapping) & gfp_mask;
      }
      
      /*
       * This is non-atomic.  Only to be used before the mapping is activated.
       * Probably needs a barrier...
       */
      static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
      {
              m->gfp_mask = mask;
      }
      
      void release_pages(struct page **pages, int nr);
      
      /*
       * speculatively take a reference to a page.
       * If the page is free (_refcount == 0), then _refcount is untouched, and 0
       * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
       *
       * This function must be called inside the same rcu_read_lock() section as has
       * been used to lookup the page in the pagecache radix-tree (or page table):
       * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
       *
       * Unless an RCU grace period has passed, the count of all pages coming out
       * of the allocator must be considered unstable. page_count may return higher
       * than expected, and put_page must be able to do the right thing when the
       * page has been finished with, no matter what it is subsequently allocated
       * for (because put_page is what is used here to drop an invalid speculative
       * reference).
       *
       * This is the interesting part of the lockless pagecache (and lockless
       * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
       * has the following pattern:
       * 1. find page in radix tree
       * 2. conditionally increment refcount
       * 3. check the page is still in pagecache (if no, goto 1)
       *
       * Remove-side that cares about stability of _refcount (eg. reclaim) has the
       * following (with the i_pages lock held):
       * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
       * B. remove page from pagecache
       * C. free the page
       *
       * There are 2 critical interleavings that matter:
       * - 2 runs before A: in this case, A sees elevated refcount and bails out
       * - A runs before 2: in this case, 2 sees zero refcount and retries;
       *   subsequently, B will complete and 1 will find no page, causing the
       *   lookup to return NULL.
       *
       * It is possible that between 1 and 2, the page is removed then the exact same
       * page is inserted into the same position in pagecache. That's OK: the
       * old find_get_page using a lock could equally have run before or after
        * such a re-insertion, depending on the order in which locks are granted.
       *
       * Lookups racing against pagecache insertion isn't a big problem: either 1
       * will find the page or it will not. Likewise, the old find_get_page could run
       * either before the insertion or afterwards, depending on timing.
       */
      static inline int __page_cache_add_speculative(struct page *page, int count)
      {
      #ifdef CONFIG_TINY_RCU
      # ifdef CONFIG_PREEMPT_COUNT
              VM_BUG_ON(!in_atomic() && !irqs_disabled());
      # endif
              /*
               * Preempt must be disabled here - we rely on rcu_read_lock doing
               * this for us.
               *
               * Pagecache won't be truncated from interrupt context, so if we have
               * found a page in the radix tree here, we have pinned its refcount by
               * disabling preempt, and hence no need for the "speculative get" that
               * SMP requires.
               */
              VM_BUG_ON_PAGE(page_count(page) == 0, page);
              page_ref_add(page, count);
      
      #else
 2050         if (unlikely(!page_ref_add_unless(page, count, 0))) {
                      /*
                       * Either the page has been freed, or will be freed.
                       * In either case, retry here and the caller should
                       * do the right thing (see comments above).
                       */
                      return 0;
              }
      #endif
 2050         VM_BUG_ON_PAGE(PageTail(page), page);
      
              return 1;
      }
      
      static inline int page_cache_get_speculative(struct page *page)
      {
 1533         return __page_cache_add_speculative(page, 1);
      }
      
      static inline int page_cache_add_speculative(struct page *page, int count)
      {
  785         return __page_cache_add_speculative(page, count);
      }
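
       /*
        * Minimal sketch (illustrative only) of the 1-2-3 lookup pattern
        * described above: find the page, take a speculative reference, then
        * recheck that the same page is still at that index.  "example_lookup"
        * is a hypothetical helper; the real lookups live in mm/filemap.c.
        */
       static inline struct page *example_lookup(struct address_space *mapping,
                                                 pgoff_t index)
       {
               struct page *page;

               rcu_read_lock();
       repeat:
               page = xa_load(&mapping->i_pages, index);	/* step 1 */
               if (page && !xa_is_value(page)) {
                       if (!page_cache_get_speculative(page))	/* step 2 */
                               goto repeat;	/* the page was being freed */
                       /* step 3: the page may have been truncated meanwhile */
                       if (unlikely(page != xa_load(&mapping->i_pages, index))) {
                               put_page(page);
                               goto repeat;
                       }
               }
               rcu_read_unlock();
               return page;
       }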
      
      #ifdef CONFIG_NUMA
      extern struct page *__page_cache_alloc(gfp_t gfp);
      #else
      static inline struct page *__page_cache_alloc(gfp_t gfp)
      {
              return alloc_pages(gfp, 0);
      }
      #endif
      
      static inline struct page *page_cache_alloc(struct address_space *x)
      {
  135         return __page_cache_alloc(mapping_gfp_mask(x));
      }
      
      static inline gfp_t readahead_gfp_mask(struct address_space *x)
      {
  242         return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
      }
      
      typedef int filler_t(void *, struct page *);
      
      pgoff_t page_cache_next_miss(struct address_space *mapping,
                                   pgoff_t index, unsigned long max_scan);
      pgoff_t page_cache_prev_miss(struct address_space *mapping,
                                   pgoff_t index, unsigned long max_scan);
      
      #define FGP_ACCESSED                0x00000001
      #define FGP_LOCK                0x00000002
      #define FGP_CREAT                0x00000004
      #define FGP_WRITE                0x00000008
      #define FGP_NOFS                0x00000010
      #define FGP_NOWAIT                0x00000020
      #define FGP_FOR_MMAP                0x00000040
      
      struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                      int fgp_flags, gfp_t cache_gfp_mask);
      
      /**
       * find_get_page - find and get a page reference
       * @mapping: the address_space to search
       * @offset: the page index
       *
       * Looks up the page cache slot at @mapping & @offset.  If there is a
       * page cache page, it is returned with an increased refcount.
       *
       * Otherwise, %NULL is returned.
       */
      static inline struct page *find_get_page(struct address_space *mapping,
                                              pgoff_t offset)
      {
  382         return pagecache_get_page(mapping, offset, 0, 0);
      }
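
       /*
        * Illustrative sketch: typical find_get_page() use.  On success the
        * caller owns a page reference and must drop it with put_page().
        * "example_page_is_cached" is a hypothetical helper.
        */
       static inline bool example_page_is_cached(struct address_space *mapping,
                                                 pgoff_t offset)
       {
               struct page *page = find_get_page(mapping, offset);

               if (!page)
                       return false;
               /* ... inspect or copy from the page here ... */
               put_page(page);		/* drop the reference taken above */
               return true;
       }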
      
      static inline struct page *find_get_page_flags(struct address_space *mapping,
                                              pgoff_t offset, int fgp_flags)
      {
              return pagecache_get_page(mapping, offset, fgp_flags, 0);
      }
      
      /**
       * find_lock_page - locate, pin and lock a pagecache page
       * @mapping: the address_space to search
       * @offset: the page index
       *
       * Looks up the page cache slot at @mapping & @offset.  If there is a
       * page cache page, it is returned locked and with an increased
       * refcount.
       *
       * Otherwise, %NULL is returned.
       *
       * find_lock_page() may sleep.
       */
      static inline struct page *find_lock_page(struct address_space *mapping,
                                              pgoff_t offset)
      {
   24         return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
      }
      
      /**
       * find_or_create_page - locate or add a pagecache page
       * @mapping: the page's address_space
        * @offset: the page's index into the mapping
       * @gfp_mask: page allocation mode
       *
       * Looks up the page cache slot at @mapping & @offset.  If there is a
       * page cache page, it is returned locked and with an increased
       * refcount.
       *
       * If the page is not present, a new page is allocated using @gfp_mask
       * and added to the page cache and the VM's LRU list.  The page is
       * returned locked and with an increased refcount.
       *
       * On memory exhaustion, %NULL is returned.
       *
        * find_or_create_page() may sleep, even if @gfp_mask specifies an
       * atomic allocation!
       */
      static inline struct page *find_or_create_page(struct address_space *mapping,
                                              pgoff_t offset, gfp_t gfp_mask)
      {
              return pagecache_get_page(mapping, offset,
                                              FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                              gfp_mask);
      }
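
       /*
        * Illustrative sketch: find_or_create_page() returns the page locked
        * and with an elevated refcount, so the caller unlocks and releases it
        * when done.  "example_touch_page" is hypothetical; unlock_page() is
        * declared further down in this header.
        */
       static inline int example_touch_page(struct address_space *mapping,
                                            pgoff_t offset)
       {
               struct page *page = find_or_create_page(mapping, offset,
                                                       mapping_gfp_mask(mapping));

               if (!page)
                       return -ENOMEM;
               /* ... initialize or dirty the page while it is locked ... */
               unlock_page(page);
               put_page(page);
               return 0;
       }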
      
      /**
       * grab_cache_page_nowait - returns locked page at given index in given cache
       * @mapping: target address_space
       * @index: the page index
       *
       * Same as grab_cache_page(), but do not wait if the page is unavailable.
       * This is intended for speculative data generators, where the data can
       * be regenerated if the page couldn't be grabbed.  This routine should
       * be safe to call while holding the lock for another page.
       *
       * Clear __GFP_FS when allocating the page to avoid recursion into the fs
       * and deadlock against the caller's locked page.
       */
      static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                      pgoff_t index)
      {
              return pagecache_get_page(mapping, index,
                              FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                              mapping_gfp_mask(mapping));
      }
      
      struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
      struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
      unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                                unsigned int nr_entries, struct page **entries,
                                pgoff_t *indices);
      unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                              pgoff_t end, unsigned int nr_pages,
                              struct page **pages);
      static inline unsigned find_get_pages(struct address_space *mapping,
                              pgoff_t *start, unsigned int nr_pages,
                              struct page **pages)
      {
              return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                          pages);
      }
      unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                                     unsigned int nr_pages, struct page **pages);
      unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                              pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
                              struct page **pages);
      static inline unsigned find_get_pages_tag(struct address_space *mapping,
                              pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
                              struct page **pages)
      {
              return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                              nr_pages, pages);
      }
      
      struct page *grab_cache_page_write_begin(struct address_space *mapping,
                              pgoff_t index, unsigned flags);
      
      /*
       * Returns locked page at given index in given cache, creating it if needed.
       */
      static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                      pgoff_t index)
      {
              return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
      }
      
      extern struct page * read_cache_page(struct address_space *mapping,
                                      pgoff_t index, filler_t *filler, void *data);
      extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                      pgoff_t index, gfp_t gfp_mask);
      extern int read_cache_pages(struct address_space *mapping,
                      struct list_head *pages, filler_t *filler, void *data);
      
      static inline struct page *read_mapping_page(struct address_space *mapping,
                                      pgoff_t index, void *data)
      {
              return read_cache_page(mapping, index, NULL, data);
      }
      
      /*
        * Get the page's index within the radix-tree
        * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
       */
      static inline pgoff_t page_to_index(struct page *page)
      {
              pgoff_t pgoff;
      
  367         if (likely(!PageTransTail(page)))
  367                 return page->index;
      
              /*
               *  We don't initialize ->index for tail pages: calculate based on
               *  head page
               */
              pgoff = compound_head(page)->index;
              pgoff += page - compound_head(page);
              return pgoff;
      }
      
      /*
        * Get the page's offset within its mapping, in units of PAGE_SIZE.
       * (TODO: hugepage should have ->index in PAGE_SIZE)
       */
      static inline pgoff_t page_to_pgoff(struct page *page)
      {
   72         if (unlikely(PageHeadHuge(page)))
                      return page->index << compound_order(page);
      
   72         return page_to_index(page);
      }
      
      /*
       * Return byte-offset into filesystem object for page.
       */
      static inline loff_t page_offset(struct page *page)
      {
              return ((loff_t)page->index) << PAGE_SHIFT;
      }
      
      static inline loff_t page_file_offset(struct page *page)
      {
              return ((loff_t)page_index(page)) << PAGE_SHIFT;
      }
      
      extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                           unsigned long address);
      
      static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                              unsigned long address)
      {
              pgoff_t pgoff;
              if (unlikely(is_vm_hugetlb_page(vma)))
                      return linear_hugepage_index(vma, address);
 2130         pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
              pgoff += vma->vm_pgoff;
              return pgoff;
      }
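
       /*
        * Worked example for linear_page_index() above: with 4KiB pages, a VMA
        * starting at vm_start = 0x7f0000000000 that maps a file from
        * vm_pgoff = 4 (file offset 16KiB), a fault at 0x7f0000003000 gives
        * pgoff = (0x3000 >> PAGE_SHIFT) + 4 = 7, i.e. the 8th file page.
        */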
      
      extern void __lock_page(struct page *page);
      extern int __lock_page_killable(struct page *page);
      extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                      unsigned int flags);
      extern void unlock_page(struct page *page);
      
      /*
       * Return true if the page was successfully locked
       */
      static inline int trylock_page(struct page *page)
      {
  765         page = compound_head(page);
 1525         return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
      }
      
      /*
       * lock_page may only be called if we have the page's inode pinned.
       */
      static inline void lock_page(struct page *page)
      {
  872         might_sleep();
  886         if (!trylock_page(page))
  190                 __lock_page(page);
    3 }
      
      /*
       * lock_page_killable is like lock_page but can be interrupted by fatal
       * signals.  It returns 0 if it locked the page and -EINTR if it was
       * killed while waiting.
       */
      static inline int lock_page_killable(struct page *page)
      {
   18         might_sleep();
   18         if (!trylock_page(page))
   14                 return __lock_page_killable(page);
              return 0;
      }
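
       /*
        * Illustrative sketch: typical lock_page_killable() use in a path that
        * may be interrupted by a fatal signal.  "example_with_locked_page" is
        * hypothetical; the caller must already hold a reference on the page.
        */
       static inline int example_with_locked_page(struct page *page)
       {
               int err = lock_page_killable(page);

               if (err)
                       return err;	/* -EINTR: fatal signal while waiting */
               /* ... operate on the page under PG_locked ... */
               unlock_page(page);
               return 0;
       }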
      
      /*
       * lock_page_or_retry - Lock the page, unless this would block and the
       * caller indicated that it can handle a retry.
       *
       * Return value and mmap_sem implications depend on flags; see
       * __lock_page_or_retry().
       */
      static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                           unsigned int flags)
      {
              might_sleep();
              return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
      }
      
      /*
       * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
       * and should not be used directly.
       */
      extern void wait_on_page_bit(struct page *page, int bit_nr);
      extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
      
      /* 
       * Wait for a page to be unlocked.
       *
       * This must be called with the caller "holding" the page,
        * i.e. with an increased "page->count" so that the page won't
        * go away during the wait.
       */
      static inline void wait_on_page_locked(struct page *page)
      {
    1         if (PageLocked(page))
                      wait_on_page_bit(compound_head(page), PG_locked);
      }
      
      static inline int wait_on_page_locked_killable(struct page *page)
      {
  178         if (!PageLocked(page))
                      return 0;
  177         return wait_on_page_bit_killable(compound_head(page), PG_locked);
      }
      
      extern void put_and_wait_on_page_locked(struct page *page);
      
      void wait_on_page_writeback(struct page *page);
      extern void end_page_writeback(struct page *page);
      void wait_for_stable_page(struct page *page);
      
      void page_endio(struct page *page, bool is_write, int err);
      
      /*
       * Add an arbitrary waiter to a page's wait queue
       */
      extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
      
      /*
        * Fault in everything in the given userspace address range.
       */
      static inline int fault_in_pages_writeable(char __user *uaddr, int size)
      {
              char __user *end = uaddr + size - 1;
      
              if (unlikely(size == 0))
                      return 0;
      
              if (unlikely(uaddr > end))
                      return -EFAULT;
              /*
               * Writing zeroes into userspace here is OK, because we know that if
               * the zero gets there, we'll be overwriting it.
               */
              do {
                      if (unlikely(__put_user(0, uaddr) != 0))
                              return -EFAULT;
                      uaddr += PAGE_SIZE;
              } while (uaddr <= end);
      
              /* Check whether the range spilled into the next page. */
              if (((unsigned long)uaddr & PAGE_MASK) ==
                              ((unsigned long)end & PAGE_MASK))
                      return __put_user(0, end);
      
              return 0;
      }
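
       /*
        * Illustrative sketch: pre-faulting a destination buffer with
        * fault_in_pages_writeable() above, so that a later copy done with
        * page faults disabled is unlikely to fail.  "example_prefault" is a
        * hypothetical helper.
        */
       static inline int example_prefault(char __user *ubuf, int len)
       {
               /* touch every destination page before taking locks */
               if (fault_in_pages_writeable(ubuf, len))
                       return -EFAULT;
               return 0;
       }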