// SPDX-License-Identifier: GPL-2.0-or-later
      /*
       *        RAW sockets for IPv6
       *        Linux INET6 implementation
       *
       *        Authors:
       *        Pedro Roque                <roque@di.fc.ul.pt>
       *
       *        Adapted from linux/net/ipv4/raw.c
       *
       *        Fixes:
       *        Hideaki YOSHIFUJI        :        sin6_scope_id support
       *        YOSHIFUJI,H.@USAGI        :        raw checksum (RFC2292(bis) compliance)
       *        Kazunori MIYAZAWA @USAGI:        change process style to use ip6_append_data
       */
      
      #include <linux/errno.h>
      #include <linux/types.h>
      #include <linux/socket.h>
      #include <linux/slab.h>
      #include <linux/sockios.h>
      #include <linux/net.h>
      #include <linux/in6.h>
      #include <linux/netdevice.h>
      #include <linux/if_arp.h>
      #include <linux/icmpv6.h>
      #include <linux/netfilter.h>
      #include <linux/netfilter_ipv6.h>
      #include <linux/skbuff.h>
      #include <linux/compat.h>
      #include <linux/uaccess.h>
      #include <asm/ioctls.h>
      
      #include <net/net_namespace.h>
      #include <net/ip.h>
      #include <net/sock.h>
      #include <net/snmp.h>
      
      #include <net/ipv6.h>
      #include <net/ndisc.h>
      #include <net/protocol.h>
      #include <net/ip6_route.h>
      #include <net/ip6_checksum.h>
      #include <net/addrconf.h>
      #include <net/transp_v6.h>
      #include <net/udp.h>
      #include <net/inet_common.h>
      #include <net/tcp_states.h>
      #if IS_ENABLED(CONFIG_IPV6_MIP6)
      #include <net/mip6.h>
      #endif
      #include <linux/mroute6.h>
      
      #include <net/raw.h>
      #include <net/rawv6.h>
      #include <net/xfrm.h>
      
      #include <linux/proc_fs.h>
      #include <linux/seq_file.h>
      #include <linux/export.h>
      
      #define        ICMPV6_HDRLEN        4        /* ICMPv6 header, RFC 4443 Section 2.1 */
      
      struct raw_hashinfo raw_v6_hashinfo = {
              .lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
      };
      EXPORT_SYMBOL_GPL(raw_v6_hashinfo);
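
/*
 * Find the next socket in the hash chain, starting at @sk, that matches
 * protocol @num, the local/remote addresses and the (l3mdev-aware) bound
 * device.  A socket bound to a multicast local address also matches if it
 * has joined that group on the receiving interface.
 */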
      
      struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
                      unsigned short num, const struct in6_addr *loc_addr,
                      const struct in6_addr *rmt_addr, int dif, int sdif)
      {
	bool is_multicast = ipv6_addr_is_multicast(loc_addr);
      
              sk_for_each_from(sk)
		if (inet_sk(sk)->inet_num == num) {

			if (!net_eq(sock_net(sk), net))
				continue;

			if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
			    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
				continue;

			if (!raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
						 dif, sdif))
				continue;

			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
                                      if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
                                              goto found;
                                      if (is_multicast &&
                                          inet6_mc_check(sk, loc_addr, rmt_addr))
                                              goto found;
                                      continue;
                              }
                              goto found;
                      }
              sk = NULL;
      found:
	return sk;
      }
      EXPORT_SYMBOL_GPL(__raw_v6_lookup);
      
      /*
       *        0 - deliver
       *        1 - block
       */
      static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
      {
              struct icmp6hdr _hdr;
              const struct icmp6hdr *hdr;
      
              /* We require only the four bytes of the ICMPv6 header, not any
               * additional bytes of message body in "struct icmp6hdr".
               */
              hdr = skb_header_pointer(skb, skb_transport_offset(skb),
                                       ICMPV6_HDRLEN, &_hdr);
              if (hdr) {
                      const __u32 *data = &raw6_sk(sk)->filter.data[0];
                      unsigned int type = hdr->icmp6_type;
      
                      return (data[type >> 5] & (1U << (type & 31))) != 0;
              }
              return 1;
      }
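
/*
 * A minimal userspace sketch of the filter this implements (RFC 3542
 * ICMP6_FILTER; a set bit means "block"), assuming the usual
 * <netinet/icmp6.h> macros:
 *
 *	struct icmp6_filter filt;
 *
 *	ICMP6_FILTER_SETBLOCKALL(&filt);
 *	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
 *	setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt));
 */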
      
      #if IS_ENABLED(CONFIG_IPV6_MIP6)
      typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
      
      static mh_filter_t __rcu *mh_filter __read_mostly;
      
      int rawv6_mh_filter_register(mh_filter_t filter)
      {
              rcu_assign_pointer(mh_filter, filter);
              return 0;
      }
      EXPORT_SYMBOL(rawv6_mh_filter_register);
      
      int rawv6_mh_filter_unregister(mh_filter_t filter)
      {
              RCU_INIT_POINTER(mh_filter, NULL);
              synchronize_rcu();
              return 0;
      }
      EXPORT_SYMBOL(rawv6_mh_filter_unregister);
      
      #endif
      
      /*
       *        demultiplex raw sockets.
       *        (should consider queueing the skb in the sock receive_queue
       *        without calling rawv6.c)
       *
       *        Caller owns SKB so we must make clones.
       */
      static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
      {
              const struct in6_addr *saddr;
              const struct in6_addr *daddr;
              struct sock *sk;
              bool delivered = false;
              __u8 hash;
              struct net *net;
      
              saddr = &ipv6_hdr(skb)->saddr;
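	/* daddr immediately follows saddr in struct ipv6hdr */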
              daddr = saddr + 1;
      
              hash = nexthdr & (RAW_HTABLE_SIZE - 1);
      
              read_lock(&raw_v6_hashinfo.lock);
              sk = sk_head(&raw_v6_hashinfo.ht[hash]);
      
              if (!sk)
                      goto out;
      
              net = dev_net(skb->dev);
              sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr,
                                   inet6_iif(skb), inet6_sdif(skb));
      
              while (sk) {
                      int filtered;
      
                      delivered = true;
                      switch (nexthdr) {
                      case IPPROTO_ICMPV6:
                              filtered = icmpv6_filter(sk, skb);
                              break;
      
      #if IS_ENABLED(CONFIG_IPV6_MIP6)
                      case IPPROTO_MH:
                      {
			/* XXX: To validate the MH only once per packet,
			 * the check is placed here.  Ideally it would run
			 * after the xfrm policy check, but it does not; the
			 * xfrm policy check lives in rawv6_rcv() because it
			 * must be done for each socket.
			 */
                              mh_filter_t *filter;
      
                              filter = rcu_dereference(mh_filter);
                              filtered = filter ? (*filter)(sk, skb) : 0;
                              break;
                      }
      #endif
                      default:
                              filtered = 0;
                              break;
                      }
      
                      if (filtered < 0)
                              break;
                      if (filtered == 0) {
                              struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
      
                              /* Not releasing hash table! */
                              if (clone) {
                                      nf_reset_ct(clone);
                                      rawv6_rcv(sk, clone);
                              }
                      }
                      sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
                                           inet6_iif(skb), inet6_sdif(skb));
              }
      out:
              read_unlock(&raw_v6_hashinfo.lock);
              return delivered;
      }
      
      bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
      {
              struct sock *raw_sk;
      
              raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
              if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
                      raw_sk = NULL;
      
              return raw_sk != NULL;
      }
      
      /* This cleans up af_inet6 a bit. -DaveM */
      static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
      {
              struct inet_sock *inet = inet_sk(sk);
              struct ipv6_pinfo *np = inet6_sk(sk);
              struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
              __be32 v4addr = 0;
              int addr_type;
              int err;
      
              if (addr_len < SIN6_LEN_RFC2133)
                      return -EINVAL;
      
              if (addr->sin6_family != AF_INET6)
                      return -EINVAL;
      
              addr_type = ipv6_addr_type(&addr->sin6_addr);
      
              /* Raw sockets are IPv6 only */
              if (addr_type == IPV6_ADDR_MAPPED)
                      return -EADDRNOTAVAIL;
      
              lock_sock(sk);
      
              err = -EINVAL;
              if (sk->sk_state != TCP_CLOSE)
                      goto out;
      
              rcu_read_lock();
              /* Check if the address belongs to the host. */
              if (addr_type != IPV6_ADDR_ANY) {
                      struct net_device *dev = NULL;
      
                      if (__ipv6_addr_needs_scope_id(addr_type)) {
                              if (addr_len >= sizeof(struct sockaddr_in6) &&
                                  addr->sin6_scope_id) {
                                      /* Override any existing binding, if another
                                       * one is supplied by user.
                                       */
                                      sk->sk_bound_dev_if = addr->sin6_scope_id;
                              }
      
                              /* Binding to link-local address requires an interface */
                              if (!sk->sk_bound_dev_if)
                                      goto out_unlock;
                      }
      
                      if (sk->sk_bound_dev_if) {
                              err = -ENODEV;
                              dev = dev_get_by_index_rcu(sock_net(sk),
                                                         sk->sk_bound_dev_if);
                              if (!dev)
                                      goto out_unlock;
                      }
      
                      /* ipv4 addr of the socket is invalid.  Only the
                       * unspecified and mapped address have a v4 equivalent.
                       */
                      v4addr = LOOPBACK4_IPV6;
                      if (!(addr_type & IPV6_ADDR_MULTICAST) &&
                          !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
                              err = -EADDRNOTAVAIL;
                              if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
                                                 dev, 0)) {
                                      goto out_unlock;
                              }
                      }
              }
      
              inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
              sk->sk_v6_rcv_saddr = addr->sin6_addr;
              if (!(addr_type & IPV6_ADDR_MULTICAST))
                      np->saddr = addr->sin6_addr;
              err = 0;
      out_unlock:
              rcu_read_unlock();
      out:
              release_sock(sk);
              return err;
      }
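
/*
 * Userspace note: binding to a link-local address requires a scope, e.g.
 * (the interface name is only illustrative):
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6 };
 *
 *	inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);
 *	a.sin6_scope_id = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 */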
      
      static void rawv6_err(struct sock *sk, struct sk_buff *skb,
                     struct inet6_skb_parm *opt,
                     u8 type, u8 code, int offset, __be32 info)
      {
              struct inet_sock *inet = inet_sk(sk);
              struct ipv6_pinfo *np = inet6_sk(sk);
              int err;
              int harderr;
      
              /* Report error on raw socket, if:
                 1. User requested recverr.
                 2. Socket is connected (otherwise the error indication
	      is useless without recverr and the error is hard).
               */
              if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
                      return;
      
              harderr = icmpv6_err_convert(type, code, &err);
              if (type == ICMPV6_PKT_TOOBIG) {
                      ip6_sk_update_pmtu(skb, sk, info);
                      harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
              }
              if (type == NDISC_REDIRECT) {
                      ip6_sk_redirect(skb, sk);
                      return;
              }
              if (np->recverr) {
                      u8 *payload = skb->data;
                      if (!inet->hdrincl)
                              payload += offset;
                      ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
              }
      
              if (np->recverr || harderr) {
                      sk->sk_err = err;
                      sk->sk_error_report(sk);
              }
      }
      
      void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
                      u8 type, u8 code, int inner_offset, __be32 info)
      {
              struct sock *sk;
              int hash;
              const struct in6_addr *saddr, *daddr;
              struct net *net;
      
              hash = nexthdr & (RAW_HTABLE_SIZE - 1);
      
              read_lock(&raw_v6_hashinfo.lock);
              sk = sk_head(&raw_v6_hashinfo.ht[hash]);
              if (sk) {
                      /* Note: ipv6_hdr(skb) != skb->data */
                      const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
                      saddr = &ip6h->saddr;
                      daddr = &ip6h->daddr;
                      net = dev_net(skb->dev);
      
                      while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
                                                   inet6_iif(skb), inet6_iif(skb)))) {
                              rawv6_err(sk, skb, NULL, type, code,
                                              inner_offset, info);
                              sk = sk_next(sk);
                      }
              }
              read_unlock(&raw_v6_hashinfo.lock);
      }
      
      static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
      {
              if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
                  skb_checksum_complete(skb)) {
                      atomic_inc(&sk->sk_drops);
                      kfree_skb(skb);
                      return NET_RX_DROP;
              }
      
              /* Charge it to the socket. */
              skb_dst_drop(skb);
              if (sock_queue_rcv_skb(sk, skb) < 0) {
                      kfree_skb(skb);
                      return NET_RX_DROP;
              }
      
              return 0;
      }
      
      /*
       *        This is next to useless...
       *        if we demultiplex in network layer we don't need the extra call
       *        just to queue the skb...
       *        maybe we could have the network decide upon a hint if it
       *        should call raw_rcv for demultiplexing
       */
      int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
      {
              struct inet_sock *inet = inet_sk(sk);
              struct raw6_sock *rp = raw6_sk(sk);
      
              if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
                      atomic_inc(&sk->sk_drops);
                      kfree_skb(skb);
                      return NET_RX_DROP;
              }
      
              if (!rp->checksum)
                      skb->ip_summed = CHECKSUM_UNNECESSARY;
      
              if (skb->ip_summed == CHECKSUM_COMPLETE) {
                      skb_postpull_rcsum(skb, skb_network_header(skb),
                                         skb_network_header_len(skb));
                      if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                           &ipv6_hdr(skb)->daddr,
                                           skb->len, inet->inet_num, skb->csum))
                              skb->ip_summed = CHECKSUM_UNNECESSARY;
              }
              if (!skb_csum_unnecessary(skb))
                      skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                               &ipv6_hdr(skb)->daddr,
                                                               skb->len,
                                                               inet->inet_num, 0));
      
              if (inet->hdrincl) {
                      if (skb_checksum_complete(skb)) {
                              atomic_inc(&sk->sk_drops);
                              kfree_skb(skb);
                              return NET_RX_DROP;
                      }
              }
      
              rawv6_rcv_skb(sk, skb);
              return 0;
      }
      
      
      /*
       *        This should be easy, if there is something there
       *        we return it, otherwise we block.
       */
      
      static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                               int noblock, int flags, int *addr_len)
      {
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
              struct sk_buff *skb;
              size_t copied;
              int err;
      
	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
      
              skb = skb_recv_datagram(sk, flags, noblock, &err);
              if (!skb)
                      goto out;
      
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
              }
      
	if (skb_csum_unnecessary(skb)) {
		err = skb_copy_datagram_msg(skb, 0, msg, copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		if (__skb_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_msg(skb, 0, msg, copied);
              } else {
                      err = skb_copy_and_csum_datagram_msg(skb, 0, msg);
                      if (err == -EINVAL)
                              goto csum_copy_err;
              }
	if (err)
                      goto out_free;
      
              /* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = 0;
		sin6->sin6_addr = ipv6_hdr(skb)->saddr;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
                                                                inet6_iif(skb));
                      *addr_len = sizeof(*sin6);
              }
      
	sock_recv_ts_and_drops(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
      
      csum_copy_err:
              skb_kill_datagram(sk, skb, flags);
      
              /* Error for blocking case is chosen to masquerade
                 as some normal condition.
               */
              err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
              goto out;
      }
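
/*
 * With IPV6_CHECKSUM enabled, compute the checksum over all queued
 * fragments and store it at rp->offset in the transport header before
 * pushing the pending frames out.
 */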
      
      static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                                           struct raw6_sock *rp)
      {
              struct sk_buff *skb;
              int err = 0;
              int offset;
              int len;
              int total_len;
              __wsum tmp_csum;
              __sum16 csum;
      
	if (!rp->checksum)
                      goto send;
      
	skb = skb_peek(&sk->sk_write_queue);
	if (!skb)
		goto out;

	offset = rp->offset;
	total_len = inet_sk(sk)->cork.base.length;
	if (offset >= total_len - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* should check HW csum -- miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
                      /*
                       * Only one fragment on the socket.
                       */
                      tmp_csum = skb->csum;
              } else {
                      struct sk_buff *csum_skb = NULL;
                      tmp_csum = 0;
      
		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

			if (csum_skb)
				continue;

			len = skb->len - skb_transport_offset(skb);
                              if (offset >= len) {
                                      offset -= len;
                                      continue;
                              }
      
                              csum_skb = skb;
                      }
      
                      skb = csum_skb;
              }
      
	offset += skb_transport_offset(skb);
              err = skb_copy_bits(skb, offset, &csum, 2);
              if (err < 0) {
                      ip6_flush_pending_frames(sk);
                      goto out;
              }
      
              /* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));

	csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
			       total_len, fl6->flowi6_proto, tmp_csum);

	if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
		csum = CSUM_MANGLED_0;

	BUG_ON(skb_store_bits(skb, offset, &csum, 2));

send:
	err = ip6_push_pending_frames(sk);
out:
	return err;
      }
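
/*
 * IPV6_HDRINCL path: the caller supplies the complete IPv6 header, so the
 * message is copied into a single skb and handed straight to the local
 * output netfilter hook without going through ip6_append_data().
 */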
      
      static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
                              struct flowi6 *fl6, struct dst_entry **dstp,
                              unsigned int flags, const struct sockcm_cookie *sockc)
      {
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
              struct ipv6hdr *iph;
              struct sk_buff *skb;
              int err;
              struct rt6_info *rt = (struct rt6_info *)*dstp;
	int hlen = LL_RESERVED_SPACE(rt->dst.dev);
	int tlen = rt->dst.dev->needed_tailroom;

	if (length > rt->dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (length < sizeof(struct ipv6hdr))
		return -EINVAL;
	if (flags&MSG_PROBE)
                      goto out;
      
              skb = sock_alloc_send_skb(sk,
                                        length + hlen + tlen + 15,
                                        flags & MSG_DONTWAIT, &err);
              if (!skb)
                      goto error;
	skb_reserve(skb, hlen);
      
              skb->protocol = htons(ETH_P_IPV6);
              skb->priority = sk->sk_priority;
              skb->mark = sockc->mark;
              skb->tstamp = sockc->transmit_time;
      
              skb_put(skb, length);
              skb_reset_network_header(skb);
              iph = ipv6_hdr(skb);
      
              skb->ip_summed = CHECKSUM_NONE;
      
	skb_setup_tx_timestamp(skb, sockc->tsflags);

	if (flags & MSG_CONFIRM)
		skb_set_dst_pending_confirm(skb, 1);

	skb->transport_header = skb->network_header;
	err = memcpy_from_msg(iph, msg, length);
              if (err) {
                      err = -EFAULT;
                      kfree_skb(skb);
                      goto error;
              }
      
              skb_dst_set(skb, &rt->dst);
              *dstp = NULL;
      
              /* if egress device is enslaved to an L3 master device pass the
               * skb to its handler for processing
               */
	skb = l3mdev_ip6_out(sk, skb);
              if (unlikely(!skb))
                      return 0;
      
              /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
               * in the error path. Since skb has been freed, the dst could
               * have been queued for deletion.
               */
	rcu_read_lock();
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
		      NULL, rt->dst.dev, dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err) {
		IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		goto error_check;
	}
	rcu_read_unlock();
      out:
              return 0;
      
      error:
              IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
      error_check:
	if (err == -ENOBUFS && !np->recverr)
                      err = 0;
              return err;
      }
      
      struct raw6_frag_vec {
              struct msghdr *msg;
              int hlen;
              char c[4];
      };
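
/*
 * Peek at the first payload bytes so that the ICMPv6 type/code (or the MH
 * type) can be carried in the flowi6 for IPsec policy and routing lookups.
 */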
      
      static int rawv6_probe_proto_opt(struct raw6_frag_vec *rfv, struct flowi6 *fl6)
      {
              int err = 0;
              switch (fl6->flowi6_proto) {
              case IPPROTO_ICMPV6:
		rfv->hlen = 2;
		err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
		if (!err) {
			fl6->fl6_icmp_type = rfv->c[0];
			fl6->fl6_icmp_code = rfv->c[1];
		}
		break;
	case IPPROTO_MH:
		rfv->hlen = 4;
		err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
		if (!err)
			fl6->fl6_mh_type = rfv->c[2];
              }
              return err;
      }
      
      static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
                             struct sk_buff *skb)
      {
              struct raw6_frag_vec *rfv = from;
      
	if (offset < rfv->hlen) {
		int copy = min(rfv->hlen - offset, len);
      
                      if (skb->ip_summed == CHECKSUM_PARTIAL)
                              memcpy(to, rfv->c + offset, copy);
                      else
			skb->csum = csum_block_add(
				skb->csum,
				csum_partial_copy_nocheck(rfv->c + offset,
							  to, copy, 0),
				odd);

		odd = 0;
		offset += copy;
		to += copy;
		len -= copy;
      
                      if (!len)
                              return 0;
              }
      
	offset -= rfv->hlen;

	return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
      }
      
      static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
      {
              struct ipv6_txoptions *opt_to_free = NULL;
              struct ipv6_txoptions opt_space;
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
              struct raw6_sock *rp = raw6_sk(sk);
              struct ipv6_txoptions *opt = NULL;
              struct ip6_flowlabel *flowlabel = NULL;
              struct dst_entry *dst = NULL;
              struct raw6_frag_vec rfv;
              struct flowi6 fl6;
              struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
              int hdrincl;
              u16 proto;
              int err;
      
              /* Rough check on arithmetic overflow,
                 better check is made in ip6_append_data().
               */
              if (len > INT_MAX)
                      return -EMSGSIZE;
      
              /* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
                      return -EOPNOTSUPP;
      
              /* hdrincl should be READ_ONCE(inet->hdrincl)
               * but READ_ONCE() doesn't work with bit fields.
               * Doing this indirectly yields the same result.
               */
	hdrincl = inet->hdrincl;
              hdrincl = READ_ONCE(hdrincl);
      
              /*
               *        Get and verify the address.
               */
              memset(&fl6, 0, sizeof(fl6));
      
              fl6.flowi6_mark = sk->sk_mark;
              fl6.flowi6_uid = sk->sk_uid;
      
              ipcm6_init(&ipc6);
              ipc6.sockc.tsflags = sk->sk_tsflags;
              ipc6.sockc.mark = sk->sk_mark;
      
              if (sin6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
			return -EAFNOSUPPORT;

		/* port is the proto value [0..255] carried in nexthdr */
		proto = ntohs(sin6->sin6_port);

		if (!proto)
			proto = inet->inet_num;
		else if (proto != inet->inet_num)
			return -EINVAL;

		if (proto > 255)
			return -EINVAL;

		daddr = &sin6->sin6_addr;
                      if (np->sndflow) {
                              fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                              if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
                                      flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                                      if (IS_ERR(flowlabel))
                                              return -EINVAL;
                              }
                      }
      
                      /*
                       * Otherwise it will be difficult to maintain
                       * sk->sk_dst_cache.
                       */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
                              return -EDESTADDRREQ;
      
                      proto = inet->inet_num;
                      daddr = &sk->sk_v6_daddr;
                      fl6.flowlabel = np->flow_label;
              }
      
	if (fl6.flowi6_oif == 0)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
                      opt->tot_len = sizeof(struct ipv6_txoptions);
                      ipc6.opt = opt;
      
                      err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
                      if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
      
              fl6.flowi6_proto = proto;
              fl6.flowi6_mark = ipc6.sockc.mark;
      
              if (!hdrincl) {
		rfv.msg = msg;
		rfv.hlen = 0;
		err = rawv6_probe_proto_opt(&rfv, &fl6);
                      if (err)
                              goto out;
              }
      
	if (!ipv6_addr_any(daddr))
		fl6.daddr = *daddr;
	else
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;

	final_p = fl6_update_dst(&fl6, opt, &final);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	if (hdrincl)
		fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
      
              dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
              if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto out;
	}
	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;

	if (msg->msg_flags&MSG_CONFIRM)
                      goto do_confirm;
      
      back_from_confirm:
	if (hdrincl)
		err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
					msg->msg_flags, &ipc6.sockc);
	else {
		ipc6.opt = opt;
                      lock_sock(sk);
                      err = ip6_append_data(sk, raw6_getfrag, &rfv,
                              len, 0, &ipc6, &fl6, (struct rt6_info *)dst,
                              msg->msg_flags);
      
                      if (err)
			ip6_flush_pending_frames(sk);
		else if (!(msg->msg_flags & MSG_MORE))
			err = rawv6_push_pending_frames(sk, &fl6, rp);
		release_sock(sk);
              }
      done:
	dst_release(dst);
out:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	return err < 0 ? err : len;
do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
                      goto back_from_confirm;
              err = 0;
              goto done;
      }
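
/*
 * A minimal userspace sketch of the send path above (address and buffer
 * are only illustrative); sin6_port must be zero or repeat the socket's
 * protocol number:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6 };
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *	sendto(fd, buf, buflen, 0, (struct sockaddr *)&dst, sizeof(dst));
 */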
      
      static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
                                     char __user *optval, int optlen)
      {
              switch (optname) {
              case ICMPV6_FILTER:
                      if (optlen > sizeof(struct icmp6_filter))
                              optlen = sizeof(struct icmp6_filter);
                      if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
                              return -EFAULT;
                      return 0;
              default:
                      return -ENOPROTOOPT;
              }
      
              return 0;
      }
      
      static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
                                     char __user *optval, int __user *optlen)
      {
              int len;
      
              switch (optname) {
              case ICMPV6_FILTER:
                      if (get_user(len, optlen))
                              return -EFAULT;
                      if (len < 0)
                              return -EINVAL;
                      if (len > sizeof(struct icmp6_filter))
                              len = sizeof(struct icmp6_filter);
                      if (put_user(len, optlen))
                              return -EFAULT;
                      if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
                              return -EFAULT;
                      return 0;
              default:
                      return -ENOPROTOOPT;
              }
      
              return 0;
      }
      
      
      static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
                                  char __user *optval, unsigned int optlen)
      {
              struct raw6_sock *rp = raw6_sk(sk);
              int val;
      
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case IPV6_HDRINCL:
		if (sk->sk_type != SOCK_RAW)
			return -EINVAL;
		inet_sk(sk)->hdrincl = !!val;
		return 0;
	case IPV6_CHECKSUM:
		if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
                          level == IPPROTO_IPV6) {
                              /*
                               * RFC3542 tells that IPV6_CHECKSUM socket
                               * option in the IPPROTO_IPV6 level is not
                               * allowed on ICMPv6 sockets.
                               * If you want to set it, use IPPROTO_RAW
                               * level IPV6_CHECKSUM socket option
                               * (Linux extension).
                               */
                              return -EINVAL;
                      }
      
		/* You may get a strange result with a positive odd offset;
		   RFC2292bis agrees with me. */
		if (val > 0 && (val&1))
			return -EINVAL;
		if (val < 0) {
			rp->checksum = 0;
		} else {
			rp->checksum = 1;
                              rp->offset = val;
                      }
      
                      return 0;
      
              default:
                      return -ENOPROTOOPT;
              }
      }
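
/*
 * Userspace sketch: ask the kernel to maintain a 16-bit checksum at byte
 * offset 2 of the payload (levels SOL_RAW and, except on ICMPv6 sockets,
 * IPPROTO_IPV6 are accepted):
 *
 *	int offset = 2;
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM, &offset, sizeof(offset));
 */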
      
      static int rawv6_setsockopt(struct sock *sk, int level, int optname,
                                char __user *optval, unsigned int optlen)
      {
	switch (level) {
              case SOL_RAW:
                      break;
      
              case SOL_ICMPV6:
                      if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
                              return -EOPNOTSUPP;
                      return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
              case SOL_IPV6:
		if (optname == IPV6_CHECKSUM ||
		    optname == IPV6_HDRINCL)
			break;
		/* fall through */
	default:
		return ipv6_setsockopt(sk, level, optname, optval, optlen);
	}

	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
      }
      
      #ifdef CONFIG_COMPAT
static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
                                         char __user *optval, unsigned int optlen)
      {
              switch (level) {
              case SOL_RAW:
                      break;
              case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
                              return -EOPNOTSUPP;
                      return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
              case SOL_IPV6:
                      if (optname == IPV6_CHECKSUM ||
                          optname == IPV6_HDRINCL)
                              break;
                      /* fall through */
              default:
                      return compat_ipv6_setsockopt(sk, level, optname,
                                                    optval, optlen);
              }
              return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
      }
      #endif
      
      static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
                                  char __user *optval, int __user *optlen)
      {
              struct raw6_sock *rp = raw6_sk(sk);
              int val, len;
      
              if (get_user(len, optlen))
                      return -EFAULT;
      
              switch (optname) {
              case IPV6_HDRINCL:
                      val = inet_sk(sk)->hdrincl;
                      break;
              case IPV6_CHECKSUM:
                      /*
                       * We allow getsockopt() for IPPROTO_IPV6-level
                       * IPV6_CHECKSUM socket option on ICMPv6 sockets
                       * since RFC3542 is silent about it.
                       */
                      if (rp->checksum == 0)
                              val = -1;
                      else
                              val = rp->offset;
                      break;
      
              default:
                      return -ENOPROTOOPT;
              }
      
              len = min_t(unsigned int, sizeof(int), len);
      
              if (put_user(len, optlen))
                      return -EFAULT;
              if (copy_to_user(optval, &val, len))
                      return -EFAULT;
              return 0;
      }
      
      static int rawv6_getsockopt(struct sock *sk, int level, int optname,
                                char __user *optval, int __user *optlen)
      {
              switch (level) {
              case SOL_RAW:
                      break;
      
              case SOL_ICMPV6:
                      if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
                              return -EOPNOTSUPP;
                      return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
              case SOL_IPV6:
                      if (optname == IPV6_CHECKSUM ||
                          optname == IPV6_HDRINCL)
                              break;
                      /* fall through */
              default:
                      return ipv6_getsockopt(sk, level, optname, optval, optlen);
              }
      
              return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
      }
      
      #ifdef CONFIG_COMPAT
      static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
                                         char __user *optval, int __user *optlen)
      {
              switch (level) {
              case SOL_RAW:
                      break;
              case SOL_ICMPV6:
                      if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
                              return -EOPNOTSUPP;
                      return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
              case SOL_IPV6:
                      if (optname == IPV6_CHECKSUM ||
                          optname == IPV6_HDRINCL)
                              break;
                      /* fall through */
              default:
                      return compat_ipv6_getsockopt(sk, level, optname,
                                                    optval, optlen);
              }
              return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
      }
      #endif
      
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ: {
		int amount = sk_wmem_alloc_get(sk);
      
                      return put_user(amount, (int __user *)arg);
              }
              case SIOCINQ: {
                      struct sk_buff *skb;
                      int amount = 0;
      
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
                      return put_user(amount, (int __user *)arg);
              }
      
              default:
      #ifdef CONFIG_IPV6_MROUTE
		return ip6mr_ioctl(sk, cmd, (void __user *)arg);
      #else
                      return -ENOIOCTLCMD;
      #endif
              }
      }
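
/*
 * SIOCOUTQ reports bytes still held in the send queue; SIOCINQ reports the
 * length of the next queued datagram, e.g. from userspace:
 *
 *	int pending;
 *
 *	ioctl(fd, SIOCINQ, &pending);
 */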
      
      #ifdef CONFIG_COMPAT
      static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
      {
              switch (cmd) {
              case SIOCOUTQ:
              case SIOCINQ:
                      return -ENOIOCTLCMD;
              default:
      #ifdef CONFIG_IPV6_MROUTE
                      return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
      #else
                      return -ENOIOCTLCMD;
      #endif
              }
      }
      #endif
      
      static void rawv6_close(struct sock *sk, long timeout)
      {
	if (inet_sk(sk)->inet_num == IPPROTO_RAW)
		ip6_ra_control(sk, -1);
	ip6mr_sk_done(sk);
              sk_common_release(sk);
      }
      
      static void raw6_destroy(struct sock *sk)
      {
	lock_sock(sk);
              ip6_flush_pending_frames(sk);
              release_sock(sk);
      
              inet6_destroy_sock(sk);
      }
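
/*
 * ICMPv6 and MH sockets get checksumming enabled by default, with the
 * checksum field at offset 2 (RFC 4443) and offset 4 (RFC 6275)
 * respectively.
 */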
      
      static int rawv6_init_sk(struct sock *sk)
      {
              struct raw6_sock *rp = raw6_sk(sk);
      
	switch (inet_sk(sk)->inet_num) {
	case IPPROTO_ICMPV6:
		rp->checksum = 1;
		rp->offset   = 2;
		break;
	case IPPROTO_MH:
		rp->checksum = 1;
		rp->offset   = 4;
		break;
	default:
		break;
	}
	return 0;
      }
      
      struct proto rawv6_prot = {
              .name                   = "RAWv6",
              .owner                   = THIS_MODULE,
              .close                   = rawv6_close,
              .destroy           = raw6_destroy,
              .connect           = ip6_datagram_connect_v6_only,
              .disconnect           = __udp_disconnect,
              .ioctl                   = rawv6_ioctl,
              .init                   = rawv6_init_sk,
              .setsockopt           = rawv6_setsockopt,
              .getsockopt           = rawv6_getsockopt,
              .sendmsg           = rawv6_sendmsg,
              .recvmsg           = rawv6_recvmsg,
              .bind                   = rawv6_bind,
              .backlog_rcv           = rawv6_rcv_skb,
              .hash                   = raw_hash_sk,
              .unhash                   = raw_unhash_sk,
              .obj_size           = sizeof(struct raw6_sock),
              .useroffset           = offsetof(struct raw6_sock, filter),
              .usersize           = sizeof_field(struct raw6_sock, filter),
              .h.raw_hash           = &raw_v6_hashinfo,
      #ifdef CONFIG_COMPAT
              .compat_setsockopt = compat_rawv6_setsockopt,
              .compat_getsockopt = compat_rawv6_getsockopt,
              .compat_ioctl           = compat_rawv6_ioctl,
      #endif
              .diag_destroy           = raw_abort,
      };
      
      #ifdef CONFIG_PROC_FS
      static int raw6_seq_show(struct seq_file *seq, void *v)
      {
              if (v == SEQ_START_TOKEN) {
                      seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
              } else {
                      struct sock *sp = v;
                      __u16 srcp  = inet_sk(sp)->inet_num;
                      ip6_dgram_sock_seq_show(seq, v, srcp, 0,
                                              raw_seq_private(seq)->bucket);
              }
              return 0;
      }
      
      static const struct seq_operations raw6_seq_ops = {
              .start =        raw_seq_start,
              .next =                raw_seq_next,
              .stop =                raw_seq_stop,
              .show =                raw6_seq_show,
      };
      
      static int __net_init raw6_init_net(struct net *net)
      {
              if (!proc_create_net_data("raw6", 0444, net->proc_net, &raw6_seq_ops,
                              sizeof(struct raw_iter_state), &raw_v6_hashinfo))
                      return -ENOMEM;
      
              return 0;
      }
      
      static void __net_exit raw6_exit_net(struct net *net)
      {
              remove_proc_entry("raw6", net->proc_net);
      }
      
      static struct pernet_operations raw6_net_ops = {
              .init = raw6_init_net,
              .exit = raw6_exit_net,
      };
      
      int __init raw6_proc_init(void)
      {
              return register_pernet_subsys(&raw6_net_ops);
      }
      
      void raw6_proc_exit(void)
      {
              unregister_pernet_subsys(&raw6_net_ops);
      }
      #endif        /* CONFIG_PROC_FS */
      
      /* Same as inet6_dgram_ops, sans udp_poll.  */
      const struct proto_ops inet6_sockraw_ops = {
              .family                   = PF_INET6,
              .owner                   = THIS_MODULE,
              .release           = inet6_release,
              .bind                   = inet6_bind,
              .connect           = inet_dgram_connect,        /* ok                */
              .socketpair           = sock_no_socketpair,        /* a do nothing        */
              .accept                   = sock_no_accept,                /* a do nothing        */
              .getname           = inet6_getname,
              .poll                   = datagram_poll,                /* ok                */
              .ioctl                   = inet6_ioctl,                /* must change  */
              .gettstamp           = sock_gettstamp,
              .listen                   = sock_no_listen,                /* ok                */
              .shutdown           = inet_shutdown,                /* ok                */
              .setsockopt           = sock_common_setsockopt,        /* ok                */
              .getsockopt           = sock_common_getsockopt,        /* ok                */
              .sendmsg           = inet_sendmsg,                /* ok                */
              .recvmsg           = sock_common_recvmsg,        /* ok                */
              .mmap                   = sock_no_mmap,
              .sendpage           = sock_no_sendpage,
      #ifdef CONFIG_COMPAT
              .compat_setsockopt = compat_sock_common_setsockopt,
              .compat_getsockopt = compat_sock_common_getsockopt,
      #endif
      };
      
      static struct inet_protosw rawv6_protosw = {
              .type                = SOCK_RAW,
              .protocol        = IPPROTO_IP,        /* wild card */
              .prot                = &rawv6_prot,
              .ops                = &inet6_sockraw_ops,
              .flags                = INET_PROTOSW_REUSE,
      };
      
      int __init rawv6_init(void)
      {
              return inet6_register_protosw(&rawv6_protosw);
      }
      
      void rawv6_exit(void)
      {
              inet6_unregister_protosw(&rawv6_protosw);
      }
      /*
       * Resizable virtual memory filesystem for Linux.
       *
       * Copyright (C) 2000 Linus Torvalds.
       *                 2000 Transmeta Corp.
       *                 2000-2001 Christoph Rohland
       *                 2000-2001 SAP AG
       *                 2002 Red Hat Inc.
       * Copyright (C) 2002-2011 Hugh Dickins.
       * Copyright (C) 2011 Google Inc.
       * Copyright (C) 2002-2005 VERITAS Software Corporation.
       * Copyright (C) 2004 Andi Kleen, SuSE Labs
       *
       * Extended attribute support for tmpfs:
       * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
       * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
       *
       * tiny-shmem:
       * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
       *
       * This file is released under the GPL.
       */
      
      #include <linux/fs.h>
      #include <linux/init.h>
      #include <linux/vfs.h>
      #include <linux/mount.h>
      #include <linux/ramfs.h>
      #include <linux/pagemap.h>
      #include <linux/file.h>
      #include <linux/mm.h>
      #include <linux/random.h>
      #include <linux/sched/signal.h>
      #include <linux/export.h>
      #include <linux/swap.h>
      #include <linux/uio.h>
      #include <linux/khugepaged.h>
      #include <linux/hugetlb.h>
      #include <linux/frontswap.h>
      #include <linux/fs_parser.h>
      
      #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
      
      static struct vfsmount *shm_mnt;
      
      #ifdef CONFIG_SHMEM
      /*
       * This virtual memory filesystem is heavily based on the ramfs. It
       * extends ramfs by the ability to use swap and honor resource limits
       * which makes it a completely usable filesystem.
       */
      
      #include <linux/xattr.h>
      #include <linux/exportfs.h>
      #include <linux/posix_acl.h>
      #include <linux/posix_acl_xattr.h>
      #include <linux/mman.h>
      #include <linux/string.h>
      #include <linux/slab.h>
      #include <linux/backing-dev.h>
      #include <linux/shmem_fs.h>
      #include <linux/writeback.h>
      #include <linux/blkdev.h>
      #include <linux/pagevec.h>
      #include <linux/percpu_counter.h>
      #include <linux/falloc.h>
      #include <linux/splice.h>
      #include <linux/security.h>
      #include <linux/swapops.h>
      #include <linux/mempolicy.h>
      #include <linux/namei.h>
      #include <linux/ctype.h>
      #include <linux/migrate.h>
      #include <linux/highmem.h>
      #include <linux/seq_file.h>
      #include <linux/magic.h>
      #include <linux/syscalls.h>
      #include <linux/fcntl.h>
      #include <uapi/linux/memfd.h>
      #include <linux/userfaultfd_k.h>
      #include <linux/rmap.h>
      #include <linux/uuid.h>
      
      #include <linux/uaccess.h>
      #include <asm/pgtable.h>
      
      #include "internal.h"
      
      #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
      #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
      
      /* Pretend that each entry is of this size in directory's i_size */
      #define BOGO_DIRENT_SIZE 20
      
      /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
      #define SHORT_SYMLINK_LEN 128
      
      /*
       * shmem_fallocate communicates with shmem_fault or shmem_writepage via
       * inode->i_private (with i_mutex making sure that it has only one user at
       * a time): we would prefer not to enlarge the shmem inode just for that.
       */
      struct shmem_falloc {
              wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
              pgoff_t start;                /* start of range currently being fallocated */
              pgoff_t next;                /* the next page offset to be fallocated */
              pgoff_t nr_falloced;        /* how many new pages have been fallocated */
              pgoff_t nr_unswapped;        /* how often writepage refused to swap out */
      };
      
      struct shmem_options {
              unsigned long long blocks;
              unsigned long long inodes;
              struct mempolicy *mpol;
              kuid_t uid;
              kgid_t gid;
              umode_t mode;
              int huge;
              int seen;
      #define SHMEM_SEEN_BLOCKS 1
      #define SHMEM_SEEN_INODES 2
      #define SHMEM_SEEN_HUGE 4
      };
      
      #ifdef CONFIG_TMPFS
      static unsigned long shmem_default_max_blocks(void)
      {
              return totalram_pages() / 2;
      }
      
      static unsigned long shmem_default_max_inodes(void)
      {
              unsigned long nr_pages = totalram_pages();
      
              return min(nr_pages - totalhigh_pages(), nr_pages / 2);
      }
      #endif
      
      static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
      static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                                      struct shmem_inode_info *info, pgoff_t index);
      static int shmem_swapin_page(struct inode *inode, pgoff_t index,
                                   struct page **pagep, enum sgp_type sgp,
                                   gfp_t gfp, struct vm_area_struct *vma,
                                   vm_fault_t *fault_type);
      static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
                      struct page **pagep, enum sgp_type sgp,
                      gfp_t gfp, struct vm_area_struct *vma,
                      struct vm_fault *vmf, vm_fault_t *fault_type);
      
      int shmem_getpage(struct inode *inode, pgoff_t index,
                      struct page **pagep, enum sgp_type sgp)
      {
   26         return shmem_getpage_gfp(inode, index, pagep, sgp,
                      mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
      }
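
       /*
        * Illustrative sketch (editorial addition, not part of the original
        * file): how a caller might use shmem_getpage() to materialize one
        * page of a tmpfs inode.  The helper name is hypothetical; SGP_CACHE
        * asks for the page to be allocated if it is not already present, and
        * the page comes back locked with a reference held.
        */
       #if 0        /* example only, not compiled */
       static int shmem_getpage_example(struct inode *inode, pgoff_t index)
       {
               struct page *page = NULL;
               int err;

               err = shmem_getpage(inode, index, &page, SGP_CACHE);
               if (err)
                       return err;

               set_page_dirty(page);        /* pretend we wrote something */
               unlock_page(page);
               put_page(page);
               return 0;
       }
       #endif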
      
      static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
      {
              return sb->s_fs_info;
      }
      
      /*
       * shmem_file_setup pre-accounts the whole fixed size of a VM object,
       * for shared memory and for shared anonymous (/dev/zero) mappings
       * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
       * consistent with the pre-accounting of private mappings ...
       */
      static inline int shmem_acct_size(unsigned long flags, loff_t size)
      {
              return (flags & VM_NORESERVE) ?
                      0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
      }
      
      static inline void shmem_unacct_size(unsigned long flags, loff_t size)
      {
              if (!(flags & VM_NORESERVE))
                      vm_unacct_memory(VM_ACCT(size));
      }
      
      static inline int shmem_reacct_size(unsigned long flags,
                      loff_t oldsize, loff_t newsize)
      {
              if (!(flags & VM_NORESERVE)) {
                      if (VM_ACCT(newsize) > VM_ACCT(oldsize))
                              return security_vm_enough_memory_mm(current->mm,
                                              VM_ACCT(newsize) - VM_ACCT(oldsize));
                      else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
                              vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
              }
              return 0;
      }
      
      /*
       * ... whereas tmpfs objects are accounted incrementally as
       * pages are allocated, in order to allow large sparse files.
       * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
       * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
       */
      static inline int shmem_acct_block(unsigned long flags, long pages)
      {
              if (!(flags & VM_NORESERVE))
                      return 0;
      
   20         return security_vm_enough_memory_mm(current->mm,
                              pages * VM_ACCT(PAGE_SIZE));
      }
      
   19 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
      {
              if (flags & VM_NORESERVE)
   19                 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
      }
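
       /*
        * Illustrative sketch (editorial addition): a worked example of the two
        * accounting schemes described above.  VM_ACCT() rounds a byte count up
        * to whole pages, so with 4KiB pages a 6000-byte object charges two
        * pages; those pages are charged up front by shmem_acct_size() for
        * shared/anonymous objects, or one page at a time by shmem_acct_block()
        * when the mapping was created with VM_NORESERVE.  The function name is
        * hypothetical.
        */
       #if 0        /* example only, not compiled */
       static void shmem_acct_example(void)
       {
               BUILD_BUG_ON(VM_ACCT(1) != 1);
               BUILD_BUG_ON(VM_ACCT(PAGE_SIZE) != 1);
               BUILD_BUG_ON(VM_ACCT(PAGE_SIZE + 1) != 2);
       }
       #endif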
      
      static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
      {
              struct shmem_inode_info *info = SHMEM_I(inode);
   20         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
      
   20         if (shmem_acct_block(info->flags, pages))
                      return false;
      
   20         if (sbinfo->max_blocks) {
                      if (percpu_counter_compare(&sbinfo->used_blocks,
   20                                            sbinfo->max_blocks - pages) > 0)
                              goto unacct;
   20                 percpu_counter_add(&sbinfo->used_blocks, pages);
              }
      
              return true;
      
      unacct:
              shmem_unacct_blocks(info->flags, pages);
              return false;
      }
      
      static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
      {
              struct shmem_inode_info *info = SHMEM_I(inode);
              struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
      
              if (sbinfo->max_blocks)
   19                 percpu_counter_sub(&sbinfo->used_blocks, pages);
   19         shmem_unacct_blocks(info->flags, pages);
      }
      
      static const struct super_operations shmem_ops;
      static const struct address_space_operations shmem_aops;
      static const struct file_operations shmem_file_operations;
      static const struct inode_operations shmem_inode_operations;
      static const struct inode_operations shmem_dir_inode_operations;
      static const struct inode_operations shmem_special_inode_operations;
      static const struct vm_operations_struct shmem_vm_ops;
      static struct file_system_type shmem_fs_type;
      
      bool vma_is_shmem(struct vm_area_struct *vma)
      {
              return vma->vm_ops == &shmem_vm_ops;
      }
      
      static LIST_HEAD(shmem_swaplist);
      static DEFINE_MUTEX(shmem_swaplist_mutex);
      
      static int shmem_reserve_inode(struct super_block *sb)
      {
    3         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
              if (sbinfo->max_inodes) {
    3                 spin_lock(&sbinfo->stat_lock);
                      if (!sbinfo->free_inodes) {
                              spin_unlock(&sbinfo->stat_lock);
                              return -ENOSPC;
                      }
    3                 sbinfo->free_inodes--;
    3                 spin_unlock(&sbinfo->stat_lock);
              }
              return 0;
      }
      
      static void shmem_free_inode(struct super_block *sb)
      {
    2         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
              if (sbinfo->max_inodes) {
    2                 spin_lock(&sbinfo->stat_lock);
                      sbinfo->free_inodes++;
                      spin_unlock(&sbinfo->stat_lock);
              }
    2 }
      
      /**
       * shmem_recalc_inode - recalculate the block usage of an inode
       * @inode: inode to recalc
       *
       * We have to calculate the free blocks since the mm can drop
       * undirtied hole pages behind our back.
       *
       * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
       * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
       *
        * Must be called with info->lock held.
       */
      static void shmem_recalc_inode(struct inode *inode)
      {
              struct shmem_inode_info *info = SHMEM_I(inode);
              long freed;
      
   29         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
              if (freed > 0) {
   19                 info->alloced -= freed;
                      inode->i_blocks -= freed * BLOCKS_PER_PAGE;
   19                 shmem_inode_unacct_blocks(inode, freed);
              }
   29 }
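
       /*
        * Illustrative sketch (editorial addition): a worked example of the
        * arithmetic above.  If info->alloced == 10, info->swapped == 2 and
        * i_mapping->nrpages == 5, the mm has dropped 3 undirtied hole pages
        * behind our back; shmem_recalc_inode() trims alloced to 7, subtracts
        * 3 * BLOCKS_PER_PAGE from i_blocks and returns the 3 blocks to the
        * superblock and overcommit accounting.  Callers take info->lock, as
        * shmem_getattr() and shmem_writepage() do below; the function name
        * here is hypothetical.
        */
       #if 0        /* example only, not compiled */
       static void shmem_recalc_example(struct inode *inode)
       {
               struct shmem_inode_info *info = SHMEM_I(inode);

               spin_lock_irq(&info->lock);
               shmem_recalc_inode(inode);
               spin_unlock_irq(&info->lock);
       }
       #endif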
      
      bool shmem_charge(struct inode *inode, long pages)
      {
              struct shmem_inode_info *info = SHMEM_I(inode);
              unsigned long flags;
      
              if (!shmem_inode_acct_block(inode, pages))
                      return false;
      
              /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
              inode->i_mapping->nrpages += pages;
      
              spin_lock_irqsave(&info->lock, flags);
              info->alloced += pages;
              inode->i_blocks += pages * BLOCKS_PER_PAGE;
              shmem_recalc_inode(inode);
              spin_unlock_irqrestore(&info->lock, flags);
      
              return true;
      }
      
      void shmem_uncharge(struct inode *inode, long pages)
      {
              struct shmem_inode_info *info = SHMEM_I(inode);
              unsigned long flags;
      
              /* nrpages adjustment done by __delete_from_page_cache() or caller */
      
              spin_lock_irqsave(&info->lock, flags);
              info->alloced -= pages;
              inode->i_blocks -= pages * BLOCKS_PER_PAGE;
              shmem_recalc_inode(inode);
              spin_unlock_irqrestore(&info->lock, flags);
      
              shmem_inode_unacct_blocks(inode, pages);
      }
      
      /*
       * Replace item expected in xarray by a new item, while holding xa_lock.
       */
      static int shmem_replace_entry(struct address_space *mapping,
                              pgoff_t index, void *expected, void *replacement)
      {
              XA_STATE(xas, &mapping->i_pages, index);
              void *item;
      
              VM_BUG_ON(!expected);
              VM_BUG_ON(!replacement);
              item = xas_load(&xas);
              if (item != expected)
                      return -ENOENT;
              xas_store(&xas, replacement);
              return 0;
      }
      
      /*
       * Sometimes, before we decide whether to proceed or to fail, we must check
       * that an entry was not already brought back from swap by a racing thread.
       *
       * Checking page is not enough: by the time a SwapCache page is locked, it
       * might be reused, and again be SwapCache, using the same swap as before.
       */
      static bool shmem_confirm_swap(struct address_space *mapping,
                                     pgoff_t index, swp_entry_t swap)
      {
              return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
      }
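
       /*
        * Illustrative sketch (editorial addition): the usual calling pattern,
        * as in shmem_swapin_page() -- after sleeping to read the page from
        * swap, re-check that the mapping still holds the same swap entry
        * before committing, and back off with -EEXIST if a racing thread got
        * there first.  The wrapper name is hypothetical.
        */
       #if 0        /* example only, not compiled */
       static int shmem_confirm_example(struct address_space *mapping,
                                        pgoff_t index, swp_entry_t swap)
       {
               if (!shmem_confirm_swap(mapping, index, swap))
                       return -EEXIST;        /* raced: caller retries the lookup */
               return 0;
       }
       #endif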
      
      /*
       * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
       *
       * SHMEM_HUGE_NEVER:
       *        disables huge pages for the mount;
       * SHMEM_HUGE_ALWAYS:
       *        enables huge pages for the mount;
       * SHMEM_HUGE_WITHIN_SIZE:
       *        only allocate huge pages if the page will be fully within i_size,
       *        also respect fadvise()/madvise() hints;
       * SHMEM_HUGE_ADVISE:
       *        only allocate huge pages if requested with fadvise()/madvise();
       */
      
      #define SHMEM_HUGE_NEVER        0
      #define SHMEM_HUGE_ALWAYS        1
      #define SHMEM_HUGE_WITHIN_SIZE        2
      #define SHMEM_HUGE_ADVISE        3
      
       /*
        * Special values.
        * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
        *
        * SHMEM_HUGE_DENY:
        *        disables huge pages on shm_mnt and all mounts, for emergency use;
        * SHMEM_HUGE_FORCE:
        *        enables huge pages on shm_mnt and all mounts, without needing the
        *        huge= mount option, for testing;
        */
      #define SHMEM_HUGE_DENY                (-1)
      #define SHMEM_HUGE_FORCE        (-2)
      
      #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
      /* ifdef here to avoid bloating shmem.o when not necessary */
      
      static int shmem_huge __read_mostly;
      
      #if defined(CONFIG_SYSFS)
      static int shmem_parse_huge(const char *str)
      {
              if (!strcmp(str, "never"))
                      return SHMEM_HUGE_NEVER;
              if (!strcmp(str, "always"))
                      return SHMEM_HUGE_ALWAYS;
              if (!strcmp(str, "within_size"))
                      return SHMEM_HUGE_WITHIN_SIZE;
              if (!strcmp(str, "advise"))
                      return SHMEM_HUGE_ADVISE;
              if (!strcmp(str, "deny"))
                      return SHMEM_HUGE_DENY;
              if (!strcmp(str, "force"))
                      return SHMEM_HUGE_FORCE;
              return -EINVAL;
      }
      #endif
      
      #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
      static const char *shmem_format_huge(int huge)
      {
              switch (huge) {
              case SHMEM_HUGE_NEVER:
                      return "never";
              case SHMEM_HUGE_ALWAYS:
                      return "always";
              case SHMEM_HUGE_WITHIN_SIZE:
                      return "within_size";
              case SHMEM_HUGE_ADVISE:
                      return "advise";
              case SHMEM_HUGE_DENY:
                      return "deny";
              case SHMEM_HUGE_FORCE:
                      return "force";
              default:
                      VM_BUG_ON(1);
                      return "bad_val";
              }
      }
      #endif
      
      static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                      struct shrink_control *sc, unsigned long nr_to_split)
      {
              LIST_HEAD(list), *pos, *next;
              LIST_HEAD(to_remove);
              struct inode *inode;
              struct shmem_inode_info *info;
              struct page *page;
              unsigned long batch = sc ? sc->nr_to_scan : 128;
              int removed = 0, split = 0;
      
              if (list_empty(&sbinfo->shrinklist))
                      return SHRINK_STOP;
      
              spin_lock(&sbinfo->shrinklist_lock);
              list_for_each_safe(pos, next, &sbinfo->shrinklist) {
                      info = list_entry(pos, struct shmem_inode_info, shrinklist);
      
                      /* pin the inode */
                      inode = igrab(&info->vfs_inode);
      
                      /* inode is about to be evicted */
                      if (!inode) {
                              list_del_init(&info->shrinklist);
                              removed++;
                              goto next;
                      }
      
                      /* Check if there's anything to gain */
                      if (round_up(inode->i_size, PAGE_SIZE) ==
                                      round_up(inode->i_size, HPAGE_PMD_SIZE)) {
                              list_move(&info->shrinklist, &to_remove);
                              removed++;
                              goto next;
                      }
      
                      list_move(&info->shrinklist, &list);
      next:
                      if (!--batch)
                              break;
              }
              spin_unlock(&sbinfo->shrinklist_lock);
      
              list_for_each_safe(pos, next, &to_remove) {
                      info = list_entry(pos, struct shmem_inode_info, shrinklist);
                      inode = &info->vfs_inode;
                      list_del_init(&info->shrinklist);
                      iput(inode);
              }
      
              list_for_each_safe(pos, next, &list) {
                      int ret;
      
                      info = list_entry(pos, struct shmem_inode_info, shrinklist);
                      inode = &info->vfs_inode;
      
                      if (nr_to_split && split >= nr_to_split)
                              goto leave;
      
                      page = find_get_page(inode->i_mapping,
                                      (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
                      if (!page)
                              goto drop;
      
                      /* No huge page at the end of the file: nothing to split */
                      if (!PageTransHuge(page)) {
                              put_page(page);
                              goto drop;
                      }
      
                      /*
                       * Leave the inode on the list if we failed to lock
                       * the page at this time.
                       *
                       * Waiting for the lock may lead to deadlock in the
                       * reclaim path.
                       */
                      if (!trylock_page(page)) {
                              put_page(page);
                              goto leave;
                      }
      
                      ret = split_huge_page(page);
                      unlock_page(page);
                      put_page(page);
      
                      /* If split failed leave the inode on the list */
                      if (ret)
                              goto leave;
      
                      split++;
      drop:
                      list_del_init(&info->shrinklist);
                      removed++;
      leave:
                      iput(inode);
              }
      
              spin_lock(&sbinfo->shrinklist_lock);
              list_splice_tail(&list, &sbinfo->shrinklist);
              sbinfo->shrinklist_len -= removed;
              spin_unlock(&sbinfo->shrinklist_lock);
      
              return split;
      }
      
      static long shmem_unused_huge_scan(struct super_block *sb,
                      struct shrink_control *sc)
      {
              struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
      
              if (!READ_ONCE(sbinfo->shrinklist_len))
                      return SHRINK_STOP;
      
              return shmem_unused_huge_shrink(sbinfo, sc, 0);
      }
      
      static long shmem_unused_huge_count(struct super_block *sb,
                      struct shrink_control *sc)
      {
              struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
              return READ_ONCE(sbinfo->shrinklist_len);
      }
      #else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
      
      #define shmem_huge SHMEM_HUGE_DENY
      
      static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                      struct shrink_control *sc, unsigned long nr_to_split)
      {
              return 0;
      }
      #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
      
      static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
      {
              if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
                  (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
                  shmem_huge != SHMEM_HUGE_DENY)
                      return true;
              return false;
      }
      
      /*
       * Like add_to_page_cache_locked, but error if expected item has gone.
       */
      static int shmem_add_to_page_cache(struct page *page,
                                         struct address_space *mapping,
                                         pgoff_t index, void *expected, gfp_t gfp)
      {
   20         XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
              unsigned long i = 0;
   20         unsigned long nr = compound_nr(page);
      
   20         VM_BUG_ON_PAGE(PageTail(page), page);
              VM_BUG_ON_PAGE(index != round_down(index, nr), page);
   20         VM_BUG_ON_PAGE(!PageLocked(page), page);
   20         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
   20         VM_BUG_ON(expected && PageTransHuge(page));
      
   20         page_ref_add(page, nr);
              page->mapping = mapping;
              page->index = index;
      
              do {
                      void *entry;
   20                 xas_lock_irq(&xas);
                      entry = xas_find_conflict(&xas);
                      if (entry != expected)
                              xas_set_err(&xas, -EEXIST);
   20                 xas_create_range(&xas);
                      if (xas_error(&xas))
                              goto unlock;
      next:
   20                 xas_store(&xas, page);
                      if (++i < nr) {
                              xas_next(&xas);
                              goto next;
                      }
   20                 if (PageTransHuge(page)) {
                              count_vm_event(THP_FILE_ALLOC);
                              __inc_node_page_state(page, NR_SHMEM_THPS);
                      }
   20                 mapping->nrpages += nr;
   20                 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
   20                 __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
      unlock:
   20                 xas_unlock_irq(&xas);
              } while (xas_nomem(&xas, gfp));
      
   20         if (xas_error(&xas)) {
                      page->mapping = NULL;
                      page_ref_sub(page, nr);
                      return xas_error(&xas);
              }
      
   20         return 0;
      }
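
       /*
        * Illustrative sketch (editorial addition): the calling convention
        * checked by the VM_BUG_ONs above.  The page must already be locked and
        * marked swap-backed before insertion; 'expected' is NULL for a plain
        * insert, or the swap radix entry being replaced when a page is brought
        * back from swap.  The helper name is hypothetical.
        */
       #if 0        /* example only, not compiled */
       static int shmem_add_example(struct page *page, struct address_space *mapping,
                                    pgoff_t index, gfp_t gfp)
       {
               __SetPageLocked(page);
               __SetPageSwapBacked(page);
               return shmem_add_to_page_cache(page, mapping, index, NULL, gfp);
       }
       #endif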
      
      /*
       * Like delete_from_page_cache, but substitutes swap for page.
       */
      static void shmem_delete_from_page_cache(struct page *page, void *radswap)
      {
              struct address_space *mapping = page->mapping;
              int error;
      
              VM_BUG_ON_PAGE(PageCompound(page), page);
      
              xa_lock_irq(&mapping->i_pages);
              error = shmem_replace_entry(mapping, page->index, page, radswap);
              page->mapping = NULL;
              mapping->nrpages--;
              __dec_node_page_state(page, NR_FILE_PAGES);
              __dec_node_page_state(page, NR_SHMEM);
              xa_unlock_irq(&mapping->i_pages);
              put_page(page);
              BUG_ON(error);
      }
      
      /*
       * Remove swap entry from page cache, free the swap and its page cache.
       */
      static int shmem_free_swap(struct address_space *mapping,
                                 pgoff_t index, void *radswap)
      {
              void *old;
      
              old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
              if (old != radswap)
                      return -ENOENT;
              free_swap_and_cache(radix_to_swp_entry(radswap));
              return 0;
      }
      
      /*
       * Determine (in bytes) how many of the shmem object's pages mapped by the
       * given offsets are swapped out.
       *
       * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
       * as long as the inode doesn't go away and racy results are not a problem.
       */
      unsigned long shmem_partial_swap_usage(struct address_space *mapping,
                                                      pgoff_t start, pgoff_t end)
      {
              XA_STATE(xas, &mapping->i_pages, start);
              struct page *page;
              unsigned long swapped = 0;
      
              rcu_read_lock();
              xas_for_each(&xas, page, end - 1) {
                      if (xas_retry(&xas, page))
                              continue;
                      if (xa_is_value(page))
                              swapped++;
      
                      if (need_resched()) {
                              xas_pause(&xas);
                              cond_resched_rcu();
                      }
              }
      
              rcu_read_unlock();
      
              return swapped << PAGE_SHIFT;
      }
      
      /*
       * Determine (in bytes) how many of the shmem object's pages mapped by the
        * given vma are swapped out.
       *
       * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
       * as long as the inode doesn't go away and racy results are not a problem.
       */
      unsigned long shmem_swap_usage(struct vm_area_struct *vma)
      {
              struct inode *inode = file_inode(vma->vm_file);
              struct shmem_inode_info *info = SHMEM_I(inode);
              struct address_space *mapping = inode->i_mapping;
              unsigned long swapped;
      
              /* Be careful as we don't hold info->lock */
              swapped = READ_ONCE(info->swapped);
      
              /*
               * The easier cases are when the shmem object has nothing in swap, or
               * the vma maps it whole. Then we can simply use the stats that we
               * already track.
               */
              if (!swapped)
                      return 0;
      
              if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
                      return swapped << PAGE_SHIFT;
      
              /* Here comes the more involved part */
              return shmem_partial_swap_usage(mapping,
                              linear_page_index(vma, vma->vm_start),
                              linear_page_index(vma, vma->vm_end));
      }
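
       /*
        * Illustrative sketch (editorial addition), modelled loosely on the
        * smaps code in fs/proc/task_mmu.c: report how much of a shmem-backed
        * vma is currently in swap.  The helper name and the pr_debug()
        * reporting are hypothetical.
        */
       #if 0        /* example only, not compiled */
       static void shmem_report_swap(struct vm_area_struct *vma)
       {
               if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
                       unsigned long swapped = shmem_swap_usage(vma);

                       pr_debug("shmem vma: %lu bytes in swap\n", swapped);
               }
       }
       #endif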
      
      /*
        * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
       */
      void shmem_unlock_mapping(struct address_space *mapping)
      {
              struct pagevec pvec;
              pgoff_t indices[PAGEVEC_SIZE];
              pgoff_t index = 0;
      
              pagevec_init(&pvec);
              /*
               * Minor point, but we might as well stop if someone else SHM_LOCKs it.
               */
              while (!mapping_unevictable(mapping)) {
                      /*
                       * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
                       * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
                       */
                      pvec.nr = find_get_entries(mapping, index,
                                                 PAGEVEC_SIZE, pvec.pages, indices);
                      if (!pvec.nr)
                              break;
                      index = indices[pvec.nr - 1] + 1;
                      pagevec_remove_exceptionals(&pvec);
                      check_move_unevictable_pages(&pvec);
                      pagevec_release(&pvec);
                      cond_resched();
              }
      }
      
      /*
       * Remove range of pages and swap entries from page cache, and free them.
       * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
       */
      static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                                                       bool unfalloc)
      {
   22         struct address_space *mapping = inode->i_mapping;
              struct shmem_inode_info *info = SHMEM_I(inode);
              pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
              pgoff_t end = (lend + 1) >> PAGE_SHIFT;
   21         unsigned int partial_start = lstart & (PAGE_SIZE - 1);
              unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
              struct pagevec pvec;
              pgoff_t indices[PAGEVEC_SIZE];
              long nr_swaps_freed = 0;
              pgoff_t index;
              int i;
      
              if (lend == -1)
                      end = -1;        /* unsigned, so actually very big */
      
   22         pagevec_init(&pvec);
              index = start;
              while (index < end) {
                      pvec.nr = find_get_entries(mapping, index,
   22                         min(end - index, (pgoff_t)PAGEVEC_SIZE),
                              pvec.pages, indices);
                      if (!pvec.nr)
                              break;
   20                 for (i = 0; i < pagevec_count(&pvec); i++) {
   20                         struct page *page = pvec.pages[i];
      
                              index = indices[i];
                              if (index >= end)
                                      break;
      
   20                         if (xa_is_value(page)) {
                                      if (unfalloc)
                                              continue;
                                      nr_swaps_freed += !shmem_free_swap(mapping,
                                                                      index, page);
                                      continue;
                              }
      
   20                         VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
      
   20                         if (!trylock_page(page))
                                      continue;
      
   20                         if (PageTransTail(page)) {
                                      /* Middle of THP: zero out the page */
                                      clear_highpage(page);
                                      unlock_page(page);
                                      continue;
   20                         } else if (PageTransHuge(page)) {
                                      if (index == round_down(end, HPAGE_PMD_NR)) {
                                              /*
                                               * Range ends in the middle of THP:
                                               * zero out the page
                                               */
                                              clear_highpage(page);
                                              unlock_page(page);
                                              continue;
                                      }
                                      index += HPAGE_PMD_NR - 1;
                                      i += HPAGE_PMD_NR - 1;
                              }
      
   20                         if (!unfalloc || !PageUptodate(page)) {
   20                                 VM_BUG_ON_PAGE(PageTail(page), page);
   20                                 if (page_mapping(page) == mapping) {
   20                                         VM_BUG_ON_PAGE(PageWriteback(page), page);
   20                                         truncate_inode_page(mapping, page);
                                      }
                              }
   20                         unlock_page(page);
                      }
   20                 pagevec_remove_exceptionals(&pvec);
   20                 pagevec_release(&pvec);
   20                 cond_resched();
                      index++;
              }
      
              if (partial_start) {
                      struct page *page = NULL;
                      shmem_getpage(inode, start - 1, &page, SGP_READ);
                      if (page) {
                              unsigned int top = PAGE_SIZE;
                              if (start > end) {
                                      top = partial_end;
                                      partial_end = 0;
                              }
                              zero_user_segment(page, partial_start, top);
                              set_page_dirty(page);
                              unlock_page(page);
                              put_page(page);
                      }
              }
   21         if (partial_end) {
                      struct page *page = NULL;
                      shmem_getpage(inode, end, &page, SGP_READ);
                      if (page) {
                              zero_user_segment(page, 0, partial_end);
                              set_page_dirty(page);
                              unlock_page(page);
                              put_page(page);
                      }
              }
   21         if (start >= end)
                      return;
      
              index = start;
              while (index < end) {
   21                 cond_resched();
      
                      pvec.nr = find_get_entries(mapping, index,
                                      min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                      pvec.pages, indices);
                      if (!pvec.nr) {
                              /* If all gone or hole-punch or unfalloc, we're done */
   21                         if (index == start || end != -1)
                                      break;
                              /* But if truncating, restart to make sure all gone */
                              index = start;
                              continue;
                      }
                      for (i = 0; i < pagevec_count(&pvec); i++) {
                              struct page *page = pvec.pages[i];
      
                              index = indices[i];
                              if (index >= end)
                                      break;
      
                              if (xa_is_value(page)) {
                                      if (unfalloc)
                                              continue;
                                      if (shmem_free_swap(mapping, index, page)) {
                                              /* Swap was replaced by page: retry */
                                              index--;
                                              break;
                                      }
                                      nr_swaps_freed++;
                                      continue;
                              }
      
                              lock_page(page);
      
                              if (PageTransTail(page)) {
                                      /* Middle of THP: zero out the page */
                                      clear_highpage(page);
                                      unlock_page(page);
                                      /*
                                        * Partial THP truncate due to 'start' falling in
                                        * the middle of the THP: no need to look at these
                                        * pages again on a !pvec.nr restart.
                                       */
                                      if (index != round_down(end, HPAGE_PMD_NR))
                                              start++;
                                      continue;
                              } else if (PageTransHuge(page)) {
                                      if (index == round_down(end, HPAGE_PMD_NR)) {
                                              /*
                                               * Range ends in the middle of THP:
                                               * zero out the page
                                               */
                                              clear_highpage(page);
                                              unlock_page(page);
                                              continue;
                                      }
                                      index += HPAGE_PMD_NR - 1;
                                      i += HPAGE_PMD_NR - 1;
                              }
      
                              if (!unfalloc || !PageUptodate(page)) {
                                      VM_BUG_ON_PAGE(PageTail(page), page);
                                      if (page_mapping(page) == mapping) {
                                              VM_BUG_ON_PAGE(PageWriteback(page), page);
                                              truncate_inode_page(mapping, page);
                                      } else {
                                              /* Page was replaced by swap: retry */
                                              unlock_page(page);
                                              index--;
                                              break;
                                      }
                              }
                              unlock_page(page);
                      }
                      pagevec_remove_exceptionals(&pvec);
                      pagevec_release(&pvec);
                      index++;
              }
      
   21         spin_lock_irq(&info->lock);
              info->swapped -= nr_swaps_freed;
              shmem_recalc_inode(inode);
   21         spin_unlock_irq(&info->lock);
      }
      
      void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
      {
   22         shmem_undo_range(inode, lstart, lend, false);
              inode->i_ctime = inode->i_mtime = current_time(inode);
      }
      EXPORT_SYMBOL_GPL(shmem_truncate_range);
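
       /*
        * Illustrative sketch (editorial addition): how an in-kernel user of a
        * tmpfs file (a GEM driver, for instance) might drop a byte range with
        * the exported helper.  Offsets are in bytes and 'lend' is inclusive,
        * so (loff_t)-1 means "to end of file"; the wrapper name is
        * hypothetical.
        */
       #if 0        /* example only, not compiled */
       static void shmem_drop_range_example(struct inode *inode,
                                            loff_t start, loff_t len)
       {
               shmem_truncate_range(inode, start, start + len - 1);
       }
       #endif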
      
      static int shmem_getattr(const struct path *path, struct kstat *stat,
                               u32 request_mask, unsigned int query_flags)
      {
              struct inode *inode = path->dentry->d_inode;
              struct shmem_inode_info *info = SHMEM_I(inode);
              struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
      
              if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
                      spin_lock_irq(&info->lock);
                      shmem_recalc_inode(inode);
                      spin_unlock_irq(&info->lock);
              }
              generic_fillattr(inode, stat);
      
              if (is_huge_enabled(sb_info))
                      stat->blksize = HPAGE_PMD_SIZE;
      
              return 0;
      }
      
      static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
      {
   23         struct inode *inode = d_inode(dentry);
              struct shmem_inode_info *info = SHMEM_I(inode);
              struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
              int error;
      
              error = setattr_prepare(dentry, attr);
              if (error)
                      return error;
      
   23         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
   23                 loff_t oldsize = inode->i_size;
                      loff_t newsize = attr->ia_size;
      
                      /* protected by i_mutex */
   23                 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                          (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                              return -EPERM;
      
   23                 if (newsize != oldsize) {
   20                         error = shmem_reacct_size(SHMEM_I(inode)->flags,
                                              oldsize, newsize);
                              if (error)
                                      return error;
   20                         i_size_write(inode, newsize);
                              inode->i_ctime = inode->i_mtime = current_time(inode);
                      }
                      if (newsize <= oldsize) {
   23                         loff_t holebegin = round_up(newsize, PAGE_SIZE);
                              if (oldsize > holebegin)
   20                                 unmap_mapping_range(inode->i_mapping,
                                                              holebegin, 0, 1);
   20                         if (info->alloced)
   20                                 shmem_truncate_range(inode,
                                                              newsize, (loff_t)-1);
                              /* unmap again to remove racily COWed private pages */
                              if (oldsize > holebegin)
   19                                 unmap_mapping_range(inode->i_mapping,
                                                              holebegin, 0, 1);
      
                              /*
                               * Part of the huge page can be beyond i_size: subject
                               * to shrink under memory pressure.
                               */
                              if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
   22                                 spin_lock(&sbinfo->shrinklist_lock);
                                      /*
                                        * Be careful to defend against unlocked access to
                                        * ->shrinklist in shmem_unused_huge_shrink()
                                       */
    1                                 if (list_empty_careful(&info->shrinklist)) {
    1                                         list_add_tail(&info->shrinklist,
                                                              &sbinfo->shrinklist);
    1                                         sbinfo->shrinklist_len++;
                                      }
   22                                 spin_unlock(&sbinfo->shrinklist_lock);
                              }
                      }
              }
      
   22         setattr_copy(inode, attr);
   22         if (attr->ia_valid & ATTR_MODE)
                      error = posix_acl_chmod(inode, inode->i_mode);
              return error;
      }
      
      static void shmem_evict_inode(struct inode *inode)
      {
              struct shmem_inode_info *info = SHMEM_I(inode);
    2         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
      
    2         if (inode->i_mapping->a_ops == &shmem_aops) {
                      shmem_unacct_size(info->flags, inode->i_size);
    2                 inode->i_size = 0;
                      shmem_truncate_range(inode, 0, (loff_t)-1);
                      if (!list_empty(&info->shrinklist)) {
                              spin_lock(&sbinfo->shrinklist_lock);
                              if (!list_empty(&info->shrinklist)) {
                                      list_del_init(&info->shrinklist);
                                      sbinfo->shrinklist_len--;
                              }
                              spin_unlock(&sbinfo->shrinklist_lock);
                      }
    2                 while (!list_empty(&info->swaplist)) {
                              /* Wait while shmem_unuse() is scanning this inode... */
                              wait_var_event(&info->stop_eviction,
                                             !atomic_read(&info->stop_eviction));
                              mutex_lock(&shmem_swaplist_mutex);
                              /* ...but beware of the race if we peeked too early */
                              if (!atomic_read(&info->stop_eviction))
                                      list_del_init(&info->swaplist);
                              mutex_unlock(&shmem_swaplist_mutex);
                      }
              }
      
    2         simple_xattrs_free(&info->xattrs);
    2         WARN_ON(inode->i_blocks);
    2         shmem_free_inode(inode->i_sb);
              clear_inode(inode);
      }
      
      extern struct swap_info_struct *swap_info[];
      
      static int shmem_find_swap_entries(struct address_space *mapping,
                                         pgoff_t start, unsigned int nr_entries,
                                         struct page **entries, pgoff_t *indices,
                                         unsigned int type, bool frontswap)
      {
              XA_STATE(xas, &mapping->i_pages, start);
              struct page *page;
              swp_entry_t entry;
              unsigned int ret = 0;
      
              if (!nr_entries)
                      return 0;
      
              rcu_read_lock();
              xas_for_each(&xas, page, ULONG_MAX) {
                      if (xas_retry(&xas, page))
                              continue;
      
                      if (!xa_is_value(page))
                              continue;
      
                      entry = radix_to_swp_entry(page);
                      if (swp_type(entry) != type)
                              continue;
                      if (frontswap &&
                          !frontswap_test(swap_info[type], swp_offset(entry)))
                              continue;
      
                      indices[ret] = xas.xa_index;
                      entries[ret] = page;
      
                      if (need_resched()) {
                              xas_pause(&xas);
                              cond_resched_rcu();
                      }
                      if (++ret == nr_entries)
                              break;
              }
              rcu_read_unlock();
      
              return ret;
      }
      
      /*
       * Move the swapped pages for an inode to page cache. Returns the count
       * of pages swapped in, or the error in case of failure.
       */
      static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
                                          pgoff_t *indices)
      {
              int i = 0;
              int ret = 0;
              int error = 0;
              struct address_space *mapping = inode->i_mapping;
      
              for (i = 0; i < pvec.nr; i++) {
                      struct page *page = pvec.pages[i];
      
                      if (!xa_is_value(page))
                              continue;
                      error = shmem_swapin_page(inode, indices[i],
                                                &page, SGP_CACHE,
                                                mapping_gfp_mask(mapping),
                                                NULL, NULL);
                      if (error == 0) {
                              unlock_page(page);
                              put_page(page);
                              ret++;
                      }
                      if (error == -ENOMEM)
                              break;
                      error = 0;
              }
              return error ? error : ret;
      }
      
      /*
       * If swap found in inode, free it and move page from swapcache to filecache.
       */
      static int shmem_unuse_inode(struct inode *inode, unsigned int type,
                                   bool frontswap, unsigned long *fs_pages_to_unuse)
      {
              struct address_space *mapping = inode->i_mapping;
              pgoff_t start = 0;
              struct pagevec pvec;
              pgoff_t indices[PAGEVEC_SIZE];
              bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
              int ret = 0;
      
              pagevec_init(&pvec);
              do {
                      unsigned int nr_entries = PAGEVEC_SIZE;
      
                      if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
                              nr_entries = *fs_pages_to_unuse;
      
                      pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
                                                        pvec.pages, indices,
                                                        type, frontswap);
                      if (pvec.nr == 0) {
                              ret = 0;
                              break;
                      }
      
                      ret = shmem_unuse_swap_entries(inode, pvec, indices);
                      if (ret < 0)
                              break;
      
                      if (frontswap_partial) {
                              *fs_pages_to_unuse -= ret;
                              if (*fs_pages_to_unuse == 0) {
                                      ret = FRONTSWAP_PAGES_UNUSED;
                                      break;
                              }
                      }
      
                      start = indices[pvec.nr - 1];
              } while (true);
      
              return ret;
      }
      
      /*
       * Read all the shared memory data that resides in the swap
       * device 'type' back into memory, so the swap device can be
       * unused.
       */
      int shmem_unuse(unsigned int type, bool frontswap,
                      unsigned long *fs_pages_to_unuse)
      {
              struct shmem_inode_info *info, *next;
              int error = 0;
      
              if (list_empty(&shmem_swaplist))
                      return 0;
      
              mutex_lock(&shmem_swaplist_mutex);
              list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
                      if (!info->swapped) {
                              list_del_init(&info->swaplist);
                              continue;
                      }
                      /*
                       * Drop the swaplist mutex while searching the inode for swap;
                       * but before doing so, make sure shmem_evict_inode() will not
                       * remove placeholder inode from swaplist, nor let it be freed
                       * (igrab() would protect from unlink, but not from unmount).
                       */
                      atomic_inc(&info->stop_eviction);
                      mutex_unlock(&shmem_swaplist_mutex);
      
                      error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
                                                fs_pages_to_unuse);
                      cond_resched();
      
                      mutex_lock(&shmem_swaplist_mutex);
                      next = list_next_entry(info, swaplist);
                      if (!info->swapped)
                              list_del_init(&info->swaplist);
                      if (atomic_dec_and_test(&info->stop_eviction))
                              wake_up_var(&info->stop_eviction);
                      if (error)
                              break;
              }
              mutex_unlock(&shmem_swaplist_mutex);
      
              return error;
      }
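
       /*
        * Illustrative sketch (editorial addition), modelled on the swapoff
        * path (try_to_unuse() in mm/swapfile.c): bring every shmem page of
        * swap device 'type' back into the page cache.  Without frontswap the
        * page limit is unused, so 0 is passed; the wrapper name is
        * hypothetical.
        */
       #if 0        /* example only, not compiled */
       static int shmem_unuse_example(unsigned int type)
       {
               unsigned long pages_to_unuse = 0;

               return shmem_unuse(type, false, &pages_to_unuse);
       }
       #endif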
      
      /*
       * Move the page from the page cache to the swap cache.
       */
      static int shmem_writepage(struct page *page, struct writeback_control *wbc)
      {
              struct shmem_inode_info *info;
              struct address_space *mapping;
              struct inode *inode;
              swp_entry_t swap;
              pgoff_t index;
      
              VM_BUG_ON_PAGE(PageCompound(page), page);
              BUG_ON(!PageLocked(page));
              mapping = page->mapping;
              index = page->index;
              inode = mapping->host;
              info = SHMEM_I(inode);
              if (info->flags & VM_LOCKED)
                      goto redirty;
              if (!total_swap_pages)
                      goto redirty;
      
              /*
               * Our capabilities prevent regular writeback or sync from ever calling
               * shmem_writepage; but a stacking filesystem might use ->writepage of
               * its underlying filesystem, in which case tmpfs should write out to
               * swap only in response to memory pressure, and not for the writeback
               * threads or sync.
               */
              if (!wbc->for_reclaim) {
                      WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
                      goto redirty;
              }
      
              /*
               * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
               * value into swapfile.c, the only way we can correctly account for a
               * fallocated page arriving here is now to initialize it and write it.
               *
               * That's okay for a page already fallocated earlier, but if we have
               * not yet completed the fallocation, then (a) we want to keep track
               * of this page in case we have to undo it, and (b) it may not be a
               * good idea to continue anyway, once we're pushing into swap.  So
               * reactivate the page, and let shmem_fallocate() quit when too many.
               */
              if (!PageUptodate(page)) {
                      if (inode->i_private) {
                              struct shmem_falloc *shmem_falloc;
                              spin_lock(&inode->i_lock);
                              shmem_falloc = inode->i_private;
                              if (shmem_falloc &&
                                  !shmem_falloc->waitq &&
                                  index >= shmem_falloc->start &&
                                  index < shmem_falloc->next)
                                      shmem_falloc->nr_unswapped++;
                              else
                                      shmem_falloc = NULL;
                              spin_unlock(&inode->i_lock);
                              if (shmem_falloc)
                                      goto redirty;
                      }
                      clear_highpage(page);
                      flush_dcache_page(page);
                      SetPageUptodate(page);
              }
      
              swap = get_swap_page(page);
              if (!swap.val)
                      goto redirty;
      
              /*
               * Add inode to shmem_unuse()'s list of swapped-out inodes,
               * if it's not already there.  Do it now before the page is
               * moved to swap cache, when its pagelock no longer protects
               * the inode from eviction.  But don't unlock the mutex until
               * we've incremented swapped, because shmem_unuse_inode() will
               * prune a !swapped inode from the swaplist under this mutex.
               */
              mutex_lock(&shmem_swaplist_mutex);
              if (list_empty(&info->swaplist))
                      list_add(&info->swaplist, &shmem_swaplist);
      
              if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                      spin_lock_irq(&info->lock);
                      shmem_recalc_inode(inode);
                      info->swapped++;
                      spin_unlock_irq(&info->lock);
      
                      swap_shmem_alloc(swap);
                      shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
      
                      mutex_unlock(&shmem_swaplist_mutex);
                      BUG_ON(page_mapped(page));
                      swap_writepage(page, wbc);
                      return 0;
              }
      
              mutex_unlock(&shmem_swaplist_mutex);
              put_swap_page(page, swap);
      redirty:
              set_page_dirty(page);
              if (wbc->for_reclaim)
                      return AOP_WRITEPAGE_ACTIVATE;        /* Return with page locked */
              unlock_page(page);
              return 0;
      }
      
      #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
      static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
      {
              char buffer[64];
      
              if (!mpol || mpol->mode == MPOL_DEFAULT)
                      return;                /* show nothing */
      
              mpol_to_str(buffer, sizeof(buffer), mpol);
      
              seq_printf(seq, ",mpol=%s", buffer);
      }
      
      static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
      {
              struct mempolicy *mpol = NULL;
              if (sbinfo->mpol) {
                      spin_lock(&sbinfo->stat_lock);        /* prevent replace/use races */
                      mpol = sbinfo->mpol;
                      mpol_get(mpol);
                      spin_unlock(&sbinfo->stat_lock);
              }
              return mpol;
      }
      #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
      static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
      {
      }
      static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
      {
              return NULL;
      }
      #endif /* CONFIG_NUMA && CONFIG_TMPFS */
      #ifndef CONFIG_NUMA
      #define vm_policy vm_private_data
      #endif
      
      static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
                      struct shmem_inode_info *info, pgoff_t index)
      {
              /* Create a pseudo vma that just contains the policy */
   20         vma_init(vma, NULL);
              /* Bias interleave by inode number to distribute better across nodes */
              vma->vm_pgoff = index + info->vfs_inode.i_ino;
              vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
      }
      
      static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
      {
              /* Drop reference taken by mpol_shared_policy_lookup() */
              mpol_cond_put(vma->vm_policy);
      }
      
      static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                              struct shmem_inode_info *info, pgoff_t index)
      {
              struct vm_area_struct pvma;
              struct page *page;
              struct vm_fault vmf;
      
              shmem_pseudo_vma_init(&pvma, info, index);
              vmf.vma = &pvma;
              vmf.address = 0;
              page = swap_cluster_readahead(swap, gfp, &vmf);
              shmem_pseudo_vma_destroy(&pvma);
      
              return page;
      }
      
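       /*
        * Try to allocate a huge page for the aligned range around @index;
        * give up if THP pagecache is not configured or if any entry already
        * exists within that range.
        */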
      static struct page *shmem_alloc_hugepage(gfp_t gfp,
                      struct shmem_inode_info *info, pgoff_t index)
      {
              struct vm_area_struct pvma;
              struct address_space *mapping = info->vfs_inode.i_mapping;
              pgoff_t hindex;
              struct page *page;
      
              if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
                      return NULL;
      
              hindex = round_down(index, HPAGE_PMD_NR);
              if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
                                                                      XA_PRESENT))
                      return NULL;
      
              shmem_pseudo_vma_init(&pvma, info, hindex);
              page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
                              HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
              shmem_pseudo_vma_destroy(&pvma);
              if (page)
                      prep_transhuge_page(page);
              return page;
      }
      
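       /* Allocate a single small page according to the inode's policy at @index. */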
      static struct page *shmem_alloc_page(gfp_t gfp,
                              struct shmem_inode_info *info, pgoff_t index)
      {
              struct vm_area_struct pvma;
              struct page *page;
      
               shmem_pseudo_vma_init(&pvma, info, index);
              page = alloc_page_vma(gfp, &pvma, 0);
              shmem_pseudo_vma_destroy(&pvma);
      
               return page;
      }
      
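       /*
        * Account the new blocks against the inode and superblock limits,
        * then allocate a huge or small page.  Returns ERR_PTR(-ENOSPC) when
        * over quota, or ERR_PTR(-ENOMEM) when the allocation itself fails.
        */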
      static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
                      struct inode *inode,
                      pgoff_t index, bool huge)
      {
               struct shmem_inode_info *info = SHMEM_I(inode);
              struct page *page;
              int nr;
              int err = -ENOSPC;
      
              if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
                      huge = false;
               nr = huge ? HPAGE_PMD_NR : 1;
      
               if (!shmem_inode_acct_block(inode, nr))
                      goto failed;
      
              if (huge)
                      page = shmem_alloc_hugepage(gfp, info, index);
              else
                       page = shmem_alloc_page(gfp, info, index);
               if (page) {
                       __SetPageLocked(page);
                       __SetPageSwapBacked(page);
                       return page;
              }
      
              err = -ENOMEM;
              shmem_inode_unacct_blocks(inode, nr);
      failed:
              return ERR_PTR(err);
      }
      
      /*
       * When a page is moved from swapcache to shmem filecache (either by the
       * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
       * shmem_unuse_inode()), it may have been read in earlier from swap, in
       * ignorance of the mapping it belongs to.  If that mapping has special
       * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
       * we may need to copy to a suitable page before moving to filecache.
       *
       * In a future release, this may well be extended to respect cpuset and
       * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
       * but for now it is a simple matter of zone.
       */
      static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
      {
              return page_zonenum(page) > gfp_zone(gfp);
      }
      
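       /*
        * Allocate a replacement page in an acceptable zone, copy the old
        * page's contents and swap state across, and substitute it for the
        * old page in the swap cache.  On success the caller's *pagep is
        * switched to the new page; on failure the old page is left in place.
        */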
      static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                                      struct shmem_inode_info *info, pgoff_t index)
      {
              struct page *oldpage, *newpage;
              struct address_space *swap_mapping;
              swp_entry_t entry;
              pgoff_t swap_index;
              int error;
      
              oldpage = *pagep;
              entry.val = page_private(oldpage);
              swap_index = swp_offset(entry);
              swap_mapping = page_mapping(oldpage);
      
              /*
               * We have arrived here because our zones are constrained, so don't
               * limit chance of success by further cpuset and node constraints.
               */
              gfp &= ~GFP_CONSTRAINT_MASK;
              newpage = shmem_alloc_page(gfp, info, index);
              if (!newpage)
                      return -ENOMEM;
      
              get_page(newpage);
              copy_highpage(newpage, oldpage);
              flush_dcache_page(newpage);
      
              __SetPageLocked(newpage);
              __SetPageSwapBacked(newpage);
              SetPageUptodate(newpage);
              set_page_private(newpage, entry.val);
              SetPageSwapCache(newpage);
      
              /*
               * Our caller will very soon move newpage out of swapcache, but it's
               * a nice clean interface for us to replace oldpage by newpage there.
               */
              xa_lock_irq(&swap_mapping->i_pages);
              error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
              if (!error) {
                      __inc_node_page_state(newpage, NR_FILE_PAGES);
                      __dec_node_page_state(oldpage, NR_FILE_PAGES);
              }
              xa_unlock_irq(&swap_mapping->i_pages);
      
              if (unlikely(error)) {
                      /*
                       * Is this possible?  I think not, now that our callers check
                       * both PageSwapCache and page_private after getting page lock;
                        * but be defensive: point oldpage at newpage, so that
                        * the clear and free below discard the unused newpage.
                       */
                      oldpage = newpage;
              } else {
                      mem_cgroup_migrate(oldpage, newpage);
                      lru_cache_add_anon(newpage);
                      *pagep = newpage;
              }
      
              ClearPageSwapCache(oldpage);
              set_page_private(oldpage, 0);
      
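               /*
                * Drop the two references held on the page being discarded:
                * on success, the swap cache's former reference to oldpage and
                * the caller's original reference (the caller now holds newpage
                * via *pagep); on the unlikely error path, the two references
                * taken on newpage above.
                */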
              unlock_page(oldpage);
              put_page(oldpage);
              put_page(oldpage);
              return error;
      }
      
      /*
       * Swap in the page pointed to by *pagep.
       * Caller has to make sure that *pagep contains a valid swapped page.
        * Returns 0 and the page (locked) in *pagep on success.  On failure,
        * returns the error code and leaves *pagep set to NULL.
       */
      static int shmem_swapin_page(struct inode *inode, pgoff_t index,
                                   struct page **pagep, enum sgp_type sgp,
                                   gfp_t gfp, struct vm_area_struct *vma,
                                   vm_fault_t *fault_type)
      {
              struct address_space *mapping = inode->i_mapping;
              struct shmem_inode_info *info = SHMEM_I(inode);
              struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
              struct mem_cgroup *memcg;
              struct page *page;
              swp_entry_t swap;
              int error;
      
              VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
              swap = radix_to_swp_entry(*pagep);
              *pagep = NULL;
      
               /* Look it up and read it in... */
              page = lookup_swap_cache(swap, NULL, 0);
              if (!page) {
                       /* Or should we update major fault stats only when swapin succeeds? */
                      if (fault_type) {
                              *fault_type |= VM_FAULT_MAJOR;
                              count_vm_event(PGMAJFAULT);
                              count_memcg_event_mm(charge_mm, PGMAJFAULT);
                      }
                       /* Here we actually start the I/O */
                      page = shmem_swapin(swap, gfp, info, index);
                      if (!page) {
                              error = -ENOMEM;
                              goto failed;
                      }
              }
      
              /* We have to do this with page locked to prevent races */
              lock_page(page);
              if (!PageSwapCache(page) || page_private(page) != swap.val ||
                  !shmem_confirm_swap(mapping, index, swap)) {
                      error = -EEXIST;
                      goto unlock;
              }
              if (!PageUptodate(page)) {
                      error = -EIO;
                      goto failed;
              }
              wait_on_page_writeback(page);
      
              if (shmem_should_replace_page(page, gfp)) {
                      error = shmem_replace_page(&page, gfp, info, index);
                      if (error)
                              goto failed;
              }
      
              error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
                                                  false);
              if (!error) {
                      error = shmem_add_to_page_cache(page, mapping, index,
                                                      swp_to_radix_entry(swap), gfp);
                      /*
                       * We already confirmed swap under page lock, and make
                       * no memory allocation here, so usually no possibility
                       * of error; but free_swap_and_cache() only trylocks a
                       * page, so it is just possible that the entry has been
                       * truncated or holepunched since swap was confirmed.
                       * shmem_undo_range() will have done some of the
                       * unaccounting, now delete_from_swap_cache() will do
                       * the rest.
                       */
                      if (error) {
                              mem_cgroup_cancel_charge(page, memcg, false);
                              delete_from_swap_cache(page);
                      }
              }
              if (error)
                      goto failed;
      
              mem_cgroup_commit_charge(page, memcg, true, false);
      
              spin_lock_irq(&info->lock);
              info->swapped--;
              shmem_recalc_inode(inode);
              spin_unlock_irq(&info->lock);
      
              if (sgp == SGP_WRITE)
                      mark_page_accessed(page);
      
              delete_from_swap_cache(page);
              set_page_dirty(page);
              swap_free(swap);
      
              *pagep = page;
              return 0;
      failed:
              if (!shmem_confirm_swap(mapping, index, swap))
                      error = -EEXIST;
      unlock:
              if (page) {
                      unlock_page(page);
                      put_page(page);
              }
      
              return error;
      }
      
      /*
       * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
       *
        * If we allocate a new one we do not mark it dirty: that's up to the
        * VM.  If we swap it in we mark it dirty, since we also free the swap
        * entry: a page cannot live in both the swap cache and the page cache.
       *
       * vmf and fault_type are only supplied by shmem_fault:
       * otherwise they are NULL.
       */
      static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
              struct page **pagep, enum sgp_type sgp, gfp_t gfp,
              struct vm_area_struct *vma, struct vm_fault *vmf,
                              vm_fault_t *fault_type)
      {
               struct address_space *mapping = inode->i_mapping;
              struct shmem_inode_info *info = SHMEM_I(inode);
              struct shmem_sb_info *sbinfo;
              struct mm_struct *charge_mm;
              struct mem_cgroup *memcg;
              struct page *page;
              enum sgp_type sgp_huge = sgp;
              pgoff_t hindex = index;
              int error;
              int once = 0;
              int alloced = 0;
      
              if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
                      return -EFBIG;
               if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
                      sgp = SGP_CACHE;
      repeat:
               if (sgp <= SGP_CACHE &&
                   ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
                      return -EINVAL;
              }
      
               sbinfo = SHMEM_SB(inode->i_sb);
               charge_mm = vma ? vma->vm_mm : current->mm;
      
               page = find_lock_entry(mapping, index);
              if (xa_is_value(page)) {
                      error = shmem_swapin_page(inode, index, &page,
                                                sgp, gfp, vma, fault_type);
                      if (error == -EEXIST)
                              goto repeat;
      
                      *pagep = page;
                      return error;
              }
      
               if (page && sgp == SGP_WRITE)
                       mark_page_accessed(page);
      
              /* fallocated page? */
               if (page && !PageUptodate(page)) {
                      if (sgp != SGP_READ)
                              goto clear;
                      unlock_page(page);
                      put_page(page);
                      page = NULL;
              }
               if (page || sgp == SGP_READ) {
                       *pagep = page;
                      return 0;
              }
      
              /*
                * Fast cache lookup did not find it; any swap entry was already
                * handled above, so allocate a new page.
               */
      
               if (vma && userfaultfd_missing(vma)) {
                      *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
                      return 0;
              }
      
              /* shmem_symlink() */
               if (mapping->a_ops != &shmem_aops)
                      goto alloc_nohuge;
               if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
                      goto alloc_nohuge;
               if (shmem_huge == SHMEM_HUGE_FORCE)
                      goto alloc_huge;
               switch (sbinfo->huge) {
                      loff_t i_size;
                      pgoff_t off;
              case SHMEM_HUGE_NEVER:
                      goto alloc_nohuge;
              case SHMEM_HUGE_WITHIN_SIZE:
                      off = round_up(index, HPAGE_PMD_NR);
                      i_size = round_up(i_size_read(inode), PAGE_SIZE);
                      if (i_size >= HPAGE_PMD_SIZE &&
                          i_size >> PAGE_SHIFT >= off)
                              goto alloc_huge;
                      /* fallthrough */
              case SHMEM_HUGE_ADVISE:
                      if (sgp_huge == SGP_HUGE)
                              goto alloc_huge;
                      /* TODO: implement fadvise() hints */
                      goto alloc_nohuge;
              }
      
      alloc_huge:
              page = shmem_alloc_and_acct_page(gfp, inode, index, true);
              if (IS_ERR(page)) {
      alloc_nohuge:
                       page = shmem_alloc_and_acct_page(gfp, inode,
                                                       index, false);
              }
              if (IS_ERR(page)) {
                      int retry = 5;
      
                      error = PTR_ERR(page);
                      page = NULL;
                      if (error != -ENOSPC)
                              goto unlock;
                      /*
                       * Try to reclaim some space by splitting a huge page
                       * beyond i_size on the filesystem.
                       */
                      while (retry--) {
                              int ret;
      
                              ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
                              if (ret == SHRINK_STOP)
                                      break;
                              if (ret)
                                      goto alloc_nohuge;
                      }
                      goto unlock;
              }
      
               if (PageTransHuge(page))
                      hindex = round_down(index, HPAGE_PMD_NR);
              else
                      hindex = index;
      
               if (sgp == SGP_WRITE)
                       __SetPageReferenced(page);
      
              error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
                                                   PageTransHuge(page));
              if (error)
                      goto unacct;
               error = shmem_add_to_page_cache(page, mapping, hindex,
                                              NULL, gfp & GFP_RECLAIM_MASK);
              if (error) {
                      mem_cgroup_cancel_charge(page, memcg,
                                               PageTransHuge(page));
                      goto unacct;
              }
              mem_cgroup_commit_charge(page, memcg, false,
                                        PageTransHuge(page));
              lru_cache_add_anon(page);
      
              spin_lock_irq(&info->lock);
               info->alloced += compound_nr(page);
               inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
              shmem_recalc_inode(inode);
              spin_unlock_irq(&info->lock);
              alloced = true;
      
               if (PageTransHuge(page) &&
                  DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
                              hindex + HPAGE_PMD_NR - 1) {
                      /*
                       * Part of the huge page is beyond i_size: subject
                       * to shrink under memory pressure.
                       */
                      spin_lock(&sbinfo->shrinklist_lock);
                      /*
                        * Use list_empty_careful() to defend against unlocked
                        * access to ->shrinklist in shmem_unused_huge_shrink().
                       */
                      if (list_empty_careful(&info->shrinklist)) {
                              list_add_tail(&info->shrinklist,
                                            &sbinfo->shrinklist);
                              sbinfo->shrinklist_len++;
                      }
                      spin_unlock(&sbinfo->shrinklist_lock);
              }
      
              /*
               * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
               */
               if (sgp == SGP_FALLOC)
                      sgp = SGP_WRITE;
      clear:
              /*
               * Let SGP_WRITE caller clear ends if write does not fill page;
               * but SGP_FALLOC on a page fallocated earlier must initialize
               * it now, lest undo on failure cancel our earlier guarantee.
               */
               if (sgp != SGP_WRITE && !PageUptodate(page)) {
                      struct page *head = compound_head(page);
                      int i;
      
                      for (i = 0; i < compound_nr(head); i++) {
                              clear_highpage(head + i);
                              flush_dcache_page(head + i);
                      }
                      SetPageUptodate(head);
              }
      
              /* Perhaps the file has been truncated since we checked */
              if (sgp <= SGP_CACHE &&
                  ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
                      if (alloced) {
                              ClearPageDirty(page);
                              delete_from_page_cache(page);
                              spin_lock_irq(&info->lock);
                              shmem_recalc_inode(inode);
                              spin_unlock_irq(&info->lock);
                      }
                      error = -EINVAL;
                      goto unlock;
              }
               *pagep = page + index - hindex;
               return 0;
      
              /*
               * Error recovery.
               */
      unacct:
              shmem_inode_unacct_blocks(inode, compound_nr(page));
      
              if (PageTransHuge(page)) {
                      unlock_page(page);
                      put_page(page);
                      goto alloc_nohuge;
              }
      unlock:
              if (page) {
                      unlock_page(page);
                      put_page(page);
              }
              if (error == -ENOSPC && !once++) {
                      spin_lock_irq(&info->lock);
                      shmem_recalc_inode(inode);
                      spin_unlock_irq(&info->lock);
                      goto repeat;
              }
              if (error == -EEXIST)
                      goto repeat;
              return error;
      }
      
      /*
       * This is like autoremove_wake_function, but it removes the wait queue
       * entry unconditionally - even if something else had already woken the
       * target.
       */
      static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
      {
              int ret = default_wake_function(wait, mode, sync, key);
              list_del_init(&wait->entry);
              return ret;
      }
      
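       /*
        * Fault handler for shmem/tmpfs mappings: look up or allocate the page
        * at vmf->pgoff and return it locked, unless a hole punch is currently
        * in progress over that range (see the comment below).
        */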
      static vm_fault_t shmem_fault(struct vm_fault *vmf)
      {
              struct vm_area_struct *vma = vmf->vma;
              struct inode *inode = file_inode(vma->vm_file);
              gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
              enum sgp_type sgp;
              int err;
              vm_fault_t ret = VM_FAULT_LOCKED;
      
              /*
               * Trinity finds that probing a hole which tmpfs is punching can
               * prevent the hole-punch from ever completing: which in turn
               * locks writers out with its hold on i_mutex.  So refrain from
               * faulting pages into the hole while it's being punched.  Although
               * shmem_undo_range() does remove the additions, it may be unable to
               * keep up, as each new page needs its own unmap_mapping_range() call,
               * and the i_mmap tree grows ever slower to scan if new vmas are added.
               *
               * It does not matter if we sometimes reach this check just before the
               * hole-punch begins, so that one fault then races with the punch:
               * we just need to make racing faults a rare case.
               *
               * The implementation below would be much simpler if we just used a
               * standard mutex or completion: but we cannot take i_mutex in fault,
               * and bloating every shmem inode for this unlikely case would be sad.
               */
              if (unlikely(inode->i_private)) {
                      struct shmem_falloc *shmem_falloc;
      
                      spin_lock(&inode->i_lock);
                      shmem_falloc = inode->i_private;
                      if (shmem_falloc &&
                          shmem_falloc->waitq &&
                          vmf->pgoff >= shmem_falloc->start &&
                          vmf->pgoff < shmem_falloc->next) {
                              wait_queue_head_t *shmem_falloc_waitq;
                              DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
      
                              ret = VM_FAULT_NOPAGE;
                              if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
                                 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                      /* It's polite to up mmap_sem if we can */
                                      up_read(&vma->vm_mm->mmap_sem);
                                      ret = VM_FAULT_RETRY;