/*        $NetBSD: if_loop.c,v 1.112 2020/10/14 16:10:32 roy Exp $        */
      
      /*
       * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
       * All rights reserved.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. Neither the name of the project nor the names of its contributors
       *    may be used to endorse or promote products derived from this software
       *    without specific prior written permission.
       *
       * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
       * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
       * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
       * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
       * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
       * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
       * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
       * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
       * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
       * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
       * SUCH DAMAGE.
       */
      
      /*
       * Copyright (c) 1982, 1986, 1993
       *        The Regents of the University of California.  All rights reserved.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. Neither the name of the University nor the names of its contributors
       *    may be used to endorse or promote products derived from this software
       *    without specific prior written permission.
       *
       * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
       * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
       * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
       * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
       * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
       * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
       * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
       * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
       * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
       * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
       * SUCH DAMAGE.
       *
       *        @(#)if_loop.c        8.2 (Berkeley) 1/9/95
       */
      
      /*
       * Loopback interface driver for protocol testing and timing.
       */
      
      #include <sys/cdefs.h>
      __KERNEL_RCSID(0, "$NetBSD: if_loop.c,v 1.112 2020/10/14 16:10:32 roy Exp $");
      
      #ifdef _KERNEL_OPT
      #include "opt_inet.h"
      #include "opt_atalk.h"
      #include "opt_mbuftrace.h"
      #include "opt_mpls.h"
      #include "opt_net_mpsafe.h"
      #endif
      
      #include <sys/param.h>
      #include <sys/systm.h>
      #include <sys/kernel.h>
      #include <sys/mbuf.h>
      #include <sys/socket.h>
      #include <sys/errno.h>
      #include <sys/ioctl.h>
      #include <sys/time.h>
      #include <sys/device.h>
      #include <sys/module.h>
      
      #include <sys/cpu.h>
      
      #include <net/if.h>
      #include <net/if_types.h>
      #include <net/netisr.h>
      #include <net/route.h>
      
      #ifdef        INET
      #include <netinet/in.h>
      #include <netinet/in_systm.h>
      #include <netinet/in_var.h>
      #include <netinet/in_offload.h>
      #include <netinet/ip.h>
      #endif
      
      #ifdef INET6
      #ifndef INET
      #include <netinet/in.h>
      #endif
      #include <netinet6/in6_var.h>
      #include <netinet6/in6_offload.h>
      #include <netinet/ip6.h>
      #endif
      
      #ifdef MPLS
      #include <netmpls/mpls.h>
      #include <netmpls/mpls_var.h>
      #endif
      
      #ifdef NETATALK
      #include <netatalk/at.h>
      #include <netatalk/at_var.h>
      #endif
      
      #include <net/bpf.h>
      
      #if defined(LARGE_LOMTU)
      #define LOMTU        (131072 +  MHLEN + MLEN)
      #define LOMTU_MAX LOMTU
      #else
      #define        LOMTU        (32768 +  MHLEN + MLEN)
      #define        LOMTU_MAX        (65536 +  MHLEN + MLEN)
      #endif
      
      #ifdef ALTQ
      static void        lostart(struct ifnet *);
      #endif
      
      static int        loop_clone_create(struct if_clone *, int);
      static int        loop_clone_destroy(struct ifnet *);
      
      static void        loop_rtrequest(int, struct rtentry *, const struct rt_addrinfo *);
      
      static struct if_clone loop_cloner =
          IF_CLONE_INITIALIZER("lo", loop_clone_create, loop_clone_destroy);
      
      void
      loopattach(int n)
      {
      
      #ifndef _MODULE
              loop_clone_create(&loop_cloner, 0);        /* lo0 always exists */
      #endif
      }
      
      void
      loopinit(void)
      {
      
              if (lo0ifp != NULL)        /* can happen in rump kernel */
                      return;
      
      #ifdef _MODULE
              loop_clone_create(&loop_cloner, 0);        /* lo0 always exists */
      #endif
              if_clone_attach(&loop_cloner);
      }
      
      static int
      loopdetach(void)
      {
              /* no detach for now; we don't allow lo0 to be deleted */
              return EBUSY;
      }
      
      static int
      loop_clone_create(struct if_clone *ifc, int unit)
      {
              struct ifnet *ifp;
              int rv;
      
              ifp = if_alloc(IFT_LOOP);
      
              if_initname(ifp, ifc->ifc_name, unit);
      
              ifp->if_mtu = LOMTU;
              ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST;
      #ifdef NET_MPSAFE
              ifp->if_extflags = IFEF_MPSAFE;
      #endif
              ifp->if_ioctl = loioctl;
              ifp->if_output = looutput;
      #ifdef ALTQ
              ifp->if_start = lostart;
      #endif
              ifp->if_type = IFT_LOOP;
              ifp->if_hdrlen = 0;
              ifp->if_addrlen = 0;
              ifp->if_dlt = DLT_NULL;
              IFQ_SET_READY(&ifp->if_snd);
              if (unit == 0)
                      lo0ifp = ifp;
              rv = if_initialize(ifp);
              if (rv != 0) {
                      if_free(ifp);
                      return rv;
              }
              ifp->if_link_state = LINK_STATE_UP;
              if_alloc_sadl(ifp);
              bpf_attach(ifp, DLT_NULL, sizeof(u_int));
      #ifdef MBUFTRACE
              ifp->if_mowner = malloc(sizeof(struct mowner), M_DEVBUF,
                  M_WAITOK | M_ZERO);
              strlcpy(ifp->if_mowner->mo_name, ifp->if_xname,
                  sizeof(ifp->if_mowner->mo_name));
              MOWNER_ATTACH(ifp->if_mowner);
      #endif
      
              ifp->if_flags |= IFF_RUNNING;
              if_register(ifp);
      
              return (0);
      }
      
      static int
      loop_clone_destroy(struct ifnet *ifp)
      {
      
              if (ifp == lo0ifp)
                      return (EPERM);
      
              ifp->if_flags &= ~IFF_RUNNING;
      
      #ifdef MBUFTRACE
              MOWNER_DETACH(ifp->if_mowner);
              free(ifp->if_mowner, M_DEVBUF);
      #endif
      
              bpf_detach(ifp);
              if_detach(ifp);
      
              if_free(ifp);
      
              return (0);
      }
      
/*
 * looutput --
 *	Output routine for the loopback interface.  Rather than handing
 *	the packet to hardware, loop it straight back into the matching
 *	protocol input queue (pktqueue for IP/IPv6; legacy ifqueue plus
 *	softint for other families).  The mbuf is consumed in all cases.
 *
 *	ifp	the loopback interface
 *	m	packet to "transmit"; must carry a pkthdr
 *	dst	destination; dst->sa_family selects the input queue
 *	rt	optional route; RTF_REJECT/RTF_BLACKHOLE are honoured
 *
 *	Returns 0 on success, EHOSTUNREACH/ENETUNREACH for reject
 *	routes, ENOBUFS on queue overflow or failed prepend, or
 *	EAFNOSUPPORT for an unhandled address family.
 */
int
looutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	pktqueue_t *pktq = NULL;
	struct ifqueue *ifq = NULL;
	int s, isr = -1;
	int csum_flags;
	int error = 0;
	size_t pktlen;

	MCLAIM(m, ifp->if_mowner);

	KERNEL_LOCK_UNLESS_NET_MPSAFE();

	if ((m->m_flags & M_PKTHDR) == 0)
		panic("looutput: no header mbuf");
	/* Show outbound packets to BPF listeners on the loopback itself. */
	if (ifp->if_flags & IFF_LOOPBACK)
		bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT);
	m_set_rcvif(m, ifp);

	/* Honour reject/blackhole routes before queueing anything. */
	if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
		m_freem(m);
		error = (rt->rt_flags & RTF_BLACKHOLE ? 0 :
			rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		goto out;
	}

	/* Remember the length now; enqueueing hands off ownership of m. */
	pktlen = m->m_pkthdr.len;

	if_statadd2(ifp, if_opackets, 1, if_obytes, pktlen);

#ifdef ALTQ
	/*
	 * ALTQ on the loopback interface is just for debugging.  It's
	 * used only for loopback interfaces, not for a simplex interface.
	 */
	if ((ALTQ_IS_ENABLED(&ifp->if_snd) || TBR_IS_ENABLED(&ifp->if_snd)) &&
	    ifp->if_start == lostart) {
		/*
		 * If the queueing discipline needs packet classification,
		 * do it before prepending the link headers.
		 */
		IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

		/* Prepend the address family so lostart() can demux. */
		M_PREPEND(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		*(mtod(m, uint32_t *)) = dst->sa_family;

		error = if_transmit_lock(ifp, m);
		goto out;
	}
#endif /* ALTQ */

	/* Packet tags belong to the outbound path; drop them before input. */
	m_tag_delete_chain(m);

#ifdef MPLS
	/* A tagged (non-implicit-null) MPLS unicast route goes to MPLS. */
	if (rt != NULL && rt_gettag(rt) != NULL &&
	    rt_gettag(rt)->sa_family == AF_MPLS &&
	    (m->m_flags & (M_MCAST | M_BCAST)) == 0) {
		union mpls_shim msh;
		msh.s_addr = MPLS_GETSADDR(rt);
		if (msh.shim.label != MPLS_LABEL_IMPLNULL) {
			ifq = &mplsintrq;
			isr = NETISR_MPLS;
		}
	}
	if (isr != NETISR_MPLS)
#endif
	switch (dst->sa_family) {

#ifdef INET
	case AF_INET:
		csum_flags = m->m_pkthdr.csum_flags;
		KASSERT((csum_flags & ~(M_CSUM_IPv4|M_CSUM_UDPv4)) == 0);
		/* Complete any checksums that were deferred to "hardware". */
		if (csum_flags != 0 && IN_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
			in_undefer_cksum(m, 0, csum_flags);
			m->m_pkthdr.csum_flags = 0;
		} else {
			/*
			 * Do nothing. Pass M_CSUM_IPv4 and M_CSUM_UDPv4 as
			 * they are to tell those are calculated and good.
			 */
		}
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		csum_flags = m->m_pkthdr.csum_flags;
		KASSERT((csum_flags & ~M_CSUM_UDPv6) == 0);
		if (csum_flags != 0 &&
		    IN6_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
			in6_undefer_cksum(m, 0, csum_flags);
			m->m_pkthdr.csum_flags = 0;
		} else {
			/*
			 * Do nothing. Pass M_CSUM_UDPv6 as
			 * they are to tell those are calculated and good.
			 */
		}
		m->m_flags |= M_LOOP;
		pktq = ip6_pktq;
		break;
#endif
#ifdef NETATALK
	case AF_APPLETALK:
		ifq = &atintrq2;
		isr = NETISR_ATALK;
		break;
#endif
	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		m_freem(m);
		error = EAFNOSUPPORT;
		goto out;
	}

	s = splnet();
	/* pktqueue path (IP/IPv6): enqueue for protocol input. */
	if (__predict_true(pktq)) {
		error = 0;

		/* pktq_enqueue() returns true on success. */
		if (__predict_true(pktq_enqueue(pktq, m, 0))) {
			if_statadd2(ifp, if_ipackets, 1, if_ibytes, pktlen);
		} else {
			m_freem(m);
			error = ENOBUFS;
		}
		splx(s);
		goto out;
	}
	/* Legacy path: bounded ifqueue plus a scheduled netisr. */
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		m_freem(m);
		splx(s);
		error = ENOBUFS;
		goto out;
	}
	if_statadd2(ifp, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
	IF_ENQUEUE(ifq, m);
	schednetisr(isr);
	splx(s);
out:
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
	return error;
}
      
      #ifdef ALTQ
      static void
      lostart(struct ifnet *ifp)
      {
              for (;;) {
                      pktqueue_t *pktq = NULL;
                      struct ifqueue *ifq = NULL;
                      struct mbuf *m;
                      size_t pktlen;
                      uint32_t af;
                      int s, isr = 0;
      
                      IFQ_DEQUEUE(&ifp->if_snd, m);
                      if (m == NULL)
                              return;
      
                      af = *(mtod(m, uint32_t *));
                      m_adj(m, sizeof(uint32_t));
      
                      switch (af) {
      #ifdef INET
                      case AF_INET:
                              pktq = ip_pktq;
                              break;
      #endif
      #ifdef INET6
                      case AF_INET6:
                              m->m_flags |= M_LOOP;
                              pktq = ip6_pktq;
                              break;
      #endif
      #ifdef NETATALK
                      case AF_APPLETALK:
                              ifq = &atintrq2;
                              isr = NETISR_ATALK;
                              break;
      #endif
                      default:
                              printf("%s: can't handle af%d\n", ifp->if_xname, af);
                              m_freem(m);
                              return;
                      }
                      pktlen = m->m_pkthdr.len;
      
                      s = splnet();
                      if (__predict_true(pktq)) {
                              if (__predict_false(pktq_enqueue(pktq, m, 0))) {
                                      m_freem(m);
                                      splx(s);
                                      return;
                              }
                              if_statadd2(ifp, if_ipackets, 1, if_ibytes, pktlen);
                              splx(s);
                              continue;
                      }
                      if (IF_QFULL(ifq)) {
                              IF_DROP(ifq);
                              splx(s);
                              m_freem(m);
                              return;
                      }
                      IF_ENQUEUE(ifq, m);
                      schednetisr(isr);
                      if_statadd2(ifp, if_ipackets, 1, if_ibytes, pktlen);
                      splx(s);
              }
      }
      #endif /* ALTQ */
      
      /* ARGSUSED */
      static void
      loop_rtrequest(int cmd, struct rtentry *rt,
          const struct rt_addrinfo *info)
      {
      
              if (rt)
                      rt->rt_rmx.rmx_mtu = lo0ifp->if_mtu;
      }
      
      /*
       * Process an ioctl request.
       */
      /* ARGSUSED */
      int
      loioctl(struct ifnet *ifp, u_long cmd, void *data)
      {
              struct ifaddr *ifa;
              struct ifreq *ifr = data;
              int error = 0;
      
    2         switch (cmd) {
      
              case SIOCINITIFADDR:
                      ifp->if_flags |= IFF_UP;
                      ifa = (struct ifaddr *)data;
                      if (ifa != NULL)
                              ifa->ifa_rtrequest = loop_rtrequest;
                      /*
                       * Everything else is done at a higher level.
                       */
                      break;
      
              case SIOCSIFMTU:
                      if ((unsigned)ifr->ifr_mtu > LOMTU_MAX)
                              error = EINVAL;
                      else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET){
                              error = 0;
                      }
                      break;
      
              case SIOCADDMULTI:
              case SIOCDELMULTI:
    2                 if (ifr == NULL) {
                              error = EAFNOSUPPORT;                /* XXX */
                              break;
                      }
    2                 switch (ifreq_getaddr(cmd, ifr)->sa_family) {
      
      #ifdef INET
                      case AF_INET:
                              break;
      #endif
      #ifdef INET6
                      case AF_INET6:
                              break;
      #endif
      
                      default:
                              error = EAFNOSUPPORT;
                              break;
                      }
                      break;
      
              default:
                      error = ifioctl_common(ifp, cmd, data);
              }
    2         return (error);
      }
      
      /*
       * Module infrastructure
       */
      #include "if_module.h"
      
      IF_MODULE(MODULE_CLASS_DRIVER, loop, NULL)
      /*        $NetBSD: if.h,v 1.289 2020/10/15 10:20:44 roy Exp $        */
      
      /*-
       * Copyright (c) 1999, 2000, 2001 The NetBSD Foundation, Inc.
       * All rights reserved.
       *
       * This code is derived from software contributed to The NetBSD Foundation
       * by William Studenmund and Jason R. Thorpe.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       *
       * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
       * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
       * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
       * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
       * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
       * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
       * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
       * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
       * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
       * POSSIBILITY OF SUCH DAMAGE.
       */
      
      /*
       * Copyright (c) 1982, 1986, 1989, 1993
       *        The Regents of the University of California.  All rights reserved.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. Neither the name of the University nor the names of its contributors
       *    may be used to endorse or promote products derived from this software
       *    without specific prior written permission.
       *
       * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
       * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
       * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
       * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
       * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
       * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
       * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
       * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
       * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
       * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
       * SUCH DAMAGE.
       *
       *        @(#)if.h        8.3 (Berkeley) 2/9/95
       */
      
      #ifndef _NET_IF_H_
      #define _NET_IF_H_
      
      #if !defined(_KERNEL) && !defined(_STANDALONE)
      #include <stdbool.h>
      #endif
      
      #include <sys/featuretest.h>
      
      /*
       * Length of interface external name, including terminating '\0'.
       * Note: this is the same size as a generic device's external name.
       */
      #define IF_NAMESIZE 16
      
      /*
       * Length of interface description, including terminating '\0'.
       */
      #define        IFDESCRSIZE        64
      
      #if defined(_NETBSD_SOURCE)
      
      #include <sys/socket.h>
      #include <sys/queue.h>
      #include <sys/mutex.h>
      
      #include <net/dlt.h>
      #include <net/pfil.h>
      #ifdef _KERNEL
      #include <net/pktqueue.h>
      #include <sys/pslist.h>
      #include <sys/pserialize.h>
      #include <sys/psref.h>
      #include <sys/module_hook.h>
      #endif
      
      /*
       * Always include ALTQ glue here -- we use the ALTQ interface queue
       * structure even when ALTQ is not configured into the kernel so that
       * the size of struct ifnet does not changed based on the option.  The
       * ALTQ queue structure is API-compatible with the legacy ifqueue.
       */
      #include <altq/if_altq.h>
      
      /*
       * Structures defining a network interface, providing a packet
       * transport mechanism (ala level 0 of the PUP protocols).
       *
       * Each interface accepts output datagrams of a specified maximum
       * length, and provides higher level routines with input datagrams
       * received from its medium.
       *
       * Output occurs when the routine if_output is called, with four parameters:
       *        (*ifp->if_output)(ifp, m, dst, rt)
       * Here m is the mbuf chain to be sent and dst is the destination address.
       * The output routine encapsulates the supplied datagram if necessary,
       * and then transmits it on its medium.
       *
       * On input, each interface unwraps the data received by it, and either
       * places it on the input queue of a internetwork datagram routine
       * and posts the associated software interrupt, or passes the datagram to a raw
       * packet input routine.
       *
       * Routines exist for locating interfaces by their addresses
       * or for locating a interface on a certain network, as well as more general
       * routing and gateway routines maintaining information used to locate
       * interfaces.  These routines live in the files if.c and route.c
       */
      #include <sys/time.h>
      
      #if defined(_KERNEL_OPT)
      #include "opt_compat_netbsd.h"
      #include "opt_gateway.h"
      #endif
      
      struct mbuf;
      struct proc;
      struct rtentry;
      struct socket;
      struct ether_header;
      struct ifaddr;
      struct ifnet;
      struct rt_addrinfo;
      
      #define        IFNAMSIZ        IF_NAMESIZE
      
/*
 * Structure describing a `cloning' interface: a driver that can
 * create and destroy interface instances on demand (e.g. "lo").
 */
struct if_clone {
	LIST_ENTRY(if_clone) ifc_list;	/* on list of cloners */
	const char *ifc_name;		/* name of device, e.g. `gif' */
	size_t ifc_namelen;		/* length of name, without the NUL */

	int	(*ifc_create)(struct if_clone *, int);	/* create unit N */
	int	(*ifc_destroy)(struct ifnet *);		/* destroy an instance */
};

/* Static initializer; note ifc_namelen excludes the terminating NUL. */
#define	IF_CLONE_INITIALIZER(name, create, destroy)			\
	{ { NULL, NULL }, name, sizeof(name) - 1, create, destroy }
      
      /*
       * Structure used to query names of interface cloners.
       */
      struct if_clonereq {
              int        ifcr_total;                /* total cloners (out) */
              int        ifcr_count;                /* room for this many in user buffer */
              char        *ifcr_buffer;                /* buffer for cloner names */
      };
      
/*
 * Structure defining statistics and other data kept regarding a network
 * interface.
 *
 * Only used for exporting data from the interface; the kernel keeps
 * its live counters separately (see if_stats in struct ifnet).
 */
struct if_data {
	/* generic interface information */
	u_char	ifi_type;		/* ethernet, tokenring, etc. */
	u_char	ifi_addrlen;		/* media address length */
	u_char	ifi_hdrlen;		/* media header length */
	int	ifi_link_state;		/* current link state (LINK_STATE_*) */
	uint64_t ifi_mtu;		/* maximum transmission unit */
	uint64_t ifi_metric;		/* routing metric (external only) */
	uint64_t ifi_baudrate;		/* linespeed */
	/* volatile statistics */
	uint64_t ifi_ipackets;		/* packets received on interface */
	uint64_t ifi_ierrors;		/* input errors on interface */
	uint64_t ifi_opackets;		/* packets sent on interface */
	uint64_t ifi_oerrors;		/* output errors on interface */
	uint64_t ifi_collisions;	/* collisions on csma interfaces */
	uint64_t ifi_ibytes;		/* total number of octets received */
	uint64_t ifi_obytes;		/* total number of octets sent */
	uint64_t ifi_imcasts;		/* packets received via multicast */
	uint64_t ifi_omcasts;		/* packets sent via multicast */
	uint64_t ifi_iqdrops;		/* dropped on input, this interface */
	uint64_t ifi_noproto;		/* destined for unsupported protocol */
	struct	timespec ifi_lastchange;/* last operational state change */
};
      
      /*
       * Values for if_link_state.
       */
      #define        LINK_STATE_UNKNOWN        0        /* link invalid/unknown */
      #define        LINK_STATE_DOWN                1        /* link is down */
      #define        LINK_STATE_UP                2        /* link is up */
      
      /*
       * Status bit descriptions for the various interface types.
       */
      struct if_status_description {
              unsigned char        ifs_type;
              unsigned char        ifs_state;
              const char        *ifs_string;
      };
      
      #define LINK_STATE_DESC_MATCH(_ifs, _t, _s)                                \
              (((_ifs)->ifs_type == (_t) || (_ifs)->ifs_type == 0) &&                \
                  (_ifs)->ifs_state == (_s))
      
      #define LINK_STATE_DESCRIPTIONS {                                        \
              { IFT_ETHER, LINK_STATE_DOWN, "no carrier" },                        \
              { IFT_IEEE80211, LINK_STATE_DOWN, "no network" },                \
              { IFT_PPP, LINK_STATE_DOWN, "no carrier" },                        \
              { IFT_CARP, LINK_STATE_DOWN, "backup" },                        \
              { IFT_CARP, LINK_STATE_UP, "master" },                                \
              { 0, LINK_STATE_UP, "active" },                                        \
              { 0, LINK_STATE_UNKNOWN, "unknown" },                                \
              { 0, LINK_STATE_DOWN, "down" },                                        \
              { 0, 0, NULL }                                                        \
      }
      
/*
 * Structure defining a queue for a network interface: a linked list
 * of mbufs with a length limit and a drop counter (see the IF_QFULL,
 * IF_DROP and IF_ENQUEUE users in the drivers).
 */
struct ifqueue {
	struct		mbuf *ifq_head;	/* first mbuf, NULL when empty */
	struct		mbuf *ifq_tail;	/* last mbuf in the queue */
	int		ifq_len;	/* current number of mbufs queued */
	int		ifq_maxlen;	/* maximum queue length */
	int		ifq_drops;	/* packets dropped (queue full) */
	kmutex_t	*ifq_lock;	/* optional lock protecting the queue */
};
      
      #ifdef _KERNEL
      #include <sys/percpu.h>
      #include <sys/callout.h>
      #include <sys/rwlock.h>
      #include <sys/workqueue.h>
      
      #endif /* _KERNEL */
      
      /*
       * Structure defining a queue for a network interface.
       *
       * (Would like to call this struct ``if'', but C isn't PL/1.)
       */
      TAILQ_HEAD(ifnet_head, ifnet);                /* the actual queue head */
      
      struct bridge_softc;
      struct bridge_iflist;
      struct callout;
      struct krwlock;
      struct if_percpuq;
      struct if_deferred_start;
      struct in6_multi;
      
      typedef unsigned short if_index_t;
      
      /*
       * Interface.  Field markings and the corresponding locks:
       *
       * i:        IFNET_LOCK (a.k.a., if_ioctl_lock)
       * q:        ifq_lock (struct ifaltq)
       * a:        if_afdata_lock
       * 6:        in6_multilock (global lock)
       * ::        unlocked, stable
       * ?:        unknown, maybe unsafe
       *
       * Lock order: IFNET_LOCK => in6_multilock => if_afdata_lock => ifq_lock
       *   Note that currently if_afdata_lock and ifq_lock aren't held
       *   at the same time, but define the order anyway.
       *
       * Lock order of IFNET_LOCK with other locks:
       *     softnet_lock => solock => IFNET_LOCK => ND6_LOCK, in_multilock
       */
typedef struct ifnet {
	void		*if_softc;	/* :: lower-level data for this if */
	/* DEPRECATED. Keep it to avoid breaking kvm(3) users */
	TAILQ_ENTRY(ifnet)
			if_list;	/* i: all struct ifnets are chained */
	TAILQ_HEAD(, ifaddr)
			if_addrlist;	/* i: linked list of addresses per if */
	char		if_xname[IFNAMSIZ];
					/* :: external name (name + unit) */
	int		if_pcount;	/* i: number of promiscuous listeners */
	struct bpf_if	*if_bpf;	/* :: packet filter structure */
	if_index_t	if_index;	/* :: numeric abbreviation for this if */
	short		if_timer;	/* ?: time 'til if_slowtimo called */
	unsigned short	if_flags;	/* i: up/down, broadcast, etc. */
	short		if_extflags;	/* :: if_output MP-safe, etc. */
	u_char		if_type;	/* :: ethernet, tokenring, etc. */
	u_char		if_addrlen;	/* :: media address length */
	u_char		if_hdrlen;	/* :: media header length */
	/* XXX audit :? fields here. */
	int		if_link_state;	/* :? current link state */
	uint64_t	if_mtu;		/* :? maximum transmission unit */
	uint64_t	if_metric;	/* :? routing metric (external only) */
	uint64_t	if_baudrate;	/* :? linespeed */
	struct timespec	if_lastchange;	/* :? last operational state change */
#ifdef _KERNEL
	percpu_t	*if_stats;	/* :: statistics */
#else
	void		*if_stats;	/* opaque to user-space */
#endif /* _KERNEL */
	/*
	 * Procedure handles.  If you add more of these, don't forget the
	 * corresponding NULL stub in if.c.
	 */
	int		(*if_output)	/* :: output routine (enqueue) */
			    (struct ifnet *, struct mbuf *, const struct sockaddr *,
			     const struct rtentry *);
	void		(*_if_input)	/* :: input routine (from h/w driver) */
			    (struct ifnet *, struct mbuf *);
	void		(*if_start)	/* :: initiate output routine */
			    (struct ifnet *);
	int		(*if_transmit)	/* :: output routine, must be MP-safe */
			    (struct ifnet *, struct mbuf *);
	int		(*if_ioctl)	/* :: ioctl routine */
			    (struct ifnet *, u_long, void *);
	int		(*if_init)	/* :: init routine */
			    (struct ifnet *);
	void		(*if_stop)	/* :: stop routine */
			    (struct ifnet *, int);
	void		(*if_slowtimo)	/* :: timer routine */
			    (struct ifnet *);
#define	if_watchdog	if_slowtimo
	void		(*if_drain)	/* :: routine to release resources */
			    (struct ifnet *);
	struct ifaltq	if_snd;		/* q: output queue (includes altq) */
	struct ifaddr	*if_dl;		/* i: identity of this interface. */
	const struct sockaddr_dl
			*if_sadl;	/* i: pointer to sockaddr_dl of if_dl */
	/*
	 * May be NULL.  If not NULL, it is the address assigned
	 * to the interface by the manufacturer, so it very likely
	 * to be unique.  It MUST NOT be deleted.  It is highly
	 * suitable for deriving the EUI64 for the interface.
	 */
	struct ifaddr	*if_hwdl;	/* i: h/w identity */
	const uint8_t	*if_broadcastaddr;
					/* :: linklevel broadcast bytestring */
	struct bridge_softc
			*if_bridge;	/* i: bridge glue */
	struct bridge_iflist
			*if_bridgeif;	/* i: shortcut to interface list entry */
	int		if_dlt;		/* :: data link type (<net/dlt.h>) */
	pfil_head_t *	if_pfil;	/* :: filtering point */
	uint64_t	if_capabilities;
					/* i: interface capabilities */
	uint64_t	if_capenable;	/* i: capabilities enabled */
	union {
		void *		carp_s;	/* carp structure (used by !carp ifs) */
		struct ifnet	*carp_d;/* ptr to carpdev (used by carp ifs) */
	}		if_carp_ptr;	/* ?: */
#define if_carp		if_carp_ptr.carp_s
#define if_carpdev	if_carp_ptr.carp_d
	/*
	 * These are pre-computed based on an interface's enabled
	 * capabilities, for speed elsewhere.
	 */
	int		if_csum_flags_tx;
					/* i: M_CSUM_* flags for Tx */
	int		if_csum_flags_rx;
					/* i: M_CSUM_* flags for Rx */

	void		*if_afdata[AF_MAX];
					/* a: per-address-family private data */
	struct mowner	*if_mowner;	/* ?: who owns mbufs for this interface */

	void		*if_agrprivate;	/* ?: used only when #if NAGR > 0 */
	void		*if_npf_private;/* ?: associated NPF context */

	/*
	 * pf specific data, used only when #if NPF > 0.
	 */
	void		*if_pf_kif;	/* ?: pf interface abstraction */
	void		*if_pf_groups;	/* ?: pf interface groups */
	/*
	 * During an ifnet's lifetime, it has only one if_index, but
	 * an if_index is not sufficient to identify an ifnet
	 * because during the lifetime of the system, many ifnets may occupy a
	 * given if_index.  Let us tell different ifnets at the same
	 * if_index apart by their if_index_gen, a unique number that each ifnet
	 * is assigned when it if_attach()s.  Now, the kernel can use the
	 * pair (if_index, if_index_gen) as a weak reference to an ifnet.
	 */
	uint64_t	if_index_gen;	/* :: generation number for the ifnet
					 * at if_index: if two ifnets' index
					 * and generation number are both the
					 * same, they are the same ifnet.
					 */
	struct sysctllog
			*if_sysctl_log;	/* :: */
	int		(*if_initaddr)  /* :: address initialization hook */
			    (struct ifnet *, struct ifaddr *, bool);
	int		(*if_setflags)	/* :: if_flags update hook */
			    (struct ifnet *, const u_short);
	kmutex_t	*if_ioctl_lock;	/* :: a.k.a. IFNET_LOCK, see above */
	char		*if_description;	/* i: interface description */
#ifdef _KERNEL /* XXX kvm(3) */
	struct callout	*if_slowtimo_ch;/* :: callout driving if_slowtimo */
	struct krwlock	*if_afdata_lock;/* :: guards if_afdata (IF_AFDATA_*) */
	struct if_percpuq
			*if_percpuq;	/* :: we should remove it in the future */
	struct work	if_link_work;	/* q: linkage on link state work queue */
	uint16_t	if_link_queue;	/* q: masked link state change queue */
					/* q: is link state work scheduled? */
	bool		if_link_scheduled;
	void		(*if_link_state_changed)(struct ifnet *, int);
	struct pslist_entry
			if_pslist_entry;/* i: */
	struct psref_target
			if_psref;	/* :: */
	struct pslist_head
			if_addr_pslist;	/* i: */
	struct if_deferred_start
			*if_deferred_start;
					/* :: */
	/* XXX should be protocol independent */
	LIST_HEAD(, in6_multi)
			if_multiaddrs;	/* 6: */
#endif
} ifnet_t;
      
      #include <net/if_stats.h>
       
      #define        if_name(ifp)        ((ifp)->if_xname)
      
      #define        IFF_UP                0x0001                /* interface is up */
      #define        IFF_BROADCAST        0x0002                /* broadcast address valid */
      #define        IFF_DEBUG        0x0004                /* turn on debugging */
      #define        IFF_LOOPBACK        0x0008                /* is a loopback net */
      #define        IFF_POINTOPOINT        0x0010                /* interface is point-to-point link */
      /*                        0x0020                   was IFF_NOTRAILERS */
      #define        IFF_RUNNING        0x0040                /* resources allocated */
      #define        IFF_NOARP        0x0080                /* no address resolution protocol */
      #define        IFF_PROMISC        0x0100                /* receive all packets */
      #define        IFF_ALLMULTI        0x0200                /* receive all multicast packets */
      #define        IFF_OACTIVE        0x0400                /* transmission in progress */
      #define        IFF_SIMPLEX        0x0800                /* can't hear own transmissions */
      #define        IFF_LINK0        0x1000                /* per link layer defined bit */
      #define        IFF_LINK1        0x2000                /* per link layer defined bit */
      #define        IFF_LINK2        0x4000                /* per link layer defined bit */
      #define        IFF_MULTICAST        0x8000                /* supports multicast */
      
      #define        IFEF_MPSAFE                        __BIT(0)        /* handlers can run in parallel (see below) */
      
      /*
       * The guidelines for converting an interface to IFEF_MPSAFE are as follows
       *
       * Enabling IFEF_MPSAFE on an interface suppresses taking KERNEL_LOCK when
       * calling the following handlers:
       * - if_start
       *   - Note that if_transmit is always called without KERNEL_LOCK
       * - if_output
       * - if_ioctl
       * - if_init
       * - if_stop
       *
       * This means that an interface with IFEF_MPSAFE must make the above handlers
       * MP-safe or take KERNEL_LOCK by itself inside handlers that aren't MP-safe
       * yet.
       *
       * There are some additional restrictions to access member variables of struct
       * ifnet:
       * - if_flags
       *   - Must be updated with holding IFNET_LOCK
       *   - You cannot use the flag in Tx/Rx paths anymore because there is no
       *     synchronization on the flag except for IFNET_LOCK
       *   - Note that IFNET_LOCK can't be taken in softint because it's known
       *     that it causes a deadlock
       *     - Some synchronization mechanisms such as pserialize_perform are called
       *       with IFNET_LOCK and also require context switches on every CPUs
       *       that mean softints finish so trying to take IFNET_LOCK in softint
       *       might block on IFNET_LOCK and prevent such synchronization mechanisms
       *       from being completed
       *     - Currently the deadlock occurs only if NET_MPSAFE is enabled, however,
       *       we should deal with the restriction because NET_MPSAFE will be enabled
       *       by default in the future
       * - if_watchdog and if_timer
       *   - The watchdog framework works only for non-IFEF_MPSAFE interfaces
       *     that rely on KERNEL_LOCK
       *   - Interfaces with IFEF_MPSAFE have to provide its own watchdog mechanism
       *     if needed
       *     - Keep if_watchdog NULL when calling if_attach
       */
      
      #ifdef _KERNEL
      static __inline bool
      if_is_mpsafe(struct ifnet *ifp)
      {
      
              return ((ifp->if_extflags & IFEF_MPSAFE) != 0);
      }
      
      static __inline int
    4 if_output_lock(struct ifnet *cifp, struct ifnet *ifp, struct mbuf *m,
          const struct sockaddr *dst, const struct rtentry *rt)
      {
      
   34         if (if_is_mpsafe(cifp)) {
   39                 return (*cifp->if_output)(ifp, m, dst, rt);
              } else {
                      int ret;
      
   32                 KERNEL_LOCK(1, NULL);
                      ret = (*cifp->if_output)(ifp, m, dst, rt);
                      KERNEL_UNLOCK_ONE(NULL);
                      return ret;
              }
      }
      
      static __inline void
   16 if_start_lock(struct ifnet *ifp)
      {
      
   16         if (if_is_mpsafe(ifp)) {
                      (*ifp->if_start)(ifp);
              } else {
   16                 KERNEL_LOCK(1, NULL);
                      (*ifp->if_start)(ifp);
                      KERNEL_UNLOCK_ONE(NULL);
              }
      }
      
      #define KERNEL_LOCK_IF_IFP_MPSAFE(ifp)                                        \
              do { if (if_is_mpsafe(ifp)) { KERNEL_LOCK(1, NULL); } } while (0)
      #define KERNEL_UNLOCK_IF_IFP_MPSAFE(ifp)                                \
              do { if (if_is_mpsafe(ifp)) { KERNEL_UNLOCK_ONE(NULL); } } while (0)
      
      #define KERNEL_LOCK_UNLESS_IFP_MPSAFE(ifp)                                \
              do { if (!if_is_mpsafe(ifp)) { KERNEL_LOCK(1, NULL); } } while (0)
      #define KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(ifp)                                \
              do { if (!if_is_mpsafe(ifp)) { KERNEL_UNLOCK_ONE(NULL); } } while (0)
      
      #ifdef _KERNEL_OPT
      #include "opt_net_mpsafe.h"
      #endif
      
      /* XXX explore a better place to define */
      #ifdef NET_MPSAFE
      
      #define KERNEL_LOCK_UNLESS_NET_MPSAFE()                do { } while (0)
      #define KERNEL_UNLOCK_UNLESS_NET_MPSAFE()        do { } while (0)
      
      #define SOFTNET_LOCK_UNLESS_NET_MPSAFE()        do { } while (0)
      #define SOFTNET_UNLOCK_UNLESS_NET_MPSAFE()        do { } while (0)
      
      #define SOFTNET_LOCK_IF_NET_MPSAFE()                                        \
              do { mutex_enter(softnet_lock); } while (0)
      #define SOFTNET_UNLOCK_IF_NET_MPSAFE()                                        \
              do { mutex_exit(softnet_lock); } while (0)
      
      #else /* NET_MPSAFE */
      
      #define KERNEL_LOCK_UNLESS_NET_MPSAFE()                                        \
              do { KERNEL_LOCK(1, NULL); } while (0)
      #define KERNEL_UNLOCK_UNLESS_NET_MPSAFE()                                \
              do { KERNEL_UNLOCK_ONE(NULL); } while (0)
      
      #define SOFTNET_LOCK_UNLESS_NET_MPSAFE()                                \
              do { mutex_enter(softnet_lock); } while (0)
      #define SOFTNET_UNLOCK_UNLESS_NET_MPSAFE()                                \
              do { mutex_exit(softnet_lock); } while (0)
      
      #define SOFTNET_LOCK_IF_NET_MPSAFE()                do { } while (0)
      #define SOFTNET_UNLOCK_IF_NET_MPSAFE()                do { } while (0)
      
      #endif /* NET_MPSAFE */
      
      #define SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE()                                \
              do {                                                                \
                      SOFTNET_LOCK_UNLESS_NET_MPSAFE();                        \
                      KERNEL_LOCK_UNLESS_NET_MPSAFE();                        \
              } while (0)
      
      #define SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE()                        \
              do {                                                                \
                      KERNEL_UNLOCK_UNLESS_NET_MPSAFE();                        \
                      SOFTNET_UNLOCK_UNLESS_NET_MPSAFE();                        \
              } while (0)
      
      #endif /* _KERNEL */
      
      #define        IFFBITS \
          "\020\1UP\2BROADCAST\3DEBUG\4LOOPBACK\5POINTOPOINT" \
          "\7RUNNING\10NOARP\11PROMISC\12ALLMULTI\13OACTIVE\14SIMPLEX" \
          "\15LINK0\16LINK1\17LINK2\20MULTICAST"
      
      /* flags set internally only: */
      #define        IFF_CANTCHANGE \
              (IFF_BROADCAST|IFF_POINTOPOINT|IFF_RUNNING|IFF_OACTIVE|\
                  IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI|IFF_PROMISC)
      
      /*
       * Some convenience macros used for setting ifi_baudrate.
       */
      #define        IF_Kbps(x)        ((x) * 1000ULL)                        /* kilobits/sec. */
      #define        IF_Mbps(x)        (IF_Kbps((x) * 1000ULL))        /* megabits/sec. */
      #define        IF_Gbps(x)        (IF_Mbps((x) * 1000ULL))        /* gigabits/sec. */
      
      /* Capabilities that interfaces can advertise. */
                                              /* 0x01 .. 0x40 were previously used */
      #define        IFCAP_TSOv4                0x00080        /* can do TCPv4 segmentation offload */
      #define        IFCAP_CSUM_IPv4_Rx        0x00100        /* can do IPv4 header checksums (Rx) */
      #define        IFCAP_CSUM_IPv4_Tx        0x00200        /* can do IPv4 header checksums (Tx) */
      #define        IFCAP_CSUM_TCPv4_Rx        0x00400        /* can do IPv4/TCP checksums (Rx) */
      #define        IFCAP_CSUM_TCPv4_Tx        0x00800        /* can do IPv4/TCP checksums (Tx) */
      #define        IFCAP_CSUM_UDPv4_Rx        0x01000        /* can do IPv4/UDP checksums (Rx) */
      #define        IFCAP_CSUM_UDPv4_Tx        0x02000        /* can do IPv4/UDP checksums (Tx) */
      #define        IFCAP_CSUM_TCPv6_Rx        0x04000        /* can do IPv6/TCP checksums (Rx) */
      #define        IFCAP_CSUM_TCPv6_Tx        0x08000        /* can do IPv6/TCP checksums (Tx) */
      #define        IFCAP_CSUM_UDPv6_Rx        0x10000        /* can do IPv6/UDP checksums (Rx) */
      #define        IFCAP_CSUM_UDPv6_Tx        0x20000        /* can do IPv6/UDP checksums (Tx) */
      #define        IFCAP_TSOv6                0x40000        /* can do TCPv6 segmentation offload */
      #define        IFCAP_LRO                0x80000        /* can do Large Receive Offload */
      #define        IFCAP_MASK                0xfff80 /* currently valid capabilities */
      
      #define        IFCAPBITS                \
              "\020"                        \
              "\10TSO4"                \
              "\11IP4CSUM_Rx"                \
              "\12IP4CSUM_Tx"                \
              "\13TCP4CSUM_Rx"        \
              "\14TCP4CSUM_Tx"        \
              "\15UDP4CSUM_Rx"        \
              "\16UDP4CSUM_Tx"        \
              "\17TCP6CSUM_Rx"        \
              "\20TCP6CSUM_Tx"        \
              "\21UDP6CSUM_Rx"        \
              "\22UDP6CSUM_Tx"        \
              "\23TSO6"                \
              "\24LRO"                \
      
      #define        IF_AFDATA_LOCK_INIT(ifp)        \
              do {(ifp)->if_afdata_lock = rw_obj_alloc();} while (0)
      
      #define        IF_AFDATA_LOCK_DESTROY(ifp)        rw_obj_free((ifp)->if_afdata_lock)
      
      #define        IF_AFDATA_WLOCK(ifp)        rw_enter((ifp)->if_afdata_lock, RW_WRITER)
      #define        IF_AFDATA_RLOCK(ifp)        rw_enter((ifp)->if_afdata_lock, RW_READER)
      #define        IF_AFDATA_WUNLOCK(ifp)        rw_exit((ifp)->if_afdata_lock)
      #define        IF_AFDATA_RUNLOCK(ifp)        rw_exit((ifp)->if_afdata_lock)
      #define        IF_AFDATA_LOCK(ifp)        IF_AFDATA_WLOCK(ifp)
      #define        IF_AFDATA_UNLOCK(ifp)        IF_AFDATA_WUNLOCK(ifp)
      #define        IF_AFDATA_TRYLOCK(ifp)        rw_tryenter((ifp)->if_afdata_lock, RW_WRITER)
      
      #define        IF_AFDATA_LOCK_ASSERT(ifp)        \
              KASSERT(rw_lock_held((ifp)->if_afdata_lock))
      #define        IF_AFDATA_RLOCK_ASSERT(ifp)        \
              KASSERT(rw_read_held((ifp)->if_afdata_lock))
      #define        IF_AFDATA_WLOCK_ASSERT(ifp)        \
              KASSERT(rw_write_held((ifp)->if_afdata_lock))
      
      /*
       * Output queues (ifp->if_snd) and internetwork datagram level (pup level 1)
       * input routines have queues of messages stored on ifqueue structures
       * (defined above).  Entries are added to and deleted from these structures
       * by these macros, which should be called with ipl raised to splnet().
       */
      #define        IF_QFULL(ifq)                ((ifq)->ifq_len >= (ifq)->ifq_maxlen)
      #define        IF_DROP(ifq)                ((ifq)->ifq_drops++)
      #define        IF_ENQUEUE(ifq, m) do { \
              (m)->m_nextpkt = 0; \
              if ((ifq)->ifq_tail == 0) \
                      (ifq)->ifq_head = m; \
              else \
                      (ifq)->ifq_tail->m_nextpkt = m; \
              (ifq)->ifq_tail = m; \
              (ifq)->ifq_len++; \
      } while (/*CONSTCOND*/0)
      #define        IF_PREPEND(ifq, m) do { \
              (m)->m_nextpkt = (ifq)->ifq_head; \
              if ((ifq)->ifq_tail == 0) \
                      (ifq)->ifq_tail = (m); \
              (ifq)->ifq_head = (m); \
              (ifq)->ifq_len++; \
      } while (/*CONSTCOND*/0)
      #define        IF_DEQUEUE(ifq, m) do { \
              (m) = (ifq)->ifq_head; \
              if (m) { \
                      if (((ifq)->ifq_head = (m)->m_nextpkt) == 0) \
                              (ifq)->ifq_tail = 0; \
                      (m)->m_nextpkt = 0; \
                      (ifq)->ifq_len--; \
              } \
      } while (/*CONSTCOND*/0) 
      #define        IF_POLL(ifq, m)                ((m) = (ifq)->ifq_head)
      #define        IF_PURGE(ifq)                                                        \
      do {                                                                        \
              struct mbuf *__m0;                                                \
                                                                              \
              for (;;) {                                                        \
                      IF_DEQUEUE((ifq), __m0);                                \
                      if (__m0 == NULL)                                        \
                              break;                                                \
                      else                                                        \
                              m_freem(__m0);                                        \
              }                                                                \
      } while (/*CONSTCOND*/ 0)
      #define        IF_IS_EMPTY(ifq)        ((ifq)->ifq_len == 0)
      
      #ifndef IFQ_MAXLEN
      #define        IFQ_MAXLEN        256
      #endif
      #define        IFNET_SLOWHZ        1                /* granularity is 1 second */
      
      /*
       * Structure defining statistics and other data kept regarding an address
       * on a network interface.
       */
struct ifaddr_data {
	int64_t	ifad_inbytes;	/* bytes received for this address */
	int64_t	ifad_outbytes;	/* bytes sent from this address */
};
      
      /*
       * The ifaddr structure contains information about one address
       * of an interface.  They are maintained by the different address families,
       * are allocated and attached when an address is set, and are linked
       * together so all addresses for an interface can be located.
       */
struct ifaddr {
	struct	sockaddr *ifa_addr;	/* address of interface */
	struct	sockaddr *ifa_dstaddr;	/* other end of p-to-p link */
#define	ifa_broadaddr	ifa_dstaddr	/* broadcast address interface */
	struct	sockaddr *ifa_netmask;	/* used to determine subnet */
	struct	ifnet *ifa_ifp;		/* back-pointer to interface */
	TAILQ_ENTRY(ifaddr) ifa_list;	/* list of addresses for interface */
	struct	ifaddr_data	ifa_data;	/* statistics on the address */
	void	(*ifa_rtrequest)	/* check or clean routes (+ or -)'d */
			(int, struct rtentry *, const struct rt_addrinfo *);
	u_int	ifa_flags;		/* mostly rt_flags for cloning */
	int	ifa_refcnt;		/* count of references */
	int	ifa_metric;		/* cost of going out this interface */
	struct ifaddr	*(*ifa_getifa)(struct ifaddr *,
			       const struct sockaddr *);
					/* NOTE(review): af-specific hook
					 * returning the ifaddr to use for a
					 * destination — confirm with callers */
	uint32_t	*ifa_seqno;	/* NOTE(review): sequence counter;
					 * semantics not visible here */
	int16_t	ifa_preference;	/* preference level for this address */
#ifdef _KERNEL
	struct pslist_entry     ifa_pslist_entry;	/* presumably linkage on
					 * if_addr_pslist — verify */
	struct psref_target	ifa_psref;	/* passive-reference target */
#endif
};
      #define        IFA_ROUTE        RTF_UP        /* (0x01) route installed */
      #define        IFA_DESTROYING        0x2
      
/*
 * Message format for use in obtaining information about interfaces from
 * sysctl and the routing socket.  We need to force 64-bit alignment if we
 * aren't using compatibility definitions.
 */
      #if !defined(_KERNEL) || !defined(COMPAT_RTSOCK)
      #define        __align64        __aligned(sizeof(uint64_t))
      #else
      #define        __align64
      #endif
struct if_msghdr {
	u_short	ifm_msglen __align64;
				/* to skip over non-understood messages */
	u_char	ifm_version;	/* future binary compatibility */
	u_char	ifm_type;	/* message type */
	int	ifm_addrs;	/* like rtm_addrs */
	int	ifm_flags;	/* value of if_flags */
	u_short	ifm_index;	/* index for associated ifp (if_index) */
	struct	if_data ifm_data __align64;
				/* statistics and other data about if */
};
      
      /*
       * Message format for use in obtaining information about interface addresses
       * from sysctl and the routing socket.
       */
struct ifa_msghdr {
	u_short	ifam_msglen __align64;
				/* to skip over non-understood messages */
	u_char	ifam_version;	/* future binary compatibility */
	u_char	ifam_type;	/* message type */
	u_short	ifam_index;	/* index for associated ifp (if_index) */
	int	ifam_flags;	/* value of ifa_flags */
	int	ifam_addrs;	/* like rtm_addrs */
	pid_t	ifam_pid;	/* identify sender */
	int	ifam_addrflags;	/* family specific address flags */
	int	ifam_metric;	/* value of ifa_metric */
};
      
      /*
       * Message format announcing the arrival or departure of a network interface.
       */
struct if_announcemsghdr {
	u_short	ifan_msglen __align64;
				/* to skip over non-understood messages */
	u_char	ifan_version;	/* future binary compatibility */
	u_char	ifan_type;	/* message type */
	u_short	ifan_index;	/* index for associated ifp */
	char	ifan_name[IFNAMSIZ]; /* if name, e.g. "en0" */
	u_short	ifan_what;	/* what type of announcement:
				 * IFAN_ARRIVAL or IFAN_DEPARTURE */
};
      
      #define        IFAN_ARRIVAL        0        /* interface arrival */
      #define        IFAN_DEPARTURE        1        /* interface departure */
      
      #undef __align64
      
      /*
       * Interface request structure used for socket
       * ioctl's.  All interface ioctl's must have parameter
       * definitions which begin with ifr_name.  The
       * remainder may be interface specific.
       */
struct	ifreq {
	char	ifr_name[IFNAMSIZ];		/* if name, e.g. "en0" */
	union {
		struct	sockaddr ifru_addr;
		struct	sockaddr ifru_dstaddr;
		struct	sockaddr ifru_broadaddr;
		struct	sockaddr_storage ifru_space;	/* room for any af */
		short	ifru_flags;
		int	ifru_addrflags;
		int	ifru_metric;
		int	ifru_mtu;
		int	ifru_dlt;
		u_int	ifru_value;
		void *	ifru_data;
		struct {
			uint32_t	b_buflen;
			void		*b_buf;
		} ifru_b;
	} ifr_ifru;
#define	ifr_addr	ifr_ifru.ifru_addr	/* address */
#define	ifr_dstaddr	ifr_ifru.ifru_dstaddr	/* other end of p-to-p link */
#define	ifr_broadaddr	ifr_ifru.ifru_broadaddr	/* broadcast address */
#define	ifr_space	ifr_ifru.ifru_space	/* sockaddr_storage */
#define	ifr_flags	ifr_ifru.ifru_flags	/* flags */
#define	ifr_addrflags	ifr_ifru.ifru_addrflags	/* addr flags */
#define	ifr_metric	ifr_ifru.ifru_metric	/* metric */
#define	ifr_mtu		ifr_ifru.ifru_mtu	/* mtu */
#define	ifr_dlt		ifr_ifru.ifru_dlt	/* data link type (DLT_*) */
#define	ifr_value	ifr_ifru.ifru_value	/* generic value */
#define	ifr_media	ifr_ifru.ifru_metric	/* media options (overload) */
#define	ifr_data	ifr_ifru.ifru_data	/* for use by interface
						 * XXX deprecated
						 */
#define	ifr_buf		ifr_ifru.ifru_b.b_buf	/* new interface ioctls */
#define	ifr_buflen	ifr_ifru.ifru_b.b_buflen
#define	ifr_index	ifr_ifru.ifru_value	/* interface index, BSD */
#define	ifr_ifindex	ifr_index		/* interface index, linux */
};
      
      #ifdef _KERNEL
      #define        ifreq_setdstaddr        ifreq_setaddr
      #define        ifreq_setbroadaddr        ifreq_setaddr
      #define        ifreq_getdstaddr        ifreq_getaddr
      #define        ifreq_getbroadaddr        ifreq_getaddr
      
      static __inline const struct sockaddr *
      /*ARGSUSED*/
      ifreq_getaddr(u_long cmd, const struct ifreq *ifr)
      {
    5         return &ifr->ifr_addr;
      }
      #endif /* _KERNEL */
      
/*
 * Structure used to query and enable interface hardware capabilities
 * (NOTE(review): presumably via SIOC[GS]IFCAP — confirm).
 */
struct ifcapreq {
	char		ifcr_name[IFNAMSIZ];	/* if name, e.g. "en0" */
	uint64_t	ifcr_capabilities;	/* supported capabilities */
	uint64_t	ifcr_capenable;		/* capabilities enabled */
};
      
/*
 * Structure used to add or delete an interface address together with
 * its destination/broadcast address and netmask in a single request.
 */
struct ifaliasreq {
	char	ifra_name[IFNAMSIZ];		/* if name, e.g. "en0" */
	struct	sockaddr ifra_addr;		/* address to add/delete */
	struct	sockaddr ifra_dstaddr;		/* p2p peer address */
#define	ifra_broadaddr	ifra_dstaddr		/* broadcast addr (overload) */
	struct	sockaddr ifra_mask;		/* address mask */
};
      
/*
 * Structure used to retrieve an interface's if_data statistics by
 * name (NOTE(review): presumably SIOC[GZ]IFDATA — confirm).
 */
struct ifdatareq {
	char	ifdr_name[IFNAMSIZ];		/* if name, e.g. "en0" */
	struct	if_data ifdr_data;		/* out: interface statistics */
};
      
/*
 * Structure used by the media ioctls to report the current media
 * selection, link status, and the list of supported media words.
 * "IFMWD" marks fields holding an ifmedia word.
 */
struct ifmediareq {
	char	ifm_name[IFNAMSIZ];	/* if name, e.g. "en0" */
	int	ifm_current;		/* IFMWD: current media options */
	int	ifm_mask;		/* IFMWD: don't care mask */
	int	ifm_status;		/* media status */
	int	ifm_active;		/* IFMWD: active options */
	int	ifm_count;		/* # entries in ifm_ulist
					   array */
	int	*ifm_ulist;		/* array of ifmedia words */
};
      
      
/*
 * Structure used to pass a driver-specific command together with an
 * opaque payload of ifd_len bytes to an interface.
 */
struct  ifdrv {
	char		ifd_name[IFNAMSIZ];	/* if name, e.g. "en0" */
	unsigned long	ifd_cmd;		/* driver-specific command */
	size_t		ifd_len;		/* size of ifd_data buffer */
	void		*ifd_data;		/* in/out: command payload */
};
/* link-string request flags — NOTE(review): used via ifd_cmd; confirm */
#define IFLINKSTR_QUERYLEN	0x01
#define IFLINKSTR_UNSET		0x02
      
      /*
       * Structure used in SIOCGIFCONF request.
       * Used to retrieve interface configuration
       * for machine (useful for programs which
       * must know all networks accessible).
       */
      struct        ifconf {
              int        ifc_len;                /* size of associated buffer */
              union {
                      void *        ifcu_buf;
                      struct        ifreq *ifcu_req;
              } ifc_ifcu;
      #define        ifc_buf        ifc_ifcu.ifcu_buf        /* buffer address */
      #define        ifc_req        ifc_ifcu.ifcu_req        /* array of structures returned */
      };
      
      /*
       * Structure for SIOC[AGD]LIFADDR
       */
struct if_laddrreq {
	char iflr_name[IFNAMSIZ];	/* if name, e.g. "en0" */
	unsigned int flags;
#define IFLR_PREFIX	0x8000	/* in: prefix given  out: kernel fills id */
#define IFLR_ACTIVE	0x4000	/* in/out: link-layer address activation */
#define IFLR_FACTORY	0x2000	/* in/out: factory link-layer address */
	unsigned int prefixlen;		/* in/out */
	struct sockaddr_storage addr;	/* in/out */
	struct sockaddr_storage dstaddr; /* out */
};
      
      /*
       * Structure for SIOC[SG]IFADDRPREF
       */
struct if_addrprefreq {
	char			ifap_name[IFNAMSIZ];	/* if name */
	int16_t			ifap_preference;	/* in/out */
	struct sockaddr_storage	ifap_addr;		/* in/out */
};
      
      #include <net/if_arp.h>
      
      #endif /* _NETBSD_SOURCE */
      
      #ifdef _KERNEL
      #ifdef ALTQ
      #define IFQ_ENQUEUE(ifq, m, err)                                        \
      do {                                                                        \
              mutex_enter((ifq)->ifq_lock);                                        \
              if (ALTQ_IS_ENABLED(ifq))                                        \
                      ALTQ_ENQUEUE((ifq), (m), (err));                        \
              else {                                                                \
                      if (IF_QFULL(ifq)) {                                        \
                              m_freem(m);                                        \
                              (err) = ENOBUFS;                                \
                      } else {                                                \
                              IF_ENQUEUE((ifq), (m));                                \
                              (err) = 0;                                        \
                      }                                                        \
              }                                                                \
              if ((err))                                                        \
                      (ifq)->ifq_drops++;                                        \
              mutex_exit((ifq)->ifq_lock);                                        \
      } while (/*CONSTCOND*/ 0)
      
      #define IFQ_DEQUEUE(ifq, m)                                                \
      do {                                                                        \
              mutex_enter((ifq)->ifq_lock);                                        \
              if (TBR_IS_ENABLED(ifq))                                        \
                      (m) = tbr_dequeue((ifq), ALTDQ_REMOVE);                        \
              else if (ALTQ_IS_ENABLED(ifq))                                        \
                      ALTQ_DEQUEUE((ifq), (m));                                \
              else                                                                \
                      IF_DEQUEUE((ifq), (m));                                        \
              mutex_exit((ifq)->ifq_lock);                                        \
      } while (/*CONSTCOND*/ 0)
      
      #define        IFQ_POLL(ifq, m)                                                \
      do {                                                                        \
              mutex_enter((ifq)->ifq_lock);                                        \
              if (TBR_IS_ENABLED(ifq))                                        \
                      (m) = tbr_dequeue((ifq), ALTDQ_POLL);                        \
              else if (ALTQ_IS_ENABLED(ifq))                                        \
                      ALTQ_POLL((ifq), (m));                                        \
              else                                                                \
                      IF_POLL((ifq), (m));                                        \
              mutex_exit((ifq)->ifq_lock);                                        \
      } while (/*CONSTCOND*/ 0)
      
      #define        IFQ_PURGE(ifq)                                                        \
      do {                                                                        \
              mutex_enter((ifq)->ifq_lock);                                        \
              if (ALTQ_IS_ENABLED(ifq))                                        \
                      ALTQ_PURGE(ifq);                                        \
              else                                                                \
                      IF_PURGE(ifq);                                                \
              mutex_exit((ifq)->ifq_lock);                                        \
      } while (/*CONSTCOND*/ 0)
      
      #define        IFQ_SET_READY(ifq)                                                \
      do {                                                                        \
              (ifq)->altq_flags |= ALTQF_READY;                                \
      } while (/*CONSTCOND*/ 0)
      
      #define        IFQ_CLASSIFY(ifq, m, af)                                        \
      do {                                                                        \
              KASSERT(((m)->m_flags & M_PKTHDR) != 0);                        \
              mutex_enter((ifq)->ifq_lock);                                        \
              if (ALTQ_IS_ENABLED(ifq)) {                                        \
                      if (ALTQ_NEEDS_CLASSIFY(ifq))                                \
                              (m)->m_pkthdr.pattr_class = (*(ifq)->altq_classify) \
                                      ((ifq)->altq_clfier, (m), (af));        \
                      (m)->m_pkthdr.pattr_af = (af);                                \
                      (m)->m_pkthdr.pattr_hdr = mtod((m), void *);                \
              }                                                                \
              mutex_exit((ifq)->ifq_lock);                                        \
      } while (/*CONSTCOND*/ 0)
      #else /* ! ALTQ */
      #define        IFQ_ENQUEUE(ifq, m, err)                                        \
      do {                                                                        \
              mutex_enter((ifq)->ifq_lock);                                        \
              if (IF_QFULL(ifq)) {                                                \
                      m_freem(m);                                                \
                      (err) = ENOBUFS;                                        \
              } else {                                                        \
                      IF_ENQUEUE((ifq), (m));                                        \
                      (err) = 0;                                                \
              }                                                                \
              if (err)                                                        \
                      (ifq)->ifq_drops++;                                        \
              mutex_exit((ifq)->ifq_lock);                                        \
      } while (/*CONSTCOND*/ 0)
      
      #define        IFQ_DEQUEUE(ifq, m)                                                \
      do {                                                                        \
              mutex_enter((ifq)->ifq_lock);                                        \
              IF_DEQUEUE((ifq), (m));                                                \
              mutex_exit((ifq)->ifq_lock);                                        \
      } while (/*CONSTCOND*/ 0)
      
      #define        IFQ_POLL(ifq, m)                                                \
      do {                                                                        \
              mutex_enter((ifq)->ifq_lock);                                        \
              IF_POLL((ifq), (m));                                                \
              mutex_exit((ifq)->ifq_lock);                                        \
      } while (/*CONSTCOND*/ 0)
      
      #define        IFQ_PURGE(ifq)                                                        \
      do {                                                                        \
              mutex_enter((ifq)->ifq_lock);                                        \
              IF_PURGE(ifq);                                                        \
              mutex_exit((ifq)->ifq_lock);                                        \
      } while (/*CONSTCOND*/ 0)
      
      #define        IFQ_SET_READY(ifq)        /* nothing */
      
      #define        IFQ_CLASSIFY(ifq, m, af) /* nothing */
      
      #endif /* ALTQ */
      
      #define IFQ_LOCK_INIT(ifq)        (ifq)->ifq_lock =                        \
                  mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET)
      #define IFQ_LOCK_DESTROY(ifq)        mutex_obj_free((ifq)->ifq_lock)
      #define IFQ_LOCK(ifq)                mutex_enter((ifq)->ifq_lock)
      #define IFQ_UNLOCK(ifq)                mutex_exit((ifq)->ifq_lock)
      
      #define        IFQ_IS_EMPTY(ifq)                IF_IS_EMPTY(ifq)
      #define        IFQ_INC_LEN(ifq)                ((ifq)->ifq_len++)
      #define        IFQ_DEC_LEN(ifq)                (--(ifq)->ifq_len)
      #define        IFQ_INC_DROPS(ifq)                ((ifq)->ifq_drops++)
      #define        IFQ_SET_MAXLEN(ifq, len)        ((ifq)->ifq_maxlen = (len))
      
      #define        IFQ_ENQUEUE_ISR(ifq, m, isr)                                        \
      do {                                                                        \
              IFQ_LOCK(inq);                                                        \
              if (IF_QFULL(inq)) {                                                \
                      IF_DROP(inq);                                                \
                      IFQ_UNLOCK(inq);                                        \
                      m_freem(m);                                                \
              } else {                                                        \
                      IF_ENQUEUE(inq, m);                                        \
                      IFQ_UNLOCK(inq);                                        \
                      schednetisr(isr);                                        \
              }                                                                \
      } while (/*CONSTCOND*/ 0)
      
      #include <sys/mallocvar.h>
      MALLOC_DECLARE(M_IFADDR);
      MALLOC_DECLARE(M_IFMADDR);
      
      int ifreq_setaddr(u_long, struct ifreq *, const struct sockaddr *);
      
      struct ifnet *if_alloc(u_char);
      void if_free(struct ifnet *);
      void if_initname(struct ifnet *, const char *, int);
      struct ifaddr *if_dl_create(const struct ifnet *, const struct sockaddr_dl **);
      void if_activate_sadl(struct ifnet *, struct ifaddr *,
          const struct sockaddr_dl *);
      void        if_set_sadl(struct ifnet *, const void *, u_char, bool);
      void        if_alloc_sadl(struct ifnet *);
      void        if_free_sadl(struct ifnet *, int);
      int        if_initialize(struct ifnet *);
      void        if_register(struct ifnet *);
      int        if_attach(struct ifnet *); /* Deprecated. Use if_initialize and if_register */
      void        if_attachdomain(void);
      void        if_deactivate(struct ifnet *);
      bool        if_is_deactivated(const struct ifnet *);
      void        if_export_if_data(struct ifnet *, struct if_data *, bool);
      void        if_purgeaddrs(struct ifnet *, int, void (*)(struct ifaddr *));
      void        if_detach(struct ifnet *);
      void        if_down(struct ifnet *);
      void        if_down_locked(struct ifnet *);
      void        if_link_state_change(struct ifnet *, int);
      void        if_domain_link_state_change(struct ifnet *, int);
      void        if_up(struct ifnet *);
      void        ifinit(void);
      void        ifinit1(void);
      void        ifinit_post(void);
      int        ifaddrpref_ioctl(struct socket *, u_long, void *, struct ifnet *);
      extern int (*ifioctl)(struct socket *, u_long, void *, struct lwp *);
      int        ifioctl_common(struct ifnet *, u_long, void *);
      int        ifpromisc(struct ifnet *, int);
      int        ifpromisc_locked(struct ifnet *, int);
      int        if_addr_init(ifnet_t *, struct ifaddr *, bool);
      int        if_do_dad(struct ifnet *);
      int        if_mcast_op(ifnet_t *, const unsigned long, const struct sockaddr *);
      int        if_flags_set(struct ifnet *, const u_short);
      int        if_clone_list(int, char *, int *);
      
      struct        ifnet *ifunit(const char *);
      struct        ifnet *if_get(const char *, struct psref *);
      ifnet_t *if_byindex(u_int);
      ifnet_t *_if_byindex(u_int);
      ifnet_t *if_get_byindex(u_int, struct psref *);
      ifnet_t *if_get_bylla(const void *, unsigned char, struct psref *);
      void        if_put(const struct ifnet *, struct psref *);
      void        if_acquire(struct ifnet *, struct psref *);
      #define        if_release        if_put
      
      int if_tunnel_check_nesting(struct ifnet *, struct mbuf *, int);
      percpu_t *if_tunnel_alloc_ro_percpu(void);
      void if_tunnel_free_ro_percpu(percpu_t *);
      void if_tunnel_ro_percpu_rtcache_free(percpu_t *);
      
/*
 * Per-CPU cached route and the mutex protecting it, handed out by
 * if_tunnel_get_ro() and released with if_tunnel_put_ro().
 */
struct tunnel_ro {
	struct route *tr_ro;	/* cached route */
	kmutex_t *tr_lock;	/* protects tr_ro */
};
      
      static inline void
      if_tunnel_get_ro(percpu_t *ro_percpu, struct route **ro, kmutex_t **lock)
      {
              struct tunnel_ro *tro;
      
              tro = percpu_getref(ro_percpu);
              *ro = tro->tr_ro;
              *lock = tro->tr_lock;
              mutex_enter(*lock);
      }
      
/*
 * Release the route/lock pair obtained via if_tunnel_get_ro(): drop
 * the mutex first, then the per-CPU reference.
 */
static inline void
if_tunnel_put_ro(percpu_t *ro_percpu, kmutex_t *lock)
{

	mutex_exit(lock);
	percpu_putref(ro_percpu);
}
      
      static __inline if_index_t
      if_get_index(const struct ifnet *ifp)
      {
      
    6         return ifp != NULL ? ifp->if_index : 0;
      }
      
      bool        if_held(struct ifnet *);
      
      void        if_input(struct ifnet *, struct mbuf *);
      
      struct if_percpuq *
              if_percpuq_create(struct ifnet *);
      void        if_percpuq_destroy(struct if_percpuq *);
      void
              if_percpuq_enqueue(struct if_percpuq *, struct mbuf *);
      
      void        if_deferred_start_init(struct ifnet *, void (*)(struct ifnet *));
      void        if_schedule_deferred_start(struct ifnet *);
      
      void ifa_insert(struct ifnet *, struct ifaddr *);
      void ifa_remove(struct ifnet *, struct ifaddr *);
      
      void        ifa_psref_init(struct ifaddr *);
      void        ifa_acquire(struct ifaddr *, struct psref *);
      void        ifa_release(struct ifaddr *, struct psref *);
      bool        ifa_held(struct ifaddr *);
      bool        ifa_is_destroying(struct ifaddr *);
      
      void        ifaref(struct ifaddr *);
      void        ifafree(struct ifaddr *);
      
      struct        ifaddr *ifa_ifwithaddr(const struct sockaddr *);
      struct        ifaddr *ifa_ifwithaddr_psref(const struct sockaddr *, struct psref *);
      struct        ifaddr *ifa_ifwithaf(int);
      struct        ifaddr *ifa_ifwithdstaddr(const struct sockaddr *);
      struct        ifaddr *ifa_ifwithdstaddr_psref(const struct sockaddr *,
                  struct psref *);
      struct        ifaddr *ifa_ifwithnet(const struct sockaddr *);
      struct        ifaddr *ifa_ifwithnet_psref(const struct sockaddr *, struct psref *);
      struct        ifaddr *ifa_ifwithladdr(const struct sockaddr *);
      struct        ifaddr *ifa_ifwithladdr_psref(const struct sockaddr *, struct psref *);
      struct        ifaddr *ifaof_ifpforaddr(const struct sockaddr *, struct ifnet *);
      struct        ifaddr *ifaof_ifpforaddr_psref(const struct sockaddr *, struct ifnet *,
                  struct psref *);
      void        link_rtrequest(int, struct rtentry *, const struct rt_addrinfo *);
      void        p2p_rtrequest(int, struct rtentry *, const struct rt_addrinfo *);
      
      void        if_clone_attach(struct if_clone *);
      void        if_clone_detach(struct if_clone *);
      
      int        if_transmit_lock(struct ifnet *, struct mbuf *);
      
      int        ifq_enqueue(struct ifnet *, struct mbuf *);
      int        ifq_enqueue2(struct ifnet *, struct ifqueue *, struct mbuf *);
      
      int        loioctl(struct ifnet *, u_long, void *);
      void        loopattach(int);
      void        loopinit(void);
      int        looutput(struct ifnet *,
                 struct mbuf *, const struct sockaddr *, const struct rtentry *);
      
      /*
       * These are exported because they're an easy way to tell if
       * an interface is going away without having to burn a flag.
       */
      int        if_nulloutput(struct ifnet *, struct mbuf *,
                  const struct sockaddr *, const struct rtentry *);
      void        if_nullinput(struct ifnet *, struct mbuf *);
      void        if_nullstart(struct ifnet *);
      int        if_nulltransmit(struct ifnet *, struct mbuf *);
      int        if_nullioctl(struct ifnet *, u_long, void *);
      int        if_nullinit(struct ifnet *);
      void        if_nullstop(struct ifnet *, int);
      void        if_nullslowtimo(struct ifnet *);
      #define        if_nullwatchdog        if_nullslowtimo
      void        if_nulldrain(struct ifnet *);
      #else
      struct if_nameindex {
              unsigned int        if_index;        /* 1, 2, ... */
              char                *if_name;        /* null terminated name: "le0", ... */
      };
      
      #include <sys/cdefs.h>
      __BEGIN_DECLS
      unsigned int if_nametoindex(const char *);
      char *        if_indextoname(unsigned int, char *);
      struct        if_nameindex * if_nameindex(void);
      void        if_freenameindex(struct if_nameindex *);
      __END_DECLS
      #endif /* _KERNEL */ /* XXX really ALTQ? */
      
      #ifdef _KERNEL
      
      #define        IFADDR_FIRST(__ifp)                TAILQ_FIRST(&(__ifp)->if_addrlist)
      #define        IFADDR_NEXT(__ifa)                TAILQ_NEXT((__ifa), ifa_list)
      #define        IFADDR_FOREACH(__ifa, __ifp)        TAILQ_FOREACH(__ifa, \
                                                  &(__ifp)->if_addrlist, ifa_list)
      #define        IFADDR_FOREACH_SAFE(__ifa, __ifp, __nifa) \
                                                  TAILQ_FOREACH_SAFE(__ifa, \
                                                  &(__ifp)->if_addrlist, ifa_list, __nifa)
      #define        IFADDR_EMPTY(__ifp)                TAILQ_EMPTY(&(__ifp)->if_addrlist)
      
      #define IFADDR_ENTRY_INIT(__ifa)                                        \
              PSLIST_ENTRY_INIT((__ifa), ifa_pslist_entry)
      #define IFADDR_ENTRY_DESTROY(__ifa)                                        \
              PSLIST_ENTRY_DESTROY((__ifa), ifa_pslist_entry)
      #define IFADDR_READER_EMPTY(__ifp)                                        \
              (PSLIST_READER_FIRST(&(__ifp)->if_addr_pslist, struct ifaddr,        \
                                   ifa_pslist_entry) == NULL)
      #define IFADDR_READER_FIRST(__ifp)                                        \
              PSLIST_READER_FIRST(&(__ifp)->if_addr_pslist, struct ifaddr,        \
                                  ifa_pslist_entry)
      #define IFADDR_READER_NEXT(__ifa)                                        \
              PSLIST_READER_NEXT((__ifa), struct ifaddr, ifa_pslist_entry)
      #define IFADDR_READER_FOREACH(__ifa, __ifp)                                \
              PSLIST_READER_FOREACH((__ifa), &(__ifp)->if_addr_pslist, struct ifaddr,\
                                    ifa_pslist_entry)
      #define IFADDR_WRITER_INSERT_HEAD(__ifp, __ifa)                                \
              PSLIST_WRITER_INSERT_HEAD(&(__ifp)->if_addr_pslist, (__ifa),        \
                                        ifa_pslist_entry)
      #define IFADDR_WRITER_REMOVE(__ifa)                                        \
              PSLIST_WRITER_REMOVE((__ifa), ifa_pslist_entry)
      #define IFADDR_WRITER_FOREACH(__ifa, __ifp)                                \
              PSLIST_WRITER_FOREACH((__ifa), &(__ifp)->if_addr_pslist, struct ifaddr,\
                                    ifa_pslist_entry)
      #define IFADDR_WRITER_NEXT(__ifp)                                        \
              PSLIST_WRITER_NEXT((__ifp), struct ifaddr, ifa_pslist_entry)
      #define IFADDR_WRITER_INSERT_AFTER(__ifp, __new)                        \
              PSLIST_WRITER_INSERT_AFTER((__ifp), (__new), ifa_pslist_entry)
      #define IFADDR_WRITER_EMPTY(__ifp)                                        \
              (PSLIST_WRITER_FIRST(&(__ifp)->if_addr_pslist, struct ifaddr,        \
                                   ifa_pslist_entry) == NULL)
      #define IFADDR_WRITER_INSERT_TAIL(__ifp, __new)                                \
              do {                                                                \
                      if (IFADDR_WRITER_EMPTY(__ifp)) {                        \
                              IFADDR_WRITER_INSERT_HEAD((__ifp), (__new));        \
                      } else {                                                \
                              struct ifaddr *__ifa;                                \
                              IFADDR_WRITER_FOREACH(__ifa, (__ifp)) {                \
                                      if (IFADDR_WRITER_NEXT(__ifa) == NULL) {\
                                              IFADDR_WRITER_INSERT_AFTER(__ifa,\
                                                  (__new));                        \
                                              break;                                \
                                      }                                        \
                              }                                                \
                      }                                                        \
              } while (0)
      
      #define        IFNET_GLOBAL_LOCK()                        mutex_enter(&ifnet_mtx)
      #define        IFNET_GLOBAL_UNLOCK()                        mutex_exit(&ifnet_mtx)
      #define        IFNET_GLOBAL_LOCKED()                        mutex_owned(&ifnet_mtx)
      
      #define IFNET_READER_EMPTY() \
              (PSLIST_READER_FIRST(&ifnet_pslist, struct ifnet, if_pslist_entry) == NULL)
      #define IFNET_READER_FIRST() \
              PSLIST_READER_FIRST(&ifnet_pslist, struct ifnet, if_pslist_entry)
      #define IFNET_READER_NEXT(__ifp) \
              PSLIST_READER_NEXT((__ifp), struct ifnet, if_pslist_entry)
      #define IFNET_READER_FOREACH(__ifp) \
              PSLIST_READER_FOREACH((__ifp), &ifnet_pslist, struct ifnet, \
                                    if_pslist_entry)
      #define IFNET_WRITER_INSERT_HEAD(__ifp) \
              PSLIST_WRITER_INSERT_HEAD(&ifnet_pslist, (__ifp), if_pslist_entry)
      #define IFNET_WRITER_REMOVE(__ifp) \
              PSLIST_WRITER_REMOVE((__ifp), if_pslist_entry)
      #define IFNET_WRITER_FOREACH(__ifp) \
              PSLIST_WRITER_FOREACH((__ifp), &ifnet_pslist, struct ifnet, \
                                    if_pslist_entry)
      #define IFNET_WRITER_NEXT(__ifp) \
              PSLIST_WRITER_NEXT((__ifp), struct ifnet, if_pslist_entry)
      #define IFNET_WRITER_INSERT_AFTER(__ifp, __new) \
              PSLIST_WRITER_INSERT_AFTER((__ifp), (__new), if_pslist_entry)
      #define IFNET_WRITER_EMPTY() \
              (PSLIST_WRITER_FIRST(&ifnet_pslist, struct ifnet, if_pslist_entry) == NULL)
      #define IFNET_WRITER_INSERT_TAIL(__new)                                        \
              do {                                                                \
                      if (IFNET_WRITER_EMPTY()) {                                \
                              IFNET_WRITER_INSERT_HEAD(__new);                \
                      } else {                                                \
                              struct ifnet *__ifp;                                \
                              IFNET_WRITER_FOREACH(__ifp) {                        \
                                      if (IFNET_WRITER_NEXT(__ifp) == NULL) {        \
                                              IFNET_WRITER_INSERT_AFTER(__ifp,\
                                                  (__new));                        \
                                              break;                                \
                                      }                                        \
                              }                                                \
                      }                                                        \
              } while (0)
      
      #define IFNET_LOCK(ifp)                mutex_enter((ifp)->if_ioctl_lock)
      #define IFNET_UNLOCK(ifp)        mutex_exit((ifp)->if_ioctl_lock)
      #define IFNET_LOCKED(ifp)        mutex_owned((ifp)->if_ioctl_lock)
      
      #define IFNET_ASSERT_UNLOCKED(ifp)        \
              KDASSERT(mutex_ownable((ifp)->if_ioctl_lock))
      
      extern struct pslist_head ifnet_pslist;
      extern kmutex_t ifnet_mtx;
      
      extern struct ifnet *lo0ifp;
      
      /*
       * ifq sysctl support
       */
      int        sysctl_ifq(int *name, u_int namelen, void *oldp,
                             size_t *oldlenp, void *newp, size_t newlen,
                             struct ifqueue *ifq);
      /* symbolic names for terminal (per-protocol) CTL_IFQ_ nodes */
      #define IFQCTL_LEN        1
      #define IFQCTL_MAXLEN        2
      #define IFQCTL_PEAK        3
      #define IFQCTL_DROPS        4
      
      /* 
       * Hook for if_vlan - needed by if_agr
       */
      MODULE_HOOK(if_vlan_vlan_input_hook, void, (struct ifnet *, struct mbuf *));
      
      #endif /* _KERNEL */
      
      #endif /* !_NET_IF_H_ */
      /*        $NetBSD: utoppy.c,v 1.35 2020/03/14 02:35:34 christos Exp $        */
      
      /*-
       * Copyright (c) 2006 The NetBSD Foundation, Inc.
       * All rights reserved.
       *
       * This code is derived from software contributed to The NetBSD Foundation
       * by Steve C. Woodford.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       *
       * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
       * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
       * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
       * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
       * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
       * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
       * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
       * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
       * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
       * POSSIBILITY OF SUCH DAMAGE.
       */
      
      #include <sys/cdefs.h>
      __KERNEL_RCSID(0, "$NetBSD: utoppy.c,v 1.35 2020/03/14 02:35:34 christos Exp $");
      
      #ifdef _KERNEL_OPT
      #include "opt_usb.h"
      #endif
      
      #include <sys/param.h>
      #include <sys/systm.h>
      #include <sys/proc.h>
      #include <sys/kernel.h>
      #include <sys/fcntl.h>
      #include <sys/device.h>
      #include <sys/ioctl.h>
      #include <sys/uio.h>
      #include <sys/conf.h>
      #include <sys/vnode.h>
      #include <sys/bus.h>
      
      #include <lib/libkern/crc16.h>
      
      #include <dev/usb/usb.h>
      #include <dev/usb/usbdi.h>
      #include <dev/usb/usbdivar.h>
      #include <dev/usb/usbdi_util.h>
      #include <dev/usb/usbdevs.h>
      #include <dev/usb/usb_quirks.h>
      #include <dev/usb/utoppy.h>
      
      #include "ioconf.h"
      
      #undef UTOPPY_DEBUG
      #ifdef UTOPPY_DEBUG
      #define        UTOPPY_DBG_OPEN                0x0001
      #define        UTOPPY_DBG_CLOSE        0x0002
      #define        UTOPPY_DBG_READ                0x0004
      #define        UTOPPY_DBG_WRITE        0x0008
      #define        UTOPPY_DBG_IOCTL        0x0010
      #define        UTOPPY_DBG_SEND_PACKET        0x0020
      #define        UTOPPY_DBG_RECV_PACKET        0x0040
      #define        UTOPPY_DBG_ADDPATH        0x0080
      #define        UTOPPY_DBG_READDIR        0x0100
      #define        UTOPPY_DBG_DUMP                0x0200
      #define        DPRINTF(l, m)                                \
                      do {                                \
                              if (utoppy_debug & l)        \
                                      printf m;        \
                      } while (/*CONSTCOND*/0)
      static int utoppy_debug = 0;
      static void utoppy_dump_packet(const void *, size_t);
      #define        DDUMP_PACKET(p, l)                                        \
                      do {                                                \
                              if (utoppy_debug & UTOPPY_DBG_DUMP)        \
                                      utoppy_dump_packet((p), (l));        \
                      } while (/*CONSTCOND*/0)
      #else
      #define        DPRINTF(l, m)                /* nothing */
      #define        DDUMP_PACKET(p, l)        /* nothing */
      #endif
      
      
      #define        UTOPPY_CONFIG_NO        1
      #define        UTOPPY_NUMENDPOINTS        2
      
      #define        UTOPPY_BSIZE                0xffff
      #define        UTOPPY_FRAG_SIZE        0x1000
      #define        UTOPPY_HEADER_SIZE        8
      #define        UTOPPY_SHORT_TIMEOUT        (500)                /* 0.5 seconds */
      #define        UTOPPY_LONG_TIMEOUT        (10 * 1000)        /* 10 seconds */
      
      /* Protocol Commands and Responses */
      #define        UTOPPY_RESP_ERROR                0x0001
      #define        UTOPPY_CMD_ACK                        0x0002
      #define         UTOPPY_RESP_SUCCESS                UTOPPY_CMD_ACK
      #define        UTOPPY_CMD_CANCEL                0x0003
      #define        UTOPPY_CMD_READY                0x0100
      #define        UTOPPY_CMD_RESET                0x0101
      #define        UTOPPY_CMD_TURBO                0x0102
      #define        UTOPPY_CMD_STATS                0x1000
      #define  UTOPPY_RESP_STATS_DATA                0x1001
      #define        UTOPPY_CMD_READDIR                0x1002
      #define         UTOPPY_RESP_READDIR_DATA        0x1003
      #define         UTOPPY_RESP_READDIR_END        0x1004
      #define        UTOPPY_CMD_DELETE                0x1005
      #define        UTOPPY_CMD_RENAME                0x1006
      #define        UTOPPY_CMD_MKDIR                0x1007
      #define        UTOPPY_CMD_FILE                        0x1008
      #define  UTOPPY_FILE_WRITE                0
      #define  UTOPPY_FILE_READ                1
      #define         UTOPPY_RESP_FILE_HEADER        0x1009
      #define         UTOPPY_RESP_FILE_DATA                0x100a
      #define         UTOPPY_RESP_FILE_END                0x100b
      
      enum utoppy_state {
              UTOPPY_STATE_CLOSED,
              UTOPPY_STATE_OPENING,
              UTOPPY_STATE_IDLE,
              UTOPPY_STATE_READDIR,
              UTOPPY_STATE_READFILE,
              UTOPPY_STATE_WRITEFILE
      };
      
/*
 * Per-device driver state.
 */
struct utoppy_softc {
	device_t sc_dev;
	struct usbd_device *sc_udev;	/* device */
	struct usbd_interface *sc_iface;	/* interface */
	int sc_dying;		/* non-zero once the device is going away */
	int sc_refcnt;		/* in-use count; detach waits while >= 0 */

	enum utoppy_state sc_state;	/* current protocol state */
	u_int sc_turbo_mode;	/* NOTE(review): UTOPPY_CMD_TURBO related;
				 * usage is outside this chunk — confirm */

	int sc_out;		/* bulk-out endpoint address (-1 if absent) */
	struct usbd_pipe *sc_out_pipe;	/* bulk out pipe */
	struct usbd_xfer *sc_out_xfer;
	void *sc_out_buf;	/* transfer buffer of sc_out_xfer */
	void *sc_out_data;	/* outgoing packet assembly buffer */
	uint64_t sc_wr_offset;	/* NOTE(review): write-file progress;
				 * maintained outside this chunk — confirm */
	uint64_t sc_wr_size;

	int sc_in;		/* bulk-in endpoint address (-1 if absent) */
	struct usbd_pipe *sc_in_pipe;	/* bulk in pipe */
	struct usbd_xfer *sc_in_xfer;
	void *sc_in_buf;	/* transfer buffer of sc_in_xfer */
	void *sc_in_data;	/* reassembled incoming packet */
	size_t sc_in_len;	/* unconsumed payload bytes remaining */
	u_int sc_in_offset;	/* consumption offset into the payload */
};
      
      struct utoppy_header {
              uint16_t h_len;
              uint16_t h_crc;
              uint16_t h_cmd2;
              uint16_t h_cmd;
              uint8_t h_data[0];
      };
      #define        UTOPPY_OUT_INIT(sc)                                        \
              do {                                                        \
                      struct utoppy_header *_h = sc->sc_out_data;        \
                      _h->h_len = 0;                                        \
              } while (/*CONSTCOND*/0)
      
      #define        UTOPPY_MJD_1970 40587u        /* MJD value for Jan 1 00:00:00 1970 */
      
      #define        UTOPPY_FTYPE_DIR        1
      #define        UTOPPY_FTYPE_FILE        2
      
      #define        UTOPPY_IN_DATA(sc)        \
       ((void*)&(((uint8_t*)(sc)->sc_in_data)[(sc)->sc_in_offset+UTOPPY_HEADER_SIZE]))
      
/* Character-device entry points (bodies are outside this chunk). */
static dev_type_open(utoppyopen);
static dev_type_close(utoppyclose);
static dev_type_read(utoppyread);
static dev_type_write(utoppywrite);
static dev_type_ioctl(utoppyioctl);

const struct cdevsw utoppy_cdevsw = {
	.d_open = utoppyopen,
	.d_close = utoppyclose,
	.d_read = utoppyread,
	.d_write = utoppywrite,
	.d_ioctl = utoppyioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

/* Map a character-device minor number to an autoconf unit. */
#define	UTOPPYUNIT(n)	(minor(n))

/* Autoconfiguration glue. */
static int	utoppy_match(device_t, cfdata_t, void *);
static void	utoppy_attach(device_t, device_t, void *);
static int	utoppy_detach(device_t, int);
static int	utoppy_activate(device_t, enum devact);

CFATTACH_DECL_NEW(utoppy, sizeof(struct utoppy_softc), utoppy_match,
    utoppy_attach, utoppy_detach, utoppy_activate);
      
      static int
      utoppy_match(device_t parent, cfdata_t match, void *aux)
      {
              struct usb_attach_arg *uaa = aux;
      
    4         if (uaa->uaa_vendor == USB_VENDOR_TOPFIELD &&
                  uaa->uaa_product == USB_PRODUCT_TOPFIELD_TF5000PVR)
                      return UMATCH_VENDOR_PRODUCT;
      
    4         return UMATCH_NONE;
      }
      
static void
utoppy_attach(device_t parent, device_t self, void *aux)
{
	struct utoppy_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	struct usbd_device *dev = uaa->uaa_device;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	char *devinfop;
	uint8_t epcount;
	int i;

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	/* Announce the device. */
	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dying = 0;
	sc->sc_refcnt = 0;
	sc->sc_udev = dev;

	/* Select the first configuration and grab interface 0. */
	if (usbd_set_config_index(dev, 0, 1)
	    || usbd_device2interface_handle(dev, 0, &iface)) {
		aprint_error_dev(self, "Configuration failed\n");
		return;
	}

	/* The interface must expose exactly two endpoints. */
	epcount = 0;
	(void) usbd_endpoint_count(iface, &epcount);
	if (epcount != UTOPPY_NUMENDPOINTS) {
		aprint_error_dev(self, "Expected %d endpoints, got %d\n",
		    UTOPPY_NUMENDPOINTS, epcount);
		return;
	}

	sc->sc_in = -1;
	sc->sc_out = -1;

	/* Locate the bulk-in and bulk-out endpoint addresses. */
	for (i = 0; i < epcount; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			aprint_error_dev(self, "couldn't get ep %d\n", i);
			return;
		}

		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->sc_in = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
			   UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->sc_out = ed->bEndpointAddress;
		}
	}

	if (sc->sc_out == -1 || sc->sc_in == -1) {
		aprint_error_dev(self,
		    "could not find bulk in/out endpoints\n");
		sc->sc_dying = 1;
		return;
	}

	sc->sc_iface = iface;
	sc->sc_udev = dev;

	/* NULL pipes mark "not open" for the cleanup paths in detach. */
	sc->sc_out_pipe = NULL;
	sc->sc_in_pipe = NULL;

	if (usbd_open_pipe(sc->sc_iface, sc->sc_out, 0, &sc->sc_out_pipe)) {
		DPRINTF(UTOPPY_DBG_OPEN, ("%s: usbd_open_pipe(OUT) failed\n",
		    device_xname(sc->sc_dev)));
		aprint_error_dev(self, "could not open OUT pipe\n");
		sc->sc_dying = 1;
		return;
	}

	if (usbd_open_pipe(sc->sc_iface, sc->sc_in, 0, &sc->sc_in_pipe)) {
		DPRINTF(UTOPPY_DBG_OPEN, ("%s: usbd_open_pipe(IN) failed\n",
		    device_xname(sc->sc_dev)));
		aprint_error_dev(self, "could not open IN pipe\n");

		usbd_close_pipe(sc->sc_out_pipe);
		sc->sc_out_pipe = NULL;
		sc->sc_dying = 1;
		return;
	}

	/* Pre-allocate one fragment-sized transfer per direction. */
	int error;
	error = usbd_create_xfer(sc->sc_out_pipe, UTOPPY_FRAG_SIZE, 0, 0,
	    &sc->sc_out_xfer);
	if (error) {
		aprint_error_dev(self, "could not allocate bulk out xfer\n");
		goto fail0;
	}

	error = usbd_create_xfer(sc->sc_in_pipe, UTOPPY_FRAG_SIZE,
	    0, 0, &sc->sc_in_xfer);
	if (error) {
		aprint_error_dev(self, "could not allocate bulk in xfer\n");
		goto fail1;
	}

	/* I/O goes through the xfers' own buffers. */
	sc->sc_out_buf = usbd_get_buffer(sc->sc_out_xfer);
	sc->sc_in_buf = usbd_get_buffer(sc->sc_in_xfer);

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	return;

 fail1:	usbd_destroy_xfer(sc->sc_out_xfer);
	sc->sc_out_xfer = NULL;

	/* The pipes stay open here; utoppy_detach() closes non-NULL pipes. */
 fail0:	sc->sc_dying = 1;
	return;
}
      
      static int
      utoppy_activate(device_t self, enum devact act)
      {
              struct utoppy_softc *sc = device_private(self);
      
              switch (act) {
              case DVACT_DEACTIVATE:
                      sc->sc_dying = 1;
                      return 0;
              default:
                      return EOPNOTSUPP;
              }
      }
      
static int
utoppy_detach(device_t self, int flags)
{
	struct utoppy_softc *sc = device_private(self);
	int maj, mn;
	int s;

	/* Block new operations and kick anyone waiting on a transfer. */
	sc->sc_dying = 1;
	if (sc->sc_out_pipe != NULL)
		usbd_abort_pipe(sc->sc_out_pipe);
	if (sc->sc_in_pipe != NULL)
		usbd_abort_pipe(sc->sc_in_pipe);

	/* Tear down the xfers before closing their parent pipes. */
	if (sc->sc_in_xfer != NULL)
		usbd_destroy_xfer(sc->sc_in_xfer);
	if (sc->sc_out_xfer != NULL)
		usbd_destroy_xfer(sc->sc_out_xfer);

	if (sc->sc_out_pipe != NULL)
		usbd_close_pipe(sc->sc_out_pipe);
	if (sc->sc_in_pipe != NULL)
		usbd_close_pipe(sc->sc_in_pipe);

	/* Wait for any thread still inside a device entry point. */
	s = splusb();
	if (--sc->sc_refcnt >= 0)
		usb_detach_waitold(sc->sc_dev);
	splx(s);

	/* locate the major number */
	maj = cdevsw_lookup_major(&utoppy_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self);
	vdevgone(maj, mn, mn, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);

	return 0;
}
      
/* One step of the protocol CRC16; implemented by <lib/libkern/crc16.h>. */
#define	UTOPPY_CRC16(ccrc,b)	crc16_byte((ccrc), (b)) /* from crc16.h */

/*
 * usbd_status -> errno translation, indexed by the status code.
 * The order must track the usbd_status enumeration.
 */
static const int utoppy_usbdstatus_lookup[] = {
	0,		/* USBD_NORMAL_COMPLETION */
	EINPROGRESS,	/* USBD_IN_PROGRESS */
	EALREADY,	/* USBD_PENDING_REQUESTS */
	EAGAIN,		/* USBD_NOT_STARTED */
	EINVAL,		/* USBD_INVAL */
	ENOMEM,		/* USBD_NOMEM */
	ECONNRESET,	/* USBD_CANCELLED */
	EFAULT,		/* USBD_BAD_ADDRESS */
	EBUSY,		/* USBD_IN_USE */
	EADDRNOTAVAIL,	/* USBD_NO_ADDR */
	ENETDOWN,	/* USBD_SET_ADDR_FAILED */
	EIO,		/* USBD_NO_POWER */
	EMLINK,		/* USBD_TOO_DEEP */
	EIO,		/* USBD_IOERROR */
	ENXIO,		/* USBD_NOT_CONFIGURED */
	ETIMEDOUT,	/* USBD_TIMEOUT */
	EBADMSG,	/* USBD_SHORT_XFER */
	EHOSTDOWN,	/* USBD_STALLED */
	EINTR		/* USBD_INTERRUPTED */
};
      
      static __inline int
      utoppy_usbd_status2errno(usbd_status err)
      {
      
              if (err >= USBD_ERROR_MAX)
                      return EFAULT;
              return utoppy_usbdstatus_lookup[err];
      }
      
      #ifdef UTOPPY_DEBUG
      static const char *
      utoppy_state_string(enum utoppy_state state)
      {
              const char *str;
      
              switch (state) {
              case UTOPPY_STATE_CLOSED:
                      str = "CLOSED";
                      break;
              case UTOPPY_STATE_OPENING:
                      str = "OPENING";
                      break;
              case UTOPPY_STATE_IDLE:
                      str = "IDLE";
                      break;
              case UTOPPY_STATE_READDIR:
                      str = "READ DIRECTORY";
                      break;
              case UTOPPY_STATE_READFILE:
                      str = "READ FILE";
                      break;
              case UTOPPY_STATE_WRITEFILE:
                      str = "WRITE FILE";
                      break;
              default:
                      str = "INVALID!";
                      break;
              }
      
              return str;
      }
      
static void
utoppy_dump_packet(const void *b, size_t len)
{
	const uint8_t *buf = b, *l;
	uint8_t c;
	size_t i, j;

	if (len == 0)
		return;

	/* Dump at most the first 256 bytes. */
	len = uimin(len, 256);

	printf("00: ");

	/* 'buf' walks the hex output; 'l' trails it for the ASCII column. */
	for (i = 0, l = buf; i < len; i++) {
		printf("%02x ", *buf++);

		if ((i % 16) == 15) {
			/* End of a 16-byte row: emit it as ASCII. */
			for (j = 0; j < 16; j++) {
				c = *l++;
				if (c < ' ' || c > 0x7e)
					c = '.';
				printf("%c", c);
			}

			printf("\n");
			l = buf;

			/* Row label is the hex offset of the next byte. */
			if ((i + 1) < len)
				printf("%02x: ", (u_int)i + 1);
		}
	}

	/* Pad a final partial row so the ASCII column lines up. */
	while ((i++ % 16) != 0)
		printf("   ");

	/* 'l' lagging 'buf' means a partial row remains to be printed. */
	if (l < buf) {
		while (l < buf) {
			c = *l++;
			if (c < ' ' || c > 0x7e)
				c = '.';
			printf("%c", c);
		}

		printf("\n");
	}
}
      #endif
      
      static usbd_status
      utoppy_bulk_transfer(struct usbd_xfer *xfer, struct usbd_pipe *pipe,
          uint16_t flags, uint32_t timeout, void *buf, uint32_t *size)
      {
              usbd_status err;
      
              usbd_setup_xfer(xfer, 0, buf, *size, flags, timeout, NULL);
      
              err = usbd_sync_transfer_sig(xfer);
      
              usbd_get_xfer_status(xfer, NULL, NULL, size, NULL);
              return err;
      }
      
static int
utoppy_send_packet(struct utoppy_softc *sc, uint16_t cmd, uint32_t timeout)
{
	struct utoppy_header *h;
	usbd_status err;
	uint32_t len;
	uint16_t dlen, crc;
	uint8_t *data, *e, t1, t2;

	h = sc->sc_out_data;

	DPRINTF(UTOPPY_DBG_SEND_PACKET, ("%s: utoppy_send_packet: cmd 0x%04x, "
	    "len %d\n", device_xname(sc->sc_dev), (u_int)cmd, h->h_len));

	/* At this point, h_len holds just the payload length. */
	dlen = h->h_len;
	len = dlen + UTOPPY_HEADER_SIZE;

	/* Pad the transfer to an even number of bytes... */
	if (len & 1)
		len++;
	/*
	 * ...and avoid a total that is an exact multiple of 64.
	 * NOTE(review): presumably this sidesteps a device quirk with
	 * max-packet-aligned transfers — confirm against the protocol.
	 */
	if ((len % 64) == 0)
		len += 2;

	if (len >= UTOPPY_BSIZE) {
		DPRINTF(UTOPPY_DBG_SEND_PACKET, ("%s: utoppy_send_packet: "
		    "packet too big (%d)\n", device_xname(sc->sc_dev),
		    (int)len));
		return EINVAL;
	}

	/* On the wire, h_len is the total (header + payload) length. */
	h->h_len = htole16(dlen + UTOPPY_HEADER_SIZE);
	h->h_cmd2 = 0;
	h->h_cmd = htole16(cmd);

	/* The command word is part of the CRC */
	crc = UTOPPY_CRC16(0,   0);
	crc = UTOPPY_CRC16(crc, 0);
	crc = UTOPPY_CRC16(crc, cmd >> 8);
	crc = UTOPPY_CRC16(crc, cmd);

	/*
	 * If there is data following the header, calculate the CRC and
	 * byte-swap as we go.
	 */
	if (dlen) {
		data = h->h_data;
		e = data + (dlen & ~1);

		/*
		 * The CRC is computed over bytes in their original order
		 * while each 16-bit pair is swapped in place for the wire.
		 */
		do {
			t1 = data[0];
			t2 = data[1];
			crc = UTOPPY_CRC16(crc, t1);
			crc = UTOPPY_CRC16(crc, t2);
			*data++ = t2;
			*data++ = t1;
		} while (data < e);

		/*
		 * An odd trailing byte moves to the other half of its
		 * 16-bit slot (the padding byte reserved above).
		 */
		if (dlen & 1) {
			t1 = data[0];
			crc = UTOPPY_CRC16(crc, t1);
			data[1] = t1;
		}
	}

	h->h_crc = htole16(crc);
	data = sc->sc_out_data;

	DPRINTF(UTOPPY_DBG_SEND_PACKET, ("%s: utoppy_send_packet: total len "
	    "%d...\n", device_xname(sc->sc_dev), (int)len));
	DDUMP_PACKET(data, len);

	/* Push the packet out in UTOPPY_FRAG_SIZE chunks. */
	do {
		uint32_t thislen;

		thislen = uimin(len, UTOPPY_FRAG_SIZE);

		memcpy(sc->sc_out_buf, data, thislen);

		err = utoppy_bulk_transfer(sc->sc_out_xfer, sc->sc_out_pipe,
		    0, timeout, sc->sc_out_buf, &thislen);

		if (thislen != uimin(len, UTOPPY_FRAG_SIZE)) {
			DPRINTF(UTOPPY_DBG_SEND_PACKET, ("%s: "
			    "utoppy_send_packet: sent %ld, err %d\n",
			    device_xname(sc->sc_dev), (u_long)thislen, err));
		}

		if (err == 0) {
			len -= thislen;
			data += thislen;
		}
	} while (err == 0 && len);

	DPRINTF(UTOPPY_DBG_SEND_PACKET, ("%s: utoppy_send_packet: "
	    "usbd_bulk_transfer() returned %d.\n",
	    device_xname(sc->sc_dev),err));

	return err ? utoppy_usbd_status2errno(err) : 0;
}
      
static int
utoppy_recv_packet(struct utoppy_softc *sc, uint16_t *respp, uint32_t timeout)
{
	struct utoppy_header *h;
	usbd_status err;
	uint32_t len, thislen, requested, bytesleft;
	uint16_t crc;
	uint8_t *data, *e, t1, t2;

	data = sc->sc_in_data;
	len = 0;
	bytesleft = UTOPPY_BSIZE;

	DPRINTF(UTOPPY_DBG_RECV_PACKET, ("%s: utoppy_recv_packet: ...\n",
	    device_xname(sc->sc_dev)));

	/*
	 * Accumulate fragments until the device sends a short transfer
	 * or the reassembly buffer fills up.
	 */
	do {
		requested = thislen = uimin(bytesleft, UTOPPY_FRAG_SIZE);

		err = utoppy_bulk_transfer(sc->sc_in_xfer, sc->sc_in_pipe,
		    USBD_SHORT_XFER_OK, timeout, sc->sc_in_buf,
		    &thislen);

		DPRINTF(UTOPPY_DBG_RECV_PACKET, ("%s: utoppy_recv_packet: "
		    "usbd_bulk_transfer() returned %d, thislen %d, data %p\n",
		    device_xname(sc->sc_dev), err, (u_int)thislen, data));

		if (err == 0) {
			memcpy(data, sc->sc_in_buf, thislen);
			DDUMP_PACKET(data, thislen);
			len += thislen;
			bytesleft -= thislen;
			data += thislen;
		}
	} while (err == 0 && bytesleft && thislen == requested);

	if (err)
		return utoppy_usbd_status2errno(err);

	h = sc->sc_in_data;

	DPRINTF(UTOPPY_DBG_RECV_PACKET, ("%s: utoppy_recv_packet: received %d "
	    "bytes in total to %p\n", device_xname(sc->sc_dev), (u_int)len, h));
	DDUMP_PACKET(h, len);

	/* Sanity-check the claimed length against what actually arrived. */
	if (len < UTOPPY_HEADER_SIZE || len < (uint32_t)le16toh(h->h_len)) {
		DPRINTF(UTOPPY_DBG_RECV_PACKET, ("%s: utoppy_recv_packet: bad "
		    " length (len %d, h_len %d)\n", device_xname(sc->sc_dev),
		    (int)len, le16toh(h->h_len)));
		return EIO;
	}

	/* Convert the header to host byte order in place. */
	len = h->h_len = le16toh(h->h_len);
	h->h_crc = le16toh(h->h_crc);
	*respp = h->h_cmd = le16toh(h->h_cmd);
	h->h_cmd2 = le16toh(h->h_cmd2);

	/*
	 * To maximise data throughput when transferring files, acknowledge
	 * data blocks as soon as we receive them. If we detect an error
	 * later on, we can always cancel.
	 */
	if (*respp == UTOPPY_RESP_FILE_DATA) {
		DPRINTF(UTOPPY_DBG_RECV_PACKET, ("%s: utoppy_recv_packet: "
		    "ACKing file data\n", device_xname(sc->sc_dev)));

		UTOPPY_OUT_INIT(sc);
		err = utoppy_send_packet(sc, UTOPPY_CMD_ACK,
		    UTOPPY_SHORT_TIMEOUT);
		if (err) {
			DPRINTF(UTOPPY_DBG_RECV_PACKET, ("%s: "
			    "utoppy_recv_packet: failed to ACK file data: %d\n",
			    device_xname(sc->sc_dev), err));
			return err;
		}
	}

	/* The command word is part of the CRC */
	crc = UTOPPY_CRC16(0,   h->h_cmd2 >> 8);
	crc = UTOPPY_CRC16(crc, h->h_cmd2);
	crc = UTOPPY_CRC16(crc, h->h_cmd >> 8);
	crc = UTOPPY_CRC16(crc, h->h_cmd);

	/*
	 * Extract any payload, byte-swapping and calculating the CRC16
	 * as we go.
	 */
	if (len > UTOPPY_HEADER_SIZE) {
		data = h->h_data;
		e = data + ((len & ~1) - UTOPPY_HEADER_SIZE);

		/* CRC is over wire order; pairs are swapped back in place. */
		while (data < e) {
			t1 = data[0];
			t2 = data[1];
			crc = UTOPPY_CRC16(crc, t2);
			crc = UTOPPY_CRC16(crc, t1);
			*data++ = t2;
			*data++ = t1;
		}

		/*
		 * An odd final byte sits in the other half of its 16-bit
		 * slot (mirrors the sender's padding scheme).
		 */
		if (len & 1) {
			t1 = data[1];
			crc = UTOPPY_CRC16(crc, t1);
			*data = t1;
		}
	}

	/* Expose the payload to the utoppy_get_*() accessors. */
	sc->sc_in_len = (size_t) len - UTOPPY_HEADER_SIZE;
	sc->sc_in_offset = 0;

	DPRINTF(UTOPPY_DBG_RECV_PACKET, ("%s: utoppy_recv_packet: len %d, "
	    "crc 0x%04x, hdrcrc 0x%04x\n", device_xname(sc->sc_dev),
	    (int)len, crc, h->h_crc));
	DDUMP_PACKET(h, len);

	return (crc == h->h_crc) ? 0 : EBADMSG;
}
      
      static __inline void *
      utoppy_current_ptr(void *b)
      {
              struct utoppy_header *h = b;
      
              return &h->h_data[h->h_len];
      }
      
      static __inline void
      utoppy_advance_ptr(void *b, size_t len)
      {
              struct utoppy_header *h = b;
      
              h->h_len += len;
      }
      
      static __inline void
      utoppy_add_8(struct utoppy_softc *sc, uint8_t v)
      {
              struct utoppy_header *h = sc->sc_out_data;
              uint8_t *p;
      
              p = utoppy_current_ptr(h);
              *p = v;
              utoppy_advance_ptr(h, sizeof(v));
      }
      
      static __inline void
      utoppy_add_16(struct utoppy_softc *sc, uint16_t v)
      {
              struct utoppy_header *h = sc->sc_out_data;
              uint8_t *p;
      
              p = utoppy_current_ptr(h);
              *p++ = (uint8_t)(v >> 8);
              *p = (uint8_t)v;
              utoppy_advance_ptr(h, sizeof(v));
      }
      
      static __inline void
      utoppy_add_32(struct utoppy_softc *sc, uint32_t v)
      {
              struct utoppy_header *h = sc->sc_out_data;
              uint8_t *p;
      
              p = utoppy_current_ptr(h);
              *p++ = (uint8_t)(v >> 24);
              *p++ = (uint8_t)(v >> 16);
              *p++ = (uint8_t)(v >> 8);
              *p = (uint8_t)v;
              utoppy_advance_ptr(h, sizeof(v));
      }
      
      static __inline void
      utoppy_add_64(struct utoppy_softc *sc, uint64_t v)
      {
              struct utoppy_header *h = sc->sc_out_data;
              uint8_t *p;
      
              p = utoppy_current_ptr(h);
              *p++ = (uint8_t)(v >> 56);
              *p++ = (uint8_t)(v >> 48);
              *p++ = (uint8_t)(v >> 40);
              *p++ = (uint8_t)(v >> 32);
              *p++ = (uint8_t)(v >> 24);
              *p++ = (uint8_t)(v >> 16);
              *p++ = (uint8_t)(v >> 8);
              *p = (uint8_t)v;
              utoppy_advance_ptr(h, sizeof(v));
      }
      
      static __inline void
      utoppy_add_string(struct utoppy_softc *sc, const char *str, size_t len)
      {
              struct utoppy_header *h = sc->sc_out_data;
              char *p;
      
              p = utoppy_current_ptr(h);
              memset(p, 0, len);
              strncpy(p, str, len);
              utoppy_advance_ptr(h, len);
      }
      
static int
utoppy_add_path(struct utoppy_softc *sc, const char *path, int putlen)
{
	struct utoppy_header *h = sc->sc_out_data;
	uint8_t *p, *str, *s;
	size_t len;
	int err;

	p = utoppy_current_ptr(h);

	/* Leave room for an optional 16-bit length prefix. */
	str = putlen ? (p + sizeof(uint16_t)) : p;

	/* Copy the NUL-terminated path in from userspace. */
	err = copyinstr(path, str, UTOPPY_MAX_FILENAME_LEN, &len);

	DPRINTF(UTOPPY_DBG_ADDPATH, ("utoppy_add_path: err %d, len %d\n",
	    err, (int)len));

	if (err)
		return err;

	/* 'len' includes the NUL, so anything under 2 is an empty path. */
	if (len < 2)
		return EINVAL;

	/*
	 * copyinstr(9) has already copied the terminating NUL character,
	 * but we append another one in case we have to pad the length
	 * later on.
	 */
	str[len] = '\0';

	/*
	 * The Toppy uses backslash as the directory separator, so convert
	 * all forward slashes.
	 */
	for (s = &str[len - 2]; s >= str; s--)
		if (*s == '/')
			*s = '\\';

	/* Keep the overall payload length even. */
	if ((len + h->h_len) & 1)
		len++;

	if (putlen)
		utoppy_add_16(sc, len);

	utoppy_advance_ptr(h, len);

	DPRINTF(UTOPPY_DBG_ADDPATH, ("utoppy_add_path: final len %d\n",
	    (u_int)len));

	return 0;
}
      
      static __inline int
      utoppy_get_8(struct utoppy_softc *sc, uint8_t *vp)
      {
              uint8_t *p;
      
              if (sc->sc_in_len < sizeof(*vp))
                      return 1;
      
              p = UTOPPY_IN_DATA(sc);
              *vp = *p;
              sc->sc_in_offset += sizeof(*vp);
              sc->sc_in_len -= sizeof(*vp);
              return 0;
      }
      
      static __inline int
      utoppy_get_16(struct utoppy_softc *sc, uint16_t *vp)
      {
              uint16_t v;
              uint8_t *p;
      
              if (sc->sc_in_len < sizeof(v))
                      return 1;
      
              p = UTOPPY_IN_DATA(sc);
              v = *p++;
              v = (v << 8) | *p;
              *vp = v;
              sc->sc_in_offset += sizeof(v);
              sc->sc_in_len -= sizeof(v);
              return 0;
      }
      
      static __inline int
      utoppy_get_32(struct utoppy_softc *sc, uint32_t *vp)
      {
              uint32_t v;
              uint8_t *p;
      
              if (sc->sc_in_len < sizeof(v))
                      return 1;
      
              p = UTOPPY_IN_DATA(sc);
              v = *p++;
              v = (v << 8) | *p++;
              v = (v << 8) | *p++;
              v = (v << 8) | *p;
              *vp = v;
              sc->sc_in_offset += sizeof(v);
              sc->sc_in_len -= sizeof(v);
              return 0;
      }
      
      static __inline int
      utoppy_get_64(struct utoppy_softc *sc, uint64_t *vp)
      {
              uint64_t v;
              uint8_t *p;
      
              if (sc->sc_in_len < sizeof(v))
                      return 1;
      
              p = UTOPPY_IN_DATA(sc);
              v = *p++;
              v = (v << 8) | *p++;
              v = (v << 8) | *p++;
              v = (v << 8) | *p++;
              v = (v << 8) | *p++;
              v = (v << 8) | *p++;
              v = (v << 8) | *p++;
              v = (v << 8) | *p;
              *vp = v;
              sc->sc_in_offset += sizeof(v);
              sc->sc_in_len -= sizeof(v);
              return 0;
      }
      
      static __inline int
      utoppy_get_string(struct utoppy_softc *sc, char *str, size_t len)
      {
              char *p;
      
              if (sc->sc_in_len < len)
                      return 1;
      
              memset(str, 0, len);
              p = UTOPPY_IN_DATA(sc);
              strncpy(str, p, len);
              sc->sc_in_offset += len;
              sc->sc_in_len -= len;
              return 0;
      }
      
      static int
      utoppy_command(struct utoppy_softc *sc, uint16_t cmd, int timeout,
          uint16_t *presp)
      {
              int err;
      
              err = utoppy_send_packet(sc, cmd, timeout);
              if (err)
                      return err;
      
              err = utoppy_recv_packet(sc, presp, timeout);
              if (err == EBADMSG) {
                      UTOPPY_OUT_INIT(sc);
                      utoppy_send_packet(sc, UTOPPY_RESP_ERROR, timeout);
              }
      
              return err;
      }
      
      static int
      utoppy_timestamp_decode(struct utoppy_softc *sc, time_t *tp)
      {
              uint16_t mjd;
              uint8_t hour, minute, sec;
              uint32_t rv;
      
              if (utoppy_get_16(sc, &mjd) || utoppy_get_8(sc, &hour) ||
                  utoppy_get_8(sc, &minute) || utoppy_get_8(sc, &sec))
                      return 1;
      
              if (mjd == 0xffffu && hour == 0xffu && minute == 0xffu && sec == 0xffu){
                      *tp = 0;
                      return 0;
              }
      
              rv = (mjd < UTOPPY_MJD_1970) ? UTOPPY_MJD_1970 : (uint32_t) mjd;
      
              /* Calculate seconds since 1970 */
              rv = (rv - UTOPPY_MJD_1970) * 60 * 60 * 24;
      
              /* Add in the hours, minutes, and seconds */
              rv += (uint32_t)hour * 60 * 60;
              rv += (uint32_t)minute * 60;
              rv += sec;
              *tp = (time_t)rv;
      
              return 0;
      }
      
      static void
      utoppy_timestamp_encode(struct utoppy_softc *sc, time_t t)
      {
              u_int mjd, hour, minute;
      
              mjd = t / (60 * 60 * 24);
              t -= mjd * 60 * 60 * 24;
      
              hour = t / (60 * 60);
              t -= hour * 60 * 60;
      
              minute = t / 60;
              t -= minute * 60;
      
              utoppy_add_16(sc, mjd + UTOPPY_MJD_1970);
              utoppy_add_8(sc, hour);
              utoppy_add_8(sc, minute);
              utoppy_add_8(sc, t);
      }
      
      static int
      utoppy_turbo_mode(struct utoppy_softc *sc, int state)
      {
              uint16_t r;
              int err;
      
              UTOPPY_OUT_INIT(sc);
              utoppy_add_32(sc, state);
      
              err = utoppy_command(sc, UTOPPY_CMD_TURBO, UTOPPY_SHORT_TIMEOUT, &r);
              if (err)
                      return err;
      
              return (r == UTOPPY_RESP_SUCCESS) ? 0 : EIO;
      }
      
      static int
      utoppy_check_ready(struct utoppy_softc *sc)
      {
              uint16_t r;
              int err;
      
              UTOPPY_OUT_INIT(sc);
      
              err = utoppy_command(sc, UTOPPY_CMD_READY, UTOPPY_LONG_TIMEOUT, &r);
              if (err)
                      return err;
      
              return (r == UTOPPY_RESP_SUCCESS) ? 0 : EIO;
      }
      
      static int
      utoppy_cancel(struct utoppy_softc *sc)
      {
              uint16_t r;
              int err, i;
      
              /*
               * Issue the cancel command serveral times. the Toppy doesn't
               * always respond to the first.
               */
              for (i = 0; i < 3; i++) {
                      UTOPPY_OUT_INIT(sc);
                      err = utoppy_command(sc, UTOPPY_CMD_CANCEL,
                          UTOPPY_SHORT_TIMEOUT, &r);
                      if (err == 0 && r == UTOPPY_RESP_SUCCESS)
                              break;
                      err = ETIMEDOUT;
              }
      
              if (err)
                      return err;
      
              /*
               * Make sure turbo mode is off, otherwise the Toppy will not
               * respond to remote control input.
               */
              (void) utoppy_turbo_mode(sc, 0);
      
              sc->sc_state = UTOPPY_STATE_IDLE;
              return 0;
      }
      
      static int
      utoppy_stats(struct utoppy_softc *sc, struct utoppy_stats *us)
      {
              uint32_t hsize, hfree;
              uint16_t r;
              int err;
      
              UTOPPY_OUT_INIT(sc);
              err = utoppy_command(sc, UTOPPY_CMD_STATS, UTOPPY_LONG_TIMEOUT, &r);
              if (err)
                      return err;
      
              if (r != UTOPPY_RESP_STATS_DATA)
                      return EIO;
      
              if (utoppy_get_32(sc, &hsize) || utoppy_get_32(sc, &hfree))
                      return EIO;
      
              us->us_hdd_size = hsize;
              us->us_hdd_size *= 1024;
              us->us_hdd_free = hfree;
              us->us_hdd_free *= 1024;
      
              return 0;
      }
      
/*
 * Fetch and classify the next packet of a READDIR exchange.
 *
 * On UTOPPY_RESP_READDIR_DATA the packet is ACKed, the input buffer
 * is rewound (sc_in_offset = 0) so the caller can decode the entries,
 * and the state machine stays in (or enters) READDIR.  On
 * UTOPPY_RESP_READDIR_END the packet is ACKed and the state machine
 * returns to IDLE with no input pending.  Any other response drops
 * back to IDLE and returns EIO.
 *
 * Returns 0 on success, otherwise an errno value; transport errors
 * abort the whole exchange via utoppy_cancel().
 */
static int
utoppy_readdir_next(struct utoppy_softc *sc)
{
	uint16_t resp;
	int err;

	DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_next: running...\n",
	    device_xname(sc->sc_dev)));

	/*
	 * Fetch the next READDIR response
	 */
	err = utoppy_recv_packet(sc, &resp, UTOPPY_LONG_TIMEOUT);
	if (err) {
		DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_next: "
		    "utoppy_recv_packet() returned %d\n",
		    device_xname(sc->sc_dev), err));
		if (err == EBADMSG) {
			/* Unparseable reply: tell the device so. */
			UTOPPY_OUT_INIT(sc);
			utoppy_send_packet(sc, UTOPPY_RESP_ERROR,
			    UTOPPY_LONG_TIMEOUT);
		}
		utoppy_cancel(sc);
		return err;
	}

	DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_next: "
	    "utoppy_recv_packet() returned %d, len %ld\n",
	    device_xname(sc->sc_dev), err, (u_long)sc->sc_in_len));

	switch (resp) {
	case UTOPPY_RESP_READDIR_DATA:
		DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_next: "
		    "UTOPPY_RESP_READDIR_DATA\n", device_xname(sc->sc_dev)));

		/* ACK the data packet so the device sends the next one. */
		UTOPPY_OUT_INIT(sc);
		err = utoppy_send_packet(sc, UTOPPY_CMD_ACK,
		    UTOPPY_LONG_TIMEOUT);
		if (err) {
			DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_next: "
			    "utoppy_send_packet(ACK) returned %d\n",
			    device_xname(sc->sc_dev), err));
			utoppy_cancel(sc);
			return err;
		}
		/* Rewind the input buffer ready for dirent decoding. */
		sc->sc_state = UTOPPY_STATE_READDIR;
		sc->sc_in_offset = 0;
		break;

	case UTOPPY_RESP_READDIR_END:
		DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_next: "
		    "UTOPPY_RESP_READDIR_END\n", device_xname(sc->sc_dev)));

		/* End of listing: ACK (best effort) and go idle. */
		UTOPPY_OUT_INIT(sc);
		utoppy_send_packet(sc, UTOPPY_CMD_ACK, UTOPPY_SHORT_TIMEOUT);
		sc->sc_state = UTOPPY_STATE_IDLE;
		sc->sc_in_len = 0;
		break;

	default:
		DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_next: "
		    "bad response: %#x\n", device_xname(sc->sc_dev), resp));
		sc->sc_state = UTOPPY_STATE_IDLE;
		sc->sc_in_len = 0;
		return EIO;
	}

	return 0;
}
      
/*
 * Decode one directory entry from the current input buffer into *ud.
 *
 * Wire format, in order: timestamp (see utoppy_timestamp_decode()),
 * a one-byte file type, a 64-bit size, a fixed-length NUL-padded
 * filename, and 32-bit attributes.
 *
 * Returns 1 if an entry was decoded, 0 if the buffer held no complete
 * entry (caller should fetch the next READDIR packet).
 */
static size_t
utoppy_readdir_decode(struct utoppy_softc *sc, struct utoppy_dirent *ud)
{
	uint8_t ftype;

	DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_decode: bytes left"
	    " %d\n", device_xname(sc->sc_dev), (int)sc->sc_in_len));

	if (utoppy_timestamp_decode(sc, &ud->ud_mtime) ||
	    utoppy_get_8(sc, &ftype) || utoppy_get_64(sc, &ud->ud_size) ||
	    utoppy_get_string(sc, ud->ud_path, UTOPPY_MAX_FILENAME_LEN + 1) ||
	    utoppy_get_32(sc, &ud->ud_attributes)) {
		DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_decode: no "
		    "more to decode\n", device_xname(sc->sc_dev)));
		return 0;
	}

	/* Map the device's type byte onto our dirent type codes. */
	switch (ftype) {
	case UTOPPY_FTYPE_DIR:
		ud->ud_type = UTOPPY_DIRENT_DIRECTORY;
		break;
	case UTOPPY_FTYPE_FILE:
		ud->ud_type = UTOPPY_DIRENT_FILE;
		break;
	default:
		ud->ud_type = UTOPPY_DIRENT_UNKNOWN;
		break;
	}

	DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppy_readdir_decode: %s '%s', "
	    "size %lld, time 0x%08lx, attr 0x%08x\n", device_xname(sc->sc_dev),
	    (ftype == UTOPPY_FTYPE_DIR) ? "DIR" :
	    ((ftype == UTOPPY_FTYPE_FILE) ? "FILE" : "UNKNOWN"), ud->ud_path,
	    ud->ud_size, (u_long)ud->ud_mtime, ud->ud_attributes));

	return 1;
}
      
/*
 * Fetch and classify the next packet during a READFILE exchange.
 *
 * FILE_HEADER packets are ACKed and their payload discarded.
 * FILE_DATA packets start with a 64-bit file offset; the remainder of
 * the payload is left in the input buffer for utoppyread() to copy
 * out.  FILE_END is ACKed and then, like SUCCESS, ends the transfer:
 * back to IDLE with turbo mode switched off.  Any other response
 * cancels the exchange and returns EIO.
 *
 * Returns 0 on success, otherwise an errno value.
 */
static int
utoppy_readfile_next(struct utoppy_softc *sc)
{
	uint64_t off;
	uint16_t resp;
	int err;

	err = utoppy_recv_packet(sc, &resp, UTOPPY_LONG_TIMEOUT);
	if (err) {
		DPRINTF(UTOPPY_DBG_READ, ("%s: utoppy_readfile_next: "
		    "utoppy_recv_packet() returned %d\n",
		    device_xname(sc->sc_dev), err));
		utoppy_cancel(sc);
		return err;
	}

	switch (resp) {
	case UTOPPY_RESP_FILE_HEADER:
		/* ACK it */
		UTOPPY_OUT_INIT(sc);
		err = utoppy_send_packet(sc, UTOPPY_CMD_ACK,
		    UTOPPY_LONG_TIMEOUT);
		if (err) {
			DPRINTF(UTOPPY_DBG_READ, ("%s: utoppy_readfile_next: "
			    "utoppy_send_packet(UTOPPY_CMD_ACK) returned %d\n",
			    device_xname(sc->sc_dev), err));
			utoppy_cancel(sc);
			return err;
		}

		/* Discard the header payload; callers only want data. */
		sc->sc_in_len = 0;
		DPRINTF(UTOPPY_DBG_READ, ("%s: utoppy_readfile_next: "
		    "FILE_HEADER done\n", device_xname(sc->sc_dev)));
		break;

	case UTOPPY_RESP_FILE_DATA:
		/* Already ACK'd */
		if (utoppy_get_64(sc, &off)) {
			DPRINTF(UTOPPY_DBG_READ, ("%s: utoppy_readfile_next: "
			    "UTOPPY_RESP_FILE_DATA did not provide offset\n",
			    device_xname(sc->sc_dev)));
			utoppy_cancel(sc);
			return EBADMSG;
		}

		DPRINTF(UTOPPY_DBG_READ, ("%s: utoppy_readfile_next: "
		    "UTOPPY_RESP_FILE_DATA: offset %lld, bytes left %ld\n",
		    device_xname(sc->sc_dev), off, (u_long)sc->sc_in_len));
		break;

	case UTOPPY_RESP_FILE_END:
		DPRINTF(UTOPPY_DBG_READ, ("%s: utoppy_readfile_next: "
		    "UTOPPY_RESP_FILE_END: sending ACK\n",
		    device_xname(sc->sc_dev)));
		UTOPPY_OUT_INIT(sc);
		utoppy_send_packet(sc, UTOPPY_CMD_ACK, UTOPPY_SHORT_TIMEOUT);
		/*FALLTHROUGH*/

	case UTOPPY_RESP_SUCCESS:
		/* Transfer complete: idle the state machine, turbo off. */
		sc->sc_state = UTOPPY_STATE_IDLE;
		(void) utoppy_turbo_mode(sc, 0);
		DPRINTF(UTOPPY_DBG_READ, ("%s: utoppy_readfile_next: all "
		    "done\n", device_xname(sc->sc_dev)));
		break;

	case UTOPPY_RESP_ERROR:
	default:
		DPRINTF(UTOPPY_DBG_READ, ("%s: utoppy_readfile_next: bad "
		    "response code 0x%0x\n", device_xname(sc->sc_dev), resp));
		utoppy_cancel(sc);
		return EIO;
	}

	return 0;
}
      
      static int
      utoppyopen(dev_t dev, int flag, int mode,
          struct lwp *l)
      {
              struct utoppy_softc *sc;
              int error = 0;
      
    1         sc = device_lookup_private(&utoppy_cd, UTOPPYUNIT(dev));
              if (sc == NULL)
                      return ENXIO;
      
              if (sc == NULL || sc->sc_iface == NULL || sc->sc_dying)
                      return ENXIO;
      
              if (sc->sc_state != UTOPPY_STATE_CLOSED) {
                      DPRINTF(UTOPPY_DBG_OPEN, ("%s: utoppyopen: already open\n",
                          device_xname(sc->sc_dev)));
                      return EBUSY;
              }
      
              DPRINTF(UTOPPY_DBG_OPEN, ("%s: utoppyopen: opening...\n",
                  device_xname(sc->sc_dev)));
      
              sc->sc_refcnt++;
              sc->sc_state = UTOPPY_STATE_OPENING;
              sc->sc_turbo_mode = 0;
              sc->sc_out_data = kmem_alloc(UTOPPY_BSIZE + 1, KM_SLEEP);
              sc->sc_in_data = kmem_alloc(UTOPPY_BSIZE + 1, KM_SLEEP);
      
              if ((error = utoppy_cancel(sc)) != 0)
                      goto error;
      
              if ((error = utoppy_check_ready(sc)) != 0) {
                      DPRINTF(UTOPPY_DBG_OPEN, ("%s: utoppyopen: utoppy_check_ready()"
                          " returned %d\n", device_xname(sc->sc_dev), error));
              }
      
       error:
              sc->sc_state = error ? UTOPPY_STATE_CLOSED : UTOPPY_STATE_IDLE;
      
              DPRINTF(UTOPPY_DBG_OPEN, ("%s: utoppyopen: done. error %d, new state "
                  "'%s'\n", device_xname(sc->sc_dev), error,
                  utoppy_state_string(sc->sc_state)));
      
    1         if (--sc->sc_refcnt < 0)
                      usb_detach_wakeupold(sc->sc_dev);
      
              return error;
      }
      
/*
 * Close the utoppy character device.
 *
 * Cancels any in-progress command, aborts both bulk pipes and frees
 * the staging buffers allocated by utoppyopen(), then marks the state
 * machine CLOSED.  Always returns 0.
 *
 * NOTE(review): the device_lookup_private() result is used without a
 * NULL check, unlike utoppyopen() -- presumably close can only be
 * reached after a successful open; confirm.
 */
static int
utoppyclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct utoppy_softc *sc;
	usbd_status err;

	sc = device_lookup_private(&utoppy_cd, UTOPPYUNIT(dev));

	DPRINTF(UTOPPY_DBG_CLOSE, ("%s: utoppyclose: closing...\n",
	    device_xname(sc->sc_dev)));

	if (sc->sc_state < UTOPPY_STATE_IDLE) {
		/* We are being forced to close before the open completed. */
		DPRINTF(UTOPPY_DBG_CLOSE, ("%s: utoppyclose: not properly "
		    "open: %s\n", device_xname(sc->sc_dev),
		    utoppy_state_string(sc->sc_state)));
		return 0;
	}

	/* Only issue a cancel if the output buffer still exists. */
	if (sc->sc_out_data)
		(void) utoppy_cancel(sc);

	if (sc->sc_out_pipe != NULL) {
		if ((err = usbd_abort_pipe(sc->sc_out_pipe)) != 0)
			printf("usbd_abort_pipe(OUT) returned %d\n", err);
		sc->sc_out_pipe = NULL;
	}

	if (sc->sc_in_pipe != NULL) {
		if ((err = usbd_abort_pipe(sc->sc_in_pipe)) != 0)
			printf("usbd_abort_pipe(IN) returned %d\n", err);
		sc->sc_in_pipe = NULL;
	}

	/* Release the staging buffers allocated in utoppyopen(). */
	if (sc->sc_out_data) {
		kmem_free(sc->sc_out_data, UTOPPY_BSIZE + 1);
		sc->sc_out_data = NULL;
	}

	if (sc->sc_in_data) {
		kmem_free(sc->sc_in_data, UTOPPY_BSIZE + 1);
		sc->sc_in_data = NULL;
	}

	sc->sc_state = UTOPPY_STATE_CLOSED;

	DPRINTF(UTOPPY_DBG_CLOSE, ("%s: utoppyclose: done.\n",
	    device_xname(sc->sc_dev)));

	return 0;
}
      
/*
 * Read entry point for the utoppy character device.
 *
 * Behaviour depends on the protocol state machine:
 *  - READDIR:  decode buffered directory entries into whole struct
 *    utoppy_dirent records and copy them out to userland, pulling
 *    further READDIR packets from the device as the buffer drains.
 *  - READFILE: stream raw file data to userland, fetching the next
 *    FILE_DATA packet whenever the input buffer empties.
 *  - IDLE:     returns 0 immediately (end of transfer).
 *  - WRITEFILE: EBUSY; anything else: EIO.
 *
 * A softc reference is held for the duration so a detach can wait for
 * us (usb_detach_wakeupold on the last drop).
 */
static int
utoppyread(dev_t dev, struct uio *uio, int flags)
{
	struct utoppy_softc *sc;
	struct utoppy_dirent ud;
	size_t len;
	int err;

	sc = device_lookup_private(&utoppy_cd, UTOPPYUNIT(dev));

	if (sc->sc_dying)
		return EIO;

	sc->sc_refcnt++;

	DPRINTF(UTOPPY_DBG_READ, ("%s: utoppyread: reading: state '%s'\n",
	    device_xname(sc->sc_dev), utoppy_state_string(sc->sc_state)));

	switch (sc->sc_state) {
	case UTOPPY_STATE_READDIR:
		err = 0;
		/* Copy out only whole dirent records. */
		while (err == 0 && uio->uio_resid >= sizeof(ud) &&
		    sc->sc_state != UTOPPY_STATE_IDLE) {
			if (utoppy_readdir_decode(sc, &ud) == 0)
				err = utoppy_readdir_next(sc);
			else
			if ((err = uiomove(&ud, sizeof(ud), uio)) != 0)
				utoppy_cancel(sc);
		}
		break;

	case UTOPPY_STATE_READFILE:
		err = 0;
		while (err == 0 && uio->uio_resid > 0 &&
		    sc->sc_state != UTOPPY_STATE_IDLE) {
			DPRINTF(UTOPPY_DBG_READ, ("%s: utoppyread: READFILE: "
			    "resid %ld, bytes_left %ld\n",
			    device_xname(sc->sc_dev), (u_long)uio->uio_resid,
			    (u_long)sc->sc_in_len));

			/* Buffer drained: fetch the next packet. */
			if (sc->sc_in_len == 0 &&
			    (err = utoppy_readfile_next(sc)) != 0) {
				DPRINTF(UTOPPY_DBG_READ, ("%s: utoppyread: "
				    "READFILE: utoppy_readfile_next returned "
				    "%d\n", device_xname(sc->sc_dev), err));
				break;
			}

			/* Copy out as much buffered data as will fit. */
			len = uimin(uio->uio_resid, sc->sc_in_len);
			if (len) {
				err = uiomove(UTOPPY_IN_DATA(sc), len, uio);
				if (err == 0) {
					sc->sc_in_offset += len;
					sc->sc_in_len -= len;
				}
			}
		}
		break;

	case UTOPPY_STATE_IDLE:
		err = 0;
		break;

	case UTOPPY_STATE_WRITEFILE:
		err = EBUSY;
		break;

	default:
		err = EIO;
		break;
	}

	DPRINTF(UTOPPY_DBG_READ, ("%s: utoppyread: done. err %d, state '%s'\n",
	    device_xname(sc->sc_dev), err, utoppy_state_string(sc->sc_state)));

	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	return err;
}
      
/*
 * Write entry point for the utoppy character device.
 *
 * Only meaningful in WRITEFILE state (set up by UTOPPYIOWRITEFILE):
 * userland data is staged into the output buffer in chunks, each
 * prefixed with the 64-bit file offset, and sent as a FILE_DATA
 * packet which the device must acknowledge with RESP_SUCCESS.  When
 * the advertised file size (sc_wr_size) has been fully transferred, a
 * FILE_END packet completes the exchange and the state machine
 * returns to IDLE.  In IDLE state writes return 0; any other state is
 * EIO.
 *
 * A softc reference is held for the duration so a detach can wait for
 * us (usb_detach_wakeupold on the last drop).
 */
static int
utoppywrite(dev_t dev, struct uio *uio, int flags)
{
	struct utoppy_softc *sc;
	uint16_t resp;
	size_t len;
	int err;

	sc = device_lookup_private(&utoppy_cd, UTOPPYUNIT(dev));

	if (sc->sc_dying)
		return EIO;

	switch(sc->sc_state) {
	case UTOPPY_STATE_WRITEFILE:
		break;

	case UTOPPY_STATE_IDLE:
		return 0;

	default:
		return EIO;
	}

	sc->sc_refcnt++;
	err = 0;

	DPRINTF(UTOPPY_DBG_WRITE, ("%s: utoppywrite: PRE-WRITEFILE: resid "
	    "%ld, wr_size %lld, wr_offset %lld\n", device_xname(sc->sc_dev),
	    (u_long)uio->uio_resid, sc->sc_wr_size, sc->sc_wr_offset));

	while (sc->sc_state == UTOPPY_STATE_WRITEFILE &&
	    (len = uimin(uio->uio_resid, sc->sc_wr_size)) != 0) {

		/* Cap the chunk so header + offset + payload fit the buffer. */
		len = uimin(len, UTOPPY_BSIZE - (UTOPPY_HEADER_SIZE +
		    sizeof(uint64_t) + 3));

		DPRINTF(UTOPPY_DBG_WRITE, ("%s: utoppywrite: uiomove(%ld)\n",
		    device_xname(sc->sc_dev), (u_long)len));

		/* Each FILE_DATA packet starts with the 64-bit file offset. */
		UTOPPY_OUT_INIT(sc);
		utoppy_add_64(sc, sc->sc_wr_offset);

		err = uiomove(utoppy_current_ptr(sc->sc_out_data), len, uio);
		if (err) {
			DPRINTF(UTOPPY_DBG_WRITE, ("%s: utoppywrite: uiomove()"
			    " returned %d\n", device_xname(sc->sc_dev), err));
			break;
		}

		utoppy_advance_ptr(sc->sc_out_data, len);

		err = utoppy_command(sc, UTOPPY_RESP_FILE_DATA,
		    UTOPPY_LONG_TIMEOUT, &resp);
		if (err) {
			DPRINTF(UTOPPY_DBG_WRITE, ("%s: utoppywrite: "
			    "utoppy_command(UTOPPY_RESP_FILE_DATA) "
			    "returned %d\n", device_xname(sc->sc_dev), err));
			break;
		}
		if (resp != UTOPPY_RESP_SUCCESS) {
			DPRINTF(UTOPPY_DBG_WRITE, ("%s: utoppywrite: "
			    "utoppy_command(UTOPPY_RESP_FILE_DATA) returned "
			    "bad response %#x\n", device_xname(sc->sc_dev),
			    resp));
			utoppy_cancel(sc);
			err = EIO;
			break;
		}

		/* Chunk accepted: account for it. */
		sc->sc_wr_offset += len;
		sc->sc_wr_size -= len;
	}

	DPRINTF(UTOPPY_DBG_WRITE, ("%s: utoppywrite: POST-WRITEFILE: resid "
	    "%ld, wr_size %lld, wr_offset %lld, err %d\n",
	    device_xname(sc->sc_dev), (u_long)uio->uio_resid, sc->sc_wr_size,
	    sc->sc_wr_offset, err));

	if (err == 0 && sc->sc_wr_size == 0) {
		/* Whole file transferred: close the exchange. */
		DPRINTF(UTOPPY_DBG_WRITE, ("%s: utoppywrite: sending "
		    "FILE_END...\n", device_xname(sc->sc_dev)));
		UTOPPY_OUT_INIT(sc);
		err = utoppy_command(sc, UTOPPY_RESP_FILE_END,
		    UTOPPY_LONG_TIMEOUT, &resp);
		if (err) {
			DPRINTF(UTOPPY_DBG_WRITE, ("%s: utoppywrite: "
			    "utoppy_command(UTOPPY_RESP_FILE_END) returned "
			    "%d\n", device_xname(sc->sc_dev), err));

			utoppy_cancel(sc);
		}

		sc->sc_state = UTOPPY_STATE_IDLE;
		DPRINTF(UTOPPY_DBG_WRITE, ("%s: utoppywrite: state %s\n",
		    device_xname(sc->sc_dev),
		    utoppy_state_string(sc->sc_state)));
	}

	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	return err;
}
      
      static int
      utoppyioctl(dev_t dev, u_long cmd, void *data, int flag,
          struct lwp *l)
      {
              struct utoppy_softc *sc;
              struct utoppy_rename *ur;
              struct utoppy_readfile *urf;
              struct utoppy_writefile *uw;
              char uwf[UTOPPY_MAX_FILENAME_LEN + 1], *uwfp;
              uint16_t resp;
              int err;
      
              sc = device_lookup_private(&utoppy_cd, UTOPPYUNIT(dev));
      
              if (sc->sc_dying)
                      return EIO;
      
              DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: cmd 0x%08lx, state '%s'\n",
                  device_xname(sc->sc_dev), cmd, utoppy_state_string(sc->sc_state)));
      
              if (sc->sc_state != UTOPPY_STATE_IDLE && cmd != UTOPPYIOCANCEL) {
                      DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: still busy.\n",
                          device_xname(sc->sc_dev)));
                      return EBUSY;
              }
      
              sc->sc_refcnt++;
      
              switch (cmd) {
              case UTOPPYIOTURBO:
                      err = 0;
                      sc->sc_turbo_mode = *((int *)data) ? 1 : 0;
                      DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: UTOPPYIOTURBO: "
                          "%s\n", device_xname(sc->sc_dev),
                          sc->sc_turbo_mode ? "On" : "Off"));
                      break;
      
              case UTOPPYIOCANCEL:
                      DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: UTOPPYIOCANCEL\n",
                          device_xname(sc->sc_dev)));
                      err = utoppy_cancel(sc);
                      break;
      
              case UTOPPYIOREBOOT:
                      DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: UTOPPYIOREBOOT\n",
                          device_xname(sc->sc_dev)));
                      UTOPPY_OUT_INIT(sc);
                      err = utoppy_command(sc, UTOPPY_CMD_RESET, UTOPPY_LONG_TIMEOUT,
                          &resp);
                      if (err)
                              break;
      
                      if (resp != UTOPPY_RESP_SUCCESS)
                              err = EIO;
                      break;
      
              case UTOPPYIOSTATS:
                      DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: UTOPPYIOSTATS\n",
                          device_xname(sc->sc_dev)));
                      err = utoppy_stats(sc, (struct utoppy_stats *)data);
                      break;
      
              case UTOPPYIORENAME:
                      DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: UTOPPYIORENAME\n",
                          device_xname(sc->sc_dev)));
                      ur = (struct utoppy_rename *)data;
                      UTOPPY_OUT_INIT(sc);
      
                      if ((err = utoppy_add_path(sc, ur->ur_old_path, 1)) != 0)
                              break;
                      if ((err = utoppy_add_path(sc, ur->ur_new_path, 1)) != 0)
                              break;
      
                      err = utoppy_command(sc, UTOPPY_CMD_RENAME,
                          UTOPPY_LONG_TIMEOUT, &resp);
                      if (err)
                              break;
      
                      if (resp != UTOPPY_RESP_SUCCESS)
                              err = EIO;
                      break;
      
              case UTOPPYIOMKDIR:
                      DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: UTOPPYIOMKDIR\n",
                          device_xname(sc->sc_dev)));
                      UTOPPY_OUT_INIT(sc);
                      err = utoppy_add_path(sc, *((const char **)data), 1);
                      if (err)
                              break;
      
                      err = utoppy_command(sc, UTOPPY_CMD_MKDIR, UTOPPY_LONG_TIMEOUT,
                          &resp);
                      if (err)
                              break;
      
                      if (resp != UTOPPY_RESP_SUCCESS)
                              err = EIO;
                      break;
      
              case UTOPPYIODELETE:
                      DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: UTOPPYIODELETE\n",
                          device_xname(sc->sc_dev)));
                      UTOPPY_OUT_INIT(sc);
                      err = utoppy_add_path(sc, *((const char **)data), 0);
                      if (err)
                              break;
      
                      err = utoppy_command(sc, UTOPPY_CMD_DELETE, UTOPPY_LONG_TIMEOUT,
                          &resp);
                      if (err)
                              break;
      
                      if (resp != UTOPPY_RESP_SUCCESS)
                              err = EIO;
                      break;
      
              case UTOPPYIOREADDIR:
                      DPRINTF(UTOPPY_DBG_IOCTL, ("%s: utoppyioctl: UTOPPYIOREADDIR\n",
                          device_xname(sc->sc_dev)));
                      UTOPPY_OUT_INIT(sc);
                      err = utoppy_add_path(sc, *((const char **)data), 0);
                      if (err) {
                              DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppyioctl: "
                                  "utoppy_add_path() returned %d\n",
                                  device_xname(sc->sc_dev), err));
                              break;
                      }
      
                      err = utoppy_send_packet(sc, UTOPPY_CMD_READDIR,
                          UTOPPY_LONG_TIMEOUT);
                      if (err != 0) {
                              DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppyioctl: "
                                  "UTOPPY_CMD_READDIR returned %d\n",
                                  device_xname(sc->sc_dev), err));
                              break;
                      }
      
                      err = utoppy_readdir_next(sc);
                      if (err) {
                              DPRINTF(UTOPPY_DBG_READDIR, ("%s: utoppyioctl: "
                                  "utoppy_readdir_next() returned %d\n",
                                  device_xname(sc->sc_dev), err));
                      }
                      break;
      
              case UTOPPYIOREADFILE:
                      urf = (struct utoppy_readfile *)data;
      
                      DPRINTF(UTOPPY_DBG_IOCTL,("%s: utoppyioctl: UTOPPYIOREADFILE "
                          "%s, offset %lld\n", device_xname(sc->sc_dev),
                          urf->ur_path, urf->ur_offset));
      
                      if ((err = utoppy_turbo_mode(sc, sc->sc_turbo_mode)) != 0)
                              break;
      
                      UTOPPY_OUT_INIT(sc);
                      utoppy_add_8(sc, UTOPPY_FILE_READ);
      
                      if ((err = utoppy_add_path(sc, urf->ur_path, 1)) != 0)
                              break;
      
                      utoppy_add_64(sc, urf->ur_offset);
      
                      sc->sc_state = UTOPPY_STATE_READFILE;
                      sc->sc_in_offset = 0;
      
                      err = utoppy_send_packet(sc, UTOPPY_CMD_FILE,
                          UTOPPY_LONG_TIMEOUT);
                      if (err == 0)
                              err = utoppy_readfile_next(sc);
                      break;
      
              case UTOPPYIOWRITEFILE:
                      uw = (struct utoppy_writefile *)data;
      
                      DPRINTF(UTOPPY_DBG_IOCTL,("%s: utoppyioctl: UTOPPYIOWRITEFILE "
                          "%s, size %lld, offset %lld\n", device_xname(sc->sc_dev),
                          uw->uw_path, uw->uw_size, uw->uw_offset));
      
                      if ((err = utoppy_turbo_mode(sc, sc->sc_turbo_mode)) != 0)
                              break;
      
                      UTOPPY_OUT_INIT(sc);
                      utoppy_add_8(sc, UTOPPY_FILE_WRITE);
                      uwfp = utoppy_current_ptr(sc->sc_out_data);
      
                      if ((err = utoppy_add_path(sc, uw->uw_path, 1)) != 0) {
                              DPRINTF(UTOPPY_DBG_WRITE,("%s: utoppyioctl: add_path()"
                                  " returned %d\n", device_xname(sc->sc_dev), err));
                              break;
                      }
      
                      strncpy(uwf, &uwfp[2], sizeof(uwf));
                      utoppy_add_64(sc, uw->uw_offset);
      
                      err = utoppy_command(sc, UTOPPY_CMD_FILE, UTOPPY_LONG_TIMEOUT,
                          &resp);
                      if (err) {
                              DPRINTF(UTOPPY_DBG_WRITE,("%s: utoppyioctl: "
                                  "utoppy_command(UTOPPY_CMD_FILE) returned "
                                  "%d\n", device_xname(sc->sc_dev), err));
                              break;
                      }
                      if (resp != UTOPPY_RESP_SUCCESS) {
                              DPRINTF(UTOPPY_DBG_WRITE,("%s: utoppyioctl: "
                                  "utoppy_command(UTOPPY_CMD_FILE) returned "
                                  "bad response %#x\n", device_xname(sc->sc_dev),
                                  resp));
                              err = EIO;
                              break;
                      }
      
                      UTOPPY_OUT_INIT(sc);
                      utoppy_timestamp_encode(sc, uw->uw_mtime);
                      utoppy_add_8(sc, UTOPPY_FTYPE_FILE);
                      utoppy_add_64(sc, uw->uw_size);
                      utoppy_add_string(sc, uwf, sizeof(uwf));
                      utoppy_add_32(sc, 0);
      
                      err = utoppy_command(sc, UTOPPY_RESP_FILE_HEADER,
                          UTOPPY_LONG_TIMEOUT, &resp);
                      if (err) {
                              DPRINTF(UTOPPY_DBG_WRITE,("%s: utoppyioctl: "
                                  "utoppy_command(UTOPPY_RESP_FILE_HEADER) "
                                  "returned %d\n", device_xname(sc->sc_dev), err));
                              break;
                      }
                      if (resp != UTOPPY_RESP_SUCCESS) {
                              DPRINTF(UTOPPY_DBG_WRITE,("%s: utoppyioctl: "
                                  "utoppy_command(UTOPPY_RESP_FILE_HEADER) "
                                  "returned bad response %#x\n",
                                  device_xname(sc->sc_dev), resp));
                              err = EIO;
                              break;
                      }
      
                      sc->sc_wr_offset = uw->uw_offset;
                      sc->sc_wr_size = uw->uw_size;
                      sc->sc_state = UTOPPY_STATE_WRITEFILE;
      
                      DPRINTF(UTOPPY_DBG_WRITE,("%s: utoppyioctl: Changing state to "
                          "%s. wr_offset %lld, wr_size %lld\n",
                          device_xname(sc->sc_dev), utoppy_state_string(sc->sc_state),
                          sc->sc_wr_offset, sc->sc_wr_size));
                      break;
      
              default:
                      DPRINTF(UTOPPY_DBG_IOCTL,("%s: utoppyioctl: Invalid cmd\n",
                          device_xname(sc->sc_dev)));
                      err = ENODEV;
                      break;
              }
      
              DPRINTF(UTOPPY_DBG_IOCTL,("%s: utoppyioctl: done. err %d, state '%s'\n",
                  device_xname(sc->sc_dev), err, utoppy_state_string(sc->sc_state)));
      
              if (err)
                      utoppy_cancel(sc);
      
              if (--sc->sc_refcnt < 0)
                      usb_detach_wakeupold(sc->sc_dev);
      
              return err;
      }
      /*        $NetBSD: uvm_page.c,v 1.250 2020/12/20 11:11:34 skrll Exp $        */
      
      /*-
       * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
       * All rights reserved.
       *
       * This code is derived from software contributed to The NetBSD Foundation
       * by Andrew Doran.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       *
       * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
       * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
       * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
       * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
       * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
       * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
       * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
       * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
       * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
       * POSSIBILITY OF SUCH DAMAGE.
       */
      
      /*
       * Copyright (c) 1997 Charles D. Cranor and Washington University.
       * Copyright (c) 1991, 1993, The Regents of the University of California.
       *
       * All rights reserved.
       *
       * This code is derived from software contributed to Berkeley by
       * The Mach Operating System project at Carnegie-Mellon University.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. Neither the name of the University nor the names of its contributors
       *    may be used to endorse or promote products derived from this software
       *    without specific prior written permission.
       *
       * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
       * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
       * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
       * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
       * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
       * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
       * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
       * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
       * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
       * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
       * SUCH DAMAGE.
       *
       *        @(#)vm_page.c   8.3 (Berkeley) 3/21/94
       * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
       *
       *
       * Copyright (c) 1987, 1990 Carnegie-Mellon University.
       * All rights reserved.
       *
       * Permission to use, copy, modify and distribute this software and
       * its documentation is hereby granted, provided that both the copyright
       * notice and this permission notice appear in all copies of the
       * software, derivative works or modified versions, and any portions
       * thereof, and that both notices appear in supporting documentation.
       *
       * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
       * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
       * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
       *
       * Carnegie Mellon requests users of this software to return to
       *
       *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
       *  School of Computer Science
       *  Carnegie Mellon University
       *  Pittsburgh PA 15213-3890
       *
       * any improvements or extensions that they make and grant Carnegie the
       * rights to redistribute these changes.
       */
      
      /*
       * uvm_page.c: page ops.
       */
      
      #include <sys/cdefs.h>
      __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.250 2020/12/20 11:11:34 skrll Exp $");
      
      #include "opt_ddb.h"
      #include "opt_uvm.h"
      #include "opt_uvmhist.h"
      #include "opt_readahead.h"
      
      #include <sys/param.h>
      #include <sys/systm.h>
      #include <sys/sched.h>
      #include <sys/kernel.h>
      #include <sys/vnode.h>
      #include <sys/proc.h>
      #include <sys/radixtree.h>
      #include <sys/atomic.h>
      #include <sys/cpu.h>
      
      #include <uvm/uvm.h>
      #include <uvm/uvm_ddb.h>
      #include <uvm/uvm_pdpolicy.h>
      #include <uvm/uvm_pgflcache.h>
      
/*
 * number of pages per-CPU to reserve for the kernel; MD code may
 * override the default at build time via UVM_RESERVED_PAGES_PER_CPU.
 */
#ifndef	UVM_RESERVED_PAGES_PER_CPU
#define	UVM_RESERVED_PAGES_PER_CPU	5
#endif
int vm_page_reserve_kernel = UVM_RESERVED_PAGES_PER_CPU;

/*
 * physical memory size.
 */
psize_t physmem;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we allocate an initial number of page colors in uvm_page_init(),
 * and remember them.  We may re-color pages as cache sizes are
 * discovered during the autoconfiguration phase.  But we can never
 * free the initial set of buckets, since they are allocated using
 * uvm_pageboot_alloc().
 */

static size_t recolored_pages_memsize /* = 0 */;
static char *recolored_pages_mem;

/*
 * freelist locks - one per bucket.
 */

union uvm_freelist_lock	uvm_freelist_locks[PGFL_MAX_BUCKETS]
    __cacheline_aligned;

/*
 * basic NUMA information.  Singly-linked list of regions registered by
 * MD code via uvm_page_numa_load(); newest registration is at the head.
 */

static struct uvm_page_numa_region {
	struct uvm_page_numa_region	*next;
	paddr_t				start;
	paddr_t				size;
	u_int				numa_id;
} *uvm_page_numa_region;

#ifdef DEBUG
/* NOTE(review): consumers of these are outside this chunk — presumably
 * the DEBUG page-zero checking code; verify against the rest of the file. */
kmutex_t uvm_zerochecklock __cacheline_aligned;
vaddr_t uvm_zerocheckkva;
#endif /* DEBUG */

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h
 *
 * Thus they are redefined here.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
      
      /*
       * inline functions
       */
      
      /*
       * uvm_pageinsert: insert a page in the object.
       *
       * => caller must lock object
       * => call should have already set pg's object and offset pointers
       *    and bumped the version counter
       */
      
static inline void
uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg)
{

	KASSERT(uobj == pg->uobject);
	KASSERT(rw_write_held(uobj->vmobjlock));
	KASSERT((pg->flags & PG_TABLED) == 0);

	if ((pg->flags & PG_STAT) != 0) {
		/* Cannot use uvm_pagegetdirty(): not yet in radix tree. */
		const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);

		if ((pg->flags & PG_FILE) != 0) {
			/*
			 * First page attached to a vnode: flag the vnode
			 * as holding pages and take a hold reference on it
			 * (vholdl), under the vnode interlock.
			 */
			if (uobj->uo_npages == 0) {
				struct vnode *vp = (struct vnode *)uobj;
				mutex_enter(vp->v_interlock);
				KASSERT((vp->v_iflag & VI_PAGES) == 0);
				vp->v_iflag |= VI_PAGES;
				vholdl(vp);
				mutex_exit(vp->v_interlock);
			}
			if (UVM_OBJ_IS_VTEXT(uobj)) {
				cpu_count(CPU_COUNT_EXECPAGES, 1);
			}
			/* Bump the per-CPU file-page counter for this
			 * clean/dirty/unknown status. */
			cpu_count(CPU_COUNT_FILEUNKNOWN + status, 1);
		} else {
			cpu_count(CPU_COUNT_ANONUNKNOWN + status, 1);
		}
	}
	/* Mark the page as entered in the object and account for it. */
	pg->flags |= PG_TABLED;
	uobj->uo_npages++;
}
      
      static inline int
      uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
      {
              const uint64_t idx = pg->offset >> PAGE_SHIFT;
              int error;
      
              KASSERT(rw_write_held(uobj->vmobjlock));
      
  356         error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
              if (error != 0) {
                      return error;
              }
  354         if ((pg->flags & PG_CLEAN) == 0) {
                      uvm_obj_page_set_dirty(pg);
              }
  356         KASSERT(((pg->flags & PG_CLEAN) == 0) ==
                      uvm_obj_page_dirty_p(pg));
              return 0;
      }
      
      /*
       * uvm_page_remove: remove page from object.
       *
       * => caller must lock object
       */
      
static inline void
uvm_pageremove_object(struct uvm_object *uobj, struct vm_page *pg)
{

	KASSERT(uobj == pg->uobject);
	KASSERT(rw_write_held(uobj->vmobjlock));
	KASSERT(pg->flags & PG_TABLED);

	if ((pg->flags & PG_STAT) != 0) {
		/* Cannot use uvm_pagegetdirty(): no longer in radix tree. */
		const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);

		if ((pg->flags & PG_FILE) != 0) {
			/*
			 * Last page leaving a vnode: clear the "has pages"
			 * flag and drop the hold reference taken by
			 * uvm_pageinsert_object(), under the interlock.
			 */
			if (uobj->uo_npages == 1) {
				struct vnode *vp = (struct vnode *)uobj;
				mutex_enter(vp->v_interlock);
				KASSERT((vp->v_iflag & VI_PAGES) != 0);
				vp->v_iflag &= ~VI_PAGES;
				holdrelel(vp);
				mutex_exit(vp->v_interlock);
			}
			if (UVM_OBJ_IS_VTEXT(uobj)) {
				cpu_count(CPU_COUNT_EXECPAGES, -1);
			}
			/* Undo the per-CPU file-page accounting. */
			cpu_count(CPU_COUNT_FILEUNKNOWN + status, -1);
		} else {
			cpu_count(CPU_COUNT_ANONUNKNOWN + status, -1);
		}
	}
	/* Detach the page from the object. */
	uobj->uo_npages--;
	pg->flags &= ~PG_TABLED;
	pg->uobject = NULL;
}
      
      static inline void
      uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
      {
              struct vm_page *opg __unused;
      
   59         KASSERT(rw_write_held(uobj->vmobjlock));
      
   59         opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
              KASSERT(pg == opg);
      }
      
      static void
      uvm_page_init_bucket(struct pgfreelist *pgfl, struct pgflbucket *pgb, int num)
      {
              int i;
      
              pgb->pgb_nfree = 0;
              for (i = 0; i < uvmexp.ncolors; i++) {
                      LIST_INIT(&pgb->pgb_colors[i]);
              }
              pgfl->pgfl_buckets[num] = pgb;
      }
      
      /*
       * uvm_page_init: init the page system.   called from uvm_init().
       *
       * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
       */
      
void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	/* Per-CPU UVM state for the boot CPU, used until real per-CPU
	 * structures are set up. */
	static struct uvm_cpu boot_cpu __cacheline_aligned;
	psize_t freepages, pagecount, bucketsize, n;
	struct pgflbucket *pgb;
	struct vm_page *pagearray;
	char *bucketarray;
	uvm_physseg_t bank;
	int fl, b;

	/* Bootstrap is single-threaded: only the boot CPU may exist. */
	KASSERT(ncpu <= 1);

	/*
	 * init the page queues and free page queue locks, except the
	 * free list; we allocate that later (with the initial vm_page
	 * structures).
	 */

	curcpu()->ci_data.cpu_uvm = &boot_cpu;
	uvmpdpol_init();
	for (b = 0; b < __arraycount(uvm_freelist_locks); b++) {
		mutex_init(&uvm_freelist_locks[b].lock, MUTEX_DEFAULT, IPL_VM);
	}

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (uvm_physseg_get_last() == UVM_PHYSSEG_TYPE_INVALID)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;

	for (bank = uvm_physseg_get_first();
	     uvm_physseg_valid_p(bank) ;
	     bank = uvm_physseg_get_next(bank)) {
		freepages += (uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank));
	}

	/*
	 * Let MD code initialize the number of colors, or default
	 * to 1 color if MD code doesn't care.
	 */
	if (uvmexp.ncolors == 0)
		uvmexp.ncolors = 1;
	uvmexp.colormask = uvmexp.ncolors - 1;
	/* ncolors must be a power of two for the mask to be valid. */
	KASSERT((uvmexp.colormask & uvmexp.ncolors) == 0);

	/* We always start with only 1 bucket. */
	uvm.bucketcount = 1;

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */
	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	/* One bucket per freelist plus the vm_page array, carved from a
	 * single boot-time allocation; buckets are cache-line aligned. */
	bucketsize = offsetof(struct pgflbucket, pgb_colors[uvmexp.ncolors]);
	bucketsize = roundup2(bucketsize, coherency_unit);
	bucketarray = (void *)uvm_pageboot_alloc(
	    bucketsize * VM_NFREELIST +
	    pagecount * sizeof(struct vm_page));
	pagearray = (struct vm_page *)
	    (bucketarray + bucketsize * VM_NFREELIST);

	for (fl = 0; fl < VM_NFREELIST; fl++) {
		pgb = (struct pgflbucket *)(bucketarray + bucketsize * fl);
		uvm_page_init_bucket(&uvm.page_free[fl], pgb, 0);
	}
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the freelist cache in the disabled state.
	 */
	uvm_pgflcache_init();

	/*
	 * init the vm_page structures and put them in the correct place.
	 */
	/* First init the extent */

	for (bank = uvm_physseg_get_first(),
		 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
	     uvm_physseg_valid_p(bank);
	     bank = uvm_physseg_get_next(bank)) {

		n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
		uvm_physseg_seg_alloc_from_slab(bank, n);
		uvm_physseg_init_seg(bank, pagearray);

		/* set up page array pointers */
		pagearray += n;
		pagecount -= n;
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init various thresholds.
	 */

	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = vm_page_reserve_kernel;

	/*
	 * done!
	 */

	uvm.page_init_done = true;
}
      
      /*
       * uvm_pgfl_lock: lock all freelist buckets
       */
      
      void
      uvm_pgfl_lock(void)
      {
              int i;
      
              for (i = 0; i < __arraycount(uvm_freelist_locks); i++) {
                      mutex_spin_enter(&uvm_freelist_locks[i].lock);
              }
      }
      
      /*
       * uvm_pgfl_unlock: unlock all freelist buckets
       */
      
      void
      uvm_pgfl_unlock(void)
      {
              int i;
      
              for (i = 0; i < __arraycount(uvm_freelist_locks); i++) {
                      mutex_spin_exit(&uvm_freelist_locks[i].lock);
              }
      }
      
      /*
       * uvm_setpagesize: set the page size
       *
       * => sets page_shift and page_mask from uvmexp.pagesize.
       */
      
      void
      uvm_setpagesize(void)
      {
      
              /*
               * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
               * to be a constant (indicated by being a non-zero value).
               */
              if (uvmexp.pagesize == 0) {
                      if (PAGE_SIZE == 0)
                              panic("uvm_setpagesize: uvmexp.pagesize not set");
                      uvmexp.pagesize = PAGE_SIZE;
              }
              uvmexp.pagemask = uvmexp.pagesize - 1;
              if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
                      panic("uvm_setpagesize: page size %u (%#x) not a power of two",
                          uvmexp.pagesize, uvmexp.pagesize);
              for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
                      if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
                              break;
      }
      
      /*
       * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
       */
      
vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
	static bool initialized = false;
	vaddr_t addr;
#if !defined(PMAP_STEAL_MEMORY)
	vaddr_t vaddr;
	paddr_t paddr;
#endif

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == false) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = true;
	}

	/* round to page size */
	size = round_page(size);
	uvmexp.bootpages += atop(size);

#if defined(PMAP_STEAL_MEMORY)

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should adjust
	 * virtual_space_start/virtual_space_end if necessary.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return addr;

#else /* !PMAP_STEAL_MEMORY */

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	/* claim the virtual range [addr, addr + size) */
	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
	}
	/* flush any deferred pmap updates before the memory is used */
	pmap_update(pmap_kernel());
	return addr;
#endif	/* PMAP_STEAL_MEMORY */
}
      
      #if !defined(PMAP_STEAL_MEMORY)
      /*
       * uvm_page_physget: "steal" one page from the vm_physmem structure.
       *
       * => attempt to allocate it off the end of a segment in which the "avail"
       *    values match the start/end values.   if we can't do that, then we
       *    will advance both values (making them equal, and removing some
       *    vm_page structures from the non-avail area).
       * => return false if out of memory.
       */
      
/* subroutine: try to allocate from memory chunks on the specified freelist */
static bool uvm_page_physget_freelist(paddr_t *, int);

static bool
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	uvm_physseg_t lcv;

	/*
	 * pass 1: try allocating from a matching end.
	 *
	 * Under VM_PSTRAT_BIGFIRST the segments are walked from last to
	 * first; otherwise from first to last.  The same ordering is
	 * used for pass 2 below.
	 */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = uvm_physseg_get_last(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
#else
	for (lcv = uvm_physseg_get_first(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_next(lcv))
#endif
	{
		/* This is a bootstrap-only allocator. */
		if (uvm.page_init_done == true)
			panic("uvm_page_physget: called _after_ bootstrap");

		/* Try to match at front or back on unused segment */
		if (uvm_page_physunload(lcv, freelist, paddrp))
			return true;
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = uvm_physseg_get_last(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
#else
	for (lcv = uvm_physseg_get_first(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_next(lcv))
#endif
	{
		/* Try the front regardless. */
		if (uvm_page_physunload_force(lcv, freelist, paddrp))
			return true;
	}
	return false;
}
      
      bool
      uvm_page_physget(paddr_t *paddrp)
      {
              int i;
      
              /* try in the order of freelist preference */
              for (i = 0; i < VM_NFREELIST; i++)
                      if (uvm_page_physget_freelist(paddrp, i) == true)
                              return (true);
              return (false);
      }
      #endif /* PMAP_STEAL_MEMORY */
      
      /*
       * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
       * back from an I/O mapping (ugh!).   used in some MD code as well.
       */
      struct vm_page *
      uvm_phys_to_vm_page(paddr_t pa)
      {
  861         paddr_t pf = atop(pa);
              paddr_t        off;
              uvm_physseg_t        upm;
      
              upm = uvm_physseg_find(pf, &off);
              if (upm != UVM_PHYSSEG_TYPE_INVALID)
  862                 return uvm_physseg_get_pg(upm, off);
              return(NULL);
      }
      
      paddr_t
      uvm_vm_page_to_phys(const struct vm_page *pg)
      {
      
 1019         return pg->phys_addr & ~(PAGE_SIZE - 1);
      }
      
      /*
       * uvm_page_numa_load: load NUMA range description.
       */
      void
      uvm_page_numa_load(paddr_t start, paddr_t size, u_int numa_id)
      {
              struct uvm_page_numa_region *d;
      
              KASSERT(numa_id < PGFL_MAX_BUCKETS);
      
              d = kmem_alloc(sizeof(*d), KM_SLEEP);
              d->start = start;
              d->size = size;
              d->numa_id = numa_id;
              d->next = uvm_page_numa_region;
              uvm_page_numa_region = d;
      }
      
      /*
       * uvm_page_numa_lookup: lookup NUMA node for the given page.
       */
      static u_int
      uvm_page_numa_lookup(struct vm_page *pg)
      {
              struct uvm_page_numa_region *d;
              static bool warned;
              paddr_t pa;
      
              KASSERT(uvm_page_numa_region != NULL);
      
              pa = VM_PAGE_TO_PHYS(pg);
              for (d = uvm_page_numa_region; d != NULL; d = d->next) {
                      if (pa >= d->start && pa < d->start + d->size) {
                              return d->numa_id;
                      }
              }
      
              if (!warned) {
                      printf("uvm_page_numa_lookup: failed, first pg=%p pa=%#"
                          PRIxPADDR "\n", pg, VM_PAGE_TO_PHYS(pg));
                      warned = true;
              }
      
              return 0;
      }
      
      /*
       * uvm_page_redim: adjust freelist dimensions if they have changed.
       */
      
/*
 * Resize the freelists to the given number of colors and buckets,
 * moving every free page into its new bucket/color position.  Colors
 * may only grow; a no-op if nothing would change.
 */
static void
uvm_page_redim(int newncolors, int newnbuckets)
{
	struct pgfreelist npgfl;
	struct pgflbucket *opgb, *npgb;
	struct pgflist *ohead, *nhead;
	struct vm_page *pg;
	size_t bucketsize, bucketmemsize, oldbucketmemsize;
	int fl, ob, oc, nb, nc, obuckets, ocolors;
	char *bucketarray, *oldbucketmem, *bucketmem;

	/* Color count must be a power of two. */
	KASSERT(((newncolors - 1) & newncolors) == 0);

	/* Anything to do? */
	if (newncolors <= uvmexp.ncolors &&
	    newnbuckets == uvm.bucketcount) {
		return;
	}
	/* Before page init completes, only the color count needs recording. */
	if (uvm.page_init_done == false) {
		uvmexp.ncolors = newncolors;
		return;
	}

	/*
	 * Size one bucket (header plus one list head per color), round to
	 * the coherency unit, then allocate enough for every bucket on
	 * every freelist plus slack so the array itself can be aligned.
	 */
	bucketsize = offsetof(struct pgflbucket, pgb_colors[newncolors]);
	bucketsize = roundup2(bucketsize, coherency_unit);
	bucketmemsize = bucketsize * newnbuckets * VM_NFREELIST +
	    coherency_unit - 1;
	bucketmem = kmem_zalloc(bucketmemsize, KM_SLEEP);
	bucketarray = (char *)roundup2((uintptr_t)bucketmem, coherency_unit);

	ocolors = uvmexp.ncolors;
	obuckets = uvm.bucketcount;

	/* Freelist cache mustn't be enabled. */
	uvm_pgflcache_pause();

	/*
	 * Make sure we should still do this: the dimensions may have been
	 * changed by another thread while we slept in the allocation above.
	 */
	uvm_pgfl_lock();
	if (newncolors <= uvmexp.ncolors &&
	    newnbuckets == uvm.bucketcount) {
		uvm_pgfl_unlock();
		uvm_pgflcache_resume();
		kmem_free(bucketmem, bucketmemsize);
		return;
	}

	uvmexp.ncolors = newncolors;
	uvmexp.colormask = uvmexp.ncolors - 1;
	uvm.bucketcount = newnbuckets;

	for (fl = 0; fl < VM_NFREELIST; fl++) {
		/* Init new buckets in new freelist. */
		memset(&npgfl, 0, sizeof(npgfl));
		for (nb = 0; nb < newnbuckets; nb++) {
			npgb = (struct pgflbucket *)bucketarray;
			uvm_page_init_bucket(&npgfl, npgb, nb);
			bucketarray += bucketsize;
		}
		/* Now transfer pages from the old freelist. */
		for (nb = ob = 0; ob < obuckets; ob++) {
			opgb = uvm.page_free[fl].pgfl_buckets[ob];
			for (oc = 0; oc < ocolors; oc++) {
				ohead = &opgb->pgb_colors[oc];
				while ((pg = LIST_FIRST(ohead)) != NULL) {
					LIST_REMOVE(pg, pageq.list);
					/*
					 * Here we decide on the NEW color &
					 * bucket for the page.  For NUMA
					 * we'll use the info that the
					 * hardware gave us.  For non-NUMA
					 * we take the physical page frame
					 * number and cache color into
					 * account.  We do this to try and
					 * avoid defeating any memory
					 * interleaving in the hardware.
					 */
					KASSERT(
					    uvm_page_get_bucket(pg) == ob);
					KASSERT(fl ==
					    uvm_page_get_freelist(pg));
					if (uvm_page_numa_region != NULL) {
						nb = uvm_page_numa_lookup(pg);
					} else {
						nb = atop(VM_PAGE_TO_PHYS(pg))
						    / uvmexp.ncolors / 8
						    % newnbuckets;
					}
					uvm_page_set_bucket(pg, nb);
					npgb = npgfl.pgfl_buckets[nb];
					npgb->pgb_nfree++;
					nc = VM_PGCOLOR(pg);
					nhead = &npgb->pgb_colors[nc];
					LIST_INSERT_HEAD(nhead, pg, pageq.list);
				}
			}
		}
		/* Install the new freelist. */
		memcpy(&uvm.page_free[fl], &npgfl, sizeof(npgfl));
	}

	/*
	 * Swap the new bucket memory into the stash and remember the old;
	 * the previous array can only be freed after the lock is dropped.
	 */
	oldbucketmemsize = recolored_pages_memsize;
	oldbucketmem = recolored_pages_mem;
	recolored_pages_memsize = bucketmemsize;
	recolored_pages_mem = bucketmem;

	uvm_pgfl_unlock();
	uvm_pgflcache_resume();

	if (oldbucketmemsize) {
		kmem_free(oldbucketmem, oldbucketmemsize);
	}

	/*
	 * this calls uvm_km_alloc() which may want to hold
	 * uvm_freelist_lock.
	 */
	uvm_pager_realloc_emerg();
}
      
      /*
       * uvm_page_recolor: Recolor the pages if the new color count is
       * larger than the old one.
       */
      
      void
      uvm_page_recolor(int newncolors)
      {
      
              uvm_page_redim(newncolors, uvm.bucketcount);
      }
      
      /*
       * uvm_page_rebucket: Determine a bucket structure and redim the free
       * lists to match.
       */
      
void
uvm_page_rebucket(void)
{
	u_int min_numa, max_numa, npackage, shift;
	struct cpu_info *ci, *ci2, *ci3;
	CPU_INFO_ITERATOR cii;

	/*
	 * If we have more than one NUMA node, and the maximum NUMA node ID
	 * is less than PGFL_MAX_BUCKETS, then we'll use NUMA distribution
	 * for free pages.
	 */
	min_numa = (u_int)-1;
	max_numa = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_numa_id < min_numa) {
			min_numa = ci->ci_numa_id;
		}
		if (ci->ci_numa_id > max_numa) {
			max_numa = ci->ci_numa_id;
		}
	}
	if (min_numa != max_numa && max_numa < PGFL_MAX_BUCKETS) {
		/* One bucket per NUMA node; each CPU uses its own node. */
		aprint_debug("UVM: using NUMA allocation scheme\n");
		for (CPU_INFO_FOREACH(cii, ci)) {
			ci->ci_data.cpu_uvm->pgflbucket = ci->ci_numa_id;
		}
		uvm_page_redim(uvmexp.ncolors, max_numa + 1);
		return;
	}

	/*
	 * Otherwise we'll go with a scheme to maximise L2/L3 cache locality
	 * and minimise lock contention.  Count the total number of CPU
	 * packages, and then try to distribute the buckets among CPU
	 * packages evenly.
	 */
	npackage = curcpu()->ci_nsibling[CPUREL_PACKAGE1ST];

	/*
	 * Figure out how to arrange the packages & buckets, and the total
	 * number of buckets we need.  XXX 2 may not be the best factor.
	 * "shift" ends up as the number of halvings needed to fit npackage
	 * into PGFL_MAX_BUCKETS, i.e. log2(packages per bucket).
	 */
	for (shift = 0; npackage > PGFL_MAX_BUCKETS; shift++) {
		npackage >>= 1;
	}
	uvm_page_redim(uvmexp.ncolors, npackage);

	/*
	 * Now tell each CPU which bucket to use.  In the outer loop, scroll
	 * through all CPU packages (via the PACKAGE1ST sibling ring).
	 */
	npackage = 0;
	ci = curcpu();
	ci2 = ci->ci_sibling[CPUREL_PACKAGE1ST];
	do {
		/*
		 * In the inner loop, scroll through all CPUs in the package
		 * and assign the same bucket ID.
		 */
		ci3 = ci2;
		do {
			ci3->ci_data.cpu_uvm->pgflbucket = npackage >> shift;
			ci3 = ci3->ci_sibling[CPUREL_PACKAGE];
		} while (ci3 != ci2);
		npackage++;
		ci2 = ci2->ci_sibling[CPUREL_PACKAGE1ST];
	} while (ci2 != ci->ci_sibling[CPUREL_PACKAGE1ST]);

	aprint_debug("UVM: using package allocation scheme, "
	    "%d package(s) per bucket\n", 1 << shift);
}
      
      /*
       * uvm_cpu_attach: initialize per-CPU data structures.
       */
      
      void
      uvm_cpu_attach(struct cpu_info *ci)
      {
              struct uvm_cpu *ucpu;
      
              /* Already done in uvm_page_init(). */
              if (!CPU_IS_PRIMARY(ci)) {
                      /* Add more reserve pages for this CPU. */
                      uvmexp.reserve_kernel += vm_page_reserve_kernel;
      
                      /* Allocate per-CPU data structures. */
                      ucpu = kmem_zalloc(sizeof(struct uvm_cpu) + coherency_unit - 1,
                          KM_SLEEP);
                      ucpu = (struct uvm_cpu *)roundup2((uintptr_t)ucpu,
                          coherency_unit);
                      ci->ci_data.cpu_uvm = ucpu;
              } else {
                      ucpu = ci->ci_data.cpu_uvm;
              }
      
              uvmpdpol_init_cpu(ucpu);
      
              /*
               * Attach RNG source for this CPU's VM events
               */
              rnd_attach_source(&ucpu->rs, ci->ci_data.cpu_name, RND_TYPE_VM,
                  RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE|
                  RND_FLAG_ESTIMATE_VALUE);
      }
      
      /*
       * uvm_availmem: fetch the total amount of free memory in pages.  this can
       * have a detrimental effect on performance due to false sharing; don't call
       * unless needed.
       *
       * some users can request the amount of free memory so often that it begins
       * to impact upon performance.  if calling frequently and an inexact value
       * is okay, call with cached = true.
       */
      
      int
      uvm_availmem(bool cached)
      {
              int64_t fp;
      
    4         cpu_count_sync(cached);
              if ((fp = cpu_count_get(CPU_COUNT_FREEPAGES)) < 0) {
                      /*
                       * XXXAD could briefly go negative because it's impossible
                       * to get a clean snapshot.  address this for other counters
                       * used as running totals before NetBSD 10 although less
                       * important for those.
                       */
                      fp = 0;
              }
              return (int)fp;
      }
      
      /*
       * uvm_pagealloc_pgb: helper routine that tries to allocate any color from a
       * specific freelist and specific bucket only.
       *
       * => must be at IPL_VM or higher to protect per-CPU data structures.
       */
      
static struct vm_page *
uvm_pagealloc_pgb(struct uvm_cpu *ucpu, int f, int b, int *trycolorp, int flags)
{
	int c, trycolor, colormask;
	struct pgflbucket *pgb;
	struct vm_page *pg;
	kmutex_t *lock;
	bool fill;

	/*
	 * Skip the bucket if empty, no lock needed.  There could be many
	 * empty freelists/buckets.
	 */
	pgb = uvm.page_free[f].pgfl_buckets[b];
	if (pgb->pgb_nfree == 0) {
		return NULL;
	}

	/* Skip bucket if low on memory. */
	lock = &uvm_freelist_locks[b].lock;
	mutex_spin_enter(lock);
	if (__predict_false(pgb->pgb_nfree <= uvmexp.reserve_kernel)) {
		/*
		 * Below the kernel reserve: only callers that asked for
		 * reserve pages may proceed, and only the pagedaemon may
		 * dip below its own reserve.
		 */
		if ((flags & UVM_PGA_USERESERVE) == 0 ||
		    (pgb->pgb_nfree <= uvmexp.reserve_pagedaemon &&
		     curlwp != uvm.pagedaemon_lwp)) {
			mutex_spin_exit(lock);
			return NULL;
		}
		/* Don't refill the per-CPU cache from reserve pages. */
		fill = false;
	} else {
		fill = true;
	}

	/* Try all page colors as needed, starting at the requested color. */
	c = trycolor = *trycolorp;
	colormask = uvmexp.colormask;
	do {
		pg = LIST_FIRST(&pgb->pgb_colors[c]);
		if (__predict_true(pg != NULL)) {
			/*
			 * Got a free page!  PG_FREE must be cleared under
			 * lock because of uvm_pglistalloc().
			 */
			LIST_REMOVE(pg, pageq.list);
			KASSERT(pg->flags == PG_FREE);
			pg->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
			pgb->pgb_nfree--;
			CPU_COUNT(CPU_COUNT_FREEPAGES, -1);

			/*
			 * While we have the bucket locked and our data
			 * structures fresh in L1 cache, we have an ideal
			 * opportunity to grab some pages for the freelist
			 * cache without causing extra contention.  Only do
			 * so if we found pages in this CPU's preferred
			 * bucket.
			 */
			if (__predict_true(b == ucpu->pgflbucket && fill)) {
				uvm_pgflcache_fill(ucpu, f, b, c);
			}
			mutex_spin_exit(lock);
			KASSERT(uvm_page_get_bucket(pg) == b);
			CPU_COUNT(c == trycolor ?
			    CPU_COUNT_COLORHIT : CPU_COUNT_COLORMISS, 1);
			CPU_COUNT(CPU_COUNT_CPUMISS, 1);
			/* Report the color actually used back to the caller. */
			*trycolorp = c;
			return pg;
		}
		c = (c + 1) & colormask;
	} while (c != trycolor);
	mutex_spin_exit(lock);

	return NULL;
}
      
      /*
       * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat that allocates
       * any color from any bucket, in a specific freelist.
       *
       * => must be at IPL_VM or higher to protect per-CPU data structures.
       */
      
      static struct vm_page *
      uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int f, int *trycolorp, int flags)
      {
              int b, trybucket, bucketcount;
              struct vm_page *pg;
      
              /* Try for the exact thing in the per-CPU cache. */
  817         if ((pg = uvm_pgflcache_alloc(ucpu, f, *trycolorp)) != NULL) {
  802                 CPU_COUNT(CPU_COUNT_CPUHIT, 1);
  801                 CPU_COUNT(CPU_COUNT_COLORHIT, 1);
                      return pg;
              }
      
              /* Walk through all buckets, trying our preferred bucket first. */
  481         trybucket = ucpu->pgflbucket;
              b = trybucket;
              bucketcount = uvm.bucketcount;
              do {
  481                 pg = uvm_pagealloc_pgb(ucpu, f, b, trycolorp, flags);
  816                 if (pg != NULL) {
                              return pg;
                      }
                      b = (b + 1 == bucketcount ? 0 : b + 1);
              } while (b != trybucket);
      
              return NULL;
      }
      
      /*
       * uvm_pagealloc_strat: allocate vm_page from a particular free list.
       *
       * => return null if no pages free
       * => wake up pagedaemon if number of free pages drops below low water mark
       * => if obj != NULL, obj must be locked (to put in obj's tree)
       * => if anon != NULL, anon must be locked (to put in anon)
       * => only one of obj or anon can be non-null
       * => caller must activate/deactivate page if it is not wired.
       * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
       * => policy decision: it is more important to pull a page off of the
       *        appropriate priority free list than it is to get a page from the
       *        correct bucket or color bin.  This is because we live with the
       *        consequences of a bad free list decision for the entire
       *        lifetime of the page, e.g. if the page comes from memory that
       *        is slower to access.
       */
      
struct vm_page *
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags, int strat, int free_list)
{
	int color, lcv, error, s;
	struct uvm_cpu *ucpu;
	struct vm_page *pg;
	lwp_t *l;

	/* A page may be owned by an object or an anon, never both. */
	KASSERT(obj == NULL || anon == NULL);
	KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
	KASSERT(off == trunc_page(off));
	KASSERT(obj == NULL || rw_write_held(obj->vmobjlock));
	KASSERT(anon == NULL || anon->an_lock == NULL ||
	    rw_write_held(anon->an_lock));

	/*
	 * This implements a global round-robin page coloring
	 * algorithm.
	 */

	s = splvm();
	ucpu = curcpu()->ci_data.cpu_uvm;
	if (flags & UVM_FLAG_COLORMATCH) {
		color = atop(off) & uvmexp.colormask;
	} else {
		color = ucpu->pgflcolor;
	}

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        reserved pages have not been requested.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 * we make kernel reserve pages available if called by a
	 * kernel thread.
	 */
	l = curlwp;
	if (__predict_true(l != NULL) && (l->l_flag & LW_SYSTEM) != 0) {
		flags |= UVM_PGA_USERESERVE;
	}

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check freelists: descending priority (ascending id) order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pg = uvm_pagealloc_pgfl(ucpu, lcv, &color, flags);
			if (pg != NULL) {
				goto gotit;
			}
		}

		/* No pages free!  Have pagedaemon free some memory. */
		splx(s);
		uvm_kick_pdaemon();
		return NULL;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pg = uvm_pagealloc_pgfl(ucpu, free_list, &color, flags);
		if (pg != NULL) {
			goto gotit;
		}

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free!  Have pagedaemon free some memory. */
		splx(s);
		uvm_kick_pdaemon();
		return NULL;

	case UVM_PGA_STRAT_NUMA:
		/*
		 * NUMA strategy (experimental): allocating from the correct
		 * bucket is more important than observing freelist
		 * priority.  Look only to the current NUMA node; if that
		 * fails, we need to look to other NUMA nodes, so retry with
		 * the normal strategy.
		 */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pg = uvm_pgflcache_alloc(ucpu, lcv, color);
			if (pg != NULL) {
				CPU_COUNT(CPU_COUNT_CPUHIT, 1);
				CPU_COUNT(CPU_COUNT_COLORHIT, 1);
				goto gotit;
			}
			pg = uvm_pagealloc_pgb(ucpu, lcv,
			    ucpu->pgflbucket, &color, flags);
			if (pg != NULL) {
				goto gotit;
			}
		}
		strat = UVM_PGA_STRAT_NORMAL;
		goto again;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	/*
	 * We now know which color we actually allocated from; set
	 * the next color accordingly.
	 */

	ucpu->pgflcolor = (color + 1) & uvmexp.colormask;

	/*
	 * while still at IPL_VM, update allocation statistics.
	 */

	if (anon) {
		CPU_COUNT(CPU_COUNT_ANONCLEAN, 1);
	}
	splx(s);
	KASSERT(pg->flags == (PG_BUSY|PG_CLEAN|PG_FAKE));

	/*
	 * assign the page to the object.  as the page was free, we know
	 * that pg->uobject and pg->uanon are NULL.  we only need to take
	 * the page's interlock if we are changing the values.
	 */
	if (anon != NULL || obj != NULL) {
		mutex_enter(&pg->interlock);
	}
	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	KASSERT(uvm_page_owner_locked_p(pg, true));
	if (anon) {
		anon->an_page = pg;
		pg->flags |= PG_ANON;
		mutex_exit(&pg->interlock);
	} else if (obj) {
		/*
		 * set PG_FILE|PG_AOBJ before the first uvm_pageinsert.
		 */
		if (UVM_OBJ_IS_VNODE(obj)) {
			pg->flags |= PG_FILE;
		} else if (UVM_OBJ_IS_AOBJ(obj)) {
			pg->flags |= PG_AOBJ;
		}
		uvm_pageinsert_object(obj, pg);
		mutex_exit(&pg->interlock);
		error = uvm_pageinsert_tree(obj, pg);
		if (error != 0) {
			/* Tree insertion failed: unwind and free the page. */
			mutex_enter(&pg->interlock);
			uvm_pageremove_object(obj, pg);
			mutex_exit(&pg->interlock);
			uvm_pagefree(pg);
			return NULL;
		}
	}

#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/* A zero'd page is not clean. */
		if (obj != NULL || anon != NULL) {
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
		pmap_zero_page(VM_PAGE_TO_PHYS(pg));
	}

	return(pg);
}
      
      /*
       * uvm_pagereplace: replace a page with another
       *
       * => object must be locked
       * => page interlocks must be held
       */
      
      void
      uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
      {
              struct uvm_object *uobj = oldpg->uobject;
              struct vm_page *pg __diagused;
              uint64_t idx;
      
              KASSERT((oldpg->flags & PG_TABLED) != 0);
              KASSERT(uobj != NULL);
              KASSERT((newpg->flags & PG_TABLED) == 0);
              KASSERT(newpg->uobject == NULL);
              KASSERT(rw_write_held(uobj->vmobjlock));
              KASSERT(mutex_owned(&oldpg->interlock));
              KASSERT(mutex_owned(&newpg->interlock));
      
              newpg->uobject = uobj;
              newpg->offset = oldpg->offset;
              idx = newpg->offset >> PAGE_SHIFT;
              pg = radix_tree_replace_node(&uobj->uo_pages, idx, newpg);
              KASSERT(pg == oldpg);
              if (((oldpg->flags ^ newpg->flags) & PG_CLEAN) != 0) {
                      if ((newpg->flags & PG_CLEAN) != 0) {
                              uvm_obj_page_clear_dirty(newpg);
                      } else {
                              uvm_obj_page_set_dirty(newpg);
                      }
              }
              /*
               * oldpg's PG_STAT is stable.  newpg is not reachable by others yet.
               */
              newpg->flags |=
                  (newpg->flags & ~PG_STAT) | (oldpg->flags & PG_STAT);
              uvm_pageinsert_object(uobj, newpg);
              uvm_pageremove_object(uobj, oldpg);
      }
      
      /*
       * uvm_pagerealloc: reallocate a page from one object to another
       *
       * => both objects must be locked
       */
      
      int
      uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
      {
              int error = 0;
      
              /*
               * remove it from the old object
               */
      
   30         if (pg->uobject) {
   30                 uvm_pageremove_tree(pg->uobject, pg);
   30                 uvm_pageremove_object(pg->uobject, pg);
              }
      
              /*
               * put it in the new object
               */
      
   30         if (newobj) {
                      mutex_enter(&pg->interlock);
                      pg->uobject = newobj;
                      pg->offset = newoff;
                      if (UVM_OBJ_IS_VNODE(newobj)) {
                              pg->flags |= PG_FILE;
                      } else if (UVM_OBJ_IS_AOBJ(newobj)) {
                              pg->flags |= PG_AOBJ;
                      }
                      uvm_pageinsert_object(newobj, pg);
                      mutex_exit(&pg->interlock);
                      error = uvm_pageinsert_tree(newobj, pg);
                      if (error != 0) {
                              mutex_enter(&pg->interlock);
                              uvm_pageremove_object(newobj, pg);
                              mutex_exit(&pg->interlock);
                      }
              }
      
   30         return error;
      }
      
      /*
       * uvm_pagefree: free page
       *
       * => erase page's identity (i.e. remove from object)
       * => put page on free list
       * => caller must lock owning object (either anon or uvm_object)
       * => assumes all valid mappings of pg are gone
       */
      
      void
      uvm_pagefree(struct vm_page *pg)
      {
              struct pgfreelist *pgfl;
              struct pgflbucket *pgb;
              struct uvm_cpu *ucpu;
              kmutex_t *lock;
              int bucket, s;
              bool locked;
      
      #ifdef DEBUG
  160         if (pg->uobject == (void *)0xdeadbeef &&
                  pg->uanon == (void *)0xdeadbeef) {
                      panic("uvm_pagefree: freeing free page %p", pg);
              }
      #endif /* DEBUG */
      
  161         KASSERT((pg->flags & PG_PAGEOUT) == 0);
  160         KASSERT(!(pg->flags & PG_FREE));
  160         KASSERT(pg->uobject == NULL || rw_write_held(pg->uobject->vmobjlock));
  161         KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
                      rw_write_held(pg->uanon->an_lock));
      
              /*
               * remove the page from the object's tree before acquiring any page
               * interlocks: this can acquire locks to free radixtree nodes.
               */
   35         if (pg->uobject != NULL) {
   31                 uvm_pageremove_tree(pg->uobject, pg);
              }
      
              /*
               * if the page is loaned, resolve the loan instead of freeing.
               */
      
  160         if (pg->loan_count) {
                      KASSERT(pg->wire_count == 0);
      
                      /*
                       * if the page is owned by an anon then we just want to
                       * drop anon ownership.  the kernel will free the page when
                       * it is done with it.  if the page is owned by an object,
                       * remove it from the object and mark it dirty for the benefit
                       * of possible anon owners.
                       *
                       * regardless of previous ownership, wakeup any waiters,
                       * unbusy the page, and we're done.
                       */
      
                      uvm_pagelock(pg);
                      locked = true;
                      if (pg->uobject != NULL) {
                              uvm_pageremove_object(pg->uobject, pg);
                              pg->flags &= ~(PG_FILE|PG_AOBJ);
                      } else if (pg->uanon != NULL) {
                              if ((pg->flags & PG_ANON) == 0) {
                                      pg->loan_count--;
                              } else {
                                      const unsigned status = uvm_pagegetdirty(pg);
                                      pg->flags &= ~PG_ANON;
                                      cpu_count(CPU_COUNT_ANONUNKNOWN + status, -1);
                              }
                              pg->uanon->an_page = NULL;
                              pg->uanon = NULL;
                      }
                      if (pg->pqflags & PQ_WANTED) {
                              wakeup(pg);
                      }
                      pg->pqflags &= ~PQ_WANTED;
                      pg->flags &= ~(PG_BUSY|PG_RELEASED|PG_PAGER1);
      #ifdef UVM_PAGE_TRKOWN
                      pg->owner_tag = NULL;
      #endif
                      KASSERT((pg->flags & PG_STAT) == 0);
                      if (pg->loan_count) {
                              KASSERT(pg->uobject == NULL);
                              if (pg->uanon == NULL) {
                                      uvm_pagedequeue(pg);
                              }
                              uvm_pageunlock(pg);
                              return;
                      }
  161         } else if (pg->uobject != NULL || pg->uanon != NULL ||
  125                    pg->wire_count != 0) {
   62                 uvm_pagelock(pg);
                      locked = true;
              } else {
   35                 locked = false;
              }
      
              /*
               * remove page from its object or anon.
               */
   63         if (pg->uobject != NULL) {
   31                 uvm_pageremove_object(pg->uobject, pg);
  137         } else if (pg->uanon != NULL) {
   35                 const unsigned int status = uvm_pagegetdirty(pg);
                      pg->uanon->an_page = NULL;
                      pg->uanon = NULL;
                      cpu_count(CPU_COUNT_ANONUNKNOWN + status, -1);
              }
      
              /*
               * if the page was wired, unwire it now.
               */
      
  161         if (pg->wire_count) {
    2                 pg->wire_count = 0;
                      atomic_dec_uint(&uvmexp.wired);
              }
  160         if (locked) {
                      /*
                       * wake anyone waiting on the page.
                       */
   63                 if ((pg->pqflags & PQ_WANTED) != 0) {
                              pg->pqflags &= ~PQ_WANTED;
                              wakeup(pg);
                      }
      
                      /*
                       * now remove the page from the queues.
                       */
   63                 uvm_pagedequeue(pg);
                      uvm_pageunlock(pg);
              } else {
  125                 KASSERT(!uvmpdpol_pageisqueued_p(pg));
              }
      
              /*
               * and put on free queue
               */
      
      #ifdef DEBUG
  160         pg->uobject = (void *)0xdeadbeef;
              pg->uanon = (void *)0xdeadbeef;
      #endif /* DEBUG */
      
              /* Try to send the page to the per-CPU cache. */
              s = splvm();
              ucpu = curcpu()->ci_data.cpu_uvm;
              bucket = uvm_page_get_bucket(pg);
  161         if (bucket == ucpu->pgflbucket && uvm_pgflcache_free(ucpu, pg)) {
  161                 splx(s);
                      return;
              }
      
              /* Didn't work.  Never mind, send it to a global bucket. */
              pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
              pgb = pgfl->pgfl_buckets[bucket];
              lock = &uvm_freelist_locks[bucket].lock;
      
              mutex_spin_enter(lock);
              /* PG_FREE must be set under lock because of uvm_pglistalloc(). */
              pg->flags = PG_FREE;
              LIST_INSERT_HEAD(&pgb->pgb_colors[VM_PGCOLOR(pg)], pg, pageq.list);
              pgb->pgb_nfree++;
                  CPU_COUNT(CPU_COUNT_FREEPAGES, 1);
              mutex_spin_exit(lock);
              splx(s);
      }
      
      /*
       * uvm_page_unbusy: unbusy an array of pages.
       *
       * => pages must either all belong to the same object, or all belong to anons.
       * => if pages are object-owned, object must be locked.
       * => if pages are anon-owned, anons must be locked.
       * => caller must make sure that anon-owned pages are not PG_RELEASED.
       */
      
void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i, pageout_done;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	/* Pageouts resolved here are reported to the pagedaemon in one batch. */
	pageout_done = 0;
	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		/* Holes and don't-care entries in the array are skipped. */
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		KASSERT(uvm_page_owner_locked_p(pg, true));
		KASSERT(pg->flags & PG_BUSY);

		/*
		 * A page still marked PG_PAGEOUT was written out but not
		 * freed by the pagedaemon: convert it to PG_RELEASED so it
		 * is freed below, and count it for uvm_pageout_done().
		 */
		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pg->flags |= PG_RELEASED;
			pageout_done++;
			atomic_inc_uint(&uvmexp.pdfreed);
		}
		if (pg->flags & PG_RELEASED) {
			UVMHIST_LOG(ubchist, "releasing pg %#jx",
			    (uintptr_t)pg, 0, 0, 0);
			KASSERT(pg->uobject != NULL ||
			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
			/* Drop PG_RELEASED before handing the page to free. */
			pg->flags &= ~PG_RELEASED;
			uvm_pagefree(pg);
		} else {
			UVMHIST_LOG(ubchist, "unbusying pg %#jx",
			    (uintptr_t)pg, 0, 0, 0);
			KASSERT((pg->flags & PG_FAKE) == 0);
			/* Clear busy, then wake any waiters under the interlock. */
			pg->flags &= ~PG_BUSY;
			uvm_pagelock(pg);
			uvm_pagewakeup(pg);
			uvm_pageunlock(pg);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	if (pageout_done != 0) {
		uvm_pageout_done(pageout_done);
	}
}
      
      /*
       * uvm_pagewait: wait for a busy page
       *
       * => page must be known PG_BUSY
       * => object must be read or write locked
       * => object will be unlocked on return
       */
      
void
uvm_pagewait(struct vm_page *pg, krwlock_t *lock, const char *wmesg)
{

	KASSERT(rw_lock_held(lock));
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(uvm_page_owner_locked_p(pg, false));

	/*
	 * Record the wait intent under the page interlock before dropping
	 * the object lock, so uvm_pagewakeup() cannot miss us.  The
	 * interlock itself is released atomically with going to sleep.
	 */
	mutex_enter(&pg->interlock);
	pg->pqflags |= PQ_WANTED;
	rw_exit(lock);
	UVM_UNLOCK_AND_WAIT(pg, &pg->interlock, false, wmesg, 0);
}
      
      /*
       * uvm_pagewakeup: wake anyone waiting on a page
       *
       * => page interlock must be held
       */
      
      void
      uvm_pagewakeup(struct vm_page *pg)
      {
              UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
      
   77         KASSERT(mutex_owned(&pg->interlock));
      
              UVMHIST_LOG(ubchist, "waking pg %#jx", (uintptr_t)pg, 0, 0, 0);
      
   77         if ((pg->pqflags & PQ_WANTED) != 0) {
                      wakeup(pg);
                      pg->pqflags &= ~PQ_WANTED;
              }
      }
      
      /*
       * uvm_pagewanted_p: return true if someone is waiting on the page
       *
       * => object must be write locked (lock out all concurrent access)
       */
      
      bool
      uvm_pagewanted_p(struct vm_page *pg)
      {
      
              KASSERT(uvm_page_owner_locked_p(pg, true));
      
              return (atomic_load_relaxed(&pg->pqflags) & PQ_WANTED) != 0;
      }
      
      #if defined(UVM_PAGE_TRKOWN)
      /*
       * uvm_page_own: set or release page ownership
       *
       * => this is a debugging function that keeps track of who sets PG_BUSY
       *        and where they do it.   it can be used to track down problems
       *        such a process setting "PG_BUSY" and never releasing it.
       * => page's object [if any] must be locked
       * => if "tag" is NULL then we are releasing page ownership
       */
void
uvm_page_own(struct vm_page *pg, const char *tag)
{

	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
	KASSERT(uvm_page_owner_locked_p(pg, true));

	/* gain ownership? */
	if (tag) {
		/* Only a busy page may be claimed. */
		KASSERT((pg->flags & PG_BUSY) != 0);
		/* Claiming an already-owned page is a caller bug: panic. */
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d.%d [%s]\n", pg,
			    pg->owner, pg->lowner, pg->owner_tag);
			panic("uvm_page_own");
		}
		/* Record claiming proc/lwp plus the caller-supplied tag. */
		pg->owner = curproc->p_pid;
		pg->lowner = curlwp->l_lid;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	KASSERT((pg->flags & PG_BUSY) == 0);
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of an non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
}
      #endif
      
      /*
       * uvm_pagelookup: look up a page
       *
       * => caller should lock object to keep someone from pulling the page
       *        out from under it
       */
      
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	struct vm_page *pg;
	bool ddb __diagused = false;
#ifdef DDB
	extern int db_active;
	/* The lock assertion is waived while the kernel debugger is active. */
	ddb = db_active != 0;
#endif

	KASSERT(ddb || rw_lock_held(obj->vmobjlock));

	/* Pages are indexed in the object's radix tree by page number. */
	pg = radix_tree_lookup_node(&obj->uo_pages, off >> PAGE_SHIFT);

	KASSERT(pg == NULL || obj->uo_npages != 0);
	/* A released or pageout page must still be busy for its owner. */
	KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		(pg->flags & PG_BUSY) != 0);
	return pg;
}
      
      /*
       * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
       *
       * => caller must lock objects
       * => caller must hold pg->interlock
       */
      
void
uvm_pagewire(struct vm_page *pg)
{

	KASSERT(uvm_page_owner_locked_p(pg, true));
	KASSERT(mutex_owned(&pg->interlock));
#if defined(READAHEAD_STATS)
	/* Wiring a read-ahead page counts as a read-ahead hit. */
	if ((pg->flags & PG_READAHEAD) != 0) {
		uvm_ra_hit.ev_count++;
		pg->flags &= ~PG_READAHEAD;
	}
#endif /* defined(READAHEAD_STATS) */
	/* The first wiring takes the page off the paging queues. */
	if (pg->wire_count == 0) {
		uvm_pagedequeue(pg);
		atomic_inc_uint(&uvmexp.wired);
	}
	pg->wire_count++;
	KASSERT(pg->wire_count > 0);	/* detect wraparound */
}
      
      /*
       * uvm_pageunwire: unwire the page.
       *
       * => activate if wire count goes to zero.
       * => caller must lock objects
       * => caller must hold pg->interlock
       */
      
void
uvm_pageunwire(struct vm_page *pg)
{

	KASSERT(uvm_page_owner_locked_p(pg, true));
	KASSERT(pg->wire_count != 0);
	/* A wired page must not be sitting on any paging queue. */
	KASSERT(!uvmpdpol_pageisqueued_p(pg));
	KASSERT(mutex_owned(&pg->interlock));
	pg->wire_count--;
	/* Dropping the last wiring returns the page to the pagedaemon. */
	if (pg->wire_count == 0) {
		uvm_pageactivate(pg);
		KASSERT(uvmexp.wired != 0);
		atomic_dec_uint(&uvmexp.wired);
	}
}
      
      /*
       * uvm_pagedeactivate: deactivate page
       *
       * => caller must lock objects
       * => caller must check to make sure page is not wired
       * => object that page belongs to must be locked (so we can adjust pg->flags)
       * => caller must clear the reference on the page before calling
       * => caller must hold pg->interlock
       */
      
      void
      uvm_pagedeactivate(struct vm_page *pg)
      {
      
   14         KASSERT(uvm_page_owner_locked_p(pg, false));
   14         KASSERT(mutex_owned(&pg->interlock));
   14         if (pg->wire_count == 0) {
   13                 KASSERT(uvmpdpol_pageisqueued_p(pg));
   13                 uvmpdpol_pagedeactivate(pg);
              }
      }
      
      /*
       * uvm_pageactivate: activate page
       *
       * => caller must lock objects
       * => caller must hold pg->interlock
       */
      
      void
      uvm_pageactivate(struct vm_page *pg)
      {
      
  751         KASSERT(uvm_page_owner_locked_p(pg, false));
  751         KASSERT(mutex_owned(&pg->interlock));
      #if defined(READAHEAD_STATS)
              if ((pg->flags & PG_READAHEAD) != 0) {
                      uvm_ra_hit.ev_count++;
                      pg->flags &= ~PG_READAHEAD;
              }
      #endif /* defined(READAHEAD_STATS) */
  751         if (pg->wire_count == 0) {
  741                 uvmpdpol_pageactivate(pg);
              }
      }
      
      /*
       * uvm_pagedequeue: remove a page from any paging queue
       *
       * => caller must lock objects
       * => caller must hold pg->interlock
       */
      void
      uvm_pagedequeue(struct vm_page *pg)
      {
      
  141         KASSERT(uvm_page_owner_locked_p(pg, true));
  140         KASSERT(mutex_owned(&pg->interlock));
  141         if (uvmpdpol_pageisqueued_p(pg)) {
   75                 uvmpdpol_pagedequeue(pg);
              }
      }
      
      /*
       * uvm_pageenqueue: add a page to a paging queue without activating.
       * used where a page is not really demanded (yet).  eg. read-ahead
       *
       * => caller must lock objects
       * => caller must hold pg->interlock
       */
      void
      uvm_pageenqueue(struct vm_page *pg)
      {
      
   14         KASSERT(uvm_page_owner_locked_p(pg, false));
   14         KASSERT(mutex_owned(&pg->interlock));
   14         if (pg->wire_count == 0 && !uvmpdpol_pageisqueued_p(pg)) {
   11                 uvmpdpol_pageenqueue(pg);
              }
      }
      
      /*
       * uvm_pagelock: acquire page interlock
       */
void
uvm_pagelock(struct vm_page *pg)
{

	/* Acquire the page interlock; release with uvm_pageunlock(). */
	mutex_enter(&pg->interlock);
}
      
      /*
       * uvm_pagelock2: acquire two page interlocks
       */
      void
      uvm_pagelock2(struct vm_page *pg1, struct vm_page *pg2)
      {
      
              if (pg1 < pg2) {
                      mutex_enter(&pg1->interlock);
                      mutex_enter(&pg2->interlock);
              } else {
                      mutex_enter(&pg2->interlock);
                      mutex_enter(&pg1->interlock);
              }
      }
      
      /*
       * uvm_pageunlock: release page interlock, and if a page replacement intent
       * is set on the page, pass it to uvmpdpol to make real.
       *
       * => caller must hold pg->interlock
       */
      void
      uvm_pageunlock(struct vm_page *pg)
      {
      
  808         if ((pg->pqflags & PQ_INTENT_SET) == 0 ||
                  (pg->pqflags & PQ_INTENT_QUEUED) != 0) {
  463                     mutex_exit(&pg->interlock);
                          return;
              }
  748         pg->pqflags |= PQ_INTENT_QUEUED;
              mutex_exit(&pg->interlock);
              uvmpdpol_pagerealize(pg);
      }
      
      /*
       * uvm_pageunlock2: release two page interlocks, and for both pages if a
       * page replacement intent is set on the page, pass it to uvmpdpol to make
       * real.
       *
       * => caller must hold pg->interlock
       */
      void
      uvm_pageunlock2(struct vm_page *pg1, struct vm_page *pg2)
      {
      
              if ((pg1->pqflags & PQ_INTENT_SET) == 0 ||
                  (pg1->pqflags & PQ_INTENT_QUEUED) != 0) {
                          mutex_exit(&pg1->interlock);
                          pg1 = NULL;
              } else {
                      pg1->pqflags |= PQ_INTENT_QUEUED;
                      mutex_exit(&pg1->interlock);
              }
      
              if ((pg2->pqflags & PQ_INTENT_SET) == 0 ||
                  (pg2->pqflags & PQ_INTENT_QUEUED) != 0) {
                          mutex_exit(&pg2->interlock);
                          pg2 = NULL;
              } else {
                      pg2->pqflags |= PQ_INTENT_QUEUED;
                      mutex_exit(&pg2->interlock);
              }
      
              if (pg1 != NULL) {
                      uvmpdpol_pagerealize(pg1);
              }
              if (pg2 != NULL) {
                      uvmpdpol_pagerealize(pg2);
              }
      }
      
      /*
       * uvm_pagezero: zero fill a page
       *
       * => if page is part of an object then the object should be locked
       *        to protect pg->flags.
       */
      
void
uvm_pagezero(struct vm_page *pg)
{

	/* Mark dirty before touching the data so state never lags content. */
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	pmap_zero_page(VM_PAGE_TO_PHYS(pg));
}
      
      /*
       * uvm_pagecopy: copy a page
       *
       * => if page is part of an object then the object should be locked
       *        to protect pg->flags.
       */
      
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{

	/* The destination is modified, so mark it dirty before the copy. */
	uvm_pagemarkdirty(dst, UVM_PAGE_STATUS_DIRTY);
	pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}
      
      /*
       * uvm_pageismanaged: test it see that a page (specified by PA) is managed.
       */
      
      bool
      uvm_pageismanaged(paddr_t pa)
      {
      
              return (uvm_physseg_find(atop(pa), NULL) != UVM_PHYSSEG_TYPE_INVALID);
      }
      
      /*
       * uvm_page_lookup_freelist: look up the free list for the specified page
       */
      
int
uvm_page_lookup_freelist(struct vm_page *pg)
{
	uvm_physseg_t upm;

	/* Map the page's frame number back to its physical segment. */
	upm = uvm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID);
	/* The free list is a property of the segment, not the page. */
	return uvm_physseg_get_free_list(upm);
}
      
      /*
       * uvm_page_owner_locked_p: return true if object associated with page is
       * locked.  this is a weak check for runtime assertions only.
       */
      
      bool
 1015 uvm_page_owner_locked_p(struct vm_page *pg, bool exclusive)
      {
      
 1014         if (pg->uobject != NULL) {
  652                 return exclusive
                          ? rw_write_held(pg->uobject->vmobjlock)
                          : rw_lock_held(pg->uobject->vmobjlock);
              }
  821         if (pg->uanon != NULL) {
  603                 return exclusive
  640                     ? rw_write_held(pg->uanon->an_lock)
 1015                     : rw_lock_held(pg->uanon->an_lock);
              }
              return true;
      }
      
      /*
       * uvm_pagereadonly_p: return if the page should be mapped read-only
       */
      
      bool
      uvm_pagereadonly_p(struct vm_page *pg)
      {
  709         struct uvm_object * const uobj = pg->uobject;
      
  435         KASSERT(uobj == NULL || rw_lock_held(uobj->vmobjlock));
  478         KASSERT(uobj != NULL || rw_lock_held(pg->uanon->an_lock));
  710         if ((pg->flags & PG_RDONLY) != 0) {
  710                 return true;
              }
  705         if (uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
                      return true;
              }
  510         if (uobj == NULL) {
                      return false;
              }
   54         return UVM_OBJ_NEEDS_WRITEFAULT(uobj);
      }
      
      #ifdef PMAP_DIRECT
      /*
       * Call pmap to translate physical address into a virtual and to run a callback
       * for it. Used to avoid actually mapping the pages, pmap most likely uses direct map
       * or equivalent.
       */
int
uvm_direct_process(struct vm_page **pgs, u_int npages, voff_t off, vsize_t len,
	    int (*process)(void *, size_t, void *), void *arg)
{
	int error = 0;
	paddr_t pa;
	size_t todo;
	voff_t pgoff = (off & PAGE_MASK);	/* offset within first page */
	struct vm_page *pg;

	KASSERT(npages > 0 && len > 0);

	for (int i = 0; i < npages; i++) {
		pg = pgs[i];

		KASSERT(len > 0);

		/*
		 * Caller is responsible for ensuring all the pages are
		 * available.
		 */
		KASSERT(pg != NULL && pg != PGO_DONTCARE);

		pa = VM_PAGE_TO_PHYS(pg);
		/* Clamp to however much of this page the request covers. */
		todo = MIN(len, PAGE_SIZE - pgoff);

		error = pmap_direct_process(pa, pgoff, todo, process, arg);
		if (error)
			break;

		/* Only the first page may start at a non-zero offset. */
		pgoff = 0;
		len -= todo;
	}

	KASSERTMSG(error != 0 || len == 0, "len %lu != 0 for non-error", len);
	return error;
}
      #endif /* PMAP_DIRECT */
      
      #if defined(DDB) || defined(DEBUGPRINT)
      
      /*
       * uvm_page_printit: actually print the page
       */
      
      static const char page_flagbits[] = UVM_PGFLAGBITS;
      static const char page_pqflagbits[] = UVM_PQFLAGBITS;
      
void
uvm_page_printit(struct vm_page *pg, bool full,
    void (*pr)(const char *, ...))
{
	struct vm_page *tpg;
	struct uvm_object *uobj;
	struct pgflbucket *pgb;
	struct pgflist *pgl;
	char pgbuf[128];	/* snprintb() scratch buffer */

	/* Basic identity: flags, ownership, wiring, physical address. */
	(*pr)("PAGE %p:\n", pg);
	snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
	(*pr)("  flags=%s\n", pgbuf);
	snprintb(pgbuf, sizeof(pgbuf), page_pqflagbits, pg->pqflags);
	(*pr)("  pqflags=%s\n", pgbuf);
	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx\n",
	    pg->uobject, pg->uanon, (long long)pg->offset);
	(*pr)("  loan_count=%d wire_count=%d bucket=%d freelist=%d\n",
	    pg->loan_count, pg->wire_count, uvm_page_get_bucket(pg),
	    uvm_page_get_freelist(pg));
	(*pr)("  pa=0x%lx\n", (long)VM_PAGE_TO_PHYS(pg));
#if defined(UVM_PAGE_TRKOWN)
	/* Ownership info is only meaningful while the page is busy. */
	if (pg->flags & PG_BUSY)
		(*pr)("  owning process = %d.%d, tag=%s\n",
		    pg->owner, pg->lowner, pg->owner_tag);
	else
		(*pr)("  page not busy, no owner\n");
#else
	(*pr)("  [page ownership tracking disabled]\n");
#endif

	/* The consistency cross-checks below only run on request. */
	if (!full)
		return;

	/* cross-verify object/anon */
	if ((pg->flags & PG_FREE) == 0) {
		if (pg->flags & PG_ANON) {
			/* An anon-owned page must be its anon's an_page. */
			if (pg->uanon == NULL || pg->uanon->an_page != pg)
			    (*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
				(pg->uanon) ? pg->uanon->an_page : NULL);
			else
				(*pr)("  anon backpointer is OK\n");
		} else {
			/* An object-owned page must be found by lookup. */
			uobj = pg->uobject;
			if (uobj) {
				(*pr)("  checking object list\n");
				tpg = uvm_pagelookup(uobj, pg->offset);
				if (tpg)
					(*pr)("  page found on object list\n");
				else
			(*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
			}
		}
	}

	/* cross-verify page queue */
	if (pg->flags & PG_FREE) {
		/* A free page must be on its bucket's color list. */
		int fl = uvm_page_get_freelist(pg);
		int b = uvm_page_get_bucket(pg);
		pgb = uvm.page_free[fl].pgfl_buckets[b];
		pgl = &pgb->pgb_colors[VM_PGCOLOR(pg)];
		(*pr)("  checking pageq list\n");
		LIST_FOREACH(tpg, pgl, pageq.list) {
			if (tpg == pg) {
				break;
			}
		}
		if (tpg)
			(*pr)("  page found on pageq list\n");
		else
			(*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
	}
}
      
      /*
       * uvm_page_printall - print a summary of all managed pages
       */
      
void
uvm_page_printall(void (*pr)(const char *, ...))
{
	uvm_physseg_t i;
	paddr_t pfn;
	struct vm_page *pg;

	(*pr)("%18s %4s %4s %18s %18s"
#ifdef UVM_PAGE_TRKOWN
	    " OWNER"
#endif
	    "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
	/* Walk every physical segment, and every page frame within it. */
	for (i = uvm_physseg_get_first();
	     uvm_physseg_valid_p(i);
	     i = uvm_physseg_get_next(i)) {
		for (pfn = uvm_physseg_get_start(i);
		     pfn < uvm_physseg_get_end(i);
		     pfn++) {
			pg = PHYS_TO_VM_PAGE(ptoa(pfn));

			(*pr)("%18p %04x %08x %18p %18p",
			    pg, pg->flags, pg->pqflags, pg->uobject,
			    pg->uanon);
#ifdef UVM_PAGE_TRKOWN
			/* Owner is only meaningful while the page is busy. */
			if (pg->flags & PG_BUSY)
				(*pr)(" %d [%s]", pg->owner, pg->owner_tag);
#endif
			(*pr)("\n");
		}
	}
}
      
      /*
       * uvm_page_print_freelists - print a summary freelists
       */
      
void
uvm_page_print_freelists(void (*pr)(const char *, ...))
{
	struct pgfreelist *pgfl;
	struct pgflbucket *pgb;
	int fl, b, c;

	(*pr)("There are %d freelists with %d buckets of %d colors.\n\n",
	    VM_NFREELIST, uvm.bucketcount, uvmexp.ncolors);

	/* Free pages are organized freelist -> bucket -> color list. */
	for (fl = 0; fl < VM_NFREELIST; fl++) {
		pgfl = &uvm.page_free[fl];
		(*pr)("freelist(%d) @ %p\n", fl, pgfl);
		for (b = 0; b < uvm.bucketcount; b++) {
			pgb = uvm.page_free[fl].pgfl_buckets[b];
			(*pr)("    bucket(%d) @ %p, nfree = %d, lock @ %p:\n",
			    b, pgb, pgb->pgb_nfree,
			    &uvm_freelist_locks[b].lock);
			for (c = 0; c < uvmexp.ncolors; c++) {
				(*pr)("        color(%d) @ %p, ", c,
				    &pgb->pgb_colors[c]);
				(*pr)("first page = %p\n",
				    LIST_FIRST(&pgb->pgb_colors[c]));
			}
		}
	}
}
      
      #endif /* DDB || DEBUGPRINT */
      /*        $NetBSD: usb_quirks.c,v 1.96 2020/12/26 22:15:37 jym Exp $        */
      /*        $FreeBSD: src/sys/dev/usb/usb_quirks.c,v 1.30 2003/01/02 04:15:55 imp Exp $        */
      
      /*
       * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
       * All rights reserved.
       *
       * This code is derived from software contributed to The NetBSD Foundation
       * by Lennart Augustsson (lennart@augustsson.net) at
       * Carlstedt Research & Technology.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       *
       * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
       * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
       * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
       * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
       * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
       * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
       * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
       * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
       * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
       * POSSIBILITY OF SUCH DAMAGE.
       */
      
      #include <sys/cdefs.h>
      __KERNEL_RCSID(0, "$NetBSD: usb_quirks.c,v 1.96 2020/12/26 22:15:37 jym Exp $");
      
      #ifdef _KERNEL_OPT
      #include "opt_usb.h"
      #endif
      
      #include <sys/param.h>
      #include <sys/systm.h>
      
      #include <dev/usb/usb.h>
      #include <dev/usb/usbdevs.h>
      #include <dev/usb/usbdi.h>
      #include <dev/usb/usbdivar.h>
      #include <dev/usb/usbhist.h>
      #include <dev/usb/usb_quirks.h>
      
      #ifdef USB_DEBUG
      extern int usbdebug;
      #endif
      
      #define DPRINTF(FMT,A,B,C,D)    USBHIST_LOG(usbdebug,FMT,A,B,C,D)
      
      #define ANY 0xffff
      #define _USETW(w) { (w) & 0x00ff, ((w) & 0xff00) >> 8 }
      
      /*
       * NXP PN533 NFC chip descriptors
       */
/* Fake bulk-IN endpoint descriptor (endpoint 4, 64-byte packets). */
static const usb_endpoint_descriptor_t desc_ep_pn533_in = {
	/* bLength */		sizeof(desc_ep_pn533_in),
	/* bDescriptorType */	UDESC_ENDPOINT,
	/* bEndpointAddress */	UE_DIR_IN | 0x04,
	/* bmAttributes */	UE_BULK,
	/* wMaxPacketSize */	_USETW(0x0040),
	/* bInterval */		0x04, /* NOTE(review): old comment said 255ms, but the value is 4 — confirm intent */
};
      
      static const usb_endpoint_descriptor_t desc_ep_pn533_out = {
              /* bLength */                sizeof(desc_ep_pn533_in),
              /* bDescriptorType */        UDESC_ENDPOINT,
              /* bEndpointAddress */        UE_DIR_OUT | 0x04,
              /* bmAttributes */        UE_BULK,
              /* wMaxPacketSize */        _USETW(0x0040),
              /* bInterval */                0x04, /* 255ms */
      };
      
/* Fake vendor-specific interface tying together the two bulk endpoints. */
static const usb_interface_descriptor_t desc_iface_pn533 = {
	/* bLength */		sizeof(desc_iface_pn533),
	/* bDescriptorType */	UDESC_INTERFACE,
	/* bInterfaceNumber */	0,
	/* bAlternateSetting */	0,
	/* bNumEndpoints */	2,
	/* bInterfaceClass */	0xff,	/* vendor specific */
	/* bInterfaceSubClass */ 0xff,
	/* bInterfaceProtocol */ 0xff,
	/* iInterface */	0,	/* no string descriptor */
};
      
/*
 * Hardcoded configuration descriptor for the NXP PN533.  wTotalLength
 * covers the whole bundle: config + interface + both endpoint descriptors.
 */
static const usb_config_descriptor_t desc_conf_pn533 = {
        /* bLength */                 sizeof(desc_conf_pn533),
        /* bDescriptorType */         UDESC_CONFIG,
        /* wTotalLength         */         _USETW(sizeof(desc_conf_pn533) +
                                        sizeof(desc_iface_pn533) +
                                        sizeof(desc_ep_pn533_in) +
                                        sizeof(desc_ep_pn533_out)
                                 ),
        /* bNumInterfaces        */         1,
        /* bConfigurationValue */1,
        /* iConfiguration */         0,
        /* bmAttributes        */         UC_ATTR_MBO,
        /* bMaxPower */                 0x32, /* 100mA */
};
      
/*
 * NULL-terminated list of the PN533 fake descriptors, config first.
 * usbd_get_desc_fake() copies a matching descriptor and everything
 * following it up to the next descriptor of the same type, so the
 * interface and endpoint entries must follow the config entry here.
 */
static const usb_descriptor_t *desc_pn533[] = {
        (const usb_descriptor_t *)&desc_conf_pn533,
        (const usb_descriptor_t *)&desc_iface_pn533,
        (const usb_descriptor_t *)&desc_ep_pn533_out,
        (const usb_descriptor_t *)&desc_ep_pn533_in,
        NULL
};
      
      
      usbd_status
      usbd_get_desc_fake(struct usbd_device *dev, int type, int index,
                         int len, void *desc)
      {
              USBHIST_FUNC(); USBHIST_CALLED(usbdebug);
      #ifdef USB_DEBUG
              const usb_device_descriptor_t *dd = usbd_get_device_descriptor(dev);
      #endif
              const usb_descriptor_t *ub;
              int i = 0;
              int j = 0;
              usbd_status err = USBD_INVAL;
      
              if (dev->ud_quirks == NULL || dev->ud_quirks->desc == NULL) {
                      DPRINTF("%04jx/%04j: no fake descriptors",
                              UGETW(dd->idVendor), UGETW(dd->idProduct), 0, 0);
                      goto out;
              }
      
              for (j = 0; dev->ud_quirks->desc[j]; j++) {
                      ub = dev->ud_quirks->desc[j];
                      if (ub->bDescriptorType == type && i++ == index)
                              break;
              }
      
              if (dev->ud_quirks->desc[j] == NULL) {
                      DPRINTF("%04jx/%04jx: no fake descriptor type = %jd, len = %jd",
                             UGETW(dd->idVendor), UGETW(dd->idProduct), type, len);
                      goto out;
              }
      
              do {
                      ub = dev->ud_quirks->desc[j];
      
                      if (ub->bLength > len) {
                              DPRINTF("%04jx/%04jx: short buf len = %jd, bLength = %jd",
                                      UGETW(dd->idVendor), UGETW(dd->idProduct),
                                      type, ub->bLength);
                              goto out;
                      }
      
                      memcpy(desc, ub, ub->bLength);
                      DPRINTF("%04jx/%04jx: Use fake descriptor type %jd",
                              UGETW(dd->idVendor), UGETW(dd->idProduct),
                              type, 0);
      
                      desc = (char *)desc + ub->bLength;
                      len -= ub->bLength;
                      j++;
              } while (len && dev->ud_quirks->desc[j] &&
                       dev->ud_quirks->desc[j]->bDescriptorType != type);
      
              err = USBD_NORMAL_COMPLETION;
      
              DPRINTF("%04jx/%04jx: Using fake USB descriptors\n",
                      UGETW(dd->idVendor), UGETW(dd->idProduct), 0, 0);
      out:
              DPRINTF("return err = %jd", err, 0, 0, 0);
              return err;
      }
      
/*
 * Per-device quirk table, matched by vendor / product / revision
 * (ANY wildcards product or revision).  First match wins; the table
 * is terminated by an all-zero sentinel entry.
 */
Static const struct usbd_quirk_entry {
        uint16_t idVendor;
        uint16_t idProduct;
        uint16_t bcdDevice;
        struct usbd_quirks quirks;
} usb_quirks[] = {
 /* Devices which should be ignored by uhid */
 { USB_VENDOR_APC,                USB_PRODUCT_APC_UPS,                        ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_CYBERPOWER,        USB_PRODUCT_CYBERPOWER_UPS,                ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_GRETAGMACBETH,        ANY,                                        ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_MGE,                USB_PRODUCT_MGE_UPS1,                        ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_MGE,                USB_PRODUCT_MGE_UPS2,                        ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_MICROCHIP,        USB_PRODUCT_MICROCHIP_PICKIT1,                ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_TRIPPLITE2,        ANY,                                        ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_MISC,                USB_PRODUCT_MISC_WISPY_24X,                ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_WELTREND,        USB_PRODUCT_WELTREND_HID,                ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_SILABS,                USB_PRODUCT_SILABS_EC3,                        ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_TI,                USB_PRODUCT_TI_MSP430,                        ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_XRITE,                ANY,                                        ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_WAYTECH,                USB_PRODUCT_WAYTECH_USB2SERIAL,                ANY,
        { UQ_HID_IGNORE, NULL }},
 { USB_VENDOR_KYE,                USB_PRODUCT_KYE_NICHE,                        0x100,
        { UQ_NO_SET_PROTO, NULL }},
 { USB_VENDOR_INSIDEOUT,        USB_PRODUCT_INSIDEOUT_EDGEPORT4,        0x094,
        { UQ_SWAP_UNICODE, NULL }},
 { USB_VENDOR_DALLAS,                USB_PRODUCT_DALLAS_J6502,                0x0a2,
        { UQ_BAD_ADC, NULL }},
 { USB_VENDOR_DALLAS,                USB_PRODUCT_DALLAS_J6502,                0x0a2,
        { UQ_AU_NO_XU, NULL }},
 { USB_VENDOR_ALTEC,                USB_PRODUCT_ALTEC_ADA70,                0x103,
        { UQ_BAD_ADC, NULL }},
 { USB_VENDOR_ALTEC,                USB_PRODUCT_ALTEC_ASC495,                0x000,
        { UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_SONY,                USB_PRODUCT_SONY_PS2EYETOY4,                0x000,
        { UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_SONY,                USB_PRODUCT_SONY_PS2EYETOY5,                0x000,
        { UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_PHILIPS,                USB_PRODUCT_PHILIPS_PCVC740K,                ANY,
        { UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_LOGITECH,                USB_PRODUCT_LOGITECH_QUICKCAMPRONB,        0x000,
        { UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_LOGITECH,                USB_PRODUCT_LOGITECH_QUICKCAMPRO4K,        0x000,
        { UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_LOGITECH,                USB_PRODUCT_LOGITECH_QUICKCAMMESS,        0x100,
        { UQ_BAD_ADC, NULL }},
 { USB_VENDOR_QTRONIX,                USB_PRODUCT_QTRONIX_980N,                0x110,
        { UQ_SPUR_BUT_UP, NULL }},
 { USB_VENDOR_ALCOR2,                USB_PRODUCT_ALCOR2_KBD_HUB,                0x001,
        { UQ_SPUR_BUT_UP, NULL }},
 { USB_VENDOR_METRICOM,                USB_PRODUCT_METRICOM_RICOCHET_GS,        0x100,
        { UQ_ASSUME_CM_OVER_DATA, NULL }},
 { USB_VENDOR_SANYO,                USB_PRODUCT_SANYO_SCP4900,                0x000,
        { UQ_ASSUME_CM_OVER_DATA, NULL }},
 { USB_VENDOR_MOTOROLA2,        USB_PRODUCT_MOTOROLA2_T720C,                0x001,
        { UQ_ASSUME_CM_OVER_DATA, NULL }},
 { USB_VENDOR_EICON,                USB_PRODUCT_EICON_DIVA852,                0x100,
        { UQ_ASSUME_CM_OVER_DATA, NULL }},
 { USB_VENDOR_SIEMENS2,                USB_PRODUCT_SIEMENS2_MC75,                0x000,
        { UQ_ASSUME_CM_OVER_DATA, NULL }},
 { USB_VENDOR_TELEX,                USB_PRODUCT_TELEX_MIC1,                        0x009,
        { UQ_AU_NO_FRAC, NULL }},
 { USB_VENDOR_SILICONPORTALS,        USB_PRODUCT_SILICONPORTALS_YAPPHONE,        0x100,
        { UQ_AU_INP_ASYNC, NULL }},
 { USB_VENDOR_AVANCELOGIC,        USB_PRODUCT_AVANCELOGIC_USBAUDIO,        0x101,
        { UQ_AU_INP_ASYNC, NULL }},
 { USB_VENDOR_PLANTRONICS,        USB_PRODUCT_PLANTRONICS_HEADSET,        0x004,
        { UQ_AU_INP_ASYNC, NULL }},
 { USB_VENDOR_CMEDIA,                USB_PRODUCT_CMEDIA_USBAUDIO,                ANY,
        { UQ_AU_INP_ASYNC, NULL }},

 /* XXX These should have a revision number, but I don't know what they are. */
 { USB_VENDOR_HP,                USB_PRODUCT_HP_895C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_HP,                USB_PRODUCT_HP_880C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_HP,                USB_PRODUCT_HP_815C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_HP,                USB_PRODUCT_HP_810C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_HP,                USB_PRODUCT_HP_830C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_HP,                USB_PRODUCT_HP_885C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_HP,                USB_PRODUCT_HP_840C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_HP,                USB_PRODUCT_HP_816C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_HP,                USB_PRODUCT_HP_959C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_MTK,                USB_PRODUCT_MTK_GPS_RECEIVER,                ANY,
        { UQ_NO_UNION_NRM, NULL }},
 { USB_VENDOR_NEC,                USB_PRODUCT_NEC_PICTY900,                ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_NEC,                USB_PRODUCT_NEC_PICTY760,                ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_NEC,                USB_PRODUCT_NEC_PICTY920,                ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_NEC,                USB_PRODUCT_NEC_PICTY800,                ANY,
        { UQ_BROKEN_BIDIR, NULL }},
 { USB_VENDOR_HP,                USB_PRODUCT_HP_1220C,                        ANY,
        { UQ_BROKEN_BIDIR, NULL }},

 /* Apple internal notebook ISO keyboards have swapped keys */
 { USB_VENDOR_APPLE,                USB_PRODUCT_APPLE_FOUNTAIN_ISO,                ANY,
        { UQ_APPLE_ISO, NULL }},
 { USB_VENDOR_APPLE,                USB_PRODUCT_APPLE_GEYSER_ISO,                ANY,
        { UQ_APPLE_ISO, NULL }},

 /* HID and audio are both invalid on iPhone/iPod Touch */
 { USB_VENDOR_APPLE,                USB_PRODUCT_APPLE_IPHONE,                ANY,
        { UQ_HID_IGNORE | UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_APPLE,                USB_PRODUCT_APPLE_IPOD_TOUCH,                ANY,
        { UQ_HID_IGNORE | UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_APPLE,                USB_PRODUCT_APPLE_IPOD_TOUCH_4G,        ANY,
        { UQ_HID_IGNORE | UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_APPLE,                USB_PRODUCT_APPLE_IPHONE_3G,                ANY,
        { UQ_HID_IGNORE | UQ_BAD_AUDIO, NULL }},
 { USB_VENDOR_APPLE,                USB_PRODUCT_APPLE_IPHONE_3GS,                ANY,
        { UQ_HID_IGNORE | UQ_BAD_AUDIO, NULL }},

 { USB_VENDOR_LG,                USB_PRODUCT_LG_CDMA_MSM,                ANY,
        { UQ_ASSUME_CM_OVER_DATA, NULL }},
 { USB_VENDOR_QUALCOMM2,        USB_PRODUCT_QUALCOMM2_CDMA_MSM,                ANY,
        { UQ_ASSUME_CM_OVER_DATA, NULL }},
 { USB_VENDOR_HYUNDAI,                USB_PRODUCT_HYUNDAI_UM175,                ANY,
        { UQ_ASSUME_CM_OVER_DATA, NULL }},
 { USB_VENDOR_ZOOM,                USB_PRODUCT_ZOOM_3095,                        ANY,
        { UQ_LOST_CS_DESC, NULL }},

 /*
  * NXP PN533 bugs
  *
  * 1. It corrupts its USB descriptors. The quirk is to provide hardcoded
  *    descriptors instead of getting them from the device.
  * 2. It mishandles the USB toggle bit. This causes some replies to be
  *    filtered out by the USB host controller and be reported as timed out.
  *    NFC tool's libnfc works around this bug by sending a dummy frame to
  *    resync the toggle bit, but in order to succeed, that operation must
  *    not be reported as failed. The quirk is therefore to pretend to
  *    userland that output timeouts are successes.
  */
 { USB_VENDOR_PHILIPSSEMI,        USB_PRODUCT_PHILIPSSEMI_PN533,                ANY,
        { UQ_DESC_CORRUPT | UQ_MISS_OUT_ACK, desc_pn533 }},
 { USB_VENDOR_SHUTTLE,                USB_PRODUCT_SHUTTLE_SCL3711,                ANY,
        { UQ_DESC_CORRUPT | UQ_MISS_OUT_ACK, desc_pn533 }},
 { USB_VENDOR_SHUTTLE,                USB_PRODUCT_SHUTTLE_SCL3712,                ANY,
        { UQ_DESC_CORRUPT | UQ_MISS_OUT_ACK, desc_pn533 }},
 /* all-zero sentinel: terminates the table */
 { 0, 0, 0, { 0, NULL } }
};
      
      const struct usbd_quirks usbd_no_quirk = { 0 };
      
      const struct usbd_quirks *
      usbd_find_quirk(usb_device_descriptor_t *d)
      {
              const struct usbd_quirk_entry *t;
    4         uint16_t vendor = UGETW(d->idVendor);
              uint16_t product = UGETW(d->idProduct);
              uint16_t revision = UGETW(d->bcdDevice);
      
    4         for (t = usb_quirks; t->idVendor != 0; t++) {
    4                 if (t->idVendor == vendor &&
                          (t->idProduct == ANY || t->idProduct == product) &&
                          (t->bcdDevice == ANY || t->bcdDevice == revision))
                              break;
              }
      #ifdef USB_DEBUG
              if (usbdebug && t->quirks.uq_flags)
                      printf("usbd_find_quirk 0x%04x/0x%04x/%x: %d\n",
                                UGETW(d->idVendor), UGETW(d->idProduct),
                                UGETW(d->bcdDevice), t->quirks.uq_flags);
      #endif
    4         return &t->quirks;
      }
      /* $NetBSD: wsmouse.c,v 1.69 2020/12/27 16:09:33 tsutsui Exp $ */
      
      /*-
       * Copyright (c) 2006 The NetBSD Foundation, Inc.
       * All rights reserved.
       *
       * This code is derived from software contributed to The NetBSD Foundation
       * by Julio M. Merino Vidal.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       *
       * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
       * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
       * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
       * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
       * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
       * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
       * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
       * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
       * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
       * POSSIBILITY OF SUCH DAMAGE.
       */
      
      /*
       * Copyright (c) 1996, 1997 Christopher G. Demetriou.  All rights reserved.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. All advertising materials mentioning features or use of this software
       *    must display the following acknowledgement:
       *      This product includes software developed by Christopher G. Demetriou
       *        for the NetBSD Project.
       * 4. The name of the author may not be used to endorse or promote products
       *    derived from this software without specific prior written permission
       *
       * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
       * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
       * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
       * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
       * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
       * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
       * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
       * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
       * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
       * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
       */
      
      /*
       * Copyright (c) 1992, 1993
       *        The Regents of the University of California.  All rights reserved.
       *
       * This software was developed by the Computer Systems Engineering group
       * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
       * contributed to Berkeley.
       *
       * All advertising materials mentioning features or use of this software
       * must display the following acknowledgement:
       *        This product includes software developed by the University of
       *        California, Lawrence Berkeley Laboratory.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. Neither the name of the University nor the names of its contributors
       *    may be used to endorse or promote products derived from this software
       *    without specific prior written permission.
       *
       * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
       * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
       * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
       * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
       * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
       * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
       * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
       * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
       * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
       * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
       * SUCH DAMAGE.
       *
       *        @(#)ms.c        8.1 (Berkeley) 6/11/93
       */
      
      /*
       * Mouse driver.
       */
      
      #include <sys/cdefs.h>
      __KERNEL_RCSID(0, "$NetBSD: wsmouse.c,v 1.69 2020/12/27 16:09:33 tsutsui Exp $");
      
      #include "wsmouse.h"
      #include "wsdisplay.h"
      #include "wsmux.h"
      
      #include <sys/param.h>
      #include <sys/conf.h>
      #include <sys/ioctl.h>
      #include <sys/poll.h>
      #include <sys/fcntl.h>
      #include <sys/kernel.h>
      #include <sys/proc.h>
      #include <sys/syslog.h>
      #include <sys/systm.h>
      #include <sys/tty.h>
      #include <sys/signalvar.h>
      #include <sys/device.h>
      #include <sys/vnode.h>
      #include <sys/callout.h>
      
      #include <dev/wscons/wsconsio.h>
      #include <dev/wscons/wsmousevar.h>
      #include <dev/wscons/wseventvar.h>
      #include <dev/wscons/wsmuxvar.h>
      
      #include "ioconf.h"
      
      #if defined(WSMUX_DEBUG) && NWSMUX > 0
      #define DPRINTF(x)        if (wsmuxdebug) printf x
      #define DPRINTFN(n,x)        if (wsmuxdebug > (n)) printf x
      extern int wsmuxdebug;
      #else
      #define DPRINTF(x)
      #define DPRINTFN(n,x)
      #endif
      
      #define INVALID_X        INT_MAX
      #define INVALID_Y        INT_MAX
      #define INVALID_Z        INT_MAX
      #define INVALID_W        INT_MAX
      
/* Per-instance state of a wsmouse device. */
struct wsmouse_softc {
        struct wsevsrc        sc_base;        /* wscons event source (must be first) */

        /* Hardware access methods and the driver cookie passed to them. */
        const struct wsmouse_accessops *sc_accessops;
        void                *sc_accesscookie;

        u_int                sc_mb;                /* mouse button state */
        u_int                sc_ub;                /* user button state */
        int                sc_dx;                /* delta-x */
        int                sc_dy;                /* delta-y */
        int                sc_dz;                /* delta-z */
        int                sc_dw;                /* delta-w */
        int                sc_x;                /* absolute-x */
        int                sc_y;                /* absolute-y */
        int                sc_z;                /* absolute-z */
        int                sc_w;                /* absolute-w */

        int                sc_refcnt;        /* users inside read etc.; see detach */
        u_char                sc_dying;        /* device is being detached */

        /* Button auto-repeat: config, active button, timer, current delay. */
        struct wsmouse_repeat        sc_repeat;
        int                        sc_repeat_button;
        callout_t                sc_repeat_callout;
        unsigned int                sc_repeat_delay;
};
      
      static int  wsmouse_match(device_t, cfdata_t, void *);
      static void wsmouse_attach(device_t, device_t, void *);
      static int  wsmouse_detach(device_t, int);
      static int  wsmouse_activate(device_t, enum devact);
      
      static int  wsmouse_do_ioctl(struct wsmouse_softc *, u_long, void *,
                                   int, struct lwp *);
      
      #if NWSMUX > 0
      static int  wsmouse_mux_open(struct wsevsrc *, struct wseventvar *);
      static int  wsmouse_mux_close(struct wsevsrc *);
      #endif
      
      static int  wsmousedoioctl(device_t, u_long, void *, int, struct lwp *);
      
      static int  wsmousedoopen(struct wsmouse_softc *, struct wseventvar *);
      
      CFATTACH_DECL_NEW(wsmouse, sizeof (struct wsmouse_softc),
          wsmouse_match, wsmouse_attach, wsmouse_detach, wsmouse_activate);
      
      static void wsmouse_repeat(void *v);
      
      dev_type_open(wsmouseopen);
      dev_type_close(wsmouseclose);
      dev_type_read(wsmouseread);
      dev_type_ioctl(wsmouseioctl);
      dev_type_poll(wsmousepoll);
      dev_type_kqfilter(wsmousekqfilter);
      
/*
 * Character device entry points for wsmouse.  Write/stop/tty/mmap are
 * not meaningful for a mouse and use the standard no-op stubs.
 */
const struct cdevsw wsmouse_cdevsw = {
        .d_open = wsmouseopen,
        .d_close = wsmouseclose,
        .d_read = wsmouseread,
        .d_write = nowrite,
        .d_ioctl = wsmouseioctl,
        .d_stop = nostop,
        .d_tty = notty,
        .d_poll = wsmousepoll,
        .d_mmap = nommap,
        .d_kqfilter = wsmousekqfilter,
        .d_discard = nodiscard,
        .d_flag = D_OTHER
};
      
      #if NWSMUX > 0
      struct wssrcops wsmouse_srcops = {
              WSMUX_MOUSE,
              wsmouse_mux_open, wsmouse_mux_close, wsmousedoioctl, NULL, NULL
      };
      #endif
      
      /*
       * Print function (for parent devices).
       */
      int
      wsmousedevprint(void *aux, const char *pnp)
      {
      
              if (pnp)
                      aprint_normal("wsmouse at %s", pnp);
              return (UNCONF);
      }
      
      int
      wsmouse_match(device_t parent, cfdata_t match, void *aux)
      {
              return (1);
      }
      
/*
 * Autoconfiguration attach: record the hardware access ops/cookie from
 * the attach args, set up the button auto-repeat callout, and optionally
 * join the wsmux named in the kernel config.
 */
void
wsmouse_attach(device_t parent, device_t self, void *aux)
{
        struct wsmouse_softc *sc = device_private(self);
        struct wsmousedev_attach_args *ap = aux;
#if NWSMUX > 0
        int mux, error;
#endif

        sc->sc_base.me_dv = self;
        sc->sc_accessops = ap->accessops;
        sc->sc_accesscookie = ap->accesscookie;

        /* Initialize button repeating. */
        memset(&sc->sc_repeat, 0, sizeof(sc->sc_repeat));
        sc->sc_repeat_button = -1;        /* -1 = no button repeating */
        sc->sc_repeat_delay = 0;
        callout_init(&sc->sc_repeat_callout, 0);
        callout_setfunc(&sc->sc_repeat_callout, wsmouse_repeat, sc);

#if NWSMUX > 0
        /* Attach to the mux selected by the config file, if any. */
        sc->sc_base.me_ops = &wsmouse_srcops;
        mux = device_cfdata(self)->wsmousedevcf_mux;
        if (mux >= 0) {
                error = wsmux_attach_sc(wsmux_getmux(mux), &sc->sc_base);
                if (error)
                        aprint_error(" attach error=%d", error);
                else
                        aprint_normal(" mux %d", mux);
        }
#else
        /* Kernel built without wsmux: note a requested mux is ignored. */
        if (device_cfdata(self)->wsmousedevcf_mux >= 0)
                aprint_normal(" (mux ignored)");
#endif

        aprint_naive("\n");
        aprint_normal("\n");

        if (!pmf_device_register(self, NULL, NULL))
                aprint_error_dev(self, "couldn't establish power handler\n");
}
      
      int
      wsmouse_activate(device_t self, enum devact act)
      {
              struct wsmouse_softc *sc = device_private(self);
      
              if (act == DVACT_DEACTIVATE)
                      sc->sc_dying = 1;
              return (0);
      }
      
      /*
       * Detach a mouse.  To keep track of users of the softc we keep
       * a reference count that's incremented while inside, e.g., read.
       * If the mouse is active and the reference count is > 0 (0 is the
       * normal state) we post an event and then wait for the process
       * that had the reference to wake us up again.  Then we blow away the
       * vnode and return (which will deallocate the softc).
       */
int
wsmouse_detach(device_t self, int flags)
{
        struct wsmouse_softc *sc = device_private(self);
        struct wseventvar *evar;
        int maj, mn;
        int s;

#if NWSMUX > 0
        /* Tell parent mux we're leaving. */
        if (sc->sc_base.me_parent != NULL) {
                DPRINTF(("wsmouse_detach:\n"));
                wsmux_detach_sc(&sc->sc_base);
        }
#endif

        /* If we're open ... */
        evar = sc->sc_base.me_evp;
        if (evar != NULL && evar->io != NULL) {
                s = spltty();
                if (--sc->sc_refcnt >= 0) {
                        struct wscons_event event;

                        /* Wake everyone by generating a dummy event. */
                        event.type = 0;
                        event.value = 0;
                        if (wsevent_inject(evar, &event, 1) != 0)
                                wsevent_wakeup(evar);

                        /* Wait for processes to go away. */
                        /* NOTE(review): gives readers up to 60s to drain;
                         * on timeout we only log and keep detaching. */
                        if (tsleep(sc, PZERO, "wsmdet", hz * 60))
                                printf("wsmouse_detach: %s didn't detach\n",
                                       device_xname(self));
                }
                splx(s);
        }

        /* locate the major number */
        maj = cdevsw_lookup_major(&wsmouse_cdevsw);

        /* Nuke the vnodes for any open instances (calls close). */
        mn = device_unit(self);
        vdevgone(maj, mn, mn, VCHR);

        return (0);
}
      
      void
      wsmouse_input(device_t wsmousedev, u_int btns /* 0 is up */,
              int x, int y, int z, int w, u_int flags)
      {
              struct wsmouse_softc *sc = device_private(wsmousedev);
              struct wseventvar *evar;
              int mb, ub, d, nevents;
              /* one for each dimension (4) + a bit for each button */
              struct wscons_event events[4 + sizeof(d) * 8];
      
              /*
               * Discard input if not open.
               */
              evar = sc->sc_base.me_evp;
              if (evar == NULL)
                      return;
      
      #ifdef DIAGNOSTIC
              if (evar->q == NULL) {
                      printf("wsmouse_input: evar->q=NULL\n");
                      return;
              }
      #endif
      
      #if NWSMUX > 0
              DPRINTFN(5,("wsmouse_input: %s mux=%p, evar=%p\n",
                          device_xname(sc->sc_base.me_dv),
                          sc->sc_base.me_parent, evar));
      #endif
      
              sc->sc_mb = btns;
              if (!(flags & WSMOUSE_INPUT_ABSOLUTE_X))
                      sc->sc_dx += x;
              if (!(flags & WSMOUSE_INPUT_ABSOLUTE_Y))
                      sc->sc_dy += y;
              if (!(flags & WSMOUSE_INPUT_ABSOLUTE_Z))
                      sc->sc_dz += z;
              if (!(flags & WSMOUSE_INPUT_ABSOLUTE_W))
                      sc->sc_dw += w;
      
              /*
               * We have at least one event (mouse button, delta-X, or
               * delta-Y; possibly all three, and possibly three separate
               * button events).  Deliver these events until we are out
               * of changes or out of room.  As events get delivered,
               * mark them `unchanged'.
               */
              ub = sc->sc_ub;
              nevents = 0;
      
              if (flags & WSMOUSE_INPUT_ABSOLUTE_X) {
                      if (sc->sc_x != x) {
                              events[nevents].type = WSCONS_EVENT_MOUSE_ABSOLUTE_X;
                              events[nevents].value = x;
                              nevents++;
                      }
              } else {
                      if (sc->sc_dx) {
                              events[nevents].type = WSCONS_EVENT_MOUSE_DELTA_X;
                              events[nevents].value = sc->sc_dx;
                              nevents++;
                      }
              }
              if (flags & WSMOUSE_INPUT_ABSOLUTE_Y) {
                      if (sc->sc_y != y) {
                              events[nevents].type = WSCONS_EVENT_MOUSE_ABSOLUTE_Y;
                              events[nevents].value = y;
                              nevents++;
                      }
              } else {
                      if (sc->sc_dy) {
                              events[nevents].type = WSCONS_EVENT_MOUSE_DELTA_Y;
                              events[nevents].value = sc->sc_dy;
                              nevents++;
                      }
              }
              if (flags & WSMOUSE_INPUT_ABSOLUTE_Z) {
                      if (sc->sc_z != z) {
                              events[nevents].type = WSCONS_EVENT_MOUSE_ABSOLUTE_Z;
                              events[nevents].value = z;
                              nevents++;
                      }
              } else {
                      if (sc->sc_dz) {
                              events[nevents].type = WSCONS_EVENT_MOUSE_DELTA_Z;
                              events[nevents].value = sc->sc_dz;
                              nevents++;
                      }
              }
              if (flags & WSMOUSE_INPUT_ABSOLUTE_W) {
                      if (sc->sc_w != w) {
                              events[nevents].type = WSCONS_EVENT_MOUSE_ABSOLUTE_W;
                              events[nevents].value = w;
                              nevents++;
                      }
              } else {
                      if (sc->sc_dw) {
                              events[nevents].type = WSCONS_EVENT_MOUSE_DELTA_W;
                              events[nevents].value = sc->sc_dw;
                              nevents++;
                      }
              }
      
              mb = sc->sc_mb;
              while ((d = mb ^ ub) != 0) {
                      int btnno;
      
                      /*
                       * Cancel button repeating if button status changed.
                       */
                      if (sc->sc_repeat_button != -1) {
                              KASSERT(sc->sc_repeat_button >= 0);
                              KASSERT(sc->sc_repeat.wr_buttons &
                                  (1 << sc->sc_repeat_button));
                              ub &= ~(1 << sc->sc_repeat_button);
                              sc->sc_repeat_button = -1;
                              callout_stop(&sc->sc_repeat_callout);
                      }
      
                      /*
                       * Mouse button change.  Find the first change and drop
                       * it into the event queue.
                       */
                      btnno = ffs(d) - 1;
                      KASSERT(btnno >= 0);
      
                      if (nevents >= __arraycount(events)) {
                              aprint_error_dev(sc->sc_base.me_dv,
                                  "Event queue full (button status mb=0x%x"
                                  " ub=0x%x)\n", mb, ub);
                              break;
                      }
      
                      events[nevents].type =
                          (mb & d) ? WSCONS_EVENT_MOUSE_DOWN : WSCONS_EVENT_MOUSE_UP;
                      events[nevents].value = btnno;
                      nevents++;
      
                      ub ^= (1 << btnno);
      
                      /*
                       * Program button repeating if configured for this button.
                       */
                      if ((mb & d) && (sc->sc_repeat.wr_buttons & (1 << btnno)) &&
                          sc->sc_repeat.wr_delay_first > 0) {
                              sc->sc_repeat_button = btnno;
                              sc->sc_repeat_delay = sc->sc_repeat.wr_delay_first;
                              callout_schedule(&sc->sc_repeat_callout,
                                  mstohz(sc->sc_repeat_delay));
                      }
              }
      
              if (nevents == 0 || wsevent_inject(evar, events, nevents) == 0) {
                      /* All events were correctly injected into the queue.
                       * Synchronize the mouse's status with what the user
                       * has received. */
                      sc->sc_x = x; sc->sc_dx = 0;
                      sc->sc_y = y; sc->sc_dy = 0;
                      sc->sc_z = z; sc->sc_dz = 0;
                      sc->sc_w = w; sc->sc_dw = 0;
                      sc->sc_ub = ub;
      #if NWSMUX > 0
                      DPRINTFN(5,("wsmouse_input: %s wakeup evar=%p\n",
                                  device_xname(sc->sc_base.me_dv), evar));
      #endif
              }
      }
      
/*
 * Callout handler for mouse-button autorepeat: injects a button-up /
 * button-down event pair for the repeating button and reschedules
 * itself with a (possibly shrinking) delay.  Runs at spltty so the
 * repeat state stays consistent with wsmouse_input().
 */
static void
wsmouse_repeat(void *v)
{
	int oldspl;
	unsigned int newdelay;
	struct wsmouse_softc *sc;
	struct wscons_event events[2];

	oldspl = spltty();
	sc = (struct wsmouse_softc *)v;

	if (sc->sc_repeat_button == -1) {
		/* Race condition: a "button up" event came in when
		 * this function was already called but did not do
		 * spltty() yet. */
		splx(oldspl);
		return;
	}
	KASSERT(sc->sc_repeat_button >= 0);

	KASSERT(sc->sc_repeat.wr_buttons & (1 << sc->sc_repeat_button));

	newdelay = sc->sc_repeat_delay;

	/* Synthesize a release/press pair for the repeating button. */
	events[0].type = WSCONS_EVENT_MOUSE_UP;
	events[0].value = sc->sc_repeat_button;
	events[1].type = WSCONS_EVENT_MOUSE_DOWN;
	events[1].value = sc->sc_repeat_button;

	if (wsevent_inject(sc->sc_base.me_evp, events, 2) == 0) {
		/* Pair delivered: only the repeating button is down now. */
		sc->sc_ub = 1 << sc->sc_repeat_button;

		/* Accelerate the repeat, clamping at the minimum delay. */
		if (newdelay - sc->sc_repeat.wr_delay_decrement <
		    sc->sc_repeat.wr_delay_minimum)
			newdelay = sc->sc_repeat.wr_delay_minimum;
		else if (newdelay > sc->sc_repeat.wr_delay_minimum)
			newdelay -= sc->sc_repeat.wr_delay_decrement;
		KASSERT(newdelay >= sc->sc_repeat.wr_delay_minimum &&
		    newdelay <= sc->sc_repeat.wr_delay_first);
	}

	/*
	 * Reprogram the repeating event.
	 */
	sc->sc_repeat_delay = newdelay;
	callout_schedule(&sc->sc_repeat_callout, mstohz(newdelay));

	splx(oldspl);
}
      
      int
      wsmouseopen(dev_t dev, int flags, int mode, struct lwp *l)
      {
              struct wsmouse_softc *sc;
              struct wseventvar *evar;
              int error;
      
    6         sc = device_lookup_private(&wsmouse_cd, minor(dev));
              if (sc == NULL)
                      return ENXIO;
      
      #if NWSMUX > 0
              DPRINTF(("wsmouseopen: %s mux=%p p=%p\n", device_xname(sc->sc_base.me_dv),
                       sc->sc_base.me_parent, l));
      #endif
      
    5         if (sc->sc_dying)
                      return (EIO);
      
    5         if ((flags & (FREAD | FWRITE)) == FWRITE)
                      return (0);                        /* always allow open for write
                                                         so ioctl() is possible. */
      
    4         if (sc->sc_base.me_evp != NULL)
    6                 return (EBUSY);
      
    4         evar = &sc->sc_base.me_evar;
              wsevent_init(evar, l->l_proc);
              sc->sc_base.me_evp = evar;
      
              error = wsmousedoopen(sc, evar);
              if (error) {
                      DPRINTF(("wsmouseopen: %s open failed\n",
                               device_xname(sc->sc_base.me_dv)));
                      sc->sc_base.me_evp = NULL;
                      wsevent_fini(evar);
              }
              return (error);
      }
      
      int
      wsmouseclose(dev_t dev, int flags, int mode,
          struct lwp *l)
      {
              struct wsmouse_softc *sc =
    2             device_lookup_private(&wsmouse_cd, minor(dev));
              struct wseventvar *evar = sc->sc_base.me_evp;
      
              if (evar == NULL)
                      /* not open for read */
                      return (0);
    1         sc->sc_base.me_evp = NULL;
              (*sc->sc_accessops->disable)(sc->sc_accesscookie);
              wsevent_fini(evar);
      
    2         return (0);
      }
      
/*
 * Common open path, shared with the wsmux attach code: hook up the
 * event queue, reset the cached coordinates and enable the hardware.
 */
int
wsmousedoopen(struct wsmouse_softc *sc, struct wseventvar *evp)
{
	sc->sc_base.me_evp = evp;
	/* Invalidate cached absolute coordinates so the first input
	 * report generates events. */
	sc->sc_x = INVALID_X;
	sc->sc_y = INVALID_Y;
	sc->sc_z = INVALID_Z;
	sc->sc_w = INVALID_W;

	/* Stop button repeating when messing with the device. */
	if (sc->sc_repeat_button != -1) {
		KASSERT(sc->sc_repeat_button >= 0);
		sc->sc_repeat_button = -1;
		callout_stop(&sc->sc_repeat_callout);
	}

	/* enable the device, and punt if that's not possible */
	return (*sc->sc_accessops->enable)(sc->sc_accesscookie);
}
      
      int
      wsmouseread(dev_t dev, struct uio *uio, int flags)
      {
              struct wsmouse_softc *sc =
    1             device_lookup_private(&wsmouse_cd, minor(dev));
              int error;
      
              if (sc->sc_dying)
                      return (EIO);
      
      #ifdef DIAGNOSTIC
    1         if (sc->sc_base.me_evp == NULL) {
                      printf("wsmouseread: evp == NULL\n");
                      return (EINVAL);
              }
      #endif
      
    1         sc->sc_refcnt++;
              error = wsevent_read(sc->sc_base.me_evp, uio, flags);
    1         if (--sc->sc_refcnt < 0) {
                      wakeup(sc);
                      error = EIO;
              }
              return (error);
      }
      
      int
      wsmouseioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
      {
    6         return (wsmousedoioctl(device_lookup(&wsmouse_cd, minor(dev)),
                                     cmd, data, flag, l));
      }
      
      /* A wrapper around the ioctl() workhorse to make reference counting easy. */
      int
      wsmousedoioctl(device_t dv, u_long cmd, void *data, int flag,
                     struct lwp *l)
      {
    6         struct wsmouse_softc *sc = device_private(dv);
              int error;
      
              sc->sc_refcnt++;
    6         error = wsmouse_do_ioctl(sc, cmd, data, flag, l);
    6         if (--sc->sc_refcnt < 0)
                      wakeup(sc);
    6         return (error);
      }
      
      int
      wsmouse_do_ioctl(struct wsmouse_softc *sc, u_long cmd, void *data,
                       int flag, struct lwp *l)
      {
              int error;
              struct wsmouse_repeat *wr;
      
              if (sc->sc_dying)
                      return (EIO);
      
              /*
               * Try the generic ioctls that the wsmouse interface supports.
               */
    6         switch (cmd) {
              case FIONBIO:                /* we will remove this someday (soon???) */
                      return (0);
      
              case FIOASYNC:
    2                 if (sc->sc_base.me_evp == NULL)
                              return (EINVAL);
    1                 sc->sc_base.me_evp->async = *(int *)data != 0;
                      return (0);
      
              case FIOSETOWN:
    1                 if (sc->sc_base.me_evp == NULL)
                              return (EINVAL);
    1                 if (-*(int *)data != sc->sc_base.me_evp->io->p_pgid
    1                     && *(int *)data != sc->sc_base.me_evp->io->p_pid)
                              return (EPERM);
                      return (0);
      
              case TIOCSPGRP:
    1                 if (sc->sc_base.me_evp == NULL)
                              return (EINVAL);
    1                 if (*(int *)data != sc->sc_base.me_evp->io->p_pgid)
                              return (EPERM);
                      return (0);
              }
      
              /*
               * Try the wsmouse specific ioctls.
               */
    2         switch (cmd) {
              case WSMOUSEIO_GETREPEAT:
                      wr = (struct wsmouse_repeat *)data;
                      memcpy(wr, &sc->sc_repeat, sizeof(sc->sc_repeat));
                      return 0;
      
              case WSMOUSEIO_SETREPEAT:
                      if ((flag & FWRITE) == 0)
                              return EACCES;
      
                      /* Validate input data. */
                      wr = (struct wsmouse_repeat *)data;
                      if (wr->wr_delay_first != 0 &&
                          (wr->wr_delay_first < wr->wr_delay_decrement ||
                           wr->wr_delay_first < wr->wr_delay_minimum ||
                           wr->wr_delay_first < wr->wr_delay_minimum +
                           wr->wr_delay_decrement))
                              return EINVAL;
      
                      /* Stop current repeating and set new data. */
                      sc->sc_repeat_button = -1;
                      callout_stop(&sc->sc_repeat_callout);
                      memcpy(&sc->sc_repeat, wr, sizeof(sc->sc_repeat));
      
                      return 0;
      
              case WSMOUSEIO_SETVERSION:
                      return wsevent_setversion(sc->sc_base.me_evp, *(int *)data);
              }
      
              /*
               * Try the mouse driver for WSMOUSEIO ioctls.  It returns -1
               * if it didn't recognize the request.
               */
    2         error = (*sc->sc_accessops->ioctl)(sc->sc_accesscookie, cmd,
                  data, flag, l);
              return (error); /* may be EPASSTHROUGH */
      }
      
      int
      wsmousepoll(dev_t dev, int events, struct lwp *l)
      {
              struct wsmouse_softc *sc =
    1             device_lookup_private(&wsmouse_cd, minor(dev));
      
              if (sc->sc_base.me_evp == NULL)
                      return (POLLERR);
    1         return (wsevent_poll(sc->sc_base.me_evp, events, l));
      }
      
      int
      wsmousekqfilter(dev_t dev, struct knote *kn)
      {
              struct wsmouse_softc *sc =
                  device_lookup_private(&wsmouse_cd, minor(dev));
      
              if (sc->sc_base.me_evp == NULL)
                      return (1);
              return (wsevent_kqfilter(sc->sc_base.me_evp, kn));
      }
      
      #if NWSMUX > 0
      int
      wsmouse_mux_open(struct wsevsrc *me, struct wseventvar *evp)
      {
              struct wsmouse_softc *sc = (struct wsmouse_softc *)me;
      
              if (sc->sc_base.me_evp != NULL)
                      return (EBUSY);
      
              return wsmousedoopen(sc, evp);
      }
      
      int
      wsmouse_mux_close(struct wsevsrc *me)
      {
              struct wsmouse_softc *sc = (struct wsmouse_softc *)me;
      
              sc->sc_base.me_evp = NULL;
              (*sc->sc_accessops->disable)(sc->sc_accesscookie);
      
              return (0);
      }
      
      int
      wsmouse_add_mux(int unit, struct wsmux_softc *muxsc)
      {
              struct wsmouse_softc *sc;
      
              sc = device_lookup_private(&wsmouse_cd, unit);
              if (sc == NULL)
                      return ENXIO;
      
              if (sc->sc_base.me_parent != NULL || sc->sc_base.me_evp != NULL)
                      return (EBUSY);
      
              return (wsmux_attach_sc(muxsc, &sc->sc_base));
      }
      #endif /* NWSMUX > 0 */
      /*        $NetBSD: uipaq.c,v 1.28 2020/03/13 18:17:41 christos Exp $        */
      /*        $OpenBSD: uipaq.c,v 1.1 2005/06/17 23:50:33 deraadt Exp $        */
      
      /*
       * Copyright (c) 2000-2005 The NetBSD Foundation, Inc.
       * All rights reserved.
       *
       * This code is derived from software contributed to The NetBSD Foundation
       * by Lennart Augustsson (lennart@augustsson.net) at
       * Carlstedt Research & Technology.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       *
       * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
       * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
       * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
       * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
       * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
       * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
       * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
       * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
       * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
       * POSSIBILITY OF SUCH DAMAGE.
       */
      
      /*
       * iPAQ driver
       *
       * 19 July 2003:        Incorporated changes suggested by Sam Lawrance from
       *                         the uppc module
       *
       *
       * Contact isis@cs.umd.edu if you have any questions/comments about this driver
       */
      
      #include <sys/cdefs.h>
      __KERNEL_RCSID(0, "$NetBSD: uipaq.c,v 1.28 2020/03/13 18:17:41 christos Exp $");
      
      #ifdef _KERNEL_OPT
      #include "opt_usb.h"
      #endif
      
      #include <sys/param.h>
      #include <sys/systm.h>
      #include <sys/kernel.h>
      #include <sys/device.h>
      #include <sys/conf.h>
      #include <sys/tty.h>
      
      #include <dev/usb/usb.h>
      
      #include <dev/usb/usbcdc.h>        /*UCDC_* stuff */
      
      #include <dev/usb/usbdi.h>
      #include <dev/usb/usbdi_util.h>
      #include <dev/usb/usbdevs.h>
      
      #include <dev/usb/ucomvar.h>
      
      #ifdef UIPAQ_DEBUG
      #define DPRINTF(x)        if (uipaqdebug) printf x
      #define DPRINTFN(n,x)        if (uipaqdebug>(n)) printf x
      int uipaqdebug = 0;
      #else
      #define DPRINTF(x)
      #define DPRINTFN(n,x)
      #endif
      
      #define UIPAQ_CONFIG_NO                1
      #define UIPAQ_IFACE_INDEX        0
      
      #define UIPAQIBUFSIZE 1024
      #define UIPAQOBUFSIZE 1024
      
/* Per-instance state for an attached iPAQ serial device. */
struct uipaq_softc {
	device_t		sc_dev;		/* base device */
	struct usbd_device *	sc_udev;	/* device */
	struct usbd_interface *	sc_iface;	/* interface */

	device_t		sc_subdev;	/* ucom uses that */
	uint16_t		sc_lcr;		/* state for DTR/RTS */

	uint16_t		sc_flags;	/* uv_flags from the match table */

	bool			sc_dying;	/* attach failed or detach begun */
};
      
      /* Callback routines */
      static void        uipaq_set(void *, int, int, int);
      static int        uipaq_open(void *, int);
      
      
      /* Support routines. */
      /* based on uppc module by Sam Lawrance */
      static void        uipaq_dtr(struct uipaq_softc *, int);
      static void        uipaq_rts(struct uipaq_softc *, int);
      static void        uipaq_break(struct uipaq_softc *, int);
      
      
/* Callbacks handed to the ucom(4) child; only set and open are needed. */
static const struct ucom_methods uipaq_methods = {
	.ucom_set = uipaq_set,
	.ucom_open = uipaq_open,
};
      
/* Match-table entry: USB vendor/product pair plus driver-private flags. */
struct uipaq_type {
	struct usb_devno	uv_dev;
	uint16_t		uv_flags;
};
      
/* Supported devices; uv_flags is copied into sc_flags at attach time. */
static const struct uipaq_type uipaq_devs[] = {
	{{ USB_VENDOR_HP, USB_PRODUCT_HP_2215 }, 0 },
	{{ USB_VENDOR_HP, USB_PRODUCT_HP_568J }, 0},
	{{ USB_VENDOR_COMPAQ, USB_PRODUCT_COMPAQ_IPAQPOCKETPC} , 0},
	{{ USB_VENDOR_CASIO, USB_PRODUCT_CASIO_BE300} , 0},
	{{ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_WS007SH} , 0},
	{{ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_WS011SH} , 0}
};
      
      #define uipaq_lookup(v, p) ((const struct uipaq_type *)usb_lookup(uipaq_devs, v, p))
      
      static int uipaq_match(device_t, cfdata_t, void *);
      static void uipaq_attach(device_t, device_t, void *);
      static void uipaq_childdet(device_t, device_t);
      static int uipaq_detach(device_t, int);
      
      CFATTACH_DECL2_NEW(uipaq, sizeof(struct uipaq_softc), uipaq_match,
          uipaq_attach, uipaq_detach, NULL, NULL, uipaq_childdet);
      
      static int
      uipaq_match(device_t parent, cfdata_t match, void *aux)
      {
              struct usb_attach_arg *uaa = aux;
      
              DPRINTFN(20,("uipaq: vendor=%#x, product=%#x\n",
                  uaa->uaa_vendor, uaa->uaa_product));
      
    4         return uipaq_lookup(uaa->uaa_vendor, uaa->uaa_product) != NULL ?
    3             UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
      }
      
/*
 * autoconf attach: configure the USB device, find the bulk-in/out
 * endpoints and attach a ucom(4) child that does the actual tty work.
 * On failure sc_dying is set so the callback routines bail out.
 */
static void
uipaq_attach(device_t parent, device_t self, void *aux)
{
	struct uipaq_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	struct usbd_device *dev = uaa->uaa_device;
	struct usbd_interface *iface;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	char *devinfop;
	const char *devname = device_xname(self);
	int i;
	usbd_status err;
	struct ucom_attach_args ucaa;

	DPRINTFN(10,("\nuipaq_attach: sc=%p\n", sc));

	sc->sc_dev = self;
	sc->sc_dying = false;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	/* Move the device into the configured state. */
	err = usbd_set_config_no(dev, UIPAQ_CONFIG_NO, 1);
	if (err) {
		aprint_error_dev(self, "failed to set configuration, err=%s\n",
		    usbd_errstr(err));
		goto bad;
	}

	err = usbd_device2interface_handle(dev, UIPAQ_IFACE_INDEX, &iface);
	if (err) {
		aprint_error("\n%s: failed to get interface, err=%s\n",
		    devname, usbd_errstr(err));
		goto bad;
	}

	/* match() already succeeded, so this lookup cannot return NULL. */
	sc->sc_flags = uipaq_lookup(uaa->uaa_vendor, uaa->uaa_product)->uv_flags;

	id = usbd_get_interface_descriptor(iface);

	sc->sc_udev = dev;
	sc->sc_iface = iface;

	/* Describe ourselves to the ucom(4) child. */
	ucaa.ucaa_ibufsize = UIPAQIBUFSIZE;
	ucaa.ucaa_obufsize = UIPAQOBUFSIZE;
	ucaa.ucaa_ibufsizepad = UIPAQIBUFSIZE;
	ucaa.ucaa_opkthdrlen = 0;
	ucaa.ucaa_device = dev;
	ucaa.ucaa_iface = iface;
	ucaa.ucaa_methods = &uipaq_methods;
	ucaa.ucaa_arg = sc;
	ucaa.ucaa_portno = UCOM_UNK_PORTNO;
	ucaa.ucaa_info = "Generic";

/*	err = uipaq_init(sc);
	if (err) {
		printf("%s: init failed, %s\n", device_xname(sc->sc_dev),
		    usbd_errstr(err));
		goto bad;
	}*/

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	/* Scan the interface for one bulk-in and one bulk-out endpoint. */
	ucaa.ucaa_bulkin = ucaa.ucaa_bulkout = -1;
	for (i=0; i<id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			aprint_error_dev(self,
			    "no endpoint descriptor for %d\n", i);
			goto bad;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) {
			ucaa.ucaa_bulkin = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) {
			ucaa.ucaa_bulkout = ed->bEndpointAddress;
		}
	}
	if (ucaa.ucaa_bulkin == -1 || ucaa.ucaa_bulkout == -1) {
		/* NOTE(review): unlike the other failure paths this one
		 * returns without setting sc_dying -- confirm whether it
		 * should "goto bad" instead. */
		aprint_error_dev(self, "no proper endpoints found (%d,%d) \n",
		    ucaa.ucaa_bulkin, ucaa.ucaa_bulkout);
		return;
	}

	sc->sc_subdev = config_found_sm_loc(self, "ucombus", NULL, &ucaa,
					    ucomprint, ucomsubmatch);

	return;

bad:
	DPRINTF(("uipaq_attach: ATTACH ERROR\n"));
	sc->sc_dying = true;
	return;
}
      
      
      void
      uipaq_dtr(struct uipaq_softc* sc, int onoff)
      {
              usb_device_request_t req;
              usbd_status err;
              int retries = 3;
      
              DPRINTF(("%s: uipaq_dtr: onoff=%x\n", device_xname(sc->sc_dev), onoff));
      
              /* Avoid sending unnecessary requests */
              if (onoff && (sc->sc_lcr & UCDC_LINE_DTR))
                      return;
              if (!onoff && !(sc->sc_lcr & UCDC_LINE_DTR))
                      return;
      
              /* Other parameters depend on reg */
              req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
              req.bRequest = UCDC_SET_CONTROL_LINE_STATE;
              sc->sc_lcr = onoff ? sc->sc_lcr | UCDC_LINE_DTR
                  : sc->sc_lcr & ~UCDC_LINE_DTR;
              USETW(req.wValue, sc->sc_lcr);
              USETW(req.wIndex, 0x0);
              USETW(req.wLength, 0);
      
              /* Fire off the request a few times if necessary */
              while (retries) {
                      err = usbd_do_request(sc->sc_udev, &req, NULL);
                      if (!err)
                              break;
                      retries--;
              }
      }
      
      
      void
      uipaq_rts(struct uipaq_softc* sc, int onoff)
      {
              usb_device_request_t req;
              usbd_status err;
              int retries = 3;
      
              DPRINTF(("%s: uipaq_rts: onoff=%x\n", device_xname(sc->sc_dev), onoff));
      
              /* Avoid sending unnecessary requests */
              if (onoff && (sc->sc_lcr & UCDC_LINE_RTS)) return;
              if (!onoff && !(sc->sc_lcr & UCDC_LINE_RTS)) return;
      
              req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
              req.bRequest = UCDC_SET_CONTROL_LINE_STATE;
              sc->sc_lcr = onoff ? sc->sc_lcr | UCDC_LINE_RTS
                  : sc->sc_lcr & ~UCDC_LINE_RTS;
              USETW(req.wValue, sc->sc_lcr);
              USETW(req.wIndex, 0x0);
              USETW(req.wLength, 0);
      
              while (retries) {
                      err = usbd_do_request(sc->sc_udev, &req, NULL);
                      if (!err)
                              break;
                      retries--;
              }
      }
      
      
      void
      uipaq_break(struct uipaq_softc* sc, int onoff)
      {
              usb_device_request_t req;
              usbd_status err;
              int retries = 3;
      
              DPRINTF(("%s: uipaq_break: onoff=%x\n", device_xname(sc->sc_dev),
                  onoff));
      
              req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
              req.bRequest = UCDC_SEND_BREAK;
      
              USETW(req.wValue, onoff ? UCDC_BREAK_ON : UCDC_BREAK_OFF);
              USETW(req.wIndex, 0x0);
              USETW(req.wLength, 0);
      
              while (retries) {
                      err = usbd_do_request(sc->sc_udev, &req, NULL);
                      if (!err)
                              break;
                      retries--;
              }
      }
      
      
      void
      uipaq_set(void *addr, int portno, int reg, int onoff)
      {
              struct uipaq_softc* sc = addr;
      
              if (sc->sc_dying)
                      return;
      
              switch (reg) {
              case UCOM_SET_DTR:
                      uipaq_dtr(addr, onoff);
                      break;
              case UCOM_SET_RTS:
                      uipaq_rts(addr, onoff);
                      break;
              case UCOM_SET_BREAK:
                      uipaq_break(addr, onoff);
                      break;
              default:
                      aprint_error_dev(sc->sc_dev,
                          "unhandled set request: reg=%x onoff=%x\n", reg, onoff);
                      return;
              }
      }
      
      static int
      uipaq_open(void *arg, int portno)
      {
              struct uipaq_softc *sc = arg;
      
              if (sc->sc_dying)
                      return EIO;
      
              return 0;
      }
      
      static void
      uipaq_childdet(device_t self, device_t child)
      {
              struct uipaq_softc *sc = device_private(self);
      
              KASSERT(sc->sc_subdev == child);
              sc->sc_subdev = NULL;
      }
      
      static int
      uipaq_detach(device_t self, int flags)
      {
              struct uipaq_softc *sc = device_private(self);
              int rv = 0;
      
              DPRINTF(("uipaq_detach: sc=%p flags=%d\n", sc, flags));
      
              sc->sc_dying = true;
      
              if (sc->sc_subdev != NULL) {
                      rv |= config_detach(sc->sc_subdev, flags);
                      sc->sc_subdev = NULL;
              }
              if (sc->sc_udev != NULL)
                      usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
                          sc->sc_dev);
      
              return rv;
      }
      /*        $NetBSD: subr_log.c,v 1.60 2020/12/11 03:00:09 thorpej Exp $        */
      
      /*-
       * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
       * All rights reserved.
       *
       * This code is derived from software contributed to The NetBSD Foundation
       * by Andrew Doran.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       *
       * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
       * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
       * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
       * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
       * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
       * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
       * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
       * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
       * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
       * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
       * POSSIBILITY OF SUCH DAMAGE.
       */
      
      /*
       * Copyright (c) 1982, 1986, 1993
       *        The Regents of the University of California.  All rights reserved.
       *
       * Redistribution and use in source and binary forms, with or without
       * modification, are permitted provided that the following conditions
       * are met:
       * 1. Redistributions of source code must retain the above copyright
       *    notice, this list of conditions and the following disclaimer.
       * 2. Redistributions in binary form must reproduce the above copyright
       *    notice, this list of conditions and the following disclaimer in the
       *    documentation and/or other materials provided with the distribution.
       * 3. Neither the name of the University nor the names of its contributors
       *    may be used to endorse or promote products derived from this software
       *    without specific prior written permission.
       *
       * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
       * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
       * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
       * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
       * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
       * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
       * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
       * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
       * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
       * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
       * SUCH DAMAGE.
       *
       *        @(#)subr_log.c        8.3 (Berkeley) 2/14/95
       */
      
      /*
       * Error log buffer for kernel printf's.
       */
      
      #include <sys/cdefs.h>
      __KERNEL_RCSID(0, "$NetBSD: subr_log.c,v 1.60 2020/12/11 03:00:09 thorpej Exp $");
      
      #include <sys/param.h>
      #include <sys/systm.h>
      #include <sys/kernel.h>
      #include <sys/proc.h>
      #include <sys/vnode.h>
      #include <sys/ioctl.h>
      #include <sys/msgbuf.h>
      #include <sys/file.h>
      #include <sys/syslog.h>
      #include <sys/conf.h>
      #include <sys/select.h>
#include <sys/poll.h>
      #include <sys/intr.h>
      #include <sys/sysctl.h>
      #include <sys/ktrace.h>
      
      static int sysctl_msgbuf(SYSCTLFN_PROTO);
      
      static void        logsoftintr(void *);
      
      static bool        log_async;
      static struct selinfo log_selp;                /* process waiting on select call */
      static pid_t        log_pgid;                /* process/group for async I/O */
      static kcondvar_t log_cv;
      static void        *log_sih;
      
      kmutex_t log_lock;
      int        log_open;                        /* also used in log() */
      int        msgbufmapped;                        /* is the message buffer mapped */
      int        msgbufenabled;                        /* is logging to the buffer enabled */
      struct        kern_msgbuf *msgbufp;                /* the mapped buffer, itself. */
      
      void
      initmsgbuf(void *bf, size_t bufsize)
      {
              struct kern_msgbuf *mbp;
              long new_bufs;
      
              /* Sanity-check the given size. */
              if (bufsize < sizeof(struct kern_msgbuf))
                      return;
      
              mbp = msgbufp = (struct kern_msgbuf *)bf;
      
              new_bufs = bufsize - offsetof(struct kern_msgbuf, msg_bufc);
              if ((mbp->msg_magic != MSG_MAGIC) || (mbp->msg_bufs != new_bufs) ||
                  (mbp->msg_bufr < 0) || (mbp->msg_bufr >= mbp->msg_bufs) ||
                  (mbp->msg_bufx < 0) || (mbp->msg_bufx >= mbp->msg_bufs)) {
                      /*
                       * If the buffer magic number is wrong, has changed
                       * size (which shouldn't happen often), or is
                       * internally inconsistent, initialize it.
                       */
      
                      memset(bf, 0, bufsize);
                      mbp->msg_magic = MSG_MAGIC;
                      mbp->msg_bufs = new_bufs;
              }
      
              /* mark it as ready for use. */
              msgbufmapped = msgbufenabled = 1;
      }
      
      void
      loginit(void)
      {
      
              mutex_init(&log_lock, MUTEX_DEFAULT, IPL_VM);
              selinit(&log_selp);
              cv_init(&log_cv, "klog");
              log_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
                  logsoftintr, NULL);
      
              sysctl_createv(NULL, 0, NULL, NULL,
                             CTLFLAG_PERMANENT,
                             CTLTYPE_INT, "msgbufsize",
                             SYSCTL_DESCR("Size of the kernel message buffer"),
                             sysctl_msgbuf, 0, NULL, 0,
                             CTL_KERN, KERN_MSGBUFSIZE, CTL_EOL);
              sysctl_createv(NULL, 0, NULL, NULL,
                             CTLFLAG_PERMANENT,
                             CTLTYPE_INT, "msgbuf",
                             SYSCTL_DESCR("Kernel message buffer"),
                             sysctl_msgbuf, 0, NULL, 0,
                             CTL_KERN, KERN_MSGBUF, CTL_EOL);
      }
      
      /*ARGSUSED*/
      static int
      logopen(dev_t dev, int flags, int mode, struct lwp *l)
      {
    1         struct kern_msgbuf *mbp = msgbufp;
              int error = 0;
      
              mutex_spin_enter(&log_lock);
              if (log_open) {
                      error = EBUSY;
              } else {
                      log_open = 1;
                      log_pgid = l->l_proc->p_pid;        /* signal process only */
                      /*
                       * The message buffer is initialized during system
                       * configuration.  If it's been clobbered, note that
                       * and return an error.  (This allows a user to read
                       * the buffer via /dev/kmem, and try to figure out
                       * what clobbered it.
                       */
                      if (mbp->msg_magic != MSG_MAGIC) {
                              msgbufenabled = 0;
                              error = ENXIO;
                      }
              }
    1         mutex_spin_exit(&log_lock);
      
              return error;
      }
      
/*
 * logclose:
 *
 *	Close /dev/klog: release the single-opener claim and disable
 *	async I/O so no further SIGIO is delivered.
 */
/*ARGSUSED*/
static int
logclose(dev_t dev, int flag, int mode, struct lwp *l)
{

	mutex_spin_enter(&log_lock);
	log_pgid = 0;
	log_open = 0;
	log_async = 0;
	mutex_spin_exit(&log_lock);

	return 0;
}
      
/*
 * logread:
 *
 *	Read unread characters from the kernel message buffer into the
 *	caller's uio, blocking while the buffer is empty unless
 *	IO_NDELAY is set (then EWOULDBLOCK).  Returns 0 or an errno
 *	from cv_wait_sig()/uiomove().
 */
/*ARGSUSED*/
static int
logread(dev_t dev, struct uio *uio, int flag)
{
	struct kern_msgbuf *mbp = msgbufp;
	long l;
	int error = 0;

	mutex_spin_enter(&log_lock);
	/* Sleep until a writer advances msg_bufx past our read index. */
	while (mbp->msg_bufr == mbp->msg_bufx) {
		if (flag & IO_NDELAY) {
			mutex_spin_exit(&log_lock);
			return EWOULDBLOCK;
		}
		error = cv_wait_sig(&log_cv, &log_lock);
		if (error) {
			/* Interrupted by a signal. */
			mutex_spin_exit(&log_lock);
			return error;
		}
	}
	while (uio->uio_resid > 0) {
		/*
		 * Size one contiguous run: up to msg_bufx when the
		 * unread data does not wrap, otherwise up to the end
		 * of the buffer (the next iteration picks up the
		 * wrapped remainder), capped by the caller's residual.
		 */
		l = mbp->msg_bufx - mbp->msg_bufr;
		if (l < 0)
			l = mbp->msg_bufs - mbp->msg_bufr;
		l = uimin(l, uio->uio_resid);
		if (l == 0)
			break;
		/*
		 * Drop the spin lock across uiomove(), which may fault
		 * and sleep.  NOTE(review): msg_bufx may advance while
		 * unlocked, but the copy only touches the run computed
		 * above -- confirm writers never move msg_bufr.
		 */
		mutex_spin_exit(&log_lock);
		error = uiomove(&mbp->msg_bufc[mbp->msg_bufr], (int)l, uio);
		mutex_spin_enter(&log_lock);
		if (error)
			break;
		/* Advance the read index, wrapping at the buffer end. */
		mbp->msg_bufr += l;
		if (mbp->msg_bufr < 0 || mbp->msg_bufr >= mbp->msg_bufs)
			mbp->msg_bufr = 0;
	}
	mutex_spin_exit(&log_lock);

	return error;
}
      
      /*ARGSUSED*/
      static int
      logpoll(dev_t dev, int events, struct lwp *l)
      {
              int revents = 0;
      
              if (events & (POLLIN | POLLRDNORM)) {
                      mutex_spin_enter(&log_lock);
                      if (msgbufp->msg_bufr != msgbufp->msg_bufx)
                              revents |= events & (POLLIN | POLLRDNORM);
                      else
                              selrecord(l, &log_selp);
                      mutex_spin_exit(&log_lock);
              }
      
              return revents;
      }
      
/*
 * filt_logrdetach:
 *
 *	kqueue detach hook: remove the knote from the klog select/poll
 *	record under the log lock.
 */
static void
filt_logrdetach(struct knote *kn)
{

	mutex_spin_enter(&log_lock);
	selremove_knote(&log_selp, kn);
	mutex_spin_exit(&log_lock);
}
      
      static int
      filt_logread(struct knote *kn, long hint)
      {
              int rv;
      
    7         if ((hint & NOTE_SUBMIT) == 0)
                      mutex_spin_enter(&log_lock);
    7         if (msgbufp->msg_bufr == msgbufp->msg_bufx) {
                      rv = 0;
    7         } else if (msgbufp->msg_bufr < msgbufp->msg_bufx) {
    7                 kn->kn_data = msgbufp->msg_bufx - msgbufp->msg_bufr;
                      rv = 1;
              } else {
                      kn->kn_data = (msgbufp->msg_bufs - msgbufp->msg_bufr) +
                          msgbufp->msg_bufx;
    6                 rv = 1;
              }
              if ((hint & NOTE_SUBMIT) == 0)
                      mutex_spin_exit(&log_lock);
      
    7         return rv;
      }
      
/*
 * Filter operations for EVFILT_READ on /dev/klog.  f_attach is NULL
 * because logkqfilter() performs the attachment itself.
 */
static const struct filterops logread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_logrdetach,
	.f_event = filt_logread,
};
      
      static int
      logkqfilter(dev_t dev, struct knote *kn)
      {
      
              switch (kn->kn_filter) {
              case EVFILT_READ:
                      kn->kn_fop = &logread_filtops;
                      break;
      
              default:
                      return (EINVAL);
              }
      
              kn->kn_hook = NULL;
      
              mutex_spin_enter(&log_lock);
              selrecord_knote(&log_selp, kn);
              mutex_spin_exit(&log_lock);
      
              return (0);
      }
      
/*
 * logwakeup:
 *
 *	Wake everyone waiting for message buffer data: sleepers in
 *	logread(), select/poll and kqueue waiters, and -- when async
 *	mode is enabled -- schedule SIGIO delivery via logsoftintr().
 */
void
logwakeup(void)
{

	/*
	 * NOTE(review): cold and log_open are tested without holding
	 * log_lock; this looks like a benign fast-path optimization
	 * (skip the lock when nobody has the device open), but confirm
	 * against the other writers of log_open.
	 */
	if (!cold && log_open) {
		mutex_spin_enter(&log_lock);
		/* NOTE_SUBMIT tells filt_logread() the lock is held. */
		selnotify(&log_selp, 0, NOTE_SUBMIT);
		if (log_async)
			softint_schedule(log_sih);
		cv_broadcast(&log_cv);
		mutex_spin_exit(&log_lock);
	}
}
      
/*
 * logsoftintr:
 *
 *	Soft interrupt handler scheduled by logwakeup() when async I/O
 *	is enabled: sends SIGIO to the process recorded at open time.
 *	NOTE(review): log_pgid is read without log_lock here -- at
 *	worst a stale pid is signalled; confirm this is acceptable.
 */
static void
logsoftintr(void *cookie)
{
	pid_t pid;

	if ((pid = log_pgid) != 0)
		fownsignal(pid, SIGIO, 0, 0, NULL);
}
      
      /*ARGSUSED*/
      static int
      logioctl(dev_t dev, u_long com, void *data, int flag, struct lwp *lwp)
      {
              long l;
      
              switch (com) {
      
              /* return number of characters immediately available */