// SPDX-License-Identifier: GPL-2.0-only
      /*
       * LED Class Core
       *
       * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
       * Copyright (C) 2005-2007 Richard Purdie <rpurdie@openedhand.com>
       */
      
      #include <linux/ctype.h>
      #include <linux/device.h>
      #include <linux/err.h>
      #include <linux/init.h>
      #include <linux/kernel.h>
      #include <linux/leds.h>
      #include <linux/list.h>
      #include <linux/module.h>
      #include <linux/property.h>
      #include <linux/slab.h>
      #include <linux/spinlock.h>
      #include <linux/timer.h>
      #include <uapi/linux/uleds.h>
      #include <linux/of.h>
      #include "leds.h"
      
      static struct class *leds_class;
      
      static ssize_t brightness_show(struct device *dev,
                      struct device_attribute *attr, char *buf)
      {
              struct led_classdev *led_cdev = dev_get_drvdata(dev);
      
              /* no lock needed for this */
              led_update_brightness(led_cdev);
      
              return sprintf(buf, "%u\n", led_cdev->brightness);
      }
      
      static ssize_t brightness_store(struct device *dev,
                      struct device_attribute *attr, const char *buf, size_t size)
      {
              struct led_classdev *led_cdev = dev_get_drvdata(dev);
              unsigned long state;
              ssize_t ret;
      
              mutex_lock(&led_cdev->led_access);
      
              if (led_sysfs_is_disabled(led_cdev)) {
                      ret = -EBUSY;
                      goto unlock;
              }
      
              ret = kstrtoul(buf, 10, &state);
              if (ret)
                      goto unlock;
      
              if (state == LED_OFF)
                      led_trigger_remove(led_cdev);
              led_set_brightness(led_cdev, state);
              flush_work(&led_cdev->set_brightness_work);
      
              ret = size;
      unlock:
              mutex_unlock(&led_cdev->led_access);
              return ret;
      }
      static DEVICE_ATTR_RW(brightness);
      
      static ssize_t max_brightness_show(struct device *dev,
                      struct device_attribute *attr, char *buf)
      {
              struct led_classdev *led_cdev = dev_get_drvdata(dev);
      
              return sprintf(buf, "%u\n", led_cdev->max_brightness);
      }
      static DEVICE_ATTR_RO(max_brightness);
      
      #ifdef CONFIG_LEDS_TRIGGERS
      static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
      static struct bin_attribute *led_trigger_bin_attrs[] = {
              &bin_attr_trigger,
              NULL,
      };
      static const struct attribute_group led_trigger_group = {
              .bin_attrs = led_trigger_bin_attrs,
      };
      #endif
      
      static struct attribute *led_class_attrs[] = {
              &dev_attr_brightness.attr,
              &dev_attr_max_brightness.attr,
              NULL,
      };
      
      static const struct attribute_group led_group = {
              .attrs = led_class_attrs,
      };
      
      static const struct attribute_group *led_groups[] = {
              &led_group,
      #ifdef CONFIG_LEDS_TRIGGERS
              &led_trigger_group,
      #endif
              NULL,
      };
      
      #ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
      static ssize_t brightness_hw_changed_show(struct device *dev,
                      struct device_attribute *attr, char *buf)
      {
              struct led_classdev *led_cdev = dev_get_drvdata(dev);
      
              if (led_cdev->brightness_hw_changed == -1)
                      return -ENODATA;
      
              return sprintf(buf, "%u\n", led_cdev->brightness_hw_changed);
      }
      
      static DEVICE_ATTR_RO(brightness_hw_changed);
      
      static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
      {
              struct device *dev = led_cdev->dev;
              int ret;
      
              ret = device_create_file(dev, &dev_attr_brightness_hw_changed);
              if (ret) {
                      dev_err(dev, "Error creating brightness_hw_changed\n");
                      return ret;
              }
      
              led_cdev->brightness_hw_changed_kn =
                      sysfs_get_dirent(dev->kobj.sd, "brightness_hw_changed");
              if (!led_cdev->brightness_hw_changed_kn) {
                      dev_err(dev, "Error getting brightness_hw_changed kn\n");
                      device_remove_file(dev, &dev_attr_brightness_hw_changed);
                      return -ENXIO;
              }
      
              return 0;
      }
      
      static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
      {
              sysfs_put(led_cdev->brightness_hw_changed_kn);
              device_remove_file(led_cdev->dev, &dev_attr_brightness_hw_changed);
      }
      
      void led_classdev_notify_brightness_hw_changed(struct led_classdev *led_cdev,
                                                     enum led_brightness brightness)
      {
              if (WARN_ON(!led_cdev->brightness_hw_changed_kn))
                      return;
      
              led_cdev->brightness_hw_changed = brightness;
              sysfs_notify_dirent(led_cdev->brightness_hw_changed_kn);
      }
      EXPORT_SYMBOL_GPL(led_classdev_notify_brightness_hw_changed);
      #else
      static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
      {
              return 0;
      }
      static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
      {
      }
      #endif
      
      /**
       * led_classdev_suspend - suspend an led_classdev.
       * @led_cdev: the led_classdev to suspend.
       */
      void led_classdev_suspend(struct led_classdev *led_cdev)
      {
              led_cdev->flags |= LED_SUSPENDED;
              led_set_brightness_nopm(led_cdev, 0);
              flush_work(&led_cdev->set_brightness_work);
      }
      EXPORT_SYMBOL_GPL(led_classdev_suspend);
      
      /**
       * led_classdev_resume - resume an led_classdev.
       * @led_cdev: the led_classdev to resume.
       */
      void led_classdev_resume(struct led_classdev *led_cdev)
      {
              led_set_brightness_nopm(led_cdev, led_cdev->brightness);
      
              if (led_cdev->flash_resume)
                      led_cdev->flash_resume(led_cdev);
      
              led_cdev->flags &= ~LED_SUSPENDED;
      }
      EXPORT_SYMBOL_GPL(led_classdev_resume);
      
      #ifdef CONFIG_PM_SLEEP
      static int led_suspend(struct device *dev)
      {
              struct led_classdev *led_cdev = dev_get_drvdata(dev);
      
              if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
                      led_classdev_suspend(led_cdev);
      
              return 0;
      }
      
      static int led_resume(struct device *dev)
      {
              struct led_classdev *led_cdev = dev_get_drvdata(dev);
      
              if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
                      led_classdev_resume(led_cdev);
      
              return 0;
      }
      #endif
      
      static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
      
      /**
       * of_led_get() - request a LED device via the LED framework
       * @np: device node to get the LED device from
       * @index: the index of the LED
       *
        * Returns the LED device parsed from the phandle specified in the "leds"
        * property of a device tree node, or an ERR_PTR-encoded error code on failure.
       */
      struct led_classdev *of_led_get(struct device_node *np, int index)
      {
              struct device *led_dev;
              struct led_classdev *led_cdev;
              struct device_node *led_node;
      
              led_node = of_parse_phandle(np, "leds", index);
              if (!led_node)
                      return ERR_PTR(-ENOENT);
      
              led_dev = class_find_device_by_of_node(leds_class, led_node);
              of_node_put(led_node);
      
              if (!led_dev)
                      return ERR_PTR(-EPROBE_DEFER);
      
              led_cdev = dev_get_drvdata(led_dev);
      
              if (!try_module_get(led_cdev->dev->parent->driver->owner))
                      return ERR_PTR(-ENODEV);
      
              return led_cdev;
      }
      EXPORT_SYMBOL_GPL(of_led_get);
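
       /*
        * Editorial usage sketch (not part of the original file): a consumer whose
        * device tree node carries a "leds" phandle could obtain and drive the LED
        * roughly as below; consumer_np is hypothetical and error handling is
        * reduced to the essentials.
        *
        *	struct led_classdev *led;
        *
        *	led = of_led_get(consumer_np, 0);
        *	if (IS_ERR(led))
        *		return PTR_ERR(led);
        *
        *	led_set_brightness(led, led->max_brightness);
        *	led_put(led);
        */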
      
      /**
       * led_put() - release a LED device
       * @led_cdev: LED device
       */
      void led_put(struct led_classdev *led_cdev)
      {
              module_put(led_cdev->dev->parent->driver->owner);
      }
      EXPORT_SYMBOL_GPL(led_put);
      
      static void devm_led_release(struct device *dev, void *res)
      {
              struct led_classdev **p = res;
      
              led_put(*p);
      }
      
      /**
       * devm_of_led_get - Resource-managed request of a LED device
       * @dev:        LED consumer
       * @index:        index of the LED to obtain in the consumer
       *
        * The device node of the consumer is parsed to find the requested LED
        * device. The LED device returned from this function is automatically
        * released on driver detach.
        *
        * Returns a pointer to a LED device or ERR_PTR(errno) on failure.
       */
      struct led_classdev *__must_check devm_of_led_get(struct device *dev,
                                                        int index)
      {
              struct led_classdev *led;
              struct led_classdev **dr;
      
              if (!dev)
                      return ERR_PTR(-EINVAL);
      
              /* Not using device tree? */
              if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
                      return ERR_PTR(-ENOTSUPP);
      
              led = of_led_get(dev->of_node, index);
              if (IS_ERR(led))
                      return led;
      
              dr = devres_alloc(devm_led_release, sizeof(struct led_classdev *),
                                GFP_KERNEL);
              if (!dr) {
                      led_put(led);
                      return ERR_PTR(-ENOMEM);
              }
      
              *dr = led;
              devres_add(dev, dr);
      
              return led;
      }
      EXPORT_SYMBOL_GPL(devm_of_led_get);
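
       /*
        * Editorial sketch of the devm variant in a consumer probe() routine
        * (foo_probe is hypothetical; CONFIG_OF and a "leds" phandle in the
        * consumer's node are assumed). The reference is dropped automatically on
        * driver detach, so no led_put() is needed.
        *
        *	static int foo_probe(struct platform_device *pdev)
        *	{
        *		struct led_classdev *led;
        *
        *		led = devm_of_led_get(&pdev->dev, 0);
        *		if (IS_ERR(led))
        *			return PTR_ERR(led);
        *
        *		led_set_brightness(led, LED_ON);
        *		return 0;
        *	}
        */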
      
      static int led_classdev_next_name(const char *init_name, char *name,
                                        size_t len)
      {
              unsigned int i = 0;
              int ret = 0;
              struct device *dev;
      
               strlcpy(name, init_name, len);

               while ((ret < len) &&
                      (dev = class_find_device_by_name(leds_class, name))) {
                       put_device(dev);
                       ret = snprintf(name, len, "%s_%u", init_name, ++i);
               }

               if (ret >= len)
                       return -ENOMEM;

               return i;
      }
      
      /**
       * led_classdev_register_ext - register a new object of led_classdev class
       *                               with init data.
       *
       * @parent: parent of LED device
       * @led_cdev: the led_classdev structure for this device.
       * @init_data: LED class device initialization data
       */
      int led_classdev_register_ext(struct device *parent,
                                    struct led_classdev *led_cdev,
                                    struct led_init_data *init_data)
      {
              char composed_name[LED_MAX_NAME_SIZE];
              char final_name[LED_MAX_NAME_SIZE];
              const char *proposed_name = composed_name;
              int ret;
      
               if (init_data) {
                       if (init_data->devname_mandatory && !init_data->devicename) {
                               dev_err(parent, "Mandatory device name is missing");
                               return -EINVAL;
                       }
                       ret = led_compose_name(parent, init_data, composed_name);
                       if (ret < 0)
                               return ret;
               } else {
                       proposed_name = led_cdev->name;
               }

               ret = led_classdev_next_name(proposed_name, final_name, sizeof(final_name));
              if (ret < 0)
                      return ret;
      
               mutex_init(&led_cdev->led_access);
               mutex_lock(&led_cdev->led_access);
               led_cdev->dev = device_create_with_groups(leds_class, parent, 0,
                                       led_cdev, led_cdev->groups, "%s", final_name);
               if (IS_ERR(led_cdev->dev)) {
                       mutex_unlock(&led_cdev->led_access);
                       return PTR_ERR(led_cdev->dev);
               }
               if (init_data && init_data->fwnode) {
                       led_cdev->dev->fwnode = init_data->fwnode;
                       led_cdev->dev->of_node = to_of_node(init_data->fwnode);
               }

               if (ret)
                      dev_warn(parent, "Led %s renamed to %s due to name collision",
                                      proposed_name, dev_name(led_cdev->dev));
      
              if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) {
                      ret = led_add_brightness_hw_changed(led_cdev);
                      if (ret) {
                              device_unregister(led_cdev->dev);
                              led_cdev->dev = NULL;
                              mutex_unlock(&led_cdev->led_access);
                              return ret;
                      }
              }
      
               led_cdev->work_flags = 0;
       #ifdef CONFIG_LEDS_TRIGGERS
               init_rwsem(&led_cdev->trigger_lock);
       #endif
       #ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
               led_cdev->brightness_hw_changed = -1;
       #endif
               /* add to the list of leds */
               down_write(&leds_list_lock);
               list_add_tail(&led_cdev->node, &leds_list);
               up_write(&leds_list_lock);

               if (!led_cdev->max_brightness)
                       led_cdev->max_brightness = LED_FULL;

               led_update_brightness(led_cdev);
      
              led_init_core(led_cdev);
      
      #ifdef CONFIG_LEDS_TRIGGERS
              led_trigger_set_default(led_cdev);
      #endif
      
              mutex_unlock(&led_cdev->led_access);
      
               dev_dbg(parent, "Registered led device: %s\n",
                               led_cdev->name);

               return 0;
      }
      EXPORT_SYMBOL_GPL(led_classdev_register_ext);
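
       /*
        * Editorial sketch of a minimal registration with init data (the foo_*
        * names and the brightness callback are hypothetical). The final device
        * name is composed by led_compose_name() from the fwnode properties and
        * may still be deduplicated by led_classdev_next_name() above.
        *
        *	struct led_classdev *cdev = &foo->cdev;
        *	struct led_init_data init_data = { .fwnode = dev_fwnode(dev) };
        *
        *	cdev->max_brightness = 255;
        *	cdev->brightness_set = foo_brightness_set;
        *	return led_classdev_register_ext(dev, cdev, &init_data);
        */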
      
      /**
        * led_classdev_unregister - unregisters an object of the led_classdev class.
       * @led_cdev: the led device to unregister
       *
        * Unregisters an object previously registered via led_classdev_register().
       */
      void led_classdev_unregister(struct led_classdev *led_cdev)
      {
               if (IS_ERR_OR_NULL(led_cdev->dev))
                       return;

       #ifdef CONFIG_LEDS_TRIGGERS
               down_write(&led_cdev->trigger_lock);
               if (led_cdev->trigger)
                       led_trigger_set(led_cdev, NULL);
               up_write(&led_cdev->trigger_lock);
      #endif
      
              led_cdev->flags |= LED_UNREGISTERING;
      
              /* Stop blinking */
              led_stop_software_blink(led_cdev);
      
              led_set_brightness(led_cdev, LED_OFF);
      
              flush_work(&led_cdev->set_brightness_work);
      
              if (led_cdev->flags & LED_BRIGHT_HW_CHANGED)
                      led_remove_brightness_hw_changed(led_cdev);
      
              device_unregister(led_cdev->dev);
      
              down_write(&leds_list_lock);
               list_del(&led_cdev->node);
              up_write(&leds_list_lock);
      
              mutex_destroy(&led_cdev->led_access);
      }
      EXPORT_SYMBOL_GPL(led_classdev_unregister);
      
      static void devm_led_classdev_release(struct device *dev, void *res)
      {
               led_classdev_unregister(*(struct led_classdev **)res);
      }
      
      /**
       * devm_led_classdev_register_ext - resource managed led_classdev_register_ext()
       *
       * @parent: parent of LED device
       * @led_cdev: the led_classdev structure for this device.
       * @init_data: LED class device initialization data
       */
      int devm_led_classdev_register_ext(struct device *parent,
                                         struct led_classdev *led_cdev,
                                         struct led_init_data *init_data)
      {
              struct led_classdev **dr;
              int rc;
      
               dr = devres_alloc(devm_led_classdev_release, sizeof(*dr), GFP_KERNEL);
               if (!dr)
                       return -ENOMEM;

               rc = led_classdev_register_ext(parent, led_cdev, init_data);
               if (rc) {
                       devres_free(dr);
                       return rc;
               }

               *dr = led_cdev;
               devres_add(parent, dr);

               return 0;
      }
      EXPORT_SYMBOL_GPL(devm_led_classdev_register_ext);
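
       /*
        * Editorial sketch: most drivers use this managed form from probe() and
        * never unregister explicitly. Passing a NULL init_data (as the
        * devm_led_classdev_register() helper in <linux/leds.h> does) makes the
        * core use led_cdev->name as the proposed device name; the foo_* names
        * are hypothetical.
        *
        *	led_cdev->name = "foo:green:status";
        *	led_cdev->brightness_set = foo_brightness_set;
        *	return devm_led_classdev_register_ext(dev, led_cdev, NULL);
        */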
      
      static int devm_led_classdev_match(struct device *dev, void *res, void *data)
      {
              struct led_classdev **p = res;
      
              if (WARN_ON(!p || !*p))
                      return 0;
      
              return *p == data;
      }
      
      /**
       * devm_led_classdev_unregister() - resource managed led_classdev_unregister()
        * @dev: the parent device of the LED class device to unregister.
       * @led_cdev: the led_classdev structure for this device.
       */
      void devm_led_classdev_unregister(struct device *dev,
                                        struct led_classdev *led_cdev)
      {
              WARN_ON(devres_release(dev,
                                     devm_led_classdev_release,
                                     devm_led_classdev_match, led_cdev));
      }
      EXPORT_SYMBOL_GPL(devm_led_classdev_unregister);
      
      static int __init leds_init(void)
      {
              leds_class = class_create(THIS_MODULE, "leds");
              if (IS_ERR(leds_class))
                      return PTR_ERR(leds_class);
              leds_class->pm = &leds_class_dev_pm_ops;
              leds_class->dev_groups = led_groups;
              return 0;
      }
      
      static void __exit leds_exit(void)
      {
              class_destroy(leds_class);
      }
      
      subsys_initcall(leds_init);
      module_exit(leds_exit);
      
      MODULE_AUTHOR("John Lenz, Richard Purdie");
      MODULE_LICENSE("GPL");
      MODULE_DESCRIPTION("LED Class Interface");
      /* SPDX-License-Identifier: GPL-2.0 */
      /* Freezer declarations */
      
      #ifndef FREEZER_H_INCLUDED
      #define FREEZER_H_INCLUDED
      
      #include <linux/debug_locks.h>
      #include <linux/sched.h>
      #include <linux/wait.h>
      #include <linux/atomic.h>
      
      #ifdef CONFIG_FREEZER
      extern atomic_t system_freezing_cnt;        /* nr of freezing conds in effect */
      extern bool pm_freezing;                /* PM freezing in effect */
      extern bool pm_nosig_freezing;                /* PM nosig freezing in effect */
      
      /*
       * Timeout for stopping processes
       */
      extern unsigned int freeze_timeout_msecs;
      
      /*
       * Check if a process has been frozen
       */
      static inline bool frozen(struct task_struct *p)
      {
              return p->flags & PF_FROZEN;
      }
      
      extern bool freezing_slow_path(struct task_struct *p);
      
      /*
       * Check if there is a request to freeze a process
       */
      static inline bool freezing(struct task_struct *p)
      {
              if (likely(!atomic_read(&system_freezing_cnt)))
                      return false;
              return freezing_slow_path(p);
      }
      
      /* Takes and releases task alloc lock using task_lock() */
      extern void __thaw_task(struct task_struct *t);
      
      extern bool __refrigerator(bool check_kthr_stop);
      extern int freeze_processes(void);
      extern int freeze_kernel_threads(void);
      extern void thaw_processes(void);
      extern void thaw_kernel_threads(void);
      
      /*
       * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
       * If try_to_freeze causes a lockdep warning it means the caller may deadlock
       */
      static inline bool try_to_freeze_unsafe(void)
      {
               might_sleep();
              if (likely(!freezing(current)))
                      return false;
              return __refrigerator(false);
      }
      
      static inline bool try_to_freeze(void)
      {
               if (!(current->flags & PF_NOFREEZE))
                       debug_check_no_locks_held();
               return try_to_freeze_unsafe();
      }
      
      extern bool freeze_task(struct task_struct *p);
      extern bool set_freezable(void);
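
       /*
        * Editorial sketch of the usual consumer pattern for the interfaces above:
        * a freezable kernel thread marks itself with set_freezable() and then
        * polls try_to_freeze() in its main loop. foo_thread and do_work() are
        * placeholders.
        *
        *	static int foo_thread(void *data)
        *	{
        *		set_freezable();
        *		while (!kthread_should_stop()) {
        *			try_to_freeze();
        *			do_work();
        *			schedule_timeout_interruptible(HZ);
        *		}
        *		return 0;
        *	}
        */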
      
      #ifdef CONFIG_CGROUP_FREEZER
      extern bool cgroup_freezing(struct task_struct *task);
      #else /* !CONFIG_CGROUP_FREEZER */
      static inline bool cgroup_freezing(struct task_struct *task)
      {
              return false;
      }
      #endif /* !CONFIG_CGROUP_FREEZER */
      
      /*
       * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
       * calls wait_for_completion(&vfork) and reset right after it returns from this
       * function.  Next, the parent should call try_to_freeze() to freeze itself
       * appropriately in case the child has exited before the freezing of tasks is
       * complete.  However, we don't want kernel threads to be frozen in unexpected
       * places, so we allow them to block freeze_processes() instead or to set
       * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
       * parent won't really block freeze_processes(), since ____call_usermodehelper()
       * (the child) does a little before exec/exit and it can't be frozen before
       * waking up the parent.
       */
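
       /*
        * Editorial sketch of the vfork-parent pattern described above, simplified
        * from the kernel's vfork wait path:
        *
        *	freezer_do_not_count();
        *	wait_for_completion(&vfork);
        *	freezer_count();
        */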
      
      
      /**
       * freezer_do_not_count - tell freezer to ignore %current
       *
       * Tell freezers to ignore the current task when determining whether the
       * target frozen state is reached.  IOW, the current task will be
       * considered frozen enough by freezers.
       *
       * The caller shouldn't do anything which isn't allowed for a frozen task
       * until freezer_cont() is called.  Usually, freezer[_do_not]_count() pair
       * wrap a scheduling operation and nothing much else.
       */
      static inline void freezer_do_not_count(void)
      {
               current->flags |= PF_FREEZER_SKIP;
      }
      
      /**
       * freezer_count - tell freezer to stop ignoring %current
       *
       * Undo freezer_do_not_count().  It tells freezers that %current should be
       * considered again and tries to freeze if freezing condition is already in
       * effect.
       */
      static inline void freezer_count(void)
      {
              current->flags &= ~PF_FREEZER_SKIP;
              /*
               * If freezing is in progress, the following paired with smp_mb()
               * in freezer_should_skip() ensures that either we see %true
               * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
               */
              smp_mb();
               try_to_freeze();
      }
      
      /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
      static inline void freezer_count_unsafe(void)
      {
              current->flags &= ~PF_FREEZER_SKIP;
              smp_mb();
              try_to_freeze_unsafe();
      }
      
      /**
       * freezer_should_skip - whether to skip a task when determining frozen
       *                         state is reached
        * @p: task in question
       *
       * This function is used by freezers after establishing %true freezing() to
       * test whether a task should be skipped when determining the target frozen
       * state is reached.  IOW, if this function returns %true, @p is considered
       * frozen enough.
       */
      static inline bool freezer_should_skip(struct task_struct *p)
      {
              /*
               * The following smp_mb() paired with the one in freezer_count()
               * ensures that either freezer_count() sees %true freezing() or we
               * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
               * impossible for a task to slip frozen state testing after
               * clearing %PF_FREEZER_SKIP.
               */
              smp_mb();
              return p->flags & PF_FREEZER_SKIP;
      }
      
       /*
        * These functions are intended to be used whenever you want to allow a
        * sleeping task to be frozen. Note that none of them returns any clear
        * indication of whether a freeze event happened while in this function.
        */
      
      /* Like schedule(), but should not block the freezer. */
      static inline void freezable_schedule(void)
      {
               freezer_do_not_count();
               schedule();
               freezer_count();
      }
      
      /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
      static inline void freezable_schedule_unsafe(void)
      {
              freezer_do_not_count();
              schedule();
              freezer_count_unsafe();
      }
      
      /*
       * Like schedule_timeout(), but should not block the freezer.  Do not
       * call this with locks held.
       */
      static inline long freezable_schedule_timeout(long timeout)
      {
              long __retval;
              freezer_do_not_count();
              __retval = schedule_timeout(timeout);
              freezer_count();
              return __retval;
      }
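
       /*
        * Editorial usage sketch: like schedule_timeout(), the caller is expected
        * to set the task state first, e.g.
        *
        *	set_current_state(TASK_INTERRUPTIBLE);
        *	remaining = freezable_schedule_timeout(msecs_to_jiffies(100));
        */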
      
      /*
       * Like schedule_timeout_interruptible(), but should not block the freezer.  Do not
       * call this with locks held.
       */
      static inline long freezable_schedule_timeout_interruptible(long timeout)
      {
              long __retval;
              freezer_do_not_count();
              __retval = schedule_timeout_interruptible(timeout);
              freezer_count();
              return __retval;
      }
      
      /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
      static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
      {
              long __retval;
      
              freezer_do_not_count();
              __retval = schedule_timeout_interruptible(timeout);
              freezer_count_unsafe();
              return __retval;
      }
      
      /* Like schedule_timeout_killable(), but should not block the freezer. */
      static inline long freezable_schedule_timeout_killable(long timeout)
      {
              long __retval;
              freezer_do_not_count();
              __retval = schedule_timeout_killable(timeout);
              freezer_count();
              return __retval;
      }
      
      /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
      static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
      {
              long __retval;
              freezer_do_not_count();
              __retval = schedule_timeout_killable(timeout);
              freezer_count_unsafe();
              return __retval;
      }
      
      /*
       * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
       * call this with locks held.
       */
      static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
                      u64 delta, const enum hrtimer_mode mode)
      {
              int __retval;
              freezer_do_not_count();
              __retval = schedule_hrtimeout_range(expires, delta, mode);
              freezer_count();
              return __retval;
      }
      
      /*
       * Freezer-friendly wrappers around wait_event_interruptible(),
       * wait_event_killable() and wait_event_interruptible_timeout(), originally
       * defined in <linux/wait.h>
       */
      
      /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
      #define wait_event_freezekillable_unsafe(wq, condition)                        \
      ({                                                                        \
              int __retval;                                                        \
              freezer_do_not_count();                                                \
              __retval = wait_event_killable(wq, (condition));                \
              freezer_count_unsafe();                                                \
              __retval;                                                        \
      })
      
      #else /* !CONFIG_FREEZER */
      static inline bool frozen(struct task_struct *p) { return false; }
      static inline bool freezing(struct task_struct *p) { return false; }
      static inline void __thaw_task(struct task_struct *t) {}
      
      static inline bool __refrigerator(bool check_kthr_stop) { return false; }
      static inline int freeze_processes(void) { return -ENOSYS; }
      static inline int freeze_kernel_threads(void) { return -ENOSYS; }
      static inline void thaw_processes(void) {}
      static inline void thaw_kernel_threads(void) {}
      
      static inline bool try_to_freeze_nowarn(void) { return false; }
      static inline bool try_to_freeze(void) { return false; }
      
      static inline void freezer_do_not_count(void) {}
      static inline void freezer_count(void) {}
       static inline bool freezer_should_skip(struct task_struct *p) { return false; }
       static inline bool set_freezable(void) { return true; }
      
      #define freezable_schedule()  schedule()
      
      #define freezable_schedule_unsafe()  schedule()
      
      #define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)
      
      #define freezable_schedule_timeout_interruptible(timeout)                \
              schedule_timeout_interruptible(timeout)
      
      #define freezable_schedule_timeout_interruptible_unsafe(timeout)        \
              schedule_timeout_interruptible(timeout)
      
      #define freezable_schedule_timeout_killable(timeout)                        \
              schedule_timeout_killable(timeout)
      
      #define freezable_schedule_timeout_killable_unsafe(timeout)                \
              schedule_timeout_killable(timeout)
      
      #define freezable_schedule_hrtimeout_range(expires, delta, mode)        \
              schedule_hrtimeout_range(expires, delta, mode)
      
      #define wait_event_freezekillable_unsafe(wq, condition)                        \
                      wait_event_killable(wq, condition)
      
      #endif /* !CONFIG_FREEZER */
      
      #endif        /* FREEZER_H_INCLUDED */
      // SPDX-License-Identifier: GPL-2.0-only
      /*
       * Event char devices, giving access to raw input device events.
       *
       * Copyright (c) 1999-2002 Vojtech Pavlik
       */
      
      #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      
      #define EVDEV_MINOR_BASE        64
      #define EVDEV_MINORS                32
      #define EVDEV_MIN_BUFFER_SIZE        64U
      #define EVDEV_BUF_PACKETS        8
      
      #include <linux/poll.h>
      #include <linux/sched.h>
      #include <linux/slab.h>
      #include <linux/vmalloc.h>
      #include <linux/mm.h>
      #include <linux/module.h>
      #include <linux/init.h>
      #include <linux/input/mt.h>
      #include <linux/major.h>
      #include <linux/device.h>
      #include <linux/cdev.h>
      #include "input-compat.h"
      
      struct evdev {
              int open;
              struct input_handle handle;
              wait_queue_head_t wait;
              struct evdev_client __rcu *grab;
              struct list_head client_list;
              spinlock_t client_lock; /* protects client_list */
              struct mutex mutex;
              struct device dev;
              struct cdev cdev;
              bool exist;
      };
      
      struct evdev_client {
              unsigned int head;
              unsigned int tail;
              unsigned int packet_head; /* [future] position of the first element of next packet */
              spinlock_t buffer_lock; /* protects access to buffer, head and tail */
              struct fasync_struct *fasync;
              struct evdev *evdev;
              struct list_head node;
              enum input_clock_type clk_type;
              bool revoked;
              unsigned long *evmasks[EV_CNT];
              unsigned int bufsize;
              struct input_event buffer[];
      };
      
      static size_t evdev_get_mask_cnt(unsigned int type)
      {
              static const size_t counts[EV_CNT] = {
                      /* EV_SYN==0 is EV_CNT, _not_ SYN_CNT, see EVIOCGBIT */
                      [EV_SYN]        = EV_CNT,
                      [EV_KEY]        = KEY_CNT,
                      [EV_REL]        = REL_CNT,
                      [EV_ABS]        = ABS_CNT,
                      [EV_MSC]        = MSC_CNT,
                      [EV_SW]                = SW_CNT,
                      [EV_LED]        = LED_CNT,
                      [EV_SND]        = SND_CNT,
                      [EV_FF]                = FF_CNT,
              };
      
               return (type < EV_CNT) ? counts[type] : 0;
      }
      
      /* requires the buffer lock to be held */
      static bool __evdev_is_filtered(struct evdev_client *client,
                                      unsigned int type,
                                      unsigned int code)
      {
              unsigned long *mask;
              size_t cnt;
      
              /* EV_SYN and unknown codes are never filtered */
              if (type == EV_SYN || type >= EV_CNT)
                      return false;
      
              /* first test whether the type is filtered */
              mask = client->evmasks[0];
               if (mask && !test_bit(type, mask))
                       return true;

               /* unknown values are never filtered */
               cnt = evdev_get_mask_cnt(type);
               if (!cnt || code >= cnt)
                       return false;

               mask = client->evmasks[type];
               return mask && !test_bit(code, mask);
      }
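
       /*
        * Editorial sketch of the userspace side of this filter: a client installs
        * a per-type mask with EVIOCSMASK, here keeping only KEY_POWER of the
        * EV_KEY events; fd and error handling are assumed.
        *
        *	unsigned char keys[KEY_MAX / 8 + 1] = { 0 };
        *	struct input_mask mask = {
        *		.type = EV_KEY,
        *		.codes_size = sizeof(keys),
        *		.codes_ptr = (__u64)(uintptr_t)keys,
        *	};
        *
        *	keys[KEY_POWER / 8] |= 1 << (KEY_POWER % 8);
        *	ioctl(fd, EVIOCSMASK, &mask);
        */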
      
      /* flush queued events of type @type, caller must hold client->buffer_lock */
      static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
      {
              unsigned int i, head, num;
              unsigned int mask = client->bufsize - 1;
              bool is_report;
              struct input_event *ev;
      
              BUG_ON(type == EV_SYN);
      
              head = client->tail;
              client->packet_head = client->tail;
      
              /* init to 1 so a leading SYN_REPORT will not be dropped */
              num = 1;
      
               for (i = client->tail; i != client->head; i = (i + 1) & mask) {
                       ev = &client->buffer[i];
                       is_report = ev->type == EV_SYN && ev->code == SYN_REPORT;

                       if (ev->type == type) {
                               /* drop matched entry */
                               continue;
                       } else if (is_report && !num) {
                               /* drop empty SYN_REPORT groups */
                               continue;
                       } else if (head != i) {
                               /* move entry to fill the gap */
                               client->buffer[head] = *ev;
                       }

                       num++;
                       head = (head + 1) & mask;

                       if (is_report) {
                               num = 0;
                               client->packet_head = head;
                       }
               }

               client->head = head;
      }
      
      static void __evdev_queue_syn_dropped(struct evdev_client *client)
      {
               ktime_t *ev_time = input_get_timestamp(client->evdev->handle.dev);
              struct timespec64 ts = ktime_to_timespec64(ev_time[client->clk_type]);
              struct input_event ev;
      
              ev.input_event_sec = ts.tv_sec;
              ev.input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
              ev.type = EV_SYN;
              ev.code = SYN_DROPPED;
              ev.value = 0;
      
              client->buffer[client->head++] = ev;
              client->head &= client->bufsize - 1;
      
              if (unlikely(client->head == client->tail)) {
                      /* drop queue but keep our SYN_DROPPED event */
                      client->tail = (client->head - 1) & (client->bufsize - 1);
                      client->packet_head = client->tail;
              }
      }
      
      static void evdev_queue_syn_dropped(struct evdev_client *client)
      {
              unsigned long flags;
      
               spin_lock_irqsave(&client->buffer_lock, flags);
              __evdev_queue_syn_dropped(client);
              spin_unlock_irqrestore(&client->buffer_lock, flags);
      }
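
       /*
        * Editorial note on the userspace contract: after reading a SYN_DROPPED
        * event a client should discard everything up to and including the next
        * SYN_REPORT and then re-read the device state (EVIOCGKEY, EVIOCGABS and
        * friends) before trusting the stream again. A rough sketch, with the
        * helper functions assumed:
        *
        *	if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
        *		skip_until_syn_report(fd);
        *		resync_state_via_ioctls(fd);
        *	}
        */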
      
      static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
      {
              unsigned long flags;
              enum input_clock_type clk_type;
      
              switch (clkid) {
      
              case CLOCK_REALTIME:
                      clk_type = INPUT_CLK_REAL;
                      break;
              case CLOCK_MONOTONIC:
                      clk_type = INPUT_CLK_MONO;
                      break;
              case CLOCK_BOOTTIME:
                      clk_type = INPUT_CLK_BOOT;
                      break;
              default:
                      return -EINVAL;
              }
      
               if (client->clk_type != clk_type) {
                       client->clk_type = clk_type;

                       /*
                        * Flush pending events and queue SYN_DROPPED event,
                        * but only if the queue is not empty.
                        */
                       spin_lock_irqsave(&client->buffer_lock, flags);

                       if (client->head != client->tail) {
                               client->packet_head = client->head = client->tail;
                               __evdev_queue_syn_dropped(client);
                       }

                       spin_unlock_irqrestore(&client->buffer_lock, flags);
              }
      
              return 0;
      }
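
       /*
        * Editorial sketch of the matching userspace call: selecting the monotonic
        * clock for event timestamps (fd assumed).
        *
        *	int clkid = CLOCK_MONOTONIC;
        *
        *	ioctl(fd, EVIOCSCLOCKID, &clkid);
        */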
      
      static void __pass_event(struct evdev_client *client,
                               const struct input_event *event)
      {
              client->buffer[client->head++] = *event;
              client->head &= client->bufsize - 1;
      
              if (unlikely(client->head == client->tail)) {
                      /*
                       * This effectively "drops" all unconsumed events, leaving
                       * EV_SYN/SYN_DROPPED plus the newest event in the queue.
                       */
                       client->tail = (client->head - 2) & (client->bufsize - 1);
      
                      client->buffer[client->tail] = (struct input_event) {
                              .input_event_sec = event->input_event_sec,
                              .input_event_usec = event->input_event_usec,
                              .type = EV_SYN,
                              .code = SYN_DROPPED,
                              .value = 0,
                      };
      
                      client->packet_head = client->tail;
              }
      
               if (event->type == EV_SYN && event->code == SYN_REPORT) {
                       client->packet_head = client->head;
                      kill_fasync(&client->fasync, SIGIO, POLL_IN);
              }
      }
      
      static void evdev_pass_values(struct evdev_client *client,
                              const struct input_value *vals, unsigned int count,
                              ktime_t *ev_time)
      {
               struct evdev *evdev = client->evdev;
              const struct input_value *v;
              struct input_event event;
              struct timespec64 ts;
              bool wakeup = false;
      
               if (client->revoked)
                       return;
      
              ts = ktime_to_timespec64(ev_time[client->clk_type]);
              event.input_event_sec = ts.tv_sec;
              event.input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
      
              /* Interrupts are disabled, just acquire the lock. */
              spin_lock(&client->buffer_lock);
      
               for (v = vals; v != vals + count; v++) {
                       if (__evdev_is_filtered(client, v->type, v->code))
                               continue;

                       if (v->type == EV_SYN && v->code == SYN_REPORT) {
                               /* drop empty SYN_REPORT */
                               if (client->packet_head == client->head)
                                      continue;
      
                              wakeup = true;
                      }
      
                       event.type = v->type;
                       event.code = v->code;
                       event.value = v->value;
                       __pass_event(client, &event);
               }

               spin_unlock(&client->buffer_lock);

               if (wakeup)
                       wake_up_interruptible_poll(&evdev->wait,
                              EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM);
      }
      
      /*
       * Pass incoming events to all connected clients.
       */
      static void evdev_events(struct input_handle *handle,
                               const struct input_value *vals, unsigned int count)
      {
               struct evdev *evdev = handle->private;
               struct evdev_client *client;
               ktime_t *ev_time = input_get_timestamp(handle->dev);

               rcu_read_lock();

               client = rcu_dereference(evdev->grab);

               if (client)
                       evdev_pass_values(client, vals, count, ev_time);
               else
                       list_for_each_entry_rcu(client, &evdev->client_list, node)
                               evdev_pass_values(client, vals, count, ev_time);

               rcu_read_unlock();
      }
      
      /*
       * Pass incoming event to all connected clients.
       */
      static void evdev_event(struct input_handle *handle,
                              unsigned int type, unsigned int code, int value)
      {
              struct input_value vals[] = { { type, code, value } };
      
              evdev_events(handle, vals, 1);
      }
      
      static int evdev_fasync(int fd, struct file *file, int on)
      {
               struct evdev_client *client = file->private_data;
      
              return fasync_helper(fd, file, on, &client->fasync);
      }
      
      static void evdev_free(struct device *dev)
      {
               struct evdev *evdev = container_of(dev, struct evdev, dev);

               input_put_device(evdev->handle.dev);
               kfree(evdev);
      }
      
      /*
       * Grabs an event device (along with underlying input device).
       * This function is called with evdev->mutex taken.
       */
      static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
      {
              int error;
      
               if (evdev->grab)
                       return -EBUSY;

               error = input_grab_device(&evdev->handle);
               if (error)
                       return error;

               rcu_assign_pointer(evdev->grab, client);
      
              return 0;
      }
      
      static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
      {
               struct evdev_client *grab = rcu_dereference_protected(evdev->grab,
                                               lockdep_is_held(&evdev->mutex));

               if (grab != client)
                       return -EINVAL;

               rcu_assign_pointer(evdev->grab, NULL);
               synchronize_rcu();
               input_release_device(&evdev->handle);

               return 0;
      }
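
       /*
        * Editorial sketch of the userspace side: EVIOCGRAB with a non-zero
        * argument routes events exclusively to this client, zero releases the
        * grab again (fd assumed).
        *
        *	ioctl(fd, EVIOCGRAB, 1);
        *	ioctl(fd, EVIOCGRAB, 0);
        */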
      
      static void evdev_attach_client(struct evdev *evdev,
                                      struct evdev_client *client)
      {
              spin_lock(&evdev->client_lock);
               list_add_tail_rcu(&client->node, &evdev->client_list);
               spin_unlock(&evdev->client_lock);
      }
      
      static void evdev_detach_client(struct evdev *evdev,
                                      struct evdev_client *client)
      {
               spin_lock(&evdev->client_lock);
               list_del_rcu(&client->node);
              spin_unlock(&evdev->client_lock);
              synchronize_rcu();
      }
      
      static int evdev_open_device(struct evdev *evdev)
      {
              int retval;
      
              retval = mutex_lock_interruptible(&evdev->mutex);
              if (retval)
                      return retval;
      
               if (!evdev->exist)
                       retval = -ENODEV;
               else if (!evdev->open++) {
                       retval = input_open_device(&evdev->handle);
                       if (retval)
                               evdev->open--;
               }

               mutex_unlock(&evdev->mutex);
              return retval;
      }
      
      static void evdev_close_device(struct evdev *evdev)
      {
              mutex_lock(&evdev->mutex);
      
               if (evdev->exist && !--evdev->open)
                       input_close_device(&evdev->handle);

               mutex_unlock(&evdev->mutex);
      }
      
      /*
       * Wake up users waiting for IO so they can disconnect from
       * dead device.
       */
      static void evdev_hangup(struct evdev *evdev)
      {
              struct evdev_client *client;
      
              spin_lock(&evdev->client_lock);
              list_for_each_entry(client, &evdev->client_list, node)
                       kill_fasync(&client->fasync, SIGIO, POLL_HUP);
               spin_unlock(&evdev->client_lock);
      
              wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
      }
      
      static int evdev_release(struct inode *inode, struct file *file)
      {
               struct evdev_client *client = file->private_data;
              struct evdev *evdev = client->evdev;
              unsigned int i;
      
              mutex_lock(&evdev->mutex);
      
               if (evdev->exist && !client->revoked)
                       input_flush_device(&evdev->handle, file);

               evdev_ungrab(evdev, client);
               mutex_unlock(&evdev->mutex);

               evdev_detach_client(evdev, client);

               for (i = 0; i < EV_CNT; ++i)
                       bitmap_free(client->evmasks[i]);

               kvfree(client);

               evdev_close_device(evdev);
      
              return 0;
      }
      
      static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
      {
              unsigned int n_events =
                      max(dev->hint_events_per_packet * EVDEV_BUF_PACKETS,
                          EVDEV_MIN_BUFFER_SIZE);
      
              return roundup_pow_of_two(n_events);
      }
      
      static int evdev_open(struct inode *inode, struct file *file)
      {
               struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
              unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
              struct evdev_client *client;
              int error;
      
              client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
              if (!client)
                      return -ENOMEM;
      
               client->bufsize = bufsize;
               spin_lock_init(&client->buffer_lock);
               client->evdev = evdev;
               evdev_attach_client(evdev, client);

               error = evdev_open_device(evdev);
              if (error)
                      goto err_free_client;
      
               file->private_data = client;
              stream_open(inode, file);
      
              return 0;
      
       err_free_client:
               evdev_detach_client(evdev, client);
               kvfree(client);
               return error;
      }
      
      static ssize_t evdev_write(struct file *file, const char __user *buffer,
                                 size_t count, loff_t *ppos)
      {
               struct evdev_client *client = file->private_data;
              struct evdev *evdev = client->evdev;
              struct input_event event;
              int retval = 0;
      
               if (count != 0 && count < input_event_size())
                       return -EINVAL;

               retval = mutex_lock_interruptible(&evdev->mutex);
               if (retval)
                       return retval;

               if (!evdev->exist || client->revoked) {
                      retval = -ENODEV;
                      goto out;
              }
      
               while (retval + input_event_size() <= count) {

                       if (input_event_from_user(buffer + retval, &event)) {
                               retval = -EFAULT;
                               goto out;
                       }
                       retval += input_event_size();
      
                      input_inject_event(&evdev->handle,
                                         event.type, event.code, event.value);
                      cond_resched();
              }
      
       out:
               mutex_unlock(&evdev->mutex);
               return retval;
      }
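
       /*
        * Editorial sketch of a userspace writer: the classic use is driving
        * keyboard LEDs by injecting EV_LED events followed by a SYN_REPORT
        * (fd assumed, error handling omitted).
        *
        *	struct input_event ev[2] = {
        *		{ .type = EV_LED, .code = LED_CAPSL, .value = 1 },
        *		{ .type = EV_SYN, .code = SYN_REPORT, .value = 0 },
        *	};
        *
        *	write(fd, ev, sizeof(ev));
        */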
      
      static int evdev_fetch_next_event(struct evdev_client *client,
                                        struct input_event *event)
      {
              int have_event;
      
               spin_lock_irq(&client->buffer_lock);
      
              have_event = client->packet_head != client->tail;
              if (have_event) {
                      *event = client->buffer[client->tail++];
                      client->tail &= client->bufsize - 1;
              }
      
               spin_unlock_irq(&client->buffer_lock);
      
              return have_event;
      }
      
      static ssize_t evdev_read(struct file *file, char __user *buffer,
                                size_t count, loff_t *ppos)
      {
               struct evdev_client *client = file->private_data;
              struct evdev *evdev = client->evdev;
              struct input_event event;
              size_t read = 0;
              int error;
      
               if (count != 0 && count < input_event_size())
                      return -EINVAL;
      
              for (;;) {
                       if (!evdev->exist || client->revoked)
                               return -ENODEV;

                       if (client->packet_head == client->tail &&
                           (file->f_flags & O_NONBLOCK))
                              return -EAGAIN;
      
                      /*
                       * count == 0 is special - no IO is done but we check
                       * for error conditions (see above).
                       */
                       if (count == 0)
                               break;

                       while (read + input_event_size() <= count &&
                              evdev_fetch_next_event(client, &event)) {
      
                              if (input_event_to_user(buffer + read, &event))
                                       return -EFAULT;
      
                              read += input_event_size();
                      }
      
                       if (read)
                               break;

                       if (!(file->f_flags & O_NONBLOCK)) {
                               error = wait_event_interruptible(evdev->wait,
                                              client->packet_head != client->tail ||
                                              !evdev->exist || client->revoked);
                              if (error)
                                      return error;
                      }
              }
      
              return read;
      }
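
       /*
        * Editorial sketch of a blocking reader: events arrive as whole
        * struct input_event records, one or more per read (fd and the
        * handle_key() helper are assumed).
        *
        *	struct input_event ev;
        *
        *	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
        *		if (ev.type == EV_KEY)
        *			handle_key(ev.code, ev.value);
        *	}
        */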
      
      /* No kernel lock - fine */
      static __poll_t evdev_poll(struct file *file, poll_table *wait)
      {
              struct evdev_client *client = file->private_data;
              struct evdev *evdev = client->evdev;
              __poll_t mask;
      
              poll_wait(file, &evdev->wait, wait);
      
              if (evdev->exist && !client->revoked)
                      mask = EPOLLOUT | EPOLLWRNORM;
              else
                      mask = EPOLLHUP | EPOLLERR;
      
              if (client->packet_head != client->tail)
                      mask |= EPOLLIN | EPOLLRDNORM;
      
              return mask;
      }
      
      #ifdef CONFIG_COMPAT
      
      #define BITS_PER_LONG_COMPAT (sizeof(compat_long_t) * 8)
      #define BITS_TO_LONGS_COMPAT(x) ((((x) - 1) / BITS_PER_LONG_COMPAT) + 1)
      
      #ifdef __BIG_ENDIAN
      static int bits_to_user(unsigned long *bits, unsigned int maxbit,
                              unsigned int maxlen, void __user *p, int compat)
      {
              int len, i;
      
              if (compat) {
                      len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
                      if (len > maxlen)
                              len = maxlen;
      
                      for (i = 0; i < len / sizeof(compat_long_t); i++)
                              if (copy_to_user((compat_long_t __user *) p + i,
                                               (compat_long_t *) bits +
                                                      i + 1 - ((i % 2) << 1),
                                               sizeof(compat_long_t)))
                                      return -EFAULT;
              } else {
                      len = BITS_TO_LONGS(maxbit) * sizeof(long);
                      if (len > maxlen)
                              len = maxlen;
      
                      if (copy_to_user(p, bits, len))
                              return -EFAULT;
              }
      
              return len;
      }
      
      static int bits_from_user(unsigned long *bits, unsigned int maxbit,
                                unsigned int maxlen, const void __user *p, int compat)
      {
              int len, i;
      
              if (compat) {
                      if (maxlen % sizeof(compat_long_t))
                              return -EINVAL;
      
                      len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
                      if (len > maxlen)
                              len = maxlen;
      
                      for (i = 0; i < len / sizeof(compat_long_t); i++)
                              if (copy_from_user((compat_long_t *) bits +
                                                      i + 1 - ((i % 2) << 1),
                                                 (compat_long_t __user *) p + i,
                                                 sizeof(compat_long_t)))
                                      return -EFAULT;
                      if (i % 2)
                              *((compat_long_t *) bits + i - 1) = 0;
      
              } else {
                      if (maxlen % sizeof(long))
                              return -EINVAL;
      
                      len = BITS_TO_LONGS(maxbit) * sizeof(long);
                      if (len > maxlen)
                              len = maxlen;
      
                      if (copy_from_user(bits, p, len))
                              return -EFAULT;
              }
      
              return len;
      }
      
      #else
      
      static int bits_to_user(unsigned long *bits, unsigned int maxbit,
                              unsigned int maxlen, void __user *p, int compat)
      {
               int len = compat ?
                               BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t) :
                               BITS_TO_LONGS(maxbit) * sizeof(long);

               if (len > maxlen)
                       len = maxlen;

               return copy_to_user(p, bits, len) ? -EFAULT : len;
      }
      
      static int bits_from_user(unsigned long *bits, unsigned int maxbit,
                                unsigned int maxlen, const void __user *p, int compat)
      {
              size_t chunk_size = compat ? sizeof(compat_long_t) : sizeof(long);
              int len;

              if (maxlen % chunk_size)
                      return -EINVAL;

              len = compat ? BITS_TO_LONGS_COMPAT(maxbit) : BITS_TO_LONGS(maxbit);
              len *= chunk_size;
              if (len > maxlen)
                      len = maxlen;

              return copy_from_user(bits, p, len) ? -EFAULT : len;
      }
      
      #endif /* __BIG_ENDIAN */
      
      #else
      
      static int bits_to_user(unsigned long *bits, unsigned int maxbit,
                              unsigned int maxlen, void __user *p, int compat)
      {
              int len = BITS_TO_LONGS(maxbit) * sizeof(long);
      
              if (len > maxlen)
                      len = maxlen;
      
              return copy_to_user(p, bits, len) ? -EFAULT : len;
      }
      
      static int bits_from_user(unsigned long *bits, unsigned int maxbit,
                                unsigned int maxlen, const void __user *p, int compat)
      {
              int len;
      
              if (maxlen % sizeof(long))
                      return -EINVAL;
      
              len = BITS_TO_LONGS(maxbit) * sizeof(long);
              if (len > maxlen)
                      len = maxlen;
      
              return copy_from_user(bits, p, len) ? -EFAULT : len;
      }
      
      #endif /* CONFIG_COMPAT */
      
      static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
      {
              int len;
      
              if (!str)
                      return -ENOENT;

              len = strlen(str) + 1;
              if (len > maxlen)
                      len = maxlen;

              return copy_to_user(p, str, len) ? -EFAULT : len;
      }
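
      /*
       * Illustrative (user-space, not part of this driver): str_to_user()
       * backs the EVIOCGNAME/EVIOCGPHYS/EVIOCGUNIQ handlers further down,
       * e.g.:
       *
       *      char name[256];
       *      ioctl(fd, EVIOCGNAME(sizeof(name)), name);
       *
       * The ioctl returns the number of bytes copied; if the buffer is
       * smaller than the string, the copy is truncated to the buffer size.
       */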
      
      static int handle_eviocgbit(struct input_dev *dev,
                                  unsigned int type, unsigned int size,
                                  void __user *p, int compat_mode)
      {
              unsigned long *bits;
              int len;
      
              switch (type) {
      
              case      0: bits = dev->evbit;  len = EV_MAX;  break;
              case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
              case EV_REL: bits = dev->relbit; len = REL_MAX; break;
              case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
              case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
              case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
              case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
              case EV_FF:  bits = dev->ffbit;  len = FF_MAX;  break;
              case EV_SW:  bits = dev->swbit;  len = SW_MAX;  break;
              default: return -EINVAL;
              }

              return bits_to_user(bits, len, size, p, compat_mode);
      }
      
      static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p)
      {
              struct input_keymap_entry ke = {
                      .len        = sizeof(unsigned int),
                      .flags        = 0,
              };
              int __user *ip = (int __user *)p;
              int error;

              /* legacy case */
              if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
                      return -EFAULT;

              error = input_get_keycode(dev, &ke);
              if (error)
                      return error;

              if (put_user(ke.keycode, ip + 1))
                      return -EFAULT;
      
              return 0;
      }
      
      static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p)
      {
              struct input_keymap_entry ke;
              int error;
      
              if (copy_from_user(&ke, p, sizeof(ke)))
                      return -EFAULT;

              error = input_get_keycode(dev, &ke);
              if (error)
                      return error;

              if (copy_to_user(p, &ke, sizeof(ke)))
                      return -EFAULT;
      
              return 0;
      }
      
      static int evdev_handle_set_keycode(struct input_dev *dev, void __user *p)
      {
              struct input_keymap_entry ke = {
                      .len        = sizeof(unsigned int),
                      .flags        = 0,
              };
              int __user *ip = (int __user *)p;

              if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
                      return -EFAULT;

              if (get_user(ke.keycode, ip + 1))
                      return -EFAULT;

              return input_set_keycode(dev, &ke);
      }
      
      static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
      {
              struct input_keymap_entry ke;
      
              if (copy_from_user(&ke, p, sizeof(ke)))
                      return -EFAULT;

              if (ke.len > sizeof(ke.scancode))
                      return -EINVAL;

              return input_set_keycode(dev, &ke);
      }
      
      /*
       * If we transfer state to the user, we should flush all pending events
       * of the same type from the client's queue. Otherwise, they might end up
 * with duplicate events, which can screw up the client's state tracking.
       * If bits_to_user fails after flushing the queue, we queue a SYN_DROPPED
       * event so user-space will notice missing events.
       *
       * LOCKING:
 * We need to take event_lock before buffer_lock to avoid deadlocks. But we
 * need the event_lock only to guarantee consistent state. We can safely release
       * it while flushing the queue. This allows input-core to handle filters while
       * we flush the queue.
       */
      static int evdev_handle_get_val(struct evdev_client *client,
                                      struct input_dev *dev, unsigned int type,
                                      unsigned long *bits, unsigned int maxbit,
                                      unsigned int maxlen, void __user *p,
                                      int compat)
      {
              int ret;
              unsigned long *mem;
      
              mem = bitmap_alloc(maxbit, GFP_KERNEL);
              if (!mem)
                      return -ENOMEM;

              spin_lock_irq(&dev->event_lock);
              spin_lock(&client->buffer_lock);

              bitmap_copy(mem, bits, maxbit);

              spin_unlock(&dev->event_lock);

              __evdev_flush_queue(client, type);

              spin_unlock_irq(&client->buffer_lock);

              ret = bits_to_user(mem, maxbit, maxlen, p, compat);
              if (ret < 0)
                      evdev_queue_syn_dropped(client);

              bitmap_free(mem);

              return ret;
      }
      
      static int evdev_handle_mt_request(struct input_dev *dev,
                                         unsigned int size,
                                         int __user *ip)
      {
              const struct input_mt *mt = dev->mt;
              unsigned int code;
              int max_slots;
              int i;
      
              if (get_user(code, &ip[0]))
                      return -EFAULT;
              if (!mt || !input_is_mt_value(code))
                      return -EINVAL;
      
              max_slots = (size - sizeof(__u32)) / sizeof(__s32);
              for (i = 0; i < mt->num_slots && i < max_slots; i++) {
                      int value = input_mt_get_value(&mt->slots[i], code);
                      if (put_user(value, &ip[1 + i]))
                              return -EFAULT;
              }
      
              return 0;
      }
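
      /*
       * Illustrative (user-space, not part of this driver) request layout
       * consumed by evdev_handle_mt_request() above: the leading __u32
       * selects the ABS_MT_* code and the kernel fills the following __s32
       * entries, one per slot:
       *
       *      struct { __u32 code; __s32 values[64]; } req = {
       *              .code = ABS_MT_POSITION_X,
       *      };
       *      ioctl(fd, EVIOCGMTSLOTS(sizeof(req)), &req);
       */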
      
      static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
                              struct file *file)
      {
              client->revoked = true;
              evdev_ungrab(evdev, client);
              input_flush_device(&evdev->handle, file);
              wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
      
              return 0;
      }
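
      /*
       * Illustrative (user-space, not part of this driver) use of the
       * EVIOCSMASK handler below: restrict a client to selected EV_KEY
       * events.
       *
       *      unsigned long codes[BITS_TO_LONGS(KEY_MAX + 1)] = {};
       *      struct input_mask mask = {
       *              .type = EV_KEY,
       *              .codes_size = sizeof(codes),
       *              .codes_ptr = (__u64)(uintptr_t)codes,
       *      };
       *
       *      ... set the desired KEY_* bits in codes[] ...
       *      ioctl(fd, EVIOCSMASK, &mask);
       */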
      
      /* must be called with evdev-mutex held */
      static int evdev_set_mask(struct evdev_client *client,
                                unsigned int type,
                                const void __user *codes,
                                u32 codes_size,
                                int compat)
      {
              unsigned long flags, *mask, *oldmask;
              size_t cnt;
              int error;
      
              /* we allow unknown types and 'codes_size > size' for forward-compat */
              cnt = evdev_get_mask_cnt(type);
              if (!cnt)
                      return 0;

              mask = bitmap_zalloc(cnt, GFP_KERNEL);
              if (!mask)
                      return -ENOMEM;

              error = bits_from_user(mask, cnt - 1, codes_size, codes, compat);
              if (error < 0) {
                      bitmap_free(mask);
                      return error;
              }

              spin_lock_irqsave(&client->buffer_lock, flags);
              oldmask = client->evmasks[type];
              client->evmasks[type] = mask;
              spin_unlock_irqrestore(&client->buffer_lock, flags);

              bitmap_free(oldmask);

              return 0;
      }
      
      /* must be called with evdev-mutex held */
      static int evdev_get_mask(struct evdev_client *client,
                                unsigned int type,
                                void __user *codes,
                                u32 codes_size,
                                int compat)
      {
              unsigned long *mask;
              size_t cnt, size, xfer_size;
              int i;
              int error;
      
              /* we allow unknown types and 'codes_size > size' for forward-compat */
              cnt = evdev_get_mask_cnt(type);
              size = sizeof(unsigned long) * BITS_TO_LONGS(cnt);
              xfer_size = min_t(size_t, codes_size, size);

              if (cnt > 0) {
                      mask = client->evmasks[type];
                      if (mask) {
                              error = bits_to_user(mask, cnt - 1,
                                                   xfer_size, codes, compat);
                              if (error < 0)
                                      return error;
                      } else {
                              /* fake mask with all bits set */
                              for (i = 0; i < xfer_size; i++)
                                      if (put_user(0xffU, (u8 __user *)codes + i))
                                              return -EFAULT;
                      }
              }

              if (xfer_size < codes_size)
                      if (clear_user(codes + xfer_size, codes_size - xfer_size))
                              return -EFAULT;
      
              return 0;
      }
      
      static long evdev_do_ioctl(struct file *file, unsigned int cmd,
                                 void __user *p, int compat_mode)
      {
              struct evdev_client *client = file->private_data;
              struct evdev *evdev = client->evdev;
              struct input_dev *dev = evdev->handle.dev;
              struct input_absinfo abs;
              struct input_mask mask;
              struct ff_effect effect;
              int __user *ip = (int __user *)p;
              unsigned int i, t, u, v;
              unsigned int size;
              int error;
      
              /* First we check for fixed-length commands */
              switch (cmd) {
      
              case EVIOCGVERSION:
                      return put_user(EV_VERSION, ip);

              case EVIOCGID:
                      if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
                              return -EFAULT;
                      return 0;

              case EVIOCGREP:
                      if (!test_bit(EV_REP, dev->evbit))
                              return -ENOSYS;
                      if (put_user(dev->rep[REP_DELAY], ip))
                              return -EFAULT;
                      if (put_user(dev->rep[REP_PERIOD], ip + 1))
                              return -EFAULT;
                      return 0;

              case EVIOCSREP:
                      if (!test_bit(EV_REP, dev->evbit))
                              return -ENOSYS;
                      if (get_user(u, ip))
                              return -EFAULT;
                      if (get_user(v, ip + 1))
                              return -EFAULT;

                      input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u);
                      input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v);

                      return 0;

              case EVIOCRMFF:
                      return input_ff_erase(dev, (int)(unsigned long) p, file);

              case EVIOCGEFFECTS:
                      i = test_bit(EV_FF, dev->evbit) ?
                                      dev->ff->max_effects : 0;
                      if (put_user(i, ip))
                              return -EFAULT;
                      return 0;

              case EVIOCGRAB:
                      if (p)
                              return evdev_grab(evdev, client);
                      else
                              return evdev_ungrab(evdev, client);

              case EVIOCREVOKE:
                      if (p)
                              return -EINVAL;
                      else
                              return evdev_revoke(evdev, client, file);
      
              case EVIOCGMASK: {
                      void __user *codes_ptr;
      
                      if (copy_from_user(&mask, p, sizeof(mask)))
                              return -EFAULT;

                      codes_ptr = (void __user *)(unsigned long)mask.codes_ptr;
                      return evdev_get_mask(client,
                                            mask.type, codes_ptr, mask.codes_size,
                                            compat_mode);
              }

              case EVIOCSMASK: {
                      const void __user *codes_ptr;

                      if (copy_from_user(&mask, p, sizeof(mask)))
                              return -EFAULT;

                      codes_ptr = (const void __user *)(unsigned long)mask.codes_ptr;
                      return evdev_set_mask(client,
                                            mask.type, codes_ptr, mask.codes_size,
                                            compat_mode);
              }

              case EVIOCSCLOCKID:
                      if (copy_from_user(&i, p, sizeof(unsigned int)))
                              return -EFAULT;

                      return evdev_set_clk_type(client, i);

              case EVIOCGKEYCODE:
                      return evdev_handle_get_keycode(dev, p);

              case EVIOCSKEYCODE:
                      return evdev_handle_set_keycode(dev, p);

              case EVIOCGKEYCODE_V2:
                      return evdev_handle_get_keycode_v2(dev, p);

              case EVIOCSKEYCODE_V2:
                      return evdev_handle_set_keycode_v2(dev, p);
              }
      
              size = _IOC_SIZE(cmd);
      
              /* Now check variable-length commands */
      #define EVIOC_MASK_SIZE(nr)        ((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
              switch (EVIOC_MASK_SIZE(cmd)) {
      
              case EVIOCGPROP(0):
                      return bits_to_user(dev->propbit, INPUT_PROP_MAX,
                                          size, p, compat_mode);

              case EVIOCGMTSLOTS(0):
                      return evdev_handle_mt_request(dev, size, ip);

              case EVIOCGKEY(0):
                      return evdev_handle_get_val(client, dev, EV_KEY, dev->key,
                                                  KEY_MAX, size, p, compat_mode);

              case EVIOCGLED(0):
                      return evdev_handle_get_val(client, dev, EV_LED, dev->led,
                                                  LED_MAX, size, p, compat_mode);

              case EVIOCGSND(0):
                      return evdev_handle_get_val(client, dev, EV_SND, dev->snd,
                                                  SND_MAX, size, p, compat_mode);

              case EVIOCGSW(0):
                      return evdev_handle_get_val(client, dev, EV_SW, dev->sw,
                                                  SW_MAX, size, p, compat_mode);

              case EVIOCGNAME(0):
                      return str_to_user(dev->name, size, p);

              case EVIOCGPHYS(0):
                      return str_to_user(dev->phys, size, p);

              case EVIOCGUNIQ(0):
                      return str_to_user(dev->uniq, size, p);

              case EVIOC_MASK_SIZE(EVIOCSFF):
                      if (input_ff_effect_from_user(p, size, &effect))
                              return -EFAULT;

                      error = input_ff_upload(dev, &effect, file);
                      if (error)
                              return error;

                      if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
                              return -EFAULT;
      
                      return 0;
              }
      
              /* Multi-number variable-length handlers */
              if (_IOC_TYPE(cmd) != 'E')
                      return -EINVAL;

              if (_IOC_DIR(cmd) == _IOC_READ) {

                      if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
                              return handle_eviocgbit(dev,
                                                      _IOC_NR(cmd) & EV_MAX, size,
                                                      p, compat_mode);

                      if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {

                              if (!dev->absinfo)
                                      return -EINVAL;

                              t = _IOC_NR(cmd) & ABS_MAX;
                              abs = dev->absinfo[t];
      
                              if (copy_to_user(p, &abs, min_t(size_t,
                                              size, sizeof(struct input_absinfo))))
                                      return -EFAULT;
      
                              return 0;
                      }
              }
      
              if (_IOC_DIR(cmd) == _IOC_WRITE) {

                      if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {

                              if (!dev->absinfo)
                                      return -EINVAL;

                              t = _IOC_NR(cmd) & ABS_MAX;

                              if (copy_from_user(&abs, p, min_t(size_t,
                                              size, sizeof(struct input_absinfo))))
                                      return -EFAULT;

                              if (size < sizeof(struct input_absinfo))
                                      abs.resolution = 0;

                              /* We can't change number of reserved MT slots */
                              if (t == ABS_MT_SLOT)
                                      return -EINVAL;

                              /*
                               * Take event lock to ensure that we are not
                               * changing device parameters in the middle
                               * of an event.
                               */
                              spin_lock_irq(&dev->event_lock);
                              dev->absinfo[t] = abs;
                              spin_unlock_irq(&dev->event_lock);
      
                              return 0;
                      }
              }
      
              return -EINVAL;
      }
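
      /*
       * Illustrative (user-space, not part of this driver) use of the
       * variable-length ioctls dispatched above:
       *
       *      unsigned long keys[BITS_TO_LONGS(KEY_MAX + 1)];
       *      ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(keys)), keys);  supported keys
       *      ioctl(fd, EVIOCGKEY(sizeof(keys)), keys);          current key state
       *
       * The size encoded in the request determines how many bytes
       * bits_to_user() copies back; see handle_eviocgbit() and
       * evdev_handle_get_val().
       */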
      
      static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
                                      void __user *p, int compat_mode)
      {
              struct evdev_client *client = file->private_data;
              struct evdev *evdev = client->evdev;
              int retval;
      
              retval = mutex_lock_interruptible(&evdev->mutex);
              if (retval)
                      return retval;
      
              if (!evdev->exist || client->revoked) {
                      retval = -ENODEV;
                      goto out;
              }
      
              retval = evdev_do_ioctl(file, cmd, p, compat_mode);
      
       out:
              mutex_unlock(&evdev->mutex);
              return retval;
      }
      
      static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
      {
              return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0);
      }
      
      #ifdef CONFIG_COMPAT
      static long evdev_ioctl_compat(struct file *file,
                                      unsigned int cmd, unsigned long arg)
      {
              return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1);
      }
      #endif
      
      static const struct file_operations evdev_fops = {
              .owner                = THIS_MODULE,
              .read                = evdev_read,
              .write                = evdev_write,
              .poll                = evdev_poll,
              .open                = evdev_open,
              .release        = evdev_release,
              .unlocked_ioctl        = evdev_ioctl,
      #ifdef CONFIG_COMPAT
              .compat_ioctl        = evdev_ioctl_compat,
      #endif
              .fasync                = evdev_fasync,
              .llseek                = no_llseek,
      };
      
      /*
       * Mark device non-existent. This disables writes, ioctls and
       * prevents new users from opening the device. Already posted
       * blocking reads will stay, however new ones will fail.
       */
      static void evdev_mark_dead(struct evdev *evdev)
      {
              mutex_lock(&evdev->mutex);
              evdev->exist = false;
              mutex_unlock(&evdev->mutex);
      }
      
      static void evdev_cleanup(struct evdev *evdev)
      {
              struct input_handle *handle = &evdev->handle;

              evdev_mark_dead(evdev);
              evdev_hangup(evdev);
      
              /* evdev is marked dead so no one else accesses evdev->open */
              if (evdev->open) {
                      input_flush_device(handle, NULL);
                      input_close_device(handle);
              }
      }
      
      /*
       * Create new evdev device. Note that input core serializes calls
       * to connect and disconnect.
       */
      static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
                               const struct input_device_id *id)
      {
              struct evdev *evdev;
              int minor;
              int dev_no;
              int error;
      
              minor = input_get_new_minor(EVDEV_MINOR_BASE, EVDEV_MINORS, true);
              if (minor < 0) {
                      error = minor;
                      pr_err("failed to reserve new minor: %d\n", error);
                      return error;
              }
      
              evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL);
              if (!evdev) {
                      error = -ENOMEM;
                      goto err_free_minor;
              }
      
              INIT_LIST_HEAD(&evdev->client_list);
              spin_lock_init(&evdev->client_lock);
              mutex_init(&evdev->mutex);
              init_waitqueue_head(&evdev->wait);
              evdev->exist = true;
      
              dev_no = minor;
              /* Normalize device number if it falls into legacy range */
              if (dev_no < EVDEV_MINOR_BASE + EVDEV_MINORS)
                      dev_no -= EVDEV_MINOR_BASE;
              dev_set_name(&evdev->dev, "event%d", dev_no);

              evdev->handle.dev = input_get_device(dev);
              evdev->handle.name = dev_name(&evdev->dev);
              evdev->handle.handler = handler;
              evdev->handle.private = evdev;
      
              evdev->dev.devt = MKDEV(INPUT_MAJOR, minor);
              evdev->dev.class = &input_class;
              evdev->dev.parent = &dev->dev;
              evdev->dev.release = evdev_free;
              device_initialize(&evdev->dev);
      
              error = input_register_handle(&evdev->handle);
              if (error)
                      goto err_free_evdev;
      
              cdev_init(&evdev->cdev, &evdev_fops);
      
              error = cdev_device_add(&evdev->cdev, &evdev->dev);
              if (error)
                      goto err_cleanup_evdev;
      
              return 0;
      
       err_cleanup_evdev:
              evdev_cleanup(evdev);
              input_unregister_handle(&evdev->handle);
       err_free_evdev:
              put_device(&evdev->dev);
       err_free_minor:
              input_free_minor(minor);
              return error;
      }
      
      static void evdev_disconnect(struct input_handle *handle)
      {
              struct evdev *evdev = handle->private;
      
              cdev_device_del(&evdev->cdev, &evdev->dev);
              evdev_cleanup(evdev);
              input_free_minor(MINOR(evdev->dev.devt));
              input_unregister_handle(handle);
              put_device(&evdev->dev);
      }
      
      static const struct input_device_id evdev_ids[] = {
              { .driver_info = 1 },        /* Matches all devices */
              { },                        /* Terminating zero entry */
      };
      
      MODULE_DEVICE_TABLE(input, evdev_ids);
      
      static struct input_handler evdev_handler = {
              .event                = evdev_event,
              .events                = evdev_events,
              .connect        = evdev_connect,
              .disconnect        = evdev_disconnect,
              .legacy_minors        = true,
              .minor                = EVDEV_MINOR_BASE,
              .name                = "evdev",
              .id_table        = evdev_ids,
      };
      
      static int __init evdev_init(void)
      {
              return input_register_handler(&evdev_handler);
      }
      
      static void __exit evdev_exit(void)
      {
              input_unregister_handler(&evdev_handler);
      }
      
      module_init(evdev_init);
      module_exit(evdev_exit);
      
      MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
      MODULE_DESCRIPTION("Input driver event char devices");
      MODULE_LICENSE("GPL");
      // SPDX-License-Identifier: GPL-2.0-or-later
      /* Helpers for initial module or kernel cmdline parsing
         Copyright (C) 2001 Rusty Russell.
      
      */
      #include <linux/kernel.h>
      #include <linux/string.h>
      #include <linux/errno.h>
      #include <linux/module.h>
      #include <linux/moduleparam.h>
      #include <linux/device.h>
      #include <linux/err.h>
      #include <linux/slab.h>
      #include <linux/ctype.h>
      #include <linux/security.h>
      
      #ifdef CONFIG_SYSFS
      /* Protects all built-in parameters, modules use their own param_lock */
      static DEFINE_MUTEX(param_lock);
      
      /* Use the module's mutex, or if built-in use the built-in mutex */
      #ifdef CONFIG_MODULES
      #define KPARAM_MUTEX(mod)        ((mod) ? &(mod)->param_lock : &param_lock)
      #else
      #define KPARAM_MUTEX(mod)        (&param_lock)
      #endif
      
      static inline void check_kparam_locked(struct module *mod)
      {
              BUG_ON(!mutex_is_locked(KPARAM_MUTEX(mod)));
      }
      #else
      static inline void check_kparam_locked(struct module *mod)
      {
      }
      #endif /* !CONFIG_SYSFS */
      
      /* This just allows us to keep track of which parameters are kmalloced. */
      struct kmalloced_param {
              struct list_head list;
              char val[];
      };
      static LIST_HEAD(kmalloced_params);
      static DEFINE_SPINLOCK(kmalloced_params_lock);
      
      static void *kmalloc_parameter(unsigned int size)
      {
              struct kmalloced_param *p;
      
              p = kmalloc(sizeof(*p) + size, GFP_KERNEL);
              if (!p)
                      return NULL;
      
              spin_lock(&kmalloced_params_lock);
              list_add(&p->list, &kmalloced_params);
              spin_unlock(&kmalloced_params_lock);
      
              return p->val;
      }
      
      /* Does nothing if parameter wasn't kmalloced above. */
      static void maybe_kfree_parameter(void *param)
      {
              struct kmalloced_param *p;
      
              spin_lock(&kmalloced_params_lock);
              list_for_each_entry(p, &kmalloced_params, list) {
                      if (p->val == param) {
                              list_del(&p->list);
                              kfree(p);
                              break;
                      }
              }
              spin_unlock(&kmalloced_params_lock);
      }
      
      static char dash2underscore(char c)
      {
              if (c == '-')
                      return '_';
              return c;
      }
      
      bool parameqn(const char *a, const char *b, size_t n)
      {
              size_t i;
      
              for (i = 0; i < n; i++) {
                      if (dash2underscore(a[i]) != dash2underscore(b[i]))
                              return false;
              }
              return true;
      }
      
      bool parameq(const char *a, const char *b)
      {
              return parameqn(a, b, strlen(a)+1);
      }
      
      static bool param_check_unsafe(const struct kernel_param *kp)
      {
              if (kp->flags & KERNEL_PARAM_FL_HWPARAM &&
                  security_locked_down(LOCKDOWN_MODULE_PARAMETERS))
                      return false;
      
              if (kp->flags & KERNEL_PARAM_FL_UNSAFE) {
                      pr_notice("Setting dangerous option %s - tainting kernel\n",
                                kp->name);
                      add_taint(TAINT_USER, LOCKDEP_STILL_OK);
              }
      
              return true;
      }
      
      static int parse_one(char *param,
                           char *val,
                           const char *doing,
                           const struct kernel_param *params,
                           unsigned num_params,
                           s16 min_level,
                           s16 max_level,
                           void *arg,
                           int (*handle_unknown)(char *param, char *val,
                                           const char *doing, void *arg))
      {
              unsigned int i;
              int err;
      
              /* Find parameter */
              for (i = 0; i < num_params; i++) {
                      if (parameq(param, params[i].name)) {
                              if (params[i].level < min_level
                                  || params[i].level > max_level)
                                      return 0;
                              /* No one handled NULL, so do it here. */
                              if (!val &&
                                  !(params[i].ops->flags & KERNEL_PARAM_OPS_FL_NOARG))
                                      return -EINVAL;
                              pr_debug("handling %s with %p\n", param,
                                      params[i].ops->set);
                              kernel_param_lock(params[i].mod);
                              if (param_check_unsafe(&params[i]))
                                      err = params[i].ops->set(val, &params[i]);
                              else
                                      err = -EPERM;
                              kernel_param_unlock(params[i].mod);
                              return err;
                      }
              }
      
              if (handle_unknown) {
                      pr_debug("doing %s: %s='%s'\n", doing, param, val);
                      return handle_unknown(param, val, doing, arg);
              }
      
              pr_debug("Unknown argument '%s'\n", param);
              return -ENOENT;
      }
      
      /* Args looks like "foo=bar,bar2 baz=fuz wiz". */
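      /*
       * For the example above (illustrative): next_arg() below yields the
       * pairs ("foo", "bar,bar2"), ("baz", "fuz") and ("wiz", NULL); each
       * pair is then matched against @params by parse_one(), and a NULL
       * value is only accepted by ops flagged KERNEL_PARAM_OPS_FL_NOARG
       * (e.g. plain bool parameters).
       */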
      char *parse_args(const char *doing,
                       char *args,
                       const struct kernel_param *params,
                       unsigned num,
                       s16 min_level,
                       s16 max_level,
                       void *arg,
                       int (*unknown)(char *param, char *val,
                                      const char *doing, void *arg))
      {
              char *param, *val, *err = NULL;
      
              /* Chew leading spaces */
              args = skip_spaces(args);
      
              if (*args)
                      pr_debug("doing %s, parsing ARGS: '%s'\n", doing, args);
      
              while (*args) {
                      int ret;
                      int irq_was_disabled;
      
                      args = next_arg(args, &param, &val);
                      /* Stop at -- */
                      if (!val && strcmp(param, "--") == 0)
                              return err ?: args;
                      irq_was_disabled = irqs_disabled();
                      ret = parse_one(param, val, doing, params, num,
                                      min_level, max_level, arg, unknown);
                      if (irq_was_disabled && !irqs_disabled())
                              pr_warn("%s: option '%s' enabled irq's!\n",
                                      doing, param);
      
                      switch (ret) {
                      case 0:
                              continue;
                      case -ENOENT:
                              pr_err("%s: Unknown parameter `%s'\n", doing, param);
                              break;
                      case -ENOSPC:
                              pr_err("%s: `%s' too large for parameter `%s'\n",
                                     doing, val ?: "", param);
                              break;
                      default:
                              pr_err("%s: `%s' invalid for parameter `%s'\n",
                                     doing, val ?: "", param);
                              break;
                      }
      
                      err = ERR_PTR(ret);
              }
      
              return err;
      }
      
      /* Lazy bastard, eh? */
      #define STANDARD_PARAM_DEF(name, type, format, strtolfn)                      \
              int param_set_##name(const char *val, const struct kernel_param *kp) \
              {                                                                \
                      return strtolfn(val, 0, (type *)kp->arg);                \
              }                                                                \
              int param_get_##name(char *buffer, const struct kernel_param *kp) \
              {                                                                \
                      return scnprintf(buffer, PAGE_SIZE, format "\n",        \
                                      *((type *)kp->arg));                        \
              }                                                                \
              const struct kernel_param_ops param_ops_##name = {                        \
                      .set = param_set_##name,                                \
                      .get = param_get_##name,                                \
              };                                                                \
              EXPORT_SYMBOL(param_set_##name);                                \
              EXPORT_SYMBOL(param_get_##name);                                \
              EXPORT_SYMBOL(param_ops_##name)
      
      
      STANDARD_PARAM_DEF(byte,        unsigned char,                "%hhu", kstrtou8);
      STANDARD_PARAM_DEF(short,        short,                        "%hi",  kstrtos16);
      STANDARD_PARAM_DEF(ushort,        unsigned short,                "%hu",  kstrtou16);
      STANDARD_PARAM_DEF(int,                int,                        "%i",   kstrtoint);
      STANDARD_PARAM_DEF(uint,        unsigned int,                "%u",   kstrtouint);
      STANDARD_PARAM_DEF(long,        long,                        "%li",  kstrtol);
      STANDARD_PARAM_DEF(ulong,        unsigned long,                "%lu",  kstrtoul);
      STANDARD_PARAM_DEF(ullong,        unsigned long long,        "%llu", kstrtoull);
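
      /*
       * Illustrative driver-side use of the standard ops above (hypothetical
       * module "foo", not part of this file):
       *
       *      static int debug_level = 2;
       *      module_param(debug_level, int, 0644);
       *
       * module_param() with type "int" binds the variable to param_ops_int,
       * so "foo.debug_level=5" on the command line (when built in) and writes
       * to /sys/module/foo/parameters/debug_level both end up in
       * param_set_int().
       */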
      
      int param_set_charp(const char *val, const struct kernel_param *kp)
      {
              if (strlen(val) > 1024) {
                      pr_err("%s: string parameter too long\n", kp->name);
                      return -ENOSPC;
              }
      
              maybe_kfree_parameter(*(char **)kp->arg);
      
              /* This is a hack.  We can't kmalloc in early boot, and we
               * don't need to; this mangled commandline is preserved. */
              if (slab_is_available()) {
                      *(char **)kp->arg = kmalloc_parameter(strlen(val)+1);
                      if (!*(char **)kp->arg)
                              return -ENOMEM;
                      strcpy(*(char **)kp->arg, val);
              } else
                      *(const char **)kp->arg = val;
      
              return 0;
      }
      EXPORT_SYMBOL(param_set_charp);
      
      int param_get_charp(char *buffer, const struct kernel_param *kp)
      {
              return scnprintf(buffer, PAGE_SIZE, "%s\n", *((char **)kp->arg));
      }
      EXPORT_SYMBOL(param_get_charp);
      
      void param_free_charp(void *arg)
      {
              maybe_kfree_parameter(*((char **)arg));
      }
      EXPORT_SYMBOL(param_free_charp);
      
      const struct kernel_param_ops param_ops_charp = {
              .set = param_set_charp,
              .get = param_get_charp,
              .free = param_free_charp,
      };
      EXPORT_SYMBOL(param_ops_charp);
      
      /* Actually could be a bool or an int, for historical reasons. */
      int param_set_bool(const char *val, const struct kernel_param *kp)
      {
              /* No equals means "set"... */
              if (!val) val = "1";
      
              /* One of =[yYnN01] */
              return strtobool(val, kp->arg);
      }
      EXPORT_SYMBOL(param_set_bool);
      
      int param_get_bool(char *buffer, const struct kernel_param *kp)
      {
              /* Y and N chosen as being relatively non-coder friendly */
              return sprintf(buffer, "%c\n", *(bool *)kp->arg ? 'Y' : 'N');
      }
      EXPORT_SYMBOL(param_get_bool);
      
      const struct kernel_param_ops param_ops_bool = {
              .flags = KERNEL_PARAM_OPS_FL_NOARG,
              .set = param_set_bool,
              .get = param_get_bool,
      };
      EXPORT_SYMBOL(param_ops_bool);
      
      int param_set_bool_enable_only(const char *val, const struct kernel_param *kp)
      {
              int err = 0;
              bool new_value;
              bool orig_value = *(bool *)kp->arg;
              struct kernel_param dummy_kp = *kp;
      
              dummy_kp.arg = &new_value;
      
              err = param_set_bool(val, &dummy_kp);
              if (err)
                      return err;
      
              /* Don't let them unset it once it's set! */
              if (!new_value && orig_value)
                      return -EROFS;
      
              if (new_value)
                      err = param_set_bool(val, kp);
      
              return err;
      }
      EXPORT_SYMBOL_GPL(param_set_bool_enable_only);
      
      const struct kernel_param_ops param_ops_bool_enable_only = {
              .flags = KERNEL_PARAM_OPS_FL_NOARG,
              .set = param_set_bool_enable_only,
              .get = param_get_bool,
      };
      EXPORT_SYMBOL_GPL(param_ops_bool_enable_only);
      
      /* This one must be bool. */
      int param_set_invbool(const char *val, const struct kernel_param *kp)
      {
              int ret;
              bool boolval;
              struct kernel_param dummy;
      
              dummy.arg = &boolval;
              ret = param_set_bool(val, &dummy);
              if (ret == 0)
                      *(bool *)kp->arg = !boolval;
              return ret;
      }
      EXPORT_SYMBOL(param_set_invbool);
      
      int param_get_invbool(char *buffer, const struct kernel_param *kp)
      {
              return sprintf(buffer, "%c\n", (*(bool *)kp->arg) ? 'N' : 'Y');
      }
      EXPORT_SYMBOL(param_get_invbool);
      
      const struct kernel_param_ops param_ops_invbool = {
              .set = param_set_invbool,
              .get = param_get_invbool,
      };
      EXPORT_SYMBOL(param_ops_invbool);
      
      int param_set_bint(const char *val, const struct kernel_param *kp)
      {
              /* Match bool exactly, by re-using it. */
              struct kernel_param boolkp = *kp;
              bool v;
              int ret;
      
              boolkp.arg = &v;
      
              ret = param_set_bool(val, &boolkp);
              if (ret == 0)
                      *(int *)kp->arg = v;
              return ret;
      }
      EXPORT_SYMBOL(param_set_bint);
      
      const struct kernel_param_ops param_ops_bint = {
              .flags = KERNEL_PARAM_OPS_FL_NOARG,
              .set = param_set_bint,
              .get = param_get_int,
      };
      EXPORT_SYMBOL(param_ops_bint);
      
      /* We break the rule and mangle the string. */
      static int param_array(struct module *mod,
                             const char *name,
                             const char *val,
                             unsigned int min, unsigned int max,
                             void *elem, int elemsize,
                             int (*set)(const char *, const struct kernel_param *kp),
                             s16 level,
                             unsigned int *num)
      {
              int ret;
              struct kernel_param kp;
              char save;
      
              /* Get the name right for errors. */
              kp.name = name;
              kp.arg = elem;
              kp.level = level;
      
              *num = 0;
              /* We expect a comma-separated list of values. */
              do {
                      int len;
      
                      if (*num == max) {
                              pr_err("%s: can only take %i arguments\n", name, max);
                              return -EINVAL;
                      }
                      len = strcspn(val, ",");
      
                      /* nul-terminate and parse */
                      save = val[len];
                      ((char *)val)[len] = '\0';
                      check_kparam_locked(mod);
                      ret = set(val, &kp);
      
                      if (ret != 0)
                              return ret;
                      kp.arg += elemsize;
                      val += len+1;
                      (*num)++;
              } while (save == ',');
      
              if (*num < min) {
                      pr_err("%s: needs at least %i arguments\n", name, min);
                      return -EINVAL;
              }
              return 0;
      }
      
      static int param_array_set(const char *val, const struct kernel_param *kp)
      {
              const struct kparam_array *arr = kp->arr;
              unsigned int temp_num;
      
              return param_array(kp->mod, kp->name, val, 1, arr->max, arr->elem,
                                 arr->elemsize, arr->ops->set, kp->level,
                                 arr->num ?: &temp_num);
      }
      
      static int param_array_get(char *buffer, const struct kernel_param *kp)
      {
              int i, off, ret;
              const struct kparam_array *arr = kp->arr;
              struct kernel_param p = *kp;
      
              for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {
                      /* Replace \n with comma */
                      if (i)
                              buffer[off - 1] = ',';
                      p.arg = arr->elem + arr->elemsize * i;
                      check_kparam_locked(p.mod);
                      ret = arr->ops->get(buffer + off, &p);
                      if (ret < 0)
                              return ret;
                      off += ret;
              }
              buffer[off] = '\0';
              return off;
      }
      
      static void param_array_free(void *arg)
      {
              unsigned int i;
              const struct kparam_array *arr = arg;
      
              if (arr->ops->free)
                      for (i = 0; i < (arr->num ? *arr->num : arr->max); i++)
                              arr->ops->free(arr->elem + arr->elemsize * i);
      }
      
      const struct kernel_param_ops param_array_ops = {
              .set = param_array_set,
              .get = param_array_get,
              .free = param_array_free,
      };
      EXPORT_SYMBOL(param_array_ops);
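
      /*
       * Illustrative use of param_array_ops above (hypothetical module "foo",
       * not part of this file):
       *
       *      static int ports[4];
       *      static unsigned int nports;
       *      module_param_array(ports, int, &nports, 0444);
       *
       * "foo.ports=1,2,3" then fills ports[0..2] via param_array_set() and
       * stores the element count (3) in nports.
       */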
      
      int param_set_copystring(const char *val, const struct kernel_param *kp)
      {
              const struct kparam_string *kps = kp->str;
      
              if (strlen(val)+1 > kps->maxlen) {
                      pr_err("%s: string doesn't fit in %u chars.\n",
                             kp->name, kps->maxlen-1);
                      return -ENOSPC;
              }
              strcpy(kps->string, val);
              return 0;
      }
      EXPORT_SYMBOL(param_set_copystring);
      
      int param_get_string(char *buffer, const struct kernel_param *kp)
      {
              const struct kparam_string *kps = kp->str;
              return scnprintf(buffer, PAGE_SIZE, "%s\n", kps->string);
      }
      EXPORT_SYMBOL(param_get_string);
      
      const struct kernel_param_ops param_ops_string = {
              .set = param_set_copystring,
              .get = param_get_string,
      };
      EXPORT_SYMBOL(param_ops_string);
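
      /*
       * Illustrative use of param_ops_string above (hypothetical module
       * "foo", not part of this file):
       *
       *      static char ifname[16] = "eth0";
       *      module_param_string(ifname, ifname, sizeof(ifname), 0644);
       *
       * module_param_string() wraps the buffer in a kparam_string, so values
       * that do not fit are rejected with -ENOSPC by param_set_copystring().
       */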
      
      /* sysfs output in /sys/module/XYZ/parameters/ */
      #define to_module_attr(n) container_of(n, struct module_attribute, attr)
      #define to_module_kobject(n) container_of(n, struct module_kobject, kobj)
      
      struct param_attribute
      {
              struct module_attribute mattr;
              const struct kernel_param *param;
      };
      
      struct module_param_attrs
      {
              unsigned int num;
              struct attribute_group grp;
              struct param_attribute attrs[0];
      };
      
      #ifdef CONFIG_SYSFS
      #define to_param_attr(n) container_of(n, struct param_attribute, mattr)
      
      static ssize_t param_attr_show(struct module_attribute *mattr,
                                     struct module_kobject *mk, char *buf)
      {
              int count;
              struct param_attribute *attribute = to_param_attr(mattr);
      
              if (!attribute->param->ops->get)
                      return -EPERM;
      
              kernel_param_lock(mk->mod);
              count = attribute->param->ops->get(buf, attribute->param);
              kernel_param_unlock(mk->mod);
              return count;
      }
      
      /* sysfs always hands a nul-terminated string in buf.  We rely on that. */
      static ssize_t param_attr_store(struct module_attribute *mattr,
                                      struct module_kobject *mk,
                                      const char *buf, size_t len)
      {
              int err;
              struct param_attribute *attribute = to_param_attr(mattr);
      
              if (!attribute->param->ops->set)
                      return -EPERM;
      
              kernel_param_lock(mk->mod);
              if (param_check_unsafe(attribute->param))
                      err = attribute->param->ops->set(buf, attribute->param);
              else
                      err = -EPERM;
              kernel_param_unlock(mk->mod);
              if (!err)
                      return len;
              return err;
      }
      #endif
      
      #ifdef CONFIG_MODULES
      #define __modinit
      #else
      #define __modinit __init
      #endif
      
      #ifdef CONFIG_SYSFS
      void kernel_param_lock(struct module *mod)
      {
              mutex_lock(KPARAM_MUTEX(mod));
      }
      
      void kernel_param_unlock(struct module *mod)
      {
              mutex_unlock(KPARAM_MUTEX(mod));
      }
      
      EXPORT_SYMBOL(kernel_param_lock);
      EXPORT_SYMBOL(kernel_param_unlock);
      
      /*
       * add_sysfs_param - add a parameter to sysfs
       * @mk: struct module_kobject
       * @kp: the actual parameter definition to add to sysfs
       * @name: name of parameter
       *
 * Create a kobject for a (per-module) parameter if mk->mp is NULL, and
 * create the parameter file in sysfs.  Returns an error on out of memory.
 * Always cleans up if there's an error.
       */
      static __modinit int add_sysfs_param(struct module_kobject *mk,
                                           const struct kernel_param *kp,
                                           const char *name)
      {
              struct module_param_attrs *new_mp;
              struct attribute **new_attrs;
              unsigned int i;
      
              /* We don't bother calling this with invisible parameters. */
              BUG_ON(!kp->perm);
      
              if (!mk->mp) {
                      /* First allocation. */
                      mk->mp = kzalloc(sizeof(*mk->mp), GFP_KERNEL);
                      if (!mk->mp)
                              return -ENOMEM;
                      mk->mp->grp.name = "parameters";
                      /* NULL-terminated attribute array. */
                      mk->mp->grp.attrs = kzalloc(sizeof(mk->mp->grp.attrs[0]),
                                                  GFP_KERNEL);
                      /* Caller will cleanup via free_module_param_attrs */
                      if (!mk->mp->grp.attrs)
                              return -ENOMEM;
              }
      
              /* Enlarge allocations. */
              new_mp = krealloc(mk->mp,
                                sizeof(*mk->mp) +
                                sizeof(mk->mp->attrs[0]) * (mk->mp->num + 1),
                                GFP_KERNEL);
              if (!new_mp)
                      return -ENOMEM;
              mk->mp = new_mp;
      
              /* Extra pointer for NULL terminator */
              new_attrs = krealloc(mk->mp->grp.attrs,
                                   sizeof(mk->mp->grp.attrs[0]) * (mk->mp->num + 2),
                                   GFP_KERNEL);
              if (!new_attrs)
                      return -ENOMEM;
              mk->mp->grp.attrs = new_attrs;
      
              /* Tack new one on the end. */
              memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
              sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
              mk->mp->attrs[mk->mp->num].param = kp;
              mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
              /* Do not allow runtime DAC changes to make param writable. */
              if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
                      mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
              else
                      mk->mp->attrs[mk->mp->num].mattr.store = NULL;
              mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
              mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
              mk->mp->num++;
      
              /* Fix up all the pointers, since krealloc can move us */
              for (i = 0; i < mk->mp->num; i++)
                      mk->mp->grp.attrs[i] = &mk->mp->attrs[i].mattr.attr;
              mk->mp->grp.attrs[mk->mp->num] = NULL;
              return 0;
      }
      
      #ifdef CONFIG_MODULES
      static void free_module_param_attrs(struct module_kobject *mk)
      {
              if (mk->mp)
                      kfree(mk->mp->grp.attrs);
              kfree(mk->mp);
              mk->mp = NULL;
      }
      
      /*
       * module_param_sysfs_setup - setup sysfs support for one module
       * @mod: module
       * @kparam: module parameters (array)
       * @num_params: number of module parameters
       *
       * Adds sysfs entries for module parameters under
       * /sys/module/[mod->name]/parameters/
       */
      int module_param_sysfs_setup(struct module *mod,
                                   const struct kernel_param *kparam,
                                   unsigned int num_params)
      {
              int i, err;
              bool params = false;
      
              for (i = 0; i < num_params; i++) {
                      if (kparam[i].perm == 0)
                              continue;
                      err = add_sysfs_param(&mod->mkobj, &kparam[i], kparam[i].name);
                      if (err) {
                              free_module_param_attrs(&mod->mkobj);
                              return err;
                      }
                      params = true;
              }
      
              if (!params)
                      return 0;
      
              /* Create the param group. */
              err = sysfs_create_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp);
              if (err)
                      free_module_param_attrs(&mod->mkobj);
              return err;
      }
      
      /*
       * module_param_sysfs_remove - remove sysfs support for one module
       * @mod: module
       *
       * Remove sysfs entries for module parameters and the corresponding
       * kobject.
       */
      void module_param_sysfs_remove(struct module *mod)
      {
              if (mod->mkobj.mp) {
                      sysfs_remove_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp);
                      /* We are positive that no one is using any param
                       * attrs at this point.  Deallocate immediately. */
                      free_module_param_attrs(&mod->mkobj);
              }
      }
      #endif
      
      void destroy_params(const struct kernel_param *params, unsigned num)
      {
              unsigned int i;
      
              for (i = 0; i < num; i++)
                      if (params[i].ops->free)
                              params[i].ops->free(params[i].arg);
      }
      
      static struct module_kobject * __init locate_module_kobject(const char *name)
      {
              struct module_kobject *mk;
              struct kobject *kobj;
              int err;
      
              kobj = kset_find_obj(module_kset, name);
              if (kobj) {
                      mk = to_module_kobject(kobj);
              } else {
                      mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
                      BUG_ON(!mk);
      
                      mk->mod = THIS_MODULE;
                      mk->kobj.kset = module_kset;
                      err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL,
                                                 "%s", name);
      #ifdef CONFIG_MODULES
                      if (!err)
                              err = sysfs_create_file(&mk->kobj, &module_uevent.attr);
      #endif
                      if (err) {
                              kobject_put(&mk->kobj);
                              pr_crit("Adding module '%s' to sysfs failed (%d), the system may be unstable.\n",
                                      name, err);
                              return NULL;
                      }
      
                      /* So that we hold reference in both cases. */
                      kobject_get(&mk->kobj);
              }
      
              return mk;
      }
      
      static void __init kernel_add_sysfs_param(const char *name,
                                                const struct kernel_param *kparam,
                                                unsigned int name_skip)
      {
              struct module_kobject *mk;
              int err;
      
              mk = locate_module_kobject(name);
              if (!mk)
                      return;
      
              /* We need to remove old parameters before adding more. */
              if (mk->mp)
                      sysfs_remove_group(&mk->kobj, &mk->mp->grp);
      
              /* These should not fail at boot. */
              err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
              BUG_ON(err);
              err = sysfs_create_group(&mk->kobj, &mk->mp->grp);
              BUG_ON(err);
              kobject_uevent(&mk->kobj, KOBJ_ADD);
              kobject_put(&mk->kobj);
      }
      
      /*
       * param_sysfs_builtin - add sysfs parameters for built-in modules
       *
       * Add module_parameters to sysfs for "modules" built into the kernel.
       *
 * In kernel_param->name the "module" name (KBUILD_MODNAME) comes before
 * the dot and the "parameter" name after it; e.g. "printk.time" belongs
 * to module "printk", parameter "time".  Extract the module name of each
 * built-in kernel_param and call kernel_add_sysfs_param() for it, so that
 * parameters sharing a module name end up under the same module kobject.
       */
      static void __init param_sysfs_builtin(void)
      {
              const struct kernel_param *kp;
              unsigned int name_len;
              char modname[MODULE_NAME_LEN];
      
              for (kp = __start___param; kp < __stop___param; kp++) {
                      char *dot;
      
                      if (kp->perm == 0)
                              continue;
      
                      dot = strchr(kp->name, '.');
                      if (!dot) {
                              /* This happens for core_param() */
                              strcpy(modname, "kernel");
                              name_len = 0;
                      } else {
                              name_len = dot - kp->name + 1;
                              strlcpy(modname, kp->name, name_len);
                      }
                      kernel_add_sysfs_param(modname, kp, name_len);
              }
      }
      
      ssize_t __modver_version_show(struct module_attribute *mattr,
                                    struct module_kobject *mk, char *buf)
      {
              struct module_version_attribute *vattr =
                      container_of(mattr, struct module_version_attribute, mattr);
      
              return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version);
      }
      
      extern const struct module_version_attribute *__start___modver[];
      extern const struct module_version_attribute *__stop___modver[];
      
      static void __init version_sysfs_builtin(void)
      {
              const struct module_version_attribute **p;
              struct module_kobject *mk;
              int err;
      
              for (p = __start___modver; p < __stop___modver; p++) {
                      const struct module_version_attribute *vattr = *p;
      
                      mk = locate_module_kobject(vattr->module_name);
                      if (mk) {
                              err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
                              WARN_ON_ONCE(err);
                              kobject_uevent(&mk->kobj, KOBJ_ADD);
                              kobject_put(&mk->kobj);
                      }
              }
      }
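
/*
 * Net effect: a MODULE_VERSION() declared by built-in code shows up as
 * /sys/module/<module_name>/version, rendered by __modver_version_show()
 * above.
 */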
      
      /* module-related sysfs stuff */
      
      static ssize_t module_attr_show(struct kobject *kobj,
                                      struct attribute *attr,
                                      char *buf)
      {
              struct module_attribute *attribute;
              struct module_kobject *mk;
              int ret;
      
              attribute = to_module_attr(attr);
              mk = to_module_kobject(kobj);
      
              if (!attribute->show)
                      return -EIO;
      
              ret = attribute->show(attribute, mk, buf);
      
              return ret;
      }
      
      static ssize_t module_attr_store(struct kobject *kobj,
                                      struct attribute *attr,
                                      const char *buf, size_t len)
      {
              struct module_attribute *attribute;
              struct module_kobject *mk;
              int ret;
      
              attribute = to_module_attr(attr);
              mk = to_module_kobject(kobj);
      
              if (!attribute->store)
                      return -EIO;
      
              ret = attribute->store(attribute, mk, buf, len);
      
              return ret;
      }
      
      static const struct sysfs_ops module_sysfs_ops = {
              .show = module_attr_show,
              .store = module_attr_store,
      };
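
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * module_attribute whose ->show is dispatched through module_attr_show()
 * above when /sys/module/<name>/example is read.
 */
#if 0
static ssize_t example_show(struct module_attribute *mattr,
                            struct module_kobject *mk, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%s\n", kobject_name(&mk->kobj));
}

static struct module_attribute example_attr = __ATTR_RO(example);
#endif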
      
      static int uevent_filter(struct kset *kset, struct kobject *kobj)
      {
              struct kobj_type *ktype = get_ktype(kobj);
      
              if (ktype == &module_ktype)
                      return 1;
              return 0;
      }
      
      static const struct kset_uevent_ops module_uevent_ops = {
              .filter = uevent_filter,
      };
      
      struct kset *module_kset;
      int module_sysfs_initialized;
      
      static void module_kobj_release(struct kobject *kobj)
      {
              struct module_kobject *mk = to_module_kobject(kobj);
              complete(mk->kobj_completion);
      }
      
      struct kobj_type module_ktype = {
              .release   =        module_kobj_release,
              .sysfs_ops =        &module_sysfs_ops,
      };
      
      /*
       * param_sysfs_init - wrapper for built-in params support
       */
      static int __init param_sysfs_init(void)
      {
              module_kset = kset_create_and_add("module", &module_uevent_ops, NULL);
              if (!module_kset) {
                      printk(KERN_WARNING "%s (%d): error creating kset\n",
                              __FILE__, __LINE__);
                      return -ENOMEM;
              }
              module_sysfs_initialized = 1;
      
              version_sysfs_builtin();
              param_sysfs_builtin();
      
              return 0;
      }
      subsys_initcall(param_sysfs_init);
      
      #endif /* CONFIG_SYSFS */
      // SPDX-License-Identifier: GPL-2.0-or-later
      /*
       * Roccat Lua driver for Linux
       *
       * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
       */
      
      
      /*
 * The Roccat Lua is a gaming mouse whose cpi, button and light settings
 * can be configured.
       */
      
      #include <linux/device.h>
      #include <linux/input.h>
      #include <linux/hid.h>
      #include <linux/module.h>
      #include <linux/slab.h>
      #include <linux/hid-roccat.h>
      #include "hid-ids.h"
      #include "hid-roccat-common.h"
      #include "hid-roccat-lua.h"
      
      static ssize_t lua_sysfs_read(struct file *fp, struct kobject *kobj,
                      char *buf, loff_t off, size_t count,
                      size_t real_size, uint command)
      {
              struct device *dev = kobj_to_dev(kobj);
              struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
              struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
              int retval;
      
              if (off >= real_size)
                      return 0;
      
              if (off != 0 || count != real_size)
                      return -EINVAL;
      
              mutex_lock(&lua->lua_lock);
              retval = roccat_common2_receive(usb_dev, command, buf, real_size);
              mutex_unlock(&lua->lua_lock);
      
              return retval ? retval : real_size;
      }
      
      static ssize_t lua_sysfs_write(struct file *fp, struct kobject *kobj,
                      void const *buf, loff_t off, size_t count,
                      size_t real_size, uint command)
      {
              struct device *dev = kobj_to_dev(kobj);
              struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
              struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
              int retval;
      
              if (off != 0 || count != real_size)
                      return -EINVAL;
      
              mutex_lock(&lua->lua_lock);
              retval = roccat_common2_send(usb_dev, command, buf, real_size);
              mutex_unlock(&lua->lua_lock);
      
              return retval ? retval : real_size;
      }
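
/*
 * Both helpers above transfer a whole report at once: accesses must start
 * at offset 0 and cover exactly real_size bytes, anything else is rejected
 * with -EINVAL (reads past the end simply return 0).
 */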
      
      #define LUA_SYSFS_W(thingy, THINGY) \
      static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
                      struct kobject *kobj, struct bin_attribute *attr, \
                      char *buf, loff_t off, size_t count) \
      { \
              return lua_sysfs_write(fp, kobj, buf, off, count, \
                              LUA_SIZE_ ## THINGY, LUA_COMMAND_ ## THINGY); \
      }
      
      #define LUA_SYSFS_R(thingy, THINGY) \
      static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
                      struct kobject *kobj, struct bin_attribute *attr, \
                      char *buf, loff_t off, size_t count) \
      { \
              return lua_sysfs_read(fp, kobj, buf, off, count, \
                              LUA_SIZE_ ## THINGY, LUA_COMMAND_ ## THINGY); \
      }
      
      #define LUA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
      LUA_SYSFS_W(thingy, THINGY) \
      LUA_SYSFS_R(thingy, THINGY) \
      static struct bin_attribute lua_ ## thingy ## _attr = { \
              .attr = { .name = #thingy, .mode = 0660 }, \
              .size = LUA_SIZE_ ## THINGY, \
              .read = lua_sysfs_read_ ## thingy, \
              .write = lua_sysfs_write_ ## thingy \
      };
      
      LUA_BIN_ATTRIBUTE_RW(control, CONTROL)
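
/*
 * For reference, the invocation above expands (per the macros) to the
 * lua_sysfs_read_control()/lua_sysfs_write_control() wrappers plus a
 * binary attribute roughly like this, exposed as a sysfs file named
 * "control" on the mouse's USB interface:
 *
 *      static struct bin_attribute lua_control_attr = {
 *              .attr = { .name = "control", .mode = 0660 },
 *              .size = LUA_SIZE_CONTROL,
 *              .read = lua_sysfs_read_control,
 *              .write = lua_sysfs_write_control
 *      };
 */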
      
      static int lua_create_sysfs_attributes(struct usb_interface *intf)
      {
              return sysfs_create_bin_file(&intf->dev.kobj, &lua_control_attr);
      }
      
      static void lua_remove_sysfs_attributes(struct usb_interface *intf)
      {
              sysfs_remove_bin_file(&intf->dev.kobj, &lua_control_attr);
      }
      
      static int lua_init_lua_device_struct(struct usb_device *usb_dev,
                      struct lua_device *lua)
      {
              mutex_init(&lua->lua_lock);
      
              return 0;
      }
      
      static int lua_init_specials(struct hid_device *hdev)
      {
        struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
              struct usb_device *usb_dev = interface_to_usbdev(intf);
              struct lua_device *lua;
              int retval;
      
              lua = kzalloc(sizeof(*lua), GFP_KERNEL);
              if (!lua) {
                      hid_err(hdev, "can't alloc device descriptor\n");
                      return -ENOMEM;
              }
        hid_set_drvdata(hdev, lua);
      
              retval = lua_init_lua_device_struct(usb_dev, lua);
              if (retval) {
                      hid_err(hdev, "couldn't init struct lua_device\n");
                      goto exit;
              }
      
              retval = lua_create_sysfs_attributes(intf);
        if (retval) {
                      hid_err(hdev, "cannot create sysfs files\n");
                      goto exit;
              }
      
              return 0;
      exit:
              kfree(lua);
              return retval;
      }
      
      static void lua_remove_specials(struct hid_device *hdev)
      {
        struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
              struct lua_device *lua;
      
              lua_remove_sysfs_attributes(intf);
      
              lua = hid_get_drvdata(hdev);
              kfree(lua);
      }
      
      static int lua_probe(struct hid_device *hdev,
                      const struct hid_device_id *id)
      {
              int retval;
      
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
                      goto exit;
              }
      
        retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
              if (retval) {
                      hid_err(hdev, "hw start failed\n");
                      goto exit;
              }
      
        retval = lua_init_specials(hdev);
              if (retval) {
                      hid_err(hdev, "couldn't install mouse\n");
                      goto exit_stop;
              }
      
              return 0;
      
      exit_stop:
              hid_hw_stop(hdev);
      exit:
              return retval;
      }
      
      static void lua_remove(struct hid_device *hdev)
      {
        lua_remove_specials(hdev);
              hid_hw_stop(hdev);
      }
      
      static const struct hid_device_id lua_devices[] = {
              { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
              { }
      };
      
      MODULE_DEVICE_TABLE(hid, lua_devices);
      
      static struct hid_driver lua_driver = {
                      .name = "lua",
                      .id_table = lua_devices,
                      .probe = lua_probe,
                      .remove = lua_remove
      };
      module_hid_driver(lua_driver);
      
      MODULE_AUTHOR("Stefan Achatz");
      MODULE_DESCRIPTION("USB Roccat Lua driver");
      MODULE_LICENSE("GPL v2");
      // SPDX-License-Identifier: GPL-2.0-only
      /***************************************************************************
       *   Copyright (C) 2010-2012 by Bruno Prémont <bonbons@linux-vserver.org>  *
       *                                                                         *
       *   Based on Logitech G13 driver (v0.4)                                   *
       *     Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu>   *
       *                                                                         *
       ***************************************************************************/
      
      #include <linux/hid.h>
      
      #include <linux/fb.h>
      #include <linux/lcd.h>
      
      #include "hid-picolcd.h"
      
      /*
       * lcd class device
       */
      static int picolcd_get_contrast(struct lcd_device *ldev)
      {
              struct picolcd_data *data = lcd_get_data(ldev);
              return data->lcd_contrast;
      }
      
      static int picolcd_set_contrast(struct lcd_device *ldev, int contrast)
      {
              struct picolcd_data *data = lcd_get_data(ldev);
              struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev);
              unsigned long flags;
      
              if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
                      return -ENODEV;
      
              data->lcd_contrast = contrast & 0x0ff;
              spin_lock_irqsave(&data->lock, flags);
              hid_set_field(report->field[0], 0, data->lcd_contrast);
              if (!(data->status & PICOLCD_FAILED))
                      hid_hw_request(data->hdev, report, HID_REQ_SET_REPORT);
              spin_unlock_irqrestore(&data->lock, flags);
              return 0;
      }
      
      static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb)
      {
              return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev));
      }
      
      static struct lcd_ops picolcd_lcdops = {
              .get_contrast   = picolcd_get_contrast,
              .set_contrast   = picolcd_set_contrast,
              .check_fb       = picolcd_check_lcd_fb,
      };
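
/*
 * These callbacks back the standard lcd class attributes, so a contrast
 * value written to /sys/class/lcd/<device>/contrast ends up in
 * picolcd_set_contrast() above.
 */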
      
      int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report)
      {
              struct device *dev = &data->hdev->dev;
              struct lcd_device *ldev;
      
              if (!report)
                      return -ENODEV;
              if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
                              report->field[0]->report_size != 8) {
                dev_err(dev, "unsupported CONTRAST report\n");
                      return -EINVAL;
              }
      
              ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops);
              if (IS_ERR(ldev)) {
                      dev_err(dev, "failed to register LCD\n");
                      return PTR_ERR(ldev);
              }
              ldev->props.max_contrast = 0x0ff;
              data->lcd_contrast = 0xe5;
              data->lcd = ldev;
              picolcd_set_contrast(ldev, 0xe5);
              return 0;
      }
      
      void picolcd_exit_lcd(struct picolcd_data *data)
      {
        struct lcd_device *ldev = data->lcd;
      
              data->lcd = NULL;
              lcd_device_unregister(ldev);
      }
      
      int picolcd_resume_lcd(struct picolcd_data *data)
      {
              if (!data->lcd)
                      return 0;
              return picolcd_set_contrast(data->lcd, data->lcd_contrast);
      }
      
      /* SPDX-License-Identifier: GPL-2.0-or-later */
      /*
       * i2c-core.h - interfaces internal to the I2C framework
       */
      
      #include <linux/rwsem.h>
      
      struct i2c_devinfo {
        struct list_head        list;
        int                     busnum;
        struct i2c_board_info   board_info;
      };
      
      /* board_lock protects board_list and first_dynamic_bus_num.
       * only i2c core components are allowed to use these