/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct
sighand_struct { spinlock_t siglock; refcount_t count; wait_queue_head_t signalfd_wqh; struct k_sigaction action[_NSIG]; }; /* * Per-process accounting stats: */ struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; u64 ac_utime, ac_stime; unsigned long ac_minflt, ac_majflt; }; struct cpu_itimer { u64 expires; u64 incr; }; /* * This is the atomic variant of task_cputime, which can be used for * storing and updating task_cputime statistics without locking. */ struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; }; #define INIT_CPUTIME_ATOMIC \ (struct task_cputime_atomic) { \ .utime = ATOMIC64_INIT(0), \ .stime = ATOMIC64_INIT(0), \ .sum_exec_runtime = ATOMIC64_INIT(0), \ } /** * struct thread_group_cputimer - thread group interval timer counts * @cputime_atomic: atomic thread group interval timers. * * This structure contains the version of task_cputime, above, that is * used for thread group CPU timer calculations. */ struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; }; struct multiprocess_signals { sigset_t signal; struct hlist_node node; }; struct core_thread { struct task_struct *task; struct core_thread *next; }; struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; }; /* * NOTE! "signal_struct" does not have its own * locking, because a shared signal_struct always * implies a shared sighand_struct, so locking * sighand_struct is always a proper superset of * the locking of signal_struct. */ struct signal_struct { refcount_t sigcnt; atomic_t live; int nr_threads; int quick_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; /* for wait4() */ /* current thread group signal load-balancing target: */ struct task_struct *curr_target; /* shared signal handling: */ struct sigpending shared_pending; /* For collecting multiprocess signals during fork */ struct hlist_head multiprocess; /* thread group exit support */ int group_exit_code; /* notify group_exec_task when notify_count is less or equal to 0 */ int notify_count; struct task_struct *group_exec_task; /* thread group stop support, overloads group_exit_code too */ int group_stop_count; unsigned int flags; /* see SIGNAL_* flags below */ struct core_state *core_state; /* coredumping support */ /* * PR_SET_CHILD_SUBREAPER marks a process, like a service * manager, to re-parent orphan (double-forking) child processes * to this process instead of 'init'. The service manager is * able to receive SIGCHLD signals and is able to investigate * the process until it calls wait(). All children of this * process will inherit a flag if they should look for a * child_subreaper process at exit. */ unsigned int is_child_subreaper:1; unsigned int has_child_subreaper:1; #ifdef CONFIG_POSIX_TIMERS /* POSIX.1b Interval Timers */ unsigned int next_posix_timer_id; struct list_head posix_timers; /* ITIMER_REAL timer for the process */ struct hrtimer real_timer; ktime_t it_real_incr; /* * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these * values are defined to 0 and 1 respectively */ struct cpu_itimer it[2]; /* * Thread group totals for process CPU timers. * See thread_group_cputimer(), et al, for details. */ struct thread_group_cputimer cputimer; #endif /* Empty if CONFIG_POSIX_TIMERS=n */ struct posix_cputimers posix_cputimers; /* PID/PID hash table linkage. 
*/ struct pid *pids[PIDTYPE_MAX]; #ifdef CONFIG_NO_HZ_FULL atomic_t tick_dep_mask; #endif struct pid *tty_old_pgrp; /* boolean value for session group leader */ int leader; struct tty_struct *tty; /* NULL if no tty */ #ifdef CONFIG_SCHED_AUTOGROUP struct autogroup *autogroup; #endif /* * Cumulative resource counters for dead threads in the group, * and for reaped dead child processes forked by this group. * Live threads maintain their own counters and add to these * in __exit_signal, except for the group leader. */ seqlock_t stats_lock; u64 utime, stime, cutime, cstime; u64 gtime; u64 cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; unsigned long inblock, oublock, cinblock, coublock; unsigned long maxrss, cmaxrss; struct task_io_accounting ioac; /* * Cumulative ns of schedule CPU time fo dead threads in the * group, not including a zombie group leader, (This only differs * from jiffies_to_ns(utime + stime) if sched_clock uses something * other than jiffies.) */ unsigned long long sum_sched_runtime; /* * We don't bother to synchronize most readers of this at all, * because there is no reader checking a limit that actually needs * to get both rlim_cur and rlim_max atomically, and either one * alone is a single word that can safely be read normally. * getrlimit/setrlimit use task_lock(current->group_leader) to * protect this instead of the siglock, because they really * have no need to disable irqs. */ struct rlimit rlim[RLIM_NLIMITS]; #ifdef CONFIG_BSD_PROCESS_ACCT struct pacct_struct pacct; /* per-process accounting information */ #endif #ifdef CONFIG_TASKSTATS struct taskstats *stats; #endif #ifdef CONFIG_AUDIT unsigned audit_tty; struct tty_audit_buf *tty_audit_buf; #endif /* * Thread is the potential origin of an oom condition; kill first on * oom */ bool oom_flag_origin; short oom_score_adj; /* OOM kill score adjustment */ short oom_score_adj_min; /* OOM kill score adjustment min value. * Only settable by CAP_SYS_RESOURCE. */ struct mm_struct *oom_mm; /* recorded mm when the thread group got * killed by the oom killer */ struct mutex cred_guard_mutex; /* guard against foreign influences on * credential calculations * (notably. ptrace) * Deprecated do not use in new code. * Use exec_update_lock instead. */ struct rw_semaphore exec_update_lock; /* Held while task_struct is * being updated during exec, * and may have inconsistent * permissions. */ } __randomize_layout; /* * Bits in flags field of signal_struct. */ #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ #define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ #define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ /* * Pending notifications to parent. 
*/ #define SIGNAL_CLD_STOPPED 0x00000010 #define SIGNAL_CLD_CONTINUED 0x00000020 #define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ #define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ SIGNAL_STOP_CONTINUED) static inline void signal_set_stop_flags(struct signal_struct *sig, unsigned int flags) { WARN_ON(sig->flags & SIGNAL_GROUP_EXIT); sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; } extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); extern int dequeue_signal(struct task_struct *task, sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type); static inline int kernel_dequeue_signal(void) { struct task_struct *task = current; kernel_siginfo_t __info; enum pid_type __type; int ret; spin_lock_irq(&task->sighand->siglock); ret = dequeue_signal(task, &task->blocked, &__info, &__type); spin_unlock_irq(&task->sighand->siglock); return ret; } static inline void kernel_signal_stop(void) { spin_lock_irq(&current->sighand->siglock); if (current->jobctl & JOBCTL_STOP_DEQUEUED) { current->jobctl |= JOBCTL_STOPPED; set_special_state(TASK_STOPPED); } spin_unlock_irq(&current->sighand->siglock); schedule(); } int force_sig_fault_to_task(int sig, int code, void __user *addr, struct task_struct *t); int force_sig_fault(int sig, int code, void __user *addr); int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t); int force_sig_mceerr(int code, void __user *, short); int send_sig_mceerr(int code, void __user *, short, struct task_struct *); int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); int force_sig_pkuerr(void __user *addr, u32 pkey); int send_sig_perf(void __user *addr, u32 type, u64 sig_data); int force_sig_ptrace_errno_trap(int errno, void __user *addr); int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno); int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno, struct task_struct *t); int force_sig_seccomp(int syscall, int reason, bool force_coredump); extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern void force_sigsegv(int sig); extern int force_sig_info(struct kernel_siginfo *); extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *, const struct cred *); extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern __must_check bool do_notify_parent(struct task_struct *, int); extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); extern void force_sig(int); extern void force_fatal_sig(int); extern void force_exit_sig(int); extern int send_sig(int, struct task_struct *, int); extern int zap_other_threads(struct task_struct *p); extern struct sigqueue *sigqueue_alloc(void); extern void sigqueue_free(struct sigqueue *); extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type); extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); static inline void clear_notify_signal(void) { clear_thread_flag(TIF_NOTIFY_SIGNAL); smp_mb__after_atomic(); } /* * Returns 'true' if kick_process() is needed to force a transition from * user -> 
kernel to guarantee expedient run of TWA_SIGNAL based task_work. */ static inline bool __set_notify_signal(struct task_struct *task) { return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && !wake_up_state(task, TASK_INTERRUPTIBLE); } /* * Called to break out of interruptible wait loops, and enter the * exit_to_user_mode_loop(). */ static inline void set_notify_signal(struct task_struct *task) { if (__set_notify_signal(task)) kick_process(task); } static inline int restart_syscall(void) { set_tsk_thread_flag(current, TIF_SIGPENDING); return -ERESTARTNOINTR; } static inline int task_sigpending(struct task_struct *p) { return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); } static inline int signal_pending(struct task_struct *p) { /* * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same * behavior in terms of ensuring that we break out of wait loops * so that notify signal callbacks can be processed. */ if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL))) return 1; return task_sigpending(p); } static inline int __fatal_signal_pending(struct task_struct *p) { return unlikely(sigismember(&p->pending.signal, SIGKILL)); } static inline int fatal_signal_pending(struct task_struct *p) { return task_sigpending(p) && __fatal_signal_pending(p); } static inline int signal_pending_state(unsigned int state, struct task_struct *p) { if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) return 0; if (!signal_pending(p)) return 0; return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } /* * This should only be used in fault handlers to decide whether we * should stop the current fault routine to handle the signals * instead, especially with the case where we've got interrupted with * a VM_FAULT_RETRY. */ static inline bool fault_signal_pending(vm_fault_t fault_flags, struct pt_regs *regs) { return unlikely((fault_flags & VM_FAULT_RETRY) && (fatal_signal_pending(current) || (user_mode(regs) && signal_pending(current)))); } /* * Reevaluate whether the task has signals pending delivery. * Wake the task if so. * This is required every time the blocked sigset_t changes. * callers must hold sighand->siglock. */ extern void recalc_sigpending(void); extern void calculate_sigpending(void); extern void signal_wake_up_state(struct task_struct *t, unsigned int state); static inline void signal_wake_up(struct task_struct *t, bool fatal) { unsigned int state = 0; if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) { t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED); state = TASK_WAKEKILL | __TASK_TRACED; } signal_wake_up_state(t, state); } static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) { unsigned int state = 0; if (resume) { t->jobctl &= ~JOBCTL_TRACED; state = __TASK_TRACED; } signal_wake_up_state(t, state); } void task_join_group_stop(struct task_struct *task); #ifdef TIF_RESTORE_SIGMASK /* * Legacy restore_sigmask accessors. These are inefficient on * SMP architectures because they require atomic operations. */ /** * set_restore_sigmask() - make sure saved_sigmask processing gets done * * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code * will run before returning to user mode, to process the flag. For * all callers, TIF_SIGPENDING is already set or it's no harm to set * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the * arch code will notice on return to user mode, in case those bits * are scarce. We set TIF_SIGPENDING here to ensure that the arch * signal code always gets run when TIF_RESTORE_SIGMASK is set. 
*/ static inline void set_restore_sigmask(void) { set_thread_flag(TIF_RESTORE_SIGMASK); } static inline void clear_tsk_restore_sigmask(struct task_struct *task) { clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); } static inline void clear_restore_sigmask(void) { clear_thread_flag(TIF_RESTORE_SIGMASK); } static inline bool test_tsk_restore_sigmask(struct task_struct *task) { return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); } static inline bool test_restore_sigmask(void) { return test_thread_flag(TIF_RESTORE_SIGMASK); } static inline bool test_and_clear_restore_sigmask(void) { return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); } #else /* TIF_RESTORE_SIGMASK */ /* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ static inline void set_restore_sigmask(void) { current->restore_sigmask = true; } static inline void clear_tsk_restore_sigmask(struct task_struct *task) { task->restore_sigmask = false; } static inline void clear_restore_sigmask(void) { current->restore_sigmask = false; } static inline bool test_restore_sigmask(void) { return current->restore_sigmask; } static inline bool test_tsk_restore_sigmask(struct task_struct *task) { return task->restore_sigmask; } static inline bool test_and_clear_restore_sigmask(void) { if (!current->restore_sigmask) return false; current->restore_sigmask = false; return true; } #endif static inline void restore_saved_sigmask(void) { if (test_and_clear_restore_sigmask()) __set_current_blocked(&current->saved_sigmask); } extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize); static inline void restore_saved_sigmask_unless(bool interrupted) { if (interrupted) WARN_ON(!signal_pending(current)); else restore_saved_sigmask(); } static inline sigset_t *sigmask_to_save(void) { sigset_t *res = &current->blocked; if (unlikely(test_restore_sigmask())) res = &current->saved_sigmask; return res; } static inline int kill_cad_pid(int sig, int priv) { return kill_pid(cad_pid, sig, priv); } /* These can be the second arg to send_sig_info/send_group_sig_info. */ #define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0) #define SEND_SIG_PRIV ((struct kernel_siginfo *) 1) static inline int __on_sig_stack(unsigned long sp) { #ifdef CONFIG_STACK_GROWSUP return sp >= current->sas_ss_sp && sp - current->sas_ss_sp < current->sas_ss_size; #else return sp > current->sas_ss_sp && sp - current->sas_ss_sp <= current->sas_ss_size; #endif } /* * True if we are on the alternate signal stack. */ static inline int on_sig_stack(unsigned long sp) { /* * If the signal stack is SS_AUTODISARM then, by construction, we * can't be on the signal stack unless user code deliberately set * SS_AUTODISARM when we were already on it. * * This improves reliability: if user state gets corrupted such that * the stack pointer points very close to the end of the signal stack, * then this check will enable the signal to be handled anyway. */ if (current->sas_ss_flags & SS_AUTODISARM) return 0; return __on_sig_stack(sp); } static inline int sas_ss_flags(unsigned long sp) { if (!current->sas_ss_size) return SS_DISABLE; return on_sig_stack(sp) ? SS_ONSTACK : 0; } static inline void sas_ss_reset(struct task_struct *p) { p->sas_ss_sp = 0; p->sas_ss_size = 0; p->sas_ss_flags = SS_DISABLE; } static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) { if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! 
sas_ss_flags(sp)) #ifdef CONFIG_STACK_GROWSUP return current->sas_ss_sp; #else return current->sas_ss_sp + current->sas_ss_size; #endif return sp; } extern void __cleanup_sighand(struct sighand_struct *); extern void flush_itimer_signals(void); #define tasklist_empty() \ list_empty(&init_task.tasks) #define next_task(p) \ list_entry_rcu((p)->tasks.next, struct task_struct, tasks) #define for_each_process(p) \ for (p = &init_task ; (p = next_task(p)) != &init_task ; ) extern bool current_is_single_threaded(void); /* * Without tasklist/siglock it is only rcu-safe if g can't exit/exec, * otherwise next_thread(t) will never reach g after list_del_rcu(g). */ #define while_each_thread(g, t) \ while ((t = next_thread(t)) != g) #define for_other_threads(p, t) \ for (t = p; (t = next_thread(t)) != p; ) #define __for_each_thread(signal, t) \ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \ lockdep_is_held(&tasklist_lock)) #define for_each_thread(p, t) \ __for_each_thread((p)->signal, t) /* Careful: this is a double loop, 'break' won't work as expected. */ #define for_each_process_thread(p, t) \ for_each_process(p) for_each_thread(p, t) typedef int (*proc_visitor)(struct task_struct *p, void *data); void walk_process_tree(struct task_struct *top, proc_visitor, void *); static inline struct pid *task_pid_type(struct task_struct *task, enum pid_type type) { struct pid *pid; if (type == PIDTYPE_PID) pid = task_pid(task); else pid = task->signal->pids[type]; return pid; } static inline struct pid *task_tgid(struct task_struct *task) { return task->signal->pids[PIDTYPE_TGID]; } /* * Without tasklist or RCU lock it is not safe to dereference * the result of task_pgrp/task_session even if task == current, * we can race with another thread doing sys_setsid/sys_setpgid. 
*/ static inline struct pid *task_pgrp(struct task_struct *task) { return task->signal->pids[PIDTYPE_PGID]; } static inline struct pid *task_session(struct task_struct *task) { return task->signal->pids[PIDTYPE_SID]; } static inline int get_nr_threads(struct task_struct *task) { return task->signal->nr_threads; } static inline bool thread_group_leader(struct task_struct *p) { return p->exit_signal >= 0; } static inline bool same_thread_group(struct task_struct *p1, struct task_struct *p2) { return p1->signal == p2->signal; } /* * returns NULL if p is the last thread in the thread group */ static inline struct task_struct *__next_thread(struct task_struct *p) { return list_next_or_null_rcu(&p->signal->thread_head, &p->thread_node, struct task_struct, thread_node); } static inline struct task_struct *next_thread(struct task_struct *p) { return __next_thread(p) ?: p->group_leader; } static inline int thread_group_empty(struct task_struct *p) { return thread_group_leader(p) && list_is_last(&p->thread_node, &p->signal->thread_head); } #define delay_group_leader(p) \ (thread_group_leader(p) && !thread_group_empty(p)) extern struct sighand_struct *__lock_task_sighand(struct task_struct *task, unsigned long *flags); static inline struct sighand_struct *lock_task_sighand(struct task_struct *task, unsigned long *flags) { struct sighand_struct *ret; ret = __lock_task_sighand(task, flags); (void)__cond_lock(&task->sighand->siglock, ret); return ret; } static inline void unlock_task_sighand(struct task_struct *task, unsigned long *flags) { spin_unlock_irqrestore(&task->sighand->siglock, *flags); } #ifdef CONFIG_LOCKDEP extern void lockdep_assert_task_sighand_held(struct task_struct *task); #else static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { } #endif static inline unsigned long task_rlimit(const struct task_struct *task, unsigned int limit) { return READ_ONCE(task->signal->rlim[limit].rlim_cur); } static inline unsigned long task_rlimit_max(const struct task_struct *task, unsigned int limit) { return READ_ONCE(task->signal->rlim[limit].rlim_max); } static inline unsigned long rlimit(unsigned int limit) { return task_rlimit(current, limit); } static inline unsigned long rlimit_max(unsigned int limit) { return task_rlimit_max(current, limit); } #endif /* _LINUX_SCHED_SIGNAL_H */
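To illustrate how the accessors declared above fit together, here is a minimal sketch of reading a foreign task's accounting under the sighand lock. The helper name example_read_maxrss() is invented for this illustration; lock_task_sighand(), unlock_task_sighand() and the signal_struct fields are the ones declared in this header.

/*
 * Hypothetical example (not part of the header): pin task->sighand, and
 * therefore task->signal, before reading an accounting field of another task.
 */
static unsigned long example_read_maxrss(struct task_struct *task)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	unsigned long maxrss = 0;

	sighand = lock_task_sighand(task, &flags);
	if (sighand) {
		/* task->signal cannot go away while the sighand lock is held */
		maxrss = task->signal->maxrss;
		unlock_task_sighand(task, &flags);
	}
	return maxrss;
}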
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <asm/percpu.h>

struct task_struct;

struct pcpu_hot {
	union {
		struct {
			struct task_struct	*current_task;
			int			preempt_count;
			int			cpu_number;
#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
			u64			call_depth;
#endif
			unsigned long		top_of_stack;
			void			*hardirq_stack_ptr;
			u16			softirq_pending;
#ifdef CONFIG_X86_64
			bool			hardirq_stack_inuse;
#else
			void			*softirq_stack_ptr;
#endif
		};
		u8	pad[64];
	};
};
static_assert(sizeof(struct pcpu_hot) == 64);

DECLARE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot);

/* const-qualified alias to pcpu_hot, aliased by linker. */
DECLARE_PER_CPU_ALIGNED(const struct pcpu_hot __percpu_seg_override,
			const_pcpu_hot);

static __always_inline struct task_struct *get_current(void)
{
	if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
		return this_cpu_read_const(const_pcpu_hot.current_task);

	return this_cpu_read_stable(pcpu_hot.current_task);
}

#define current get_current()

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_CURRENT_H */
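Because the current macro above expands to get_current(), kernel code can dereference the running task directly. A minimal, hypothetical sketch (example_log_current() is not a real kernel function; it assumes <linux/sched.h> and <linux/printk.h> are available):

/* Hypothetical illustration of using the "current" macro defined above. */
static void example_log_current(void)
{
	/* current->comm and current->pid come from struct task_struct */
	pr_info("running in %s (pid %d)\n", current->comm, current->pid);
}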
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/workqueue_internal.h
 *
 * Workqueue internal header file.  Only to be included by workqueue and
 * core kernel subsystems.
 */
#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
#define _KERNEL_WORKQUEUE_INTERNAL_H

#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

struct worker_pool;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers are
 * either serving the manager role, on idle list or on busy hash.  For
 * details on the locking annotation (L, I, X...), refer to workqueue.c.
 *
 * Only to be used in workqueue and async.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};
	struct work_struct	*current_work;	/* K: work being processed and its */
	work_func_t		current_func;	/* K: function */
	struct pool_workqueue	*current_pwq;	/* K: pwq */
	u64			current_at;	/* K: runtime at start or last wakeup */
	unsigned int		current_color;	/* K: color */

	int			sleeping;	/* S: is worker sleeping? */

	/* used by the scheduler to determine a worker's last known identity */
	work_func_t		last_func;	/* K: last work's fn */

	struct list_head	scheduled;	/* L: scheduled works */

	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* A: the associated pool */
						/* L: for rescuers */
	struct list_head	node;		/* A: anchored at pool->workers */
						/* A: runs through worker->node */

	unsigned long		last_active;	/* K: last active timestamp */
	unsigned int		flags;		/* L: flags */
	int			id;		/* I: worker id */

	/*
	 * Opaque string set with work_set_desc().  Printed out with task
	 * dump for debugging - WARN, BUG, panic or sysrq.
	 */
	char			desc[WORKER_DESC_LEN];

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
};

/**
 * current_wq_worker - return struct worker if %current is a workqueue worker
 */
static inline struct worker *current_wq_worker(void)
{
	if (in_task() && (current->flags & PF_WQ_WORKER))
		return kthread_data(current);
	return NULL;
}

/*
 * Scheduler hooks for concurrency managed workqueue.  Only to be used from
 * sched/ and workqueue.c.
 */
void wq_worker_running(struct task_struct *task);
void wq_worker_sleeping(struct task_struct *task);
void wq_worker_tick(struct task_struct *task);
work_func_t wq_worker_last_func(struct task_struct *task);

#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
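As a hypothetical illustration of how the scheduler-side hooks use this header, the sketch below mirrors the pattern of wq_worker_last_func(): map the calling task back to its struct worker via current_wq_worker() and read a field. example_current_work_fn() is an invented name.

/* Hypothetical helper, not part of the kernel: report the function currently
 * being executed by this workqueue worker, if any. */
static inline work_func_t example_current_work_fn(void)
{
	struct worker *worker = current_wq_worker();

	/* current_wq_worker() returns NULL for non-workqueue tasks */
	return worker ? worker->current_func : NULL;
}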
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Definitions related to Power Management Quality of Service (PM QoS).
 *
 * Copyright (C) 2020 Intel Corporation
 *
 * Authors:
 *	Mark Gross <mgross@linux.intel.com>
 *	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H

#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/device.h>

enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE	(-1)
#define PM_QOS_LATENCY_ANY	S32_MAX
#define PM_QOS_LATENCY_ANY_NS	((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC)

#define PM_QOS_CPU_LATENCY_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT	PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS	PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE	0
#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE	FREQ_QOS_MAX_DEFAULT_VALUE
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)

#define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN,		/* return the smallest value */
};

/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.
Atomic access is only guaranteed on all CPU * types linux supports for 32 bit quantites */ struct pm_qos_constraints { struct plist_head list; s32 target_value; /* Do not change to 64 bit */ s32 default_value; s32 no_constraint_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; }; struct pm_qos_request { struct plist_node node; struct pm_qos_constraints *qos; }; struct pm_qos_flags_request { struct list_head node; s32 flags; /* Do not change to 64 bit */ }; struct pm_qos_flags { struct list_head list; s32 effective_flags; /* Do not change to 64 bit */ }; #define FREQ_QOS_MIN_DEFAULT_VALUE 0 #define FREQ_QOS_MAX_DEFAULT_VALUE S32_MAX enum freq_qos_req_type { FREQ_QOS_MIN = 1, FREQ_QOS_MAX, }; struct freq_constraints { struct pm_qos_constraints min_freq; struct blocking_notifier_head min_freq_notifiers; struct pm_qos_constraints max_freq; struct blocking_notifier_head max_freq_notifiers; }; struct freq_qos_request { enum freq_qos_req_type type; struct plist_node pnode; struct freq_constraints *qos; }; enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE, DEV_PM_QOS_MIN_FREQUENCY, DEV_PM_QOS_MAX_FREQUENCY, DEV_PM_QOS_FLAGS, }; struct dev_pm_qos_request { enum dev_pm_qos_req_type type; union { struct plist_node pnode; struct pm_qos_flags_request flr; struct freq_qos_request freq; } data; struct device *dev; }; struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; struct freq_constraints freq; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; }; /* Action requested to pm_qos_update_target */ enum pm_qos_req_action { PM_QOS_ADD_REQ, /* Add a new request */ PM_QOS_UPDATE_REQ, /* Update an existing request */ PM_QOS_REMOVE_REQ /* Remove an existing request */ }; static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) { return req->dev != NULL; } s32 pm_qos_read_value(struct pm_qos_constraints *c); int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, enum pm_qos_req_action action, int value); bool pm_qos_update_flags(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req, enum pm_qos_req_action action, s32 val); #ifdef CONFIG_CPU_IDLE s32 cpu_latency_qos_limit(void); bool cpu_latency_qos_request_active(struct pm_qos_request *req); void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value); void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value); void cpu_latency_qos_remove_request(struct pm_qos_request *req); #else static inline s32 cpu_latency_qos_limit(void) { return INT_MAX; } static inline bool cpu_latency_qos_request_active(struct pm_qos_request *req) { return false; } static inline void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value) {} static inline void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value) {} static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {} #endif #ifdef CONFIG_PM enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); s32 __dev_pm_qos_resume_latency(struct device *dev); s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type); int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); int dev_pm_qos_update_request(struct 
dev_pm_qos_request *req, s32 new_value); int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier, enum dev_pm_qos_req_type type); int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier, enum dev_pm_qos_req_type type); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); void dev_pm_qos_hide_latency_limit(struct device *dev); int dev_pm_qos_expose_flags(struct device *dev, s32 value); void dev_pm_qos_hide_flags(struct device *dev); int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); int dev_pm_qos_expose_latency_tolerance(struct device *dev); void dev_pm_qos_hide_latency_tolerance(struct device *dev); static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return dev->power.qos->resume_latency_req->data.pnode.prio; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return dev->power.qos->flags_req->data.flr.flags; } static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) { return IS_ERR_OR_NULL(dev->power.qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT : pm_qos_read_value(&dev->power.qos->resume_latency); } #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) { return PM_QOS_FLAGS_UNDEFINED; } static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) { return PM_QOS_FLAGS_UNDEFINED; } static inline s32 __dev_pm_qos_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } static inline s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type) { switch (type) { case DEV_PM_QOS_RESUME_LATENCY: return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; case DEV_PM_QOS_MIN_FREQUENCY: return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE; case DEV_PM_QOS_MAX_FREQUENCY: return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE; default: WARN_ON(1); return 0; } } static inline int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) { return 0; } static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) { return 0; } static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) { return 0; } static inline int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier, enum dev_pm_qos_req_type type) { return 0; } static inline int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier, enum dev_pm_qos_req_type type) { return 0; } static inline void dev_pm_qos_constraints_init(struct device *dev) { dev->power.power_state = PMSG_ON; } static inline void dev_pm_qos_constraints_destroy(struct device *dev) { dev->power.power_state = PMSG_INVALID; } static inline int dev_pm_qos_add_ancestor_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) { return 0; } static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) { return 0; } static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} static inline int dev_pm_qos_expose_flags(struct device 
*dev, s32 value) { return 0; } static inline void dev_pm_qos_hide_flags(struct device *dev) {} static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) { return 0; } static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) { return 0; } static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev) { return 0; } static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } #endif static inline int freq_qos_request_active(struct freq_qos_request *req) { return !IS_ERR_OR_NULL(req->qos); } void freq_constraints_init(struct freq_constraints *qos); s32 freq_qos_read_value(struct freq_constraints *qos, enum freq_qos_req_type type); int freq_qos_add_request(struct freq_constraints *qos, struct freq_qos_request *req, enum freq_qos_req_type type, s32 value); int freq_qos_update_request(struct freq_qos_request *req, s32 new_value); int freq_qos_remove_request(struct freq_qos_request *req); int freq_qos_apply(struct freq_qos_request *req, enum pm_qos_req_action action, s32 value); int freq_qos_add_notifier(struct freq_constraints *qos, enum freq_qos_req_type type, struct notifier_block *notifier); int freq_qos_remove_notifier(struct freq_constraints *qos, enum freq_qos_req_type type, struct notifier_block *notifier); #endif
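A minimal sketch of typical driver-side use of the CPU latency QoS API declared above. struct example_dev and the example_*() helpers are invented for this illustration; the cpu_latency_qos_*() calls are the ones declared in this header.

/* Hypothetical driver context holding a CPU latency QoS request. */
struct example_dev {
	struct pm_qos_request latency_req;
};

static void example_start_streaming(struct example_dev *ed)
{
	/* Ask cpuidle to keep CPU wakeup latency at or below 20 us. */
	cpu_latency_qos_add_request(&ed->latency_req, 20);
}

static void example_stop_streaming(struct example_dev *ed)
{
	/* Drop the constraint when low latency is no longer needed. */
	if (cpu_latency_qos_request_active(&ed->latency_req))
		cpu_latency_qos_remove_request(&ed->latency_req);
}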
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/srcu.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

/*
 * Clear all of the marks on an inode when it is being evicted from core
 */
void __fsnotify_inode_delete(struct inode *inode)
{
	fsnotify_clear_marks_by_inode(inode);
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);

void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
	fsnotify_clear_marks_by_mount(mnt);
}

/**
 * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @sb: superblock being unmounted.
 *
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers. We temporarily drop sb->s_inode_list_lock and CAN block.
*/ static void fsnotify_unmount_inodes(struct super_block *sb) { struct inode *inode, *iput_inode = NULL; spin_lock(&sb->s_inode_list_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { /* * We cannot __iget() an inode in state I_FREEING, * I_WILL_FREE, or I_NEW which is fine because by that point * the inode cannot have any associated watches. */ spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { spin_unlock(&inode->i_lock); continue; } /* * If i_count is zero, the inode cannot have any watches and * doing an __iget/iput with SB_ACTIVE clear would actually * evict all inodes with zero i_count from icache which is * unnecessarily violent and may in fact be illegal to do. * However, we should have been called /after/ evict_inodes * removed all zero refcount inodes, in any case. Test to * be sure. */ if (!atomic_read(&inode->i_count)) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&sb->s_inode_list_lock); iput(iput_inode); /* for each watch, send FS_UNMOUNT and then remove it */ fsnotify_inode(inode, FS_UNMOUNT); fsnotify_inode_delete(inode); iput_inode = inode; cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); iput(iput_inode); } void fsnotify_sb_delete(struct super_block *sb) { struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); /* Were any marks ever added to any object on this sb? */ if (!sbinfo) return; fsnotify_unmount_inodes(sb); fsnotify_clear_marks_by_sb(sb); /* Wait for outstanding object references from connectors */ wait_var_event(fsnotify_sb_watched_objects(sb), !atomic_long_read(fsnotify_sb_watched_objects(sb))); WARN_ON(fsnotify_sb_has_priority_watchers(sb, FSNOTIFY_PRIO_CONTENT)); WARN_ON(fsnotify_sb_has_priority_watchers(sb, FSNOTIFY_PRIO_PRE_CONTENT)); } void fsnotify_sb_free(struct super_block *sb) { kfree(sb->s_fsnotify_info); } /* * Given an inode, first check if we care what happens to our children. Inotify * and dnotify both tell their parents about events. If we care about any event * on a child we run all of our children and set a dentry flag saying that the * parent cares. Thus when an event happens on a child it can quickly tell * if there is a need to find a parent and send the event to the parent. */ void __fsnotify_update_child_dentry_flags(struct inode *inode) { struct dentry *alias; int watched; if (!S_ISDIR(inode->i_mode)) return; /* determine if the children should tell inode about their events */ watched = fsnotify_inode_watches_children(inode); spin_lock(&inode->i_lock); /* run all of the dentries associated with this inode. Since this is a * directory, there damn well better only be one item on this list */ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { struct dentry *child; /* run all of the children of the original inode and fix their * d_flags to indicate parental interest (their parent is the * original inode) */ spin_lock(&alias->d_lock); hlist_for_each_entry(child, &alias->d_children, d_sib) { if (!child->d_inode) continue; spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); if (watched) child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; else child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; spin_unlock(&child->d_lock); } spin_unlock(&alias->d_lock); } spin_unlock(&inode->i_lock); } /* Are inode/sb/mount interested in parent and name info with this event? 
*/ static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask, __u32 mask) { __u32 marks_mask = 0; /* We only send parent/name to inode/sb/mount for events on non-dir */ if (mask & FS_ISDIR) return false; /* * All events that are possible on child can also may be reported with * parent/name info to inode/sb/mount. Otherwise, a watching parent * could result in events reported with unexpected name info to sb/mount. */ BUILD_BUG_ON(FS_EVENTS_POSS_ON_CHILD & ~FS_EVENTS_POSS_TO_PARENT); /* Did either inode/sb/mount subscribe for events with parent/name? */ marks_mask |= fsnotify_parent_needed_mask(inode->i_fsnotify_mask); marks_mask |= fsnotify_parent_needed_mask(inode->i_sb->s_fsnotify_mask); marks_mask |= fsnotify_parent_needed_mask(mnt_mask); /* Did they subscribe for this event with parent/name info? */ return mask & marks_mask; } /* Are there any inode/mount/sb objects that are interested in this event? */ static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask, __u32 mask) { __u32 marks_mask = inode->i_fsnotify_mask | mnt_mask | inode->i_sb->s_fsnotify_mask; return mask & marks_mask & ALL_FSNOTIFY_EVENTS; } /* * Notify this dentry's parent about a child's events with child name info * if parent is watching or if inode/sb/mount are interested in events with * parent and name info. * * Notify only the child without name info if parent is not watching and * inode/sb/mount are not interested in events with parent and name info. */ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { const struct path *path = fsnotify_data_path(data, data_type); __u32 mnt_mask = path ? real_mount(path->mnt)->mnt_fsnotify_mask : 0; struct inode *inode = d_inode(dentry); struct dentry *parent; bool parent_watched = dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED; bool parent_needed, parent_interested; __u32 p_mask; struct inode *p_inode = NULL; struct name_snapshot name; struct qstr *file_name = NULL; int ret = 0; /* Optimize the likely case of nobody watching this path */ if (likely(!parent_watched && !fsnotify_object_watched(inode, mnt_mask, mask))) return 0; parent = NULL; parent_needed = fsnotify_event_needs_parent(inode, mnt_mask, mask); if (!parent_watched && !parent_needed) goto notify; /* Does parent inode care about events on children? */ parent = dget_parent(dentry); p_inode = parent->d_inode; p_mask = fsnotify_inode_watches_children(p_inode); if (unlikely(parent_watched && !p_mask)) __fsnotify_update_child_dentry_flags(p_inode); /* * Include parent/name in notification either if some notification * groups require parent info or the parent is interested in this event. 
*/ parent_interested = mask & p_mask & ALL_FSNOTIFY_EVENTS; if (parent_needed || parent_interested) { /* When notifying parent, child should be passed as data */ WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type)); /* Notify both parent and child with child name info */ take_dentry_name_snapshot(&name, dentry); file_name = &name.name; if (parent_interested) mask |= FS_EVENT_ON_CHILD; } notify: ret = fsnotify(mask, data, data_type, p_inode, file_name, inode, 0); if (file_name) release_dentry_name_snapshot(&name); dput(parent); return ret; } EXPORT_SYMBOL_GPL(__fsnotify_parent); static int fsnotify_handle_inode_event(struct fsnotify_group *group, struct fsnotify_mark *inode_mark, u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, u32 cookie) { const struct path *path = fsnotify_data_path(data, data_type); struct inode *inode = fsnotify_data_inode(data, data_type); const struct fsnotify_ops *ops = group->ops; if (WARN_ON_ONCE(!ops->handle_inode_event)) return 0; if (WARN_ON_ONCE(!inode && !dir)) return 0; if ((inode_mark->flags & FSNOTIFY_MARK_FLAG_EXCL_UNLINK) && path && d_unlinked(path->dentry)) return 0; /* Check interest of this mark in case event was sent with two marks */ if (!(mask & inode_mark->mask & ALL_FSNOTIFY_EVENTS)) return 0; return ops->handle_inode_event(inode_mark, mask, inode, dir, name, cookie); } static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, u32 cookie, struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); struct fsnotify_mark *parent_mark = fsnotify_iter_parent_mark(iter_info); int ret; if (WARN_ON_ONCE(fsnotify_iter_sb_mark(iter_info)) || WARN_ON_ONCE(fsnotify_iter_vfsmount_mark(iter_info))) return 0; /* * For FS_RENAME, 'dir' is old dir and 'data' is new dentry. * The only ->handle_inode_event() backend that supports FS_RENAME is * dnotify, where it means file was renamed within same parent. */ if (mask & FS_RENAME) { struct dentry *moved = fsnotify_data_dentry(data, data_type); if (dir != moved->d_parent->d_inode) return 0; } if (parent_mark) { ret = fsnotify_handle_inode_event(group, parent_mark, mask, data, data_type, dir, name, 0); if (ret) return ret; } if (!inode_mark) return 0; if (mask & FS_EVENT_ON_CHILD) { /* * Some events can be sent on both parent dir and child marks * (e.g. FS_ATTRIB). If both parent dir and child are * watching, report the event once to parent dir with name (if * interested) and once to child without name (if interested). * The child watcher is expecting an event without a file name * and without the FS_EVENT_ON_CHILD flag. 
*/ mask &= ~FS_EVENT_ON_CHILD; dir = NULL; name = NULL; } return fsnotify_handle_inode_event(group, inode_mark, mask, data, data_type, dir, name, cookie); } static int send_to_group(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { struct fsnotify_group *group = NULL; __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); __u32 marks_mask = 0; __u32 marks_ignore_mask = 0; bool is_dir = mask & FS_ISDIR; struct fsnotify_mark *mark; int type; if (!iter_info->report_mask) return 0; /* clear ignored on inode modification */ if (mask & FS_MODIFY) { fsnotify_foreach_iter_mark_type(iter_info, mark, type) { if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) mark->ignore_mask = 0; } } /* Are any of the group marks interested in this event? */ fsnotify_foreach_iter_mark_type(iter_info, mark, type) { group = mark->group; marks_mask |= mark->mask; marks_ignore_mask |= fsnotify_effective_ignore_mask(mark, is_dir, type); } pr_debug("%s: group=%p mask=%x marks_mask=%x marks_ignore_mask=%x data=%p data_type=%d dir=%p cookie=%d\n", __func__, group, mask, marks_mask, marks_ignore_mask, data, data_type, dir, cookie); if (!(test_mask & marks_mask & ~marks_ignore_mask)) return 0; if (group->ops->handle_event) { return group->ops->handle_event(group, mask, data, data_type, dir, file_name, cookie, iter_info); } return fsnotify_handle_event(group, mask, data, data_type, dir, file_name, cookie, iter_info); } static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp) { struct fsnotify_mark_connector *conn; struct hlist_node *node = NULL; conn = srcu_dereference(*connp, &fsnotify_mark_srcu); if (conn) node = srcu_dereference(conn->list.first, &fsnotify_mark_srcu); return hlist_entry_safe(node, struct fsnotify_mark, obj_list); } static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark) { struct hlist_node *node = NULL; if (mark) node = srcu_dereference(mark->obj_list.next, &fsnotify_mark_srcu); return hlist_entry_safe(node, struct fsnotify_mark, obj_list); } /* * iter_info is a multi head priority queue of marks. * Pick a subset of marks from queue heads, all with the same group * and set the report_mask to a subset of the selected marks. * Returns false if there are no more groups to iterate. */ static bool fsnotify_iter_select_report_types( struct fsnotify_iter_info *iter_info) { struct fsnotify_group *max_prio_group = NULL; struct fsnotify_mark *mark; int type; /* Choose max prio group among groups of all queue heads */ fsnotify_foreach_iter_type(type) { mark = iter_info->marks[type]; if (mark && fsnotify_compare_groups(max_prio_group, mark->group) > 0) max_prio_group = mark->group; } if (!max_prio_group) return false; /* Set the report mask for marks from same group as max prio group */ iter_info->current_group = max_prio_group; iter_info->report_mask = 0; fsnotify_foreach_iter_type(type) { mark = iter_info->marks[type]; if (mark && mark->group == iter_info->current_group) { /* * FSNOTIFY_ITER_TYPE_PARENT indicates that this inode * is watching children and interested in this event, * which is an event possible on child. * But is *this mark* watching children? */ if (type == FSNOTIFY_ITER_TYPE_PARENT && !(mark->mask & FS_EVENT_ON_CHILD) && !(fsnotify_ignore_mask(mark) & FS_EVENT_ON_CHILD)) continue; fsnotify_iter_set_report_type(iter_info, type); } } return true; } /* * Pop from iter_info multi head queue, the marks that belong to the group of * current iteration step. 
*/ static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *mark; int type; /* * We cannot use fsnotify_foreach_iter_mark_type() here because we * may need to advance a mark of type X that belongs to current_group * but was not selected for reporting. */ fsnotify_foreach_iter_type(type) { mark = iter_info->marks[type]; if (mark && mark->group == iter_info->current_group) iter_info->marks[type] = fsnotify_next_mark(iter_info->marks[type]); } } /* * fsnotify - This is the main call to fsnotify. * * The VFS calls into hook specific functions in linux/fsnotify.h. * Those functions then in turn call here. Here will call out to all of the * registered fsnotify_group. Those groups can then use the notification event * in whatever means they feel necessary. * * @mask: event type and flags * @data: object that event happened on * @data_type: type of object for fanotify_data_XXX() accessors * @dir: optional directory associated with event - * if @file_name is not NULL, this is the directory that * @file_name is relative to * @file_name: optional file name associated with event * @inode: optional inode associated with event - * If @dir and @inode are both non-NULL, event may be * reported to both. * @cookie: inotify rename cookie */ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, struct inode *inode, u32 cookie) { const struct path *path = fsnotify_data_path(data, data_type); struct super_block *sb = fsnotify_data_sb(data, data_type); struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); struct fsnotify_iter_info iter_info = {}; struct mount *mnt = NULL; struct inode *inode2 = NULL; struct dentry *moved; int inode2_type; int ret = 0; __u32 test_mask, marks_mask; if (path) mnt = real_mount(path->mnt); if (!inode) { /* Dirent event - report on TYPE_INODE to dir */ inode = dir; /* For FS_RENAME, inode is old_dir and inode2 is new_dir */ if (mask & FS_RENAME) { moved = fsnotify_data_dentry(data, data_type); inode2 = moved->d_parent->d_inode; inode2_type = FSNOTIFY_ITER_TYPE_INODE2; } } else if (mask & FS_EVENT_ON_CHILD) { /* * Event on child - report on TYPE_PARENT to dir if it is * watching children and on TYPE_INODE to child. */ inode2 = dir; inode2_type = FSNOTIFY_ITER_TYPE_PARENT; } /* * Optimization: srcu_read_lock() has a memory barrier which can * be expensive. It protects walking the *_fsnotify_marks lists. * However, if we do not walk the lists, we do not have to do * SRCU because we have no references to any objects and do not * need SRCU to keep them "alive". */ if ((!sbinfo || !sbinfo->sb_marks) && (!mnt || !mnt->mnt_fsnotify_marks) && (!inode || !inode->i_fsnotify_marks) && (!inode2 || !inode2->i_fsnotify_marks)) return 0; marks_mask = sb->s_fsnotify_mask; if (mnt) marks_mask |= mnt->mnt_fsnotify_mask; if (inode) marks_mask |= inode->i_fsnotify_mask; if (inode2) marks_mask |= inode2->i_fsnotify_mask; /* * If this is a modify event we may need to clear some ignore masks. * In that case, the object with ignore masks will have the FS_MODIFY * event in its mask. * Otherwise, return if none of the marks care about this type of event. 
*/ test_mask = (mask & ALL_FSNOTIFY_EVENTS); if (!(test_mask & marks_mask)) return 0; iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); if (sbinfo) { iter_info.marks[FSNOTIFY_ITER_TYPE_SB] = fsnotify_first_mark(&sbinfo->sb_marks); } if (mnt) { iter_info.marks[FSNOTIFY_ITER_TYPE_VFSMOUNT] = fsnotify_first_mark(&mnt->mnt_fsnotify_marks); } if (inode) { iter_info.marks[FSNOTIFY_ITER_TYPE_INODE] = fsnotify_first_mark(&inode->i_fsnotify_marks); } if (inode2) { iter_info.marks[inode2_type] = fsnotify_first_mark(&inode2->i_fsnotify_marks); } /* * We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark * ignore masks are properly reflected for mount/sb mark notifications. * That's why this traversal is so complicated... */ while (fsnotify_iter_select_report_types(&iter_info)) { ret = send_to_group(mask, data, data_type, dir, file_name, cookie, &iter_info); if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) goto out; fsnotify_iter_next(&iter_info); } ret = 0; out: srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx); return ret; } EXPORT_SYMBOL_GPL(fsnotify); static __init int fsnotify_init(void) { int ret; BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 23); ret = init_srcu_struct(&fsnotify_mark_srcu); if (ret) panic("initializing fsnotify_mark_srcu"); fsnotify_mark_connector_cachep = KMEM_CACHE(fsnotify_mark_connector, SLAB_PANIC); return 0; } core_initcall(fsnotify_init);
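/*
 * Editor's illustration (not part of the original fsnotify.c): a minimal
 * sketch of how the fsnotify() entrypoint documented above might be invoked
 * for a plain modification event, roughly what the fsnotify_modify() hook in
 * linux/fsnotify.h ends up doing. The wrapper name example_report_modify()
 * is hypothetical and used only for illustration.
 *
 *	static inline int example_report_modify(struct file *file)
 *	{
 *		// FS_MODIFY on the object behind @file: the path is the event
 *		// data, there is no directory/name pair (not a dirent event)
 *		// and no rename cookie.
 *		return fsnotify(FS_MODIFY, &file->f_path, FSNOTIFY_EVENT_PATH,
 *				NULL, NULL, file_inode(file), 0);
 *	}
 */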
// SPDX-License-Identifier: GPL-2.0-only /* * Backlight Lowlevel Control Abstraction * * Copyright (C) 2003,2004 Hewlett-Packard Company * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/backlight.h> #include <linux/notifier.h> #include <linux/ctype.h> #include <linux/err.h> #include <linux/fb.h> #include <linux/slab.h> #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif /** * DOC: overview * * The backlight core
supports implementing backlight drivers. * * A backlight driver registers a backlight device using * devm_backlight_device_register(). The properties of the backlight * device, such as type and max_brightness, must be specified. * When the core detects changes in, for example, brightness or power state, * the update_status() operation is called. The backlight driver shall * implement this operation and use it to adjust the backlight. * * Several sysfs attributes are provided by the backlight core:: * * - brightness R/W, set the requested brightness level * - actual_brightness RO, the brightness level used by the HW * - max_brightness RO, the maximum brightness level supported * * See Documentation/ABI/stable/sysfs-class-backlight for the full list. * * The backlight can be adjusted using the sysfs interface, and * the backlight driver may also support adjusting the backlight using * a hot-key or some other platform- or firmware-specific way. * * The driver must implement the get_brightness() operation if * the HW does not support all the levels that can be specified in * brightness, thus providing user-space access to the actual level * via the actual_brightness attribute. * * When the backlight changes, this is reported to user-space using * an uevent connected to the actual_brightness attribute. * When brightness is set by platform-specific means, for example * a hot-key to adjust backlight, the driver must notify the backlight * core that brightness has changed using backlight_force_update(). * * The backlight driver core receives notifications from fbdev and, * if the event is FB_EVENT_BLANK and the value of blank, from the * FBIOBLANK ioctl, results in a change in the backlight state, the * update_status() operation is called. */ static struct list_head backlight_dev_list; static struct mutex backlight_dev_list_mutex; static struct blocking_notifier_head backlight_notifier; static const char *const backlight_types[] = { [BACKLIGHT_RAW] = "raw", [BACKLIGHT_PLATFORM] = "platform", [BACKLIGHT_FIRMWARE] = "firmware", }; static const char *const backlight_scale_types[] = { [BACKLIGHT_SCALE_UNKNOWN] = "unknown", [BACKLIGHT_SCALE_LINEAR] = "linear", [BACKLIGHT_SCALE_NON_LINEAR] = "non-linear", }; #if defined(CONFIG_FB_CORE) || (defined(CONFIG_FB_CORE_MODULE) && \ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)) /* * fb_notifier_callback * * This callback gets called when something important happens inside a * framebuffer driver. The backlight core only cares about FB_BLANK_UNBLANK * which is reported to the driver using backlight_update_status() * as a state change. * * There may be several fbdevs connected to the backlight device, * in which case they are kept track of. A state change is only reported * if there is a change in backlight state for the specified fbdev. */ static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) { struct backlight_device *bd; struct fb_event *evdata = data; struct fb_info *info = evdata->info; struct backlight_device *fb_bd = fb_bl_device(info); int node = info->node; int fb_blank = 0; /* If we aren't interested in this event, skip it immediately ...
*/ if (event != FB_EVENT_BLANK) return 0; bd = container_of(self, struct backlight_device, fb_notif); mutex_lock(&bd->ops_lock); if (!bd->ops) goto out; if (bd->ops->controls_device && !bd->ops->controls_device(bd, info->device)) goto out; if (fb_bd && fb_bd != bd) goto out; fb_blank = *(int *)evdata->data; if (fb_blank == FB_BLANK_UNBLANK && !bd->fb_bl_on[node]) { bd->fb_bl_on[node] = true; if (!bd->use_count++) { bd->props.state &= ~BL_CORE_FBBLANK; backlight_update_status(bd); } } else if (fb_blank != FB_BLANK_UNBLANK && bd->fb_bl_on[node]) { bd->fb_bl_on[node] = false; if (!(--bd->use_count)) { bd->props.state |= BL_CORE_FBBLANK; backlight_update_status(bd); } } out: mutex_unlock(&bd->ops_lock); return 0; } static int backlight_register_fb(struct backlight_device *bd) { memset(&bd->fb_notif, 0, sizeof(bd->fb_notif)); bd->fb_notif.notifier_call = fb_notifier_callback; return fb_register_client(&bd->fb_notif); } static void backlight_unregister_fb(struct backlight_device *bd) { fb_unregister_client(&bd->fb_notif); } #else static inline int backlight_register_fb(struct backlight_device *bd) { return 0; } static inline void backlight_unregister_fb(struct backlight_device *bd) { } #endif /* CONFIG_FB_CORE */ static void backlight_generate_event(struct backlight_device *bd, enum backlight_update_reason reason) { char *envp[2]; switch (reason) { case BACKLIGHT_UPDATE_SYSFS: envp[0] = "SOURCE=sysfs"; break; case BACKLIGHT_UPDATE_HOTKEY: envp[0] = "SOURCE=hotkey"; break; default: envp[0] = "SOURCE=unknown"; break; } envp[1] = NULL; kobject_uevent_env(&bd->dev.kobj, KOBJ_CHANGE, envp); sysfs_notify(&bd->dev.kobj, NULL, "actual_brightness"); } static ssize_t bl_power_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backlight_device *bd = to_backlight_device(dev); return sprintf(buf, "%d\n", bd->props.power); } static ssize_t bl_power_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc; struct backlight_device *bd = to_backlight_device(dev); unsigned long power, old_power; rc = kstrtoul(buf, 0, &power); if (rc) return rc; rc = -ENXIO; mutex_lock(&bd->ops_lock); if (bd->ops) { pr_debug("set power to %lu\n", power); if (bd->props.power != power) { old_power = bd->props.power; bd->props.power = power; rc = backlight_update_status(bd); if (rc) bd->props.power = old_power; else rc = count; } else { rc = count; } } mutex_unlock(&bd->ops_lock); return rc; } static DEVICE_ATTR_RW(bl_power); static ssize_t brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backlight_device *bd = to_backlight_device(dev); return sprintf(buf, "%d\n", bd->props.brightness); } int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness) { int rc = -ENXIO; mutex_lock(&bd->ops_lock); if (bd->ops) { if (brightness > bd->props.max_brightness) rc = -EINVAL; else { pr_debug("set brightness to %lu\n", brightness); bd->props.brightness = brightness; rc = backlight_update_status(bd); } } mutex_unlock(&bd->ops_lock); backlight_generate_event(bd, BACKLIGHT_UPDATE_SYSFS); return rc; } EXPORT_SYMBOL(backlight_device_set_brightness); static ssize_t brightness_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc; struct backlight_device *bd = to_backlight_device(dev); unsigned long brightness; rc = kstrtoul(buf, 0, &brightness); if (rc) return rc; rc = backlight_device_set_brightness(bd, brightness); return rc ? 
rc : count; } static DEVICE_ATTR_RW(brightness); static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backlight_device *bd = to_backlight_device(dev); return sprintf(buf, "%s\n", backlight_types[bd->props.type]); } static DEVICE_ATTR_RO(type); static ssize_t max_brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backlight_device *bd = to_backlight_device(dev); return sprintf(buf, "%d\n", bd->props.max_brightness); } static DEVICE_ATTR_RO(max_brightness); static ssize_t actual_brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { int rc = -ENXIO; struct backlight_device *bd = to_backlight_device(dev); mutex_lock(&bd->ops_lock); if (bd->ops && bd->ops->get_brightness) { rc = bd->ops->get_brightness(bd); if (rc >= 0) rc = sprintf(buf, "%d\n", rc); } else { rc = sprintf(buf, "%d\n", bd->props.brightness); } mutex_unlock(&bd->ops_lock); return rc; } static DEVICE_ATTR_RO(actual_brightness); static ssize_t scale_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backlight_device *bd = to_backlight_device(dev); if (WARN_ON(bd->props.scale > BACKLIGHT_SCALE_NON_LINEAR)) return sprintf(buf, "unknown\n"); return sprintf(buf, "%s\n", backlight_scale_types[bd->props.scale]); } static DEVICE_ATTR_RO(scale); #ifdef CONFIG_PM_SLEEP static int backlight_suspend(struct device *dev) { struct backlight_device *bd = to_backlight_device(dev); mutex_lock(&bd->ops_lock); if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { bd->props.state |= BL_CORE_SUSPENDED; backlight_update_status(bd); } mutex_unlock(&bd->ops_lock); return 0; } static int backlight_resume(struct device *dev) { struct backlight_device *bd = to_backlight_device(dev); mutex_lock(&bd->ops_lock); if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { bd->props.state &= ~BL_CORE_SUSPENDED; backlight_update_status(bd); } mutex_unlock(&bd->ops_lock); return 0; } #endif static SIMPLE_DEV_PM_OPS(backlight_class_dev_pm_ops, backlight_suspend, backlight_resume); static void bl_device_release(struct device *dev) { struct backlight_device *bd = to_backlight_device(dev); kfree(bd); } static struct attribute *bl_device_attrs[] = { &dev_attr_bl_power.attr, &dev_attr_brightness.attr, &dev_attr_actual_brightness.attr, &dev_attr_max_brightness.attr, &dev_attr_scale.attr, &dev_attr_type.attr, NULL, }; ATTRIBUTE_GROUPS(bl_device); static const struct class backlight_class = { .name = "backlight", .dev_groups = bl_device_groups, .pm = &backlight_class_dev_pm_ops, }; /** * backlight_force_update - tell the backlight subsystem that hardware state * has changed * @bd: the backlight device to update * @reason: reason for update * * Updates the internal state of the backlight in response to a hardware event, * and generates an uevent to notify userspace. A backlight driver shall call * backlight_force_update() when the backlight is changed using, for example, * a hot-key. The updated brightness is read using get_brightness() and the * brightness value is reported using an uevent. 
*/ void backlight_force_update(struct backlight_device *bd, enum backlight_update_reason reason) { int brightness; mutex_lock(&bd->ops_lock); if (bd->ops && bd->ops->get_brightness) { brightness = bd->ops->get_brightness(bd); if (brightness >= 0) bd->props.brightness = brightness; else dev_err(&bd->dev, "Could not update brightness from device: %pe\n", ERR_PTR(brightness)); } mutex_unlock(&bd->ops_lock); backlight_generate_event(bd, reason); } EXPORT_SYMBOL(backlight_force_update); /* deprecated - use devm_backlight_device_register() */ struct backlight_device *backlight_device_register(const char *name, struct device *parent, void *devdata, const struct backlight_ops *ops, const struct backlight_properties *props) { struct backlight_device *new_bd; int rc; pr_debug("backlight_device_register: name=%s\n", name); new_bd = kzalloc(sizeof(struct backlight_device), GFP_KERNEL); if (!new_bd) return ERR_PTR(-ENOMEM); mutex_init(&new_bd->update_lock); mutex_init(&new_bd->ops_lock); new_bd->dev.class = &backlight_class; new_bd->dev.parent = parent; new_bd->dev.release = bl_device_release; dev_set_name(&new_bd->dev, "%s", name); dev_set_drvdata(&new_bd->dev, devdata); /* Set default properties */ if (props) { memcpy(&new_bd->props, props, sizeof(struct backlight_properties)); if (props->type <= 0 || props->type >= BACKLIGHT_TYPE_MAX) { WARN(1, "%s: invalid backlight type", name); new_bd->props.type = BACKLIGHT_RAW; } } else { new_bd->props.type = BACKLIGHT_RAW; } rc = device_register(&new_bd->dev); if (rc) { put_device(&new_bd->dev); return ERR_PTR(rc); } rc = backlight_register_fb(new_bd); if (rc) { device_unregister(&new_bd->dev); return ERR_PTR(rc); } new_bd->ops = ops; #ifdef CONFIG_PMAC_BACKLIGHT mutex_lock(&pmac_backlight_mutex); if (!pmac_backlight) pmac_backlight = new_bd; mutex_unlock(&pmac_backlight_mutex); #endif mutex_lock(&backlight_dev_list_mutex); list_add(&new_bd->entry, &backlight_dev_list); mutex_unlock(&backlight_dev_list_mutex); blocking_notifier_call_chain(&backlight_notifier, BACKLIGHT_REGISTERED, new_bd); return new_bd; } EXPORT_SYMBOL(backlight_device_register); /** backlight_device_get_by_type - find first backlight device of a type * @type: the type of backlight device * * Look up the first backlight device of the specified type * * RETURNS: * * Pointer to backlight device if any was found. Otherwise NULL. */ struct backlight_device *backlight_device_get_by_type(enum backlight_type type) { bool found = false; struct backlight_device *bd; mutex_lock(&backlight_dev_list_mutex); list_for_each_entry(bd, &backlight_dev_list, entry) { if (bd->props.type == type) { found = true; break; } } mutex_unlock(&backlight_dev_list_mutex); return found ? bd : NULL; } EXPORT_SYMBOL(backlight_device_get_by_type); /** * backlight_device_get_by_name - Get backlight device by name * @name: Device name * * This function looks up a backlight device by its name. It obtains a reference * on the backlight device and it is the caller's responsibility to drop the * reference by calling put_device(). * * Returns: * A pointer to the backlight device if found, otherwise NULL. */ struct backlight_device *backlight_device_get_by_name(const char *name) { struct device *dev; dev = class_find_device_by_name(&backlight_class, name); return dev ? 
to_backlight_device(dev) : NULL; } EXPORT_SYMBOL(backlight_device_get_by_name); /* deprecated - use devm_backlight_device_unregister() */ void backlight_device_unregister(struct backlight_device *bd) { if (!bd) return; mutex_lock(&backlight_dev_list_mutex); list_del(&bd->entry); mutex_unlock(&backlight_dev_list_mutex); #ifdef CONFIG_PMAC_BACKLIGHT mutex_lock(&pmac_backlight_mutex); if (pmac_backlight == bd) pmac_backlight = NULL; mutex_unlock(&pmac_backlight_mutex); #endif blocking_notifier_call_chain(&backlight_notifier, BACKLIGHT_UNREGISTERED, bd); mutex_lock(&bd->ops_lock); bd->ops = NULL; mutex_unlock(&bd->ops_lock); backlight_unregister_fb(bd); device_unregister(&bd->dev); } EXPORT_SYMBOL(backlight_device_unregister); static void devm_backlight_device_release(struct device *dev, void *res) { struct backlight_device *backlight = *(struct backlight_device **)res; backlight_device_unregister(backlight); } static int devm_backlight_device_match(struct device *dev, void *res, void *data) { struct backlight_device **r = res; return *r == data; } /** * backlight_register_notifier - get notified of backlight (un)registration * @nb: notifier block with the notifier to call on backlight (un)registration * * Register a notifier to get notified when backlight devices get registered * or unregistered. * * RETURNS: * * 0 on success, otherwise a negative error code */ int backlight_register_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&backlight_notifier, nb); } EXPORT_SYMBOL(backlight_register_notifier); /** * backlight_unregister_notifier - unregister a backlight notifier * @nb: notifier block to unregister * * Unregister a previously registered notifier so that it is no longer * called when backlight devices get registered or unregistered. * * RETURNS: * * 0 on success, otherwise a negative error code */ int backlight_unregister_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&backlight_notifier, nb); } EXPORT_SYMBOL(backlight_unregister_notifier); /** * devm_backlight_device_register - register a new backlight device * @dev: the device managing the backlight device (used for devres) * @name: the name of the device * @parent: a pointer to the parent device (often the same as @dev) * @devdata: an optional pointer to be stored for private driver use * @ops: the backlight operations structure * @props: the backlight properties * * Creates and registers a new backlight device. When a backlight device * is registered, the configuration must be specified in the @props * parameter. See description of &backlight_properties. * * RETURNS: * * struct backlight_device pointer on success, or an ERR_PTR on error */ struct backlight_device *devm_backlight_device_register(struct device *dev, const char *name, struct device *parent, void *devdata, const struct backlight_ops *ops, const struct backlight_properties *props) { struct backlight_device **ptr, *backlight; ptr = devres_alloc(devm_backlight_device_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); backlight = backlight_device_register(name, parent, devdata, ops, props); if (!IS_ERR(backlight)) { *ptr = backlight; devres_add(dev, ptr); } else { devres_free(ptr); } return backlight; } EXPORT_SYMBOL(devm_backlight_device_register); /** * devm_backlight_device_unregister - unregister backlight device * @dev: the device the backlight device was registered with * @bd: the backlight device to unregister * * Deallocates a backlight allocated with devm_backlight_device_register().
* Normally this function will not need to be called and the resource management * code will ensure that the resources are freed. */ void devm_backlight_device_unregister(struct device *dev, struct backlight_device *bd) { int rc; rc = devres_release(dev, devm_backlight_device_release, devm_backlight_device_match, bd); WARN_ON(rc); } EXPORT_SYMBOL(devm_backlight_device_unregister); #ifdef CONFIG_OF static int of_parent_match(struct device *dev, const void *data) { return dev->parent && dev->parent->of_node == data; } /** * of_find_backlight_by_node() - find backlight device by device-tree node * @node: device-tree node of the backlight device * * Returns a pointer to the backlight device corresponding to the given DT * node or NULL if no such backlight device exists or if the device hasn't * been probed yet. * * This function obtains a reference on the backlight device and it is the * caller's responsibility to drop the reference by calling put_device() on * the backlight device's .dev field. */ struct backlight_device *of_find_backlight_by_node(struct device_node *node) { struct device *dev; dev = class_find_device(&backlight_class, NULL, node, of_parent_match); return dev ? to_backlight_device(dev) : NULL; } EXPORT_SYMBOL(of_find_backlight_by_node); #endif static struct backlight_device *of_find_backlight(struct device *dev) { struct backlight_device *bd = NULL; struct device_node *np; if (!dev) return NULL; if (IS_ENABLED(CONFIG_OF) && dev->of_node) { np = of_parse_phandle(dev->of_node, "backlight", 0); if (np) { bd = of_find_backlight_by_node(np); of_node_put(np); if (!bd) return ERR_PTR(-EPROBE_DEFER); } } return bd; } static void devm_backlight_release(void *data) { struct backlight_device *bd = data; put_device(&bd->dev); } /** * devm_of_find_backlight - find backlight for a device * @dev: the device * * This function looks for a property named 'backlight' on the DT node * connected to @dev and looks up the backlight device. The lookup is * device managed so the reference to the backlight device is automatically * dropped on driver detach. * * RETURNS: * * A pointer to the backlight device if found. * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight * device is found. NULL if there's no backlight property. */ struct backlight_device *devm_of_find_backlight(struct device *dev) { struct backlight_device *bd; int ret; bd = of_find_backlight(dev); if (IS_ERR_OR_NULL(bd)) return bd; ret = devm_add_action_or_reset(dev, devm_backlight_release, bd); if (ret) return ERR_PTR(ret); return bd; } EXPORT_SYMBOL(devm_of_find_backlight); static void __exit backlight_class_exit(void) { class_unregister(&backlight_class); } static int __init backlight_class_init(void) { int ret; ret = class_register(&backlight_class); if (ret) { pr_warn("Unable to create backlight class; errno = %d\n", ret); return ret; } INIT_LIST_HEAD(&backlight_dev_list); mutex_init(&backlight_dev_list_mutex); BLOCKING_INIT_NOTIFIER_HEAD(&backlight_notifier); return 0; } /* * if this is compiled into the kernel, we need to ensure that the * class is registered before users of the class try to register lcd's */ postcore_initcall(backlight_class_init); module_exit(backlight_class_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamey Hicks <jamey.hicks@hp.com>, Andrew Zabolotny <zap@homelink.ru>"); MODULE_DESCRIPTION("Backlight Lowlevel Control Abstraction");
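/*
 * Editor's illustration (not part of the original backlight.c): a minimal
 * sketch of a backlight driver following the DOC overview above. The driver,
 * function and device names (example_bl_*) are hypothetical; a real driver
 * would program its hardware in update_status() and derive max_brightness
 * from the device it controls.
 *
 *	static int example_bl_update_status(struct backlight_device *bd)
 *	{
 *		int brightness = backlight_get_brightness(bd);
 *
 *		// Write "brightness" to the hardware here; the helper already
 *		// returns 0 when the device is blanked or powered down.
 *		return 0;
 *	}
 *
 *	static const struct backlight_ops example_bl_ops = {
 *		.options	= BL_CORE_SUSPENDRESUME,
 *		.update_status	= example_bl_update_status,
 *	};
 *
 *	static int example_bl_probe(struct platform_device *pdev)
 *	{
 *		struct backlight_properties props = {
 *			.type		= BACKLIGHT_RAW,
 *			.max_brightness	= 255,
 *			.brightness	= 255,
 *		};
 *		struct backlight_device *bd;
 *
 *		bd = devm_backlight_device_register(&pdev->dev, "example-bl",
 *						    &pdev->dev, NULL,
 *						    &example_bl_ops, &props);
 *		return PTR_ERR_OR_ZERO(bd);
 *	}
 */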
// SPDX-License-Identifier: GPL-2.0-or-later /* * net-sysfs.c - network device class and attributes * * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org> */ #include <linux/capability.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <linux/sched/isolation.h> #include <linux/nsproxy.h> #include <net/sock.h> #include <net/net_namespace.h> #include <linux/rtnetlink.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <linux/jiffies.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_net.h> #include <linux/cpu.h> #include
<net/netdev_rx_queue.h> #include <net/rps.h> #include "dev.h" #include "net-sysfs.h" #ifdef CONFIG_SYSFS static const char fmt_hex[] = "%#x\n"; static const char fmt_dec[] = "%d\n"; static const char fmt_ulong[] = "%lu\n"; static const char fmt_u64[] = "%llu\n"; /* Caller holds RTNL or RCU */ static inline int dev_isalive(const struct net_device *dev) { return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED; } /* use same locking rules as GIF* ioctl's */ static ssize_t netdev_show(const struct device *dev, struct device_attribute *attr, char *buf, ssize_t (*format)(const struct net_device *, char *)) { struct net_device *ndev = to_net_dev(dev); ssize_t ret = -EINVAL; rcu_read_lock(); if (dev_isalive(ndev)) ret = (*format)(ndev, buf); rcu_read_unlock(); return ret; } /* generate a show function for simple field */ #define NETDEVICE_SHOW(field, format_string) \ static ssize_t format_##field(const struct net_device *dev, char *buf) \ { \ return sysfs_emit(buf, format_string, READ_ONCE(dev->field)); \ } \ static ssize_t field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ return netdev_show(dev, attr, buf, format_##field); \ } \ #define NETDEVICE_SHOW_RO(field, format_string) \ NETDEVICE_SHOW(field, format_string); \ static DEVICE_ATTR_RO(field) #define NETDEVICE_SHOW_RW(field, format_string) \ NETDEVICE_SHOW(field, format_string); \ static DEVICE_ATTR_RW(field) /* use same locking and permission rules as SIF* ioctl's */ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len, int (*set)(struct net_device *, unsigned long)) { struct net_device *netdev = to_net_dev(dev); struct net *net = dev_net(netdev); unsigned long new; int ret; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; ret = kstrtoul(buf, 0, &new); if (ret) goto err; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(netdev)) { ret = (*set)(netdev, new); if (ret == 0) ret = len; } rtnl_unlock(); err: return ret; } NETDEVICE_SHOW_RO(dev_id, fmt_hex); NETDEVICE_SHOW_RO(dev_port, fmt_dec); NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec); NETDEVICE_SHOW_RO(addr_len, fmt_dec); NETDEVICE_SHOW_RO(ifindex, fmt_dec); NETDEVICE_SHOW_RO(type, fmt_dec); NETDEVICE_SHOW_RO(link_mode, fmt_dec); static ssize_t iflink_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); return sysfs_emit(buf, fmt_dec, dev_get_iflink(ndev)); } static DEVICE_ATTR_RO(iflink); static ssize_t format_name_assign_type(const struct net_device *dev, char *buf) { return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type)); } static ssize_t name_assign_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); ssize_t ret = -EINVAL; if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN) ret = netdev_show(dev, attr, buf, format_name_assign_type); return ret; } static DEVICE_ATTR_RO(name_assign_type); /* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */ static ssize_t address_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); ssize_t ret = -EINVAL; down_read(&dev_addr_sem); rcu_read_lock(); if (dev_isalive(ndev)) ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len); rcu_read_unlock(); up_read(&dev_addr_sem); return ret; } static DEVICE_ATTR_RO(address); static ssize_t broadcast_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device 
*ndev = to_net_dev(dev); int ret = -EINVAL; rcu_read_lock(); if (dev_isalive(ndev)) ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len); rcu_read_unlock(); return ret; } static DEVICE_ATTR_RO(broadcast); static int change_carrier(struct net_device *dev, unsigned long new_carrier) { if (!netif_running(dev)) return -EINVAL; return dev_change_carrier(dev, (bool)new_carrier); } static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); /* The check is also done in change_carrier; this helps returning early * without hitting the trylock/restart in netdev_store. */ if (!netdev->netdev_ops->ndo_change_carrier) return -EOPNOTSUPP; return netdev_store(dev, attr, buf, len, change_carrier); } static ssize_t carrier_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL; if (!rtnl_trylock()) return restart_syscall(); if (netif_running(netdev)) { /* Synchronize carrier state with link watch, * see also rtnl_getlink(). */ linkwatch_sync_dev(netdev); ret = sysfs_emit(buf, fmt_dec, !!netif_carrier_ok(netdev)); } rtnl_unlock(); return ret; } static DEVICE_ATTR_RW(carrier); static ssize_t speed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL; /* The check is also done in __ethtool_get_link_ksettings; this helps * returning early without hitting the trylock/restart below. */ if (!netdev->ethtool_ops->get_link_ksettings) return ret; if (!rtnl_trylock()) return restart_syscall(); if (netif_running(netdev) && netif_device_present(netdev)) { struct ethtool_link_ksettings cmd; if (!__ethtool_get_link_ksettings(netdev, &cmd)) ret = sysfs_emit(buf, fmt_dec, cmd.base.speed); } rtnl_unlock(); return ret; } static DEVICE_ATTR_RO(speed); static ssize_t duplex_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL; /* The check is also done in __ethtool_get_link_ksettings; this helps * returning early without hitting the trylock/restart below. 
*/ if (!netdev->ethtool_ops->get_link_ksettings) return ret; if (!rtnl_trylock()) return restart_syscall(); if (netif_running(netdev)) { struct ethtool_link_ksettings cmd; if (!__ethtool_get_link_ksettings(netdev, &cmd)) { const char *duplex; switch (cmd.base.duplex) { case DUPLEX_HALF: duplex = "half"; break; case DUPLEX_FULL: duplex = "full"; break; default: duplex = "unknown"; break; } ret = sysfs_emit(buf, "%s\n", duplex); } } rtnl_unlock(); return ret; } static DEVICE_ATTR_RO(duplex); static ssize_t testing_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); if (netif_running(netdev)) return sysfs_emit(buf, fmt_dec, !!netif_testing(netdev)); return -EINVAL; } static DEVICE_ATTR_RO(testing); static ssize_t dormant_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); if (netif_running(netdev)) return sysfs_emit(buf, fmt_dec, !!netif_dormant(netdev)); return -EINVAL; } static DEVICE_ATTR_RO(dormant); static const char *const operstates[] = { "unknown", "notpresent", /* currently unused */ "down", "lowerlayerdown", "testing", "dormant", "up" }; static ssize_t operstate_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct net_device *netdev = to_net_dev(dev); unsigned char operstate; operstate = READ_ONCE(netdev->operstate); if (!netif_running(netdev)) operstate = IF_OPER_DOWN; if (operstate >= ARRAY_SIZE(operstates)) return -EINVAL; /* should not happen */ return sysfs_emit(buf, "%s\n", operstates[operstate]); } static DEVICE_ATTR_RO(operstate); static ssize_t carrier_changes_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_up_count) + atomic_read(&netdev->carrier_down_count)); } static DEVICE_ATTR_RO(carrier_changes); static ssize_t carrier_up_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_up_count)); } static DEVICE_ATTR_RO(carrier_up_count); static ssize_t carrier_down_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_down_count)); } static DEVICE_ATTR_RO(carrier_down_count); /* read-write attributes */ static int change_mtu(struct net_device *dev, unsigned long new_mtu) { return dev_set_mtu(dev, (int)new_mtu); } static ssize_t mtu_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_mtu); } NETDEVICE_SHOW_RW(mtu, fmt_dec); static int change_flags(struct net_device *dev, unsigned long new_flags) { return dev_change_flags(dev, (unsigned int)new_flags, NULL); } static ssize_t flags_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_flags); } NETDEVICE_SHOW_RW(flags, fmt_hex); static ssize_t tx_queue_len_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { if (!capable(CAP_NET_ADMIN)) return -EPERM; return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len); } NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec); static int change_gro_flush_timeout(struct net_device *dev, unsigned long val) { WRITE_ONCE(dev->gro_flush_timeout, val); return 0; } static ssize_t 
gro_flush_timeout_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { if (!capable(CAP_NET_ADMIN)) return -EPERM; return netdev_store(dev, attr, buf, len, change_gro_flush_timeout); } NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong); static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val) { WRITE_ONCE(dev->napi_defer_hard_irqs, val); return 0; } static ssize_t napi_defer_hard_irqs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { if (!capable(CAP_NET_ADMIN)) return -EPERM; return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs); } NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec); static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct net *net = dev_net(netdev); size_t count = len; ssize_t ret = 0; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; /* ignore trailing newline */ if (len > 0 && buf[len - 1] == '\n') --count; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(netdev)) { ret = dev_set_alias(netdev, buf, count); if (ret < 0) goto err; ret = len; netdev_state_change(netdev); } err: rtnl_unlock(); return ret; } static ssize_t ifalias_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct net_device *netdev = to_net_dev(dev); char tmp[IFALIASZ]; ssize_t ret = 0; ret = dev_get_alias(netdev, tmp, sizeof(tmp)); if (ret > 0) ret = sysfs_emit(buf, "%s\n", tmp); return ret; } static DEVICE_ATTR_RW(ifalias); static int change_group(struct net_device *dev, unsigned long new_group) { dev_set_group(dev, (int)new_group); return 0; } static ssize_t group_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_group); } NETDEVICE_SHOW(group, fmt_dec); static DEVICE_ATTR(netdev_group, 0644, group_show, group_store); static int change_proto_down(struct net_device *dev, unsigned long proto_down) { return dev_change_proto_down(dev, (bool)proto_down); } static ssize_t proto_down_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_proto_down); } NETDEVICE_SHOW_RW(proto_down, fmt_dec); static ssize_t phys_port_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; /* The check is also done in dev_get_phys_port_id; this helps returning * early without hitting the trylock/restart below. */ if (!netdev->netdev_ops->ndo_get_phys_port_id) return -EOPNOTSUPP; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(netdev)) { struct netdev_phys_item_id ppid; ret = dev_get_phys_port_id(netdev, &ppid); if (!ret) ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id); } rtnl_unlock(); return ret; } static DEVICE_ATTR_RO(phys_port_id); static ssize_t phys_port_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; /* The checks are also done in dev_get_phys_port_name; this helps * returning early without hitting the trylock/restart below. 
*/ if (!netdev->netdev_ops->ndo_get_phys_port_name && !netdev->devlink_port) return -EOPNOTSUPP; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(netdev)) { char name[IFNAMSIZ]; ret = dev_get_phys_port_name(netdev, name, sizeof(name)); if (!ret) ret = sysfs_emit(buf, "%s\n", name); } rtnl_unlock(); return ret; } static DEVICE_ATTR_RO(phys_port_name); static ssize_t phys_switch_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; /* The checks are also done in dev_get_port_parent_id; this helps * returning early without hitting the trylock/restart below. This works * because recurse is false when calling dev_get_port_parent_id. */ if (!netdev->netdev_ops->ndo_get_port_parent_id && !netdev->devlink_port) return -EOPNOTSUPP; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(netdev)) { struct netdev_phys_item_id ppid = { }; ret = dev_get_port_parent_id(netdev, &ppid, false); if (!ret) ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id); } rtnl_unlock(); return ret; } static DEVICE_ATTR_RO(phys_switch_id); static ssize_t threaded_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; rcu_read_lock(); if (dev_isalive(netdev)) ret = sysfs_emit(buf, fmt_dec, READ_ONCE(netdev->threaded)); rcu_read_unlock(); return ret; } static int modify_napi_threaded(struct net_device *dev, unsigned long val) { int ret; if (list_empty(&dev->napi_list)) return -EOPNOTSUPP; if (val != 0 && val != 1) return -EOPNOTSUPP; ret = dev_set_threaded(dev, val); return ret; } static ssize_t threaded_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, modify_napi_threaded); } static DEVICE_ATTR_RW(threaded); static struct attribute *net_class_attrs[] __ro_after_init = { &dev_attr_netdev_group.attr, &dev_attr_type.attr, &dev_attr_dev_id.attr, &dev_attr_dev_port.attr, &dev_attr_iflink.attr, &dev_attr_ifindex.attr, &dev_attr_name_assign_type.attr, &dev_attr_addr_assign_type.attr, &dev_attr_addr_len.attr, &dev_attr_link_mode.attr, &dev_attr_address.attr, &dev_attr_broadcast.attr, &dev_attr_speed.attr, &dev_attr_duplex.attr, &dev_attr_dormant.attr, &dev_attr_testing.attr, &dev_attr_operstate.attr, &dev_attr_carrier_changes.attr, &dev_attr_ifalias.attr, &dev_attr_carrier.attr, &dev_attr_mtu.attr, &dev_attr_flags.attr, &dev_attr_tx_queue_len.attr, &dev_attr_gro_flush_timeout.attr, &dev_attr_napi_defer_hard_irqs.attr, &dev_attr_phys_port_id.attr, &dev_attr_phys_port_name.attr, &dev_attr_phys_switch_id.attr, &dev_attr_proto_down.attr, &dev_attr_carrier_up_count.attr, &dev_attr_carrier_down_count.attr, &dev_attr_threaded.attr, NULL, }; ATTRIBUTE_GROUPS(net_class); /* Show a given attribute in the statistics group */ static ssize_t netstat_show(const struct device *d, struct device_attribute *attr, char *buf, unsigned long offset) { struct net_device *dev = to_net_dev(d); ssize_t ret = -EINVAL; WARN_ON(offset > sizeof(struct rtnl_link_stats64) || offset % sizeof(u64) != 0); rcu_read_lock(); if (dev_isalive(dev)) { struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); ret = sysfs_emit(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset)); } rcu_read_unlock(); return ret; } /* generate a read-only statistics attribute */ #define NETSTAT_ENTRY(name) \ static ssize_t name##_show(struct device *d, \ struct device_attribute *attr,
char *buf) \ { \ return netstat_show(d, attr, buf, \ offsetof(struct rtnl_link_stats64, name)); \ } \ static DEVICE_ATTR_RO(name) NETSTAT_ENTRY(rx_packets); NETSTAT_ENTRY(tx_packets); NETSTAT_ENTRY(rx_bytes); NETSTAT_ENTRY(tx_bytes); NETSTAT_ENTRY(rx_errors); NETSTAT_ENTRY(tx_errors); NETSTAT_ENTRY(rx_dropped); NETSTAT_ENTRY(tx_dropped); NETSTAT_ENTRY(multicast); NETSTAT_ENTRY(collisions); NETSTAT_ENTRY(rx_length_errors); NETSTAT_ENTRY(rx_over_errors); NETSTAT_ENTRY(rx_crc_errors); NETSTAT_ENTRY(rx_frame_errors); NETSTAT_ENTRY(rx_fifo_errors); NETSTAT_ENTRY(rx_missed_errors); NETSTAT_ENTRY(tx_aborted_errors); NETSTAT_ENTRY(tx_carrier_errors); NETSTAT_ENTRY(tx_fifo_errors); NETSTAT_ENTRY(tx_heartbeat_errors); NETSTAT_ENTRY(tx_window_errors); NETSTAT_ENTRY(rx_compressed); NETSTAT_ENTRY(tx_compressed); NETSTAT_ENTRY(rx_nohandler); static struct attribute *netstat_attrs[] __ro_after_init = { &dev_attr_rx_packets.attr, &dev_attr_tx_packets.attr, &dev_attr_rx_bytes.attr, &dev_attr_tx_bytes.attr, &dev_attr_rx_errors.attr, &dev_attr_tx_errors.attr, &dev_attr_rx_dropped.attr, &dev_attr_tx_dropped.attr, &dev_attr_multicast.attr, &dev_attr_collisions.attr, &dev_attr_rx_length_errors.attr, &dev_attr_rx_over_errors.attr, &dev_attr_rx_crc_errors.attr, &dev_attr_rx_frame_errors.attr, &dev_attr_rx_fifo_errors.attr, &dev_attr_rx_missed_errors.attr, &dev_attr_tx_aborted_errors.attr, &dev_attr_tx_carrier_errors.attr, &dev_attr_tx_fifo_errors.attr, &dev_attr_tx_heartbeat_errors.attr, &dev_attr_tx_window_errors.attr, &dev_attr_rx_compressed.attr, &dev_attr_tx_compressed.attr, &dev_attr_rx_nohandler.attr, NULL }; static const struct attribute_group netstat_group = { .name = "statistics", .attrs = netstat_attrs, }; static struct attribute *wireless_attrs[] = { NULL }; static const struct attribute_group wireless_group = { .name = "wireless", .attrs = wireless_attrs, }; static bool wireless_group_needed(struct net_device *ndev) { #if IS_ENABLED(CONFIG_CFG80211) if (ndev->ieee80211_ptr) return true; #endif #if IS_ENABLED(CONFIG_WIRELESS_EXT) if (ndev->wireless_handlers) return true; #endif return false; } #else /* CONFIG_SYSFS */ #define net_class_groups NULL #endif /* CONFIG_SYSFS */ #ifdef CONFIG_SYSFS #define to_rx_queue_attr(_attr) \ container_of(_attr, struct rx_queue_attribute, attr) #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); struct netdev_rx_queue *queue = to_rx_queue(kobj); if (!attribute->show) return -EIO; return attribute->show(queue, buf); } static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); struct netdev_rx_queue *queue = to_rx_queue(kobj); if (!attribute->store) return -EIO; return attribute->store(queue, buf, count); } static const struct sysfs_ops rx_queue_sysfs_ops = { .show = rx_queue_attr_show, .store = rx_queue_attr_store, }; #ifdef CONFIG_RPS static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf) { struct rps_map *map; cpumask_var_t mask; int i, len; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; rcu_read_lock(); map = rcu_dereference(queue->rps_map); if (map) for (i = 0; i < map->len; i++) cpumask_set_cpu(map->cpus[i], mask); len = sysfs_emit(buf, "%*pb\n", cpumask_pr_args(mask)); rcu_read_unlock(); free_cpumask_var(mask); return len < PAGE_SIZE ? 
len : -EINVAL; } static int netdev_rx_queue_set_rps_mask(struct netdev_rx_queue *queue, cpumask_var_t mask) { static DEFINE_MUTEX(rps_map_mutex); struct rps_map *old_map, *map; int cpu, i; map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL); if (!map) return -ENOMEM; i = 0; for_each_cpu_and(cpu, mask, cpu_online_mask) map->cpus[i++] = cpu; if (i) { map->len = i; } else { kfree(map); map = NULL; } mutex_lock(&rps_map_mutex); old_map = rcu_dereference_protected(queue->rps_map, mutex_is_locked(&rps_map_mutex)); rcu_assign_pointer(queue->rps_map, map); if (map) static_branch_inc(&rps_needed); if (old_map) static_branch_dec(&rps_needed); mutex_unlock(&rps_map_mutex); if (old_map) kfree_rcu(old_map, rcu); return 0; } int rps_cpumask_housekeeping(struct cpumask *mask) { if (!cpumask_empty(mask)) { cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_DOMAIN)); cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_WQ)); if (cpumask_empty(mask)) return -EINVAL; } return 0; } static ssize_t store_rps_map(struct netdev_rx_queue *queue, const char *buf, size_t len) { cpumask_var_t mask; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); if (err) goto out; err = rps_cpumask_housekeeping(mask); if (err) goto out; err = netdev_rx_queue_set_rps_mask(queue, mask); out: free_cpumask_var(mask); return err ? : len; } static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, char *buf) { struct rps_dev_flow_table *flow_table; unsigned long val = 0; rcu_read_lock(); flow_table = rcu_dereference(queue->rps_flow_table); if (flow_table) val = (unsigned long)flow_table->mask + 1; rcu_read_unlock(); return sysfs_emit(buf, "%lu\n", val); } static void rps_dev_flow_table_release(struct rcu_head *rcu) { struct rps_dev_flow_table *table = container_of(rcu, struct rps_dev_flow_table, rcu); vfree(table); } static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, const char *buf, size_t len) { unsigned long mask, count; struct rps_dev_flow_table *table, *old_table; static DEFINE_SPINLOCK(rps_dev_flow_lock); int rc; if (!capable(CAP_NET_ADMIN)) return -EPERM; rc = kstrtoul(buf, 0, &count); if (rc < 0) return rc; if (count) { mask = count - 1; /* mask = roundup_pow_of_two(count) - 1; * without overflows... */ while ((mask | (mask >> 1)) != mask) mask |= (mask >> 1); /* On 64 bit arches, must check mask fits in table->mask (u32), * and on 32bit arches, must check * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow. 
*/ #if BITS_PER_LONG > 32 if (mask > (unsigned long)(u32)mask) return -EINVAL; #else if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1)) / sizeof(struct rps_dev_flow)) { /* Enforce a limit to prevent overflow */ return -EINVAL; } #endif table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1)); if (!table) return -ENOMEM; table->mask = mask; for (count = 0; count <= mask; count++) table->flows[count].cpu = RPS_NO_CPU; } else { table = NULL; } spin_lock(&rps_dev_flow_lock); old_table = rcu_dereference_protected(queue->rps_flow_table, lockdep_is_held(&rps_dev_flow_lock)); rcu_assign_pointer(queue->rps_flow_table, table); spin_unlock(&rps_dev_flow_lock); if (old_table) call_rcu(&old_table->rcu, rps_dev_flow_table_release); return len; } static struct rx_queue_attribute rps_cpus_attribute __ro_after_init = __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map); static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init = __ATTR(rps_flow_cnt, 0644, show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt); #endif /* CONFIG_RPS */ static struct attribute *rx_queue_default_attrs[] __ro_after_init = { #ifdef CONFIG_RPS &rps_cpus_attribute.attr, &rps_dev_flow_table_cnt_attribute.attr, #endif NULL }; ATTRIBUTE_GROUPS(rx_queue_default); static void rx_queue_release(struct kobject *kobj) { struct netdev_rx_queue *queue = to_rx_queue(kobj); #ifdef CONFIG_RPS struct rps_map *map; struct rps_dev_flow_table *flow_table; map = rcu_dereference_protected(queue->rps_map, 1); if (map) { RCU_INIT_POINTER(queue->rps_map, NULL); kfree_rcu(map, rcu); } flow_table = rcu_dereference_protected(queue->rps_flow_table, 1); if (flow_table) { RCU_INIT_POINTER(queue->rps_flow_table, NULL); call_rcu(&flow_table->rcu, rps_dev_flow_table_release); } #endif memset(kobj, 0, sizeof(*kobj)); netdev_put(queue->dev, &queue->dev_tracker); } static const void *rx_queue_namespace(const struct kobject *kobj) { struct netdev_rx_queue *queue = to_rx_queue(kobj); struct device *dev = &queue->dev->dev; const void *ns = NULL; if (dev->class && dev->class->ns_type) ns = dev->class->namespace(dev); return ns; } static void rx_queue_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) { const struct net *net = rx_queue_namespace(kobj); net_ns_get_ownership(net, uid, gid); } static const struct kobj_type rx_queue_ktype = { .sysfs_ops = &rx_queue_sysfs_ops, .release = rx_queue_release, .default_groups = rx_queue_default_groups, .namespace = rx_queue_namespace, .get_ownership = rx_queue_get_ownership, }; static int rx_queue_default_mask(struct net_device *dev, struct netdev_rx_queue *queue) { #if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL) struct cpumask *rps_default_mask = READ_ONCE(dev_net(dev)->core.rps_default_mask); if (rps_default_mask && !cpumask_empty(rps_default_mask)) return netdev_rx_queue_set_rps_mask(queue, rps_default_mask); #endif return 0; } static int rx_queue_add_kobject(struct net_device *dev, int index) { struct netdev_rx_queue *queue = dev->_rx + index; struct kobject *kobj = &queue->kobj; int error = 0; /* Kobject_put later will trigger rx_queue_release call which * decreases dev refcount: Take that reference here */ netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL); kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, "rx-%u", index); if (error) goto err; if (dev->sysfs_rx_queue_group) { error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); if (error) goto err; } error = rx_queue_default_mask(dev, queue); if (error) goto err; 
kobject_uevent(kobj, KOBJ_ADD); return error; err: kobject_put(kobj); return error; } static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid, kgid_t kgid) { struct netdev_rx_queue *queue = dev->_rx + index; struct kobject *kobj = &queue->kobj; int error; error = sysfs_change_owner(kobj, kuid, kgid); if (error) return error; if (dev->sysfs_rx_queue_group) error = sysfs_group_change_owner( kobj, dev->sysfs_rx_queue_group, kuid, kgid); return error; } #endif /* CONFIG_SYSFS */ int net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) { #ifdef CONFIG_SYSFS int i; int error = 0; #ifndef CONFIG_RPS if (!dev->sysfs_rx_queue_group) return 0; #endif for (i = old_num; i < new_num; i++) { error = rx_queue_add_kobject(dev, i); if (error) { new_num = old_num; break; } } while (--i >= new_num) { struct kobject *kobj = &dev->_rx[i].kobj; if (!refcount_read(&dev_net(dev)->ns.count)) kobj->uevent_suppress = 1; if (dev->sysfs_rx_queue_group) sysfs_remove_group(kobj, dev->sysfs_rx_queue_group); kobject_put(kobj); } return error; #else return 0; #endif } static int net_rx_queue_change_owner(struct net_device *dev, int num, kuid_t kuid, kgid_t kgid) { #ifdef CONFIG_SYSFS int error = 0; int i; #ifndef CONFIG_RPS if (!dev->sysfs_rx_queue_group) return 0; #endif for (i = 0; i < num; i++) { error = rx_queue_change_owner(dev, i, kuid, kgid); if (error) break; } return error; #else return 0; #endif } #ifdef CONFIG_SYSFS /* * netdev_queue sysfs structures and functions. */ struct netdev_queue_attribute { struct attribute attr; ssize_t (*show)(struct netdev_queue *queue, char *buf); ssize_t (*store)(struct netdev_queue *queue, const char *buf, size_t len); }; #define to_netdev_queue_attr(_attr) \ container_of(_attr, struct netdev_queue_attribute, attr) #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj) static ssize_t netdev_queue_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { const struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr); struct netdev_queue *queue = to_netdev_queue(kobj); if (!attribute->show) return -EIO; return attribute->show(queue, buf); } static ssize_t netdev_queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { const struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr); struct netdev_queue *queue = to_netdev_queue(kobj); if (!attribute->store) return -EIO; return attribute->store(queue, buf, count); } static const struct sysfs_ops netdev_queue_sysfs_ops = { .show = netdev_queue_attr_show, .store = netdev_queue_attr_store, }; static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) { unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout); return sysfs_emit(buf, fmt_ulong, trans_timeout); } static unsigned int get_netdev_queue_index(struct netdev_queue *queue) { struct net_device *dev = queue->dev; unsigned int i; i = queue - dev->_tx; BUG_ON(i >= dev->num_tx_queues); return i; } static ssize_t traffic_class_show(struct netdev_queue *queue, char *buf) { struct net_device *dev = queue->dev; int num_tc, tc; int index; if (!netif_is_multiqueue(dev)) return -ENOENT; if (!rtnl_trylock()) return restart_syscall(); index = get_netdev_queue_index(queue); /* If queue belongs to subordinate dev use its TC mapping */ dev = netdev_get_tx_queue(dev, index)->sb_dev ? 
: dev; num_tc = dev->num_tc; tc = netdev_txq_to_tc(dev, index); rtnl_unlock(); if (tc < 0) return -EINVAL; /* We can report the traffic class one of two ways: * Subordinate device traffic classes are reported with the traffic * class first, and then the subordinate class so for example TC0 on * subordinate device 2 will be reported as "0-2". If the queue * belongs to the root device it will be reported with just the * traffic class, so just "0" for TC 0 for example. */ return num_tc < 0 ? sysfs_emit(buf, "%d%d\n", tc, num_tc) : sysfs_emit(buf, "%d\n", tc); } #ifdef CONFIG_XPS static ssize_t tx_maxrate_show(struct netdev_queue *queue, char *buf) { return sysfs_emit(buf, "%lu\n", queue->tx_maxrate); } static ssize_t tx_maxrate_store(struct netdev_queue *queue, const char *buf, size_t len) { struct net_device *dev = queue->dev; int err, index = get_netdev_queue_index(queue); u32 rate = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; /* The check is also done later; this helps returning early without * hitting the trylock/restart below. */ if (!dev->netdev_ops->ndo_set_tx_maxrate) return -EOPNOTSUPP; err = kstrtou32(buf, 10, &rate); if (err < 0) return err; if (!rtnl_trylock()) return restart_syscall(); err = -EOPNOTSUPP; if (dev->netdev_ops->ndo_set_tx_maxrate) err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate); rtnl_unlock(); if (!err) { queue->tx_maxrate = rate; return len; } return err; } static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init = __ATTR_RW(tx_maxrate); #endif static struct netdev_queue_attribute queue_trans_timeout __ro_after_init = __ATTR_RO(tx_timeout); static struct netdev_queue_attribute queue_traffic_class __ro_after_init = __ATTR_RO(traffic_class); #ifdef CONFIG_BQL /* * Byte queue limits sysfs structures and functions. 
*/ static ssize_t bql_show(char *buf, unsigned int value) { return sysfs_emit(buf, "%u\n", value); } static ssize_t bql_set(const char *buf, const size_t count, unsigned int *pvalue) { unsigned int value; int err; if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) { value = DQL_MAX_LIMIT; } else { err = kstrtouint(buf, 10, &value); if (err < 0) return err; if (value > DQL_MAX_LIMIT) return -EINVAL; } *pvalue = value; return count; } static ssize_t bql_show_hold_time(struct netdev_queue *queue, char *buf) { struct dql *dql = &queue->dql; return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); } static ssize_t bql_set_hold_time(struct netdev_queue *queue, const char *buf, size_t len) { struct dql *dql = &queue->dql; unsigned int value; int err; err = kstrtouint(buf, 10, &value); if (err < 0) return err; dql->slack_hold_time = msecs_to_jiffies(value); return len; } static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init = __ATTR(hold_time, 0644, bql_show_hold_time, bql_set_hold_time); static ssize_t bql_show_stall_thrs(struct netdev_queue *queue, char *buf) { struct dql *dql = &queue->dql; return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs)); } static ssize_t bql_set_stall_thrs(struct netdev_queue *queue, const char *buf, size_t len) { struct dql *dql = &queue->dql; unsigned int value; int err; err = kstrtouint(buf, 10, &value); if (err < 0) return err; value = msecs_to_jiffies(value); if (value && (value < 4 || value > 4 / 2 * BITS_PER_LONG)) return -ERANGE; if (!dql->stall_thrs && value) dql->last_reap = jiffies; /* Force last_reap to be live */ smp_wmb(); dql->stall_thrs = value; return len; } static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init = __ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs); static ssize_t bql_show_stall_max(struct netdev_queue *queue, char *buf) { return sysfs_emit(buf, "%u\n", READ_ONCE(queue->dql.stall_max)); } static ssize_t bql_set_stall_max(struct netdev_queue *queue, const char *buf, size_t len) { WRITE_ONCE(queue->dql.stall_max, 0); return len; } static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init = __ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max); static ssize_t bql_show_stall_cnt(struct netdev_queue *queue, char *buf) { struct dql *dql = &queue->dql; return sysfs_emit(buf, "%lu\n", dql->stall_cnt); } static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init = __ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL); static ssize_t bql_show_inflight(struct netdev_queue *queue, char *buf) { struct dql *dql = &queue->dql; return sysfs_emit(buf, "%u\n", dql->num_queued - dql->num_completed); } static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init = __ATTR(inflight, 0444, bql_show_inflight, NULL); #define BQL_ATTR(NAME, FIELD) \ static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ char *buf) \ { \ return bql_show(buf, queue->dql.FIELD); \ } \ \ static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ const char *buf, size_t len) \ { \ return bql_set(buf, len, &queue->dql.FIELD); \ } \ \ static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \ = __ATTR(NAME, 0644, \ bql_show_ ## NAME, bql_set_ ## NAME) BQL_ATTR(limit, limit); BQL_ATTR(limit_max, max_limit); BQL_ATTR(limit_min, min_limit); static struct attribute *dql_attrs[] __ro_after_init = { &bql_limit_attribute.attr, &bql_limit_max_attribute.attr, &bql_limit_min_attribute.attr, &bql_hold_time_attribute.attr, 
&bql_inflight_attribute.attr, &bql_stall_thrs_attribute.attr, &bql_stall_cnt_attribute.attr, &bql_stall_max_attribute.attr, NULL }; static const struct attribute_group dql_group = { .name = "byte_queue_limits", .attrs = dql_attrs, }; #else /* Fake declaration, all the code using it should be dead */ extern const struct attribute_group dql_group; #endif /* CONFIG_BQL */ #ifdef CONFIG_XPS static ssize_t xps_queue_show(struct net_device *dev, unsigned int index, int tc, char *buf, enum xps_map_type type) { struct xps_dev_maps *dev_maps; unsigned long *mask; unsigned int nr_ids; int j, len; rcu_read_lock(); dev_maps = rcu_dereference(dev->xps_maps[type]); /* Default to nr_cpu_ids/dev->num_rx_queues and do not just return 0 * when dev_maps hasn't been allocated yet, to be backward compatible. */ nr_ids = dev_maps ? dev_maps->nr_ids : (type == XPS_CPUS ? nr_cpu_ids : dev->num_rx_queues); mask = bitmap_zalloc(nr_ids, GFP_NOWAIT); if (!mask) { rcu_read_unlock(); return -ENOMEM; } if (!dev_maps || tc >= dev_maps->num_tc) goto out_no_maps; for (j = 0; j < nr_ids; j++) { int i, tci = j * dev_maps->num_tc + tc; struct xps_map *map; map = rcu_dereference(dev_maps->attr_map[tci]); if (!map) continue; for (i = map->len; i--;) { if (map->queues[i] == index) { __set_bit(j, mask); break; } } } out_no_maps: rcu_read_unlock(); len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids); bitmap_free(mask); return len < PAGE_SIZE ? len : -EINVAL; } static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf) { struct net_device *dev = queue->dev; unsigned int index; int len, tc; if (!netif_is_multiqueue(dev)) return -ENOENT; index = get_netdev_queue_index(queue); if (!rtnl_trylock()) return restart_syscall(); /* If queue belongs to subordinate dev use its map */ dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; tc = netdev_txq_to_tc(dev, index); if (tc < 0) { rtnl_unlock(); return -EINVAL; } /* Make sure the subordinate device can't be freed */ get_device(&dev->dev); rtnl_unlock(); len = xps_queue_show(dev, index, tc, buf, XPS_CPUS); put_device(&dev->dev); return len; } static ssize_t xps_cpus_store(struct netdev_queue *queue, const char *buf, size_t len) { struct net_device *dev = queue->dev; unsigned int index; cpumask_var_t mask; int err; if (!netif_is_multiqueue(dev)) return -ENOENT; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; index = get_netdev_queue_index(queue); err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); if (err) { free_cpumask_var(mask); return err; } if (!rtnl_trylock()) { free_cpumask_var(mask); return restart_syscall(); } err = netif_set_xps_queue(dev, mask, index); rtnl_unlock(); free_cpumask_var(mask); return err ? 
: len; } static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init = __ATTR_RW(xps_cpus); static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf) { struct net_device *dev = queue->dev; unsigned int index; int tc; index = get_netdev_queue_index(queue); if (!rtnl_trylock()) return restart_syscall(); tc = netdev_txq_to_tc(dev, index); rtnl_unlock(); if (tc < 0) return -EINVAL; return xps_queue_show(dev, index, tc, buf, XPS_RXQS); } static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, size_t len) { struct net_device *dev = queue->dev; struct net *net = dev_net(dev); unsigned long *mask; unsigned int index; int err; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL); if (!mask) return -ENOMEM; index = get_netdev_queue_index(queue); err = bitmap_parse(buf, len, mask, dev->num_rx_queues); if (err) { bitmap_free(mask); return err; } if (!rtnl_trylock()) { bitmap_free(mask); return restart_syscall(); } cpus_read_lock(); err = __netif_set_xps_queue(dev, mask, index, XPS_RXQS); cpus_read_unlock(); rtnl_unlock(); bitmap_free(mask); return err ? : len; } static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init = __ATTR_RW(xps_rxqs); #endif /* CONFIG_XPS */ static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { &queue_trans_timeout.attr, &queue_traffic_class.attr, #ifdef CONFIG_XPS &xps_cpus_attribute.attr, &xps_rxqs_attribute.attr, &queue_tx_maxrate.attr, #endif NULL }; ATTRIBUTE_GROUPS(netdev_queue_default); static void netdev_queue_release(struct kobject *kobj) { struct netdev_queue *queue = to_netdev_queue(kobj); memset(kobj, 0, sizeof(*kobj)); netdev_put(queue->dev, &queue->dev_tracker); } static const void *netdev_queue_namespace(const struct kobject *kobj) { struct netdev_queue *queue = to_netdev_queue(kobj); struct device *dev = &queue->dev->dev; const void *ns = NULL; if (dev->class && dev->class->ns_type) ns = dev->class->namespace(dev); return ns; } static void netdev_queue_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) { const struct net *net = netdev_queue_namespace(kobj); net_ns_get_ownership(net, uid, gid); } static const struct kobj_type netdev_queue_ktype = { .sysfs_ops = &netdev_queue_sysfs_ops, .release = netdev_queue_release, .default_groups = netdev_queue_default_groups, .namespace = netdev_queue_namespace, .get_ownership = netdev_queue_get_ownership, }; static bool netdev_uses_bql(const struct net_device *dev) { if (dev->features & NETIF_F_LLTX || dev->priv_flags & IFF_NO_QUEUE) return false; return IS_ENABLED(CONFIG_BQL); } static int netdev_queue_add_kobject(struct net_device *dev, int index) { struct netdev_queue *queue = dev->_tx + index; struct kobject *kobj = &queue->kobj; int error = 0; /* Kobject_put later will trigger netdev_queue_release call * which decreases dev refcount: Take that reference here */ netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL); kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, "tx-%u", index); if (error) goto err; if (netdev_uses_bql(dev)) { error = sysfs_create_group(kobj, &dql_group); if (error) goto err; } kobject_uevent(kobj, KOBJ_ADD); return 0; err: kobject_put(kobj); return error; } static int tx_queue_change_owner(struct net_device *ndev, int index, kuid_t kuid, kgid_t kgid) { struct netdev_queue *queue = ndev->_tx + index; struct kobject *kobj = &queue->kobj; int error; error = sysfs_change_owner(kobj, kuid, kgid); if (error) 
return error; if (netdev_uses_bql(ndev)) error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid); return error; } #endif /* CONFIG_SYSFS */ int netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) { #ifdef CONFIG_SYSFS int i; int error = 0; /* Tx queue kobjects are allowed to be updated when a device is being * unregistered, but solely to remove queues from qdiscs. Any path * adding queues should be fixed. */ WARN(dev->reg_state == NETREG_UNREGISTERING && new_num > old_num, "New queues can't be registered after device unregistration."); for (i = old_num; i < new_num; i++) { error = netdev_queue_add_kobject(dev, i); if (error) { new_num = old_num; break; } } while (--i >= new_num) { struct netdev_queue *queue = dev->_tx + i; if (!refcount_read(&dev_net(dev)->ns.count)) queue->kobj.uevent_suppress = 1; if (netdev_uses_bql(dev)) sysfs_remove_group(&queue->kobj, &dql_group); kobject_put(&queue->kobj); } return error; #else return 0; #endif /* CONFIG_SYSFS */ } static int net_tx_queue_change_owner(struct net_device *dev, int num, kuid_t kuid, kgid_t kgid) { #ifdef CONFIG_SYSFS int error = 0; int i; for (i = 0; i < num; i++) { error = tx_queue_change_owner(dev, i, kuid, kgid); if (error) break; } return error; #else return 0; #endif /* CONFIG_SYSFS */ } static int register_queue_kobjects(struct net_device *dev) { int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; #ifdef CONFIG_SYSFS dev->queues_kset = kset_create_and_add("queues", NULL, &dev->dev.kobj); if (!dev->queues_kset) return -ENOMEM; real_rx = dev->real_num_rx_queues; #endif real_tx = dev->real_num_tx_queues; error = net_rx_queue_update_kobjects(dev, 0, real_rx); if (error) goto error; rxq = real_rx; error = netdev_queue_update_kobjects(dev, 0, real_tx); if (error) goto error; txq = real_tx; return 0; error: netdev_queue_update_kobjects(dev, txq, 0); net_rx_queue_update_kobjects(dev, rxq, 0); #ifdef CONFIG_SYSFS kset_unregister(dev->queues_kset); #endif return error; } static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid) { int error = 0, real_rx = 0, real_tx = 0; #ifdef CONFIG_SYSFS if (ndev->queues_kset) { error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid); if (error) return error; } real_rx = ndev->real_num_rx_queues; #endif real_tx = ndev->real_num_tx_queues; error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid); if (error) return error; error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid); if (error) return error; return 0; } static void remove_queue_kobjects(struct net_device *dev) { int real_rx = 0, real_tx = 0; #ifdef CONFIG_SYSFS real_rx = dev->real_num_rx_queues; #endif real_tx = dev->real_num_tx_queues; net_rx_queue_update_kobjects(dev, real_rx, 0); netdev_queue_update_kobjects(dev, real_tx, 0); dev->real_num_rx_queues = 0; dev->real_num_tx_queues = 0; #ifdef CONFIG_SYSFS kset_unregister(dev->queues_kset); #endif } static bool net_current_may_mount(void) { struct net *net = current->nsproxy->net_ns; return ns_capable(net->user_ns, CAP_SYS_ADMIN); } static void *net_grab_current_ns(void) { struct net *ns = current->nsproxy->net_ns; #ifdef CONFIG_NET_NS if (ns) refcount_inc(&ns->passive); #endif return ns; } static const void *net_initial_ns(void) { return &init_net; } static const void *net_netlink_ns(struct sock *sk) { return sock_net(sk); } const struct kobj_ns_type_operations net_ns_type_operations = { .type = KOBJ_NS_TYPE_NET, .current_may_mount = net_current_may_mount, .grab_current_ns = net_grab_current_ns, .netlink_ns = 
net_netlink_ns, .initial_ns = net_initial_ns, .drop_ns = net_drop_ns, }; EXPORT_SYMBOL_GPL(net_ns_type_operations); static int netdev_uevent(const struct device *d, struct kobj_uevent_env *env) { const struct net_device *dev = to_net_dev(d); int retval; /* pass interface to uevent. */ retval = add_uevent_var(env, "INTERFACE=%s", dev->name); if (retval) goto exit; /* pass ifindex to uevent. * ifindex is useful as it won't change (interface name may change) * and is what RtNetlink uses natively. */ retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); exit: return retval; } /* * netdev_release -- destroy and free a dead device. * Called when last reference to device kobject is gone. */ static void netdev_release(struct device *d) { struct net_device *dev = to_net_dev(d); BUG_ON(dev->reg_state != NETREG_RELEASED); /* no need to wait for rcu grace period: * device is dead and about to be freed. */ kfree(rcu_access_pointer(dev->ifalias)); netdev_freemem(dev); } static const void *net_namespace(const struct device *d) { const struct net_device *dev = to_net_dev(d); return dev_net(dev); } static void net_get_ownership(const struct device *d, kuid_t *uid, kgid_t *gid) { const struct net_device *dev = to_net_dev(d); const struct net *net = dev_net(dev); net_ns_get_ownership(net, uid, gid); } static const struct class net_class = { .name = "net", .dev_release = netdev_release, .dev_groups = net_class_groups, .dev_uevent = netdev_uevent, .ns_type = &net_ns_type_operations, .namespace = net_namespace, .get_ownership = net_get_ownership, }; #ifdef CONFIG_OF static int of_dev_node_match(struct device *dev, const void *data) { for (; dev; dev = dev->parent) { if (dev->of_node == data) return 1; } return 0; } /* * of_find_net_device_by_node - lookup the net device for the device node * @np: OF device node * * Looks up the net_device structure corresponding with the device node. * If successful, returns a pointer to the net_device with the embedded * struct device refcount incremented by one, or NULL on failure. The * refcount must be dropped when done with the net_device. */ struct net_device *of_find_net_device_by_node(struct device_node *np) { struct device *dev; dev = class_find_device(&net_class, NULL, np, of_dev_node_match); if (!dev) return NULL; return to_net_dev(dev); } EXPORT_SYMBOL(of_find_net_device_by_node); #endif /* Delete sysfs entries but hold kobject reference until after all * netdev references are gone. */ void netdev_unregister_kobject(struct net_device *ndev) { struct device *dev = &ndev->dev; if (!refcount_read(&dev_net(ndev)->ns.count)) dev_set_uevent_suppress(dev, 1); kobject_get(&dev->kobj); remove_queue_kobjects(ndev); pm_runtime_set_memalloc_noio(dev, false); device_del(dev); } /* Create sysfs entries for network device. 
*/ int netdev_register_kobject(struct net_device *ndev) { struct device *dev = &ndev->dev; const struct attribute_group **groups = ndev->sysfs_groups; int error = 0; device_initialize(dev); dev->class = &net_class; dev->platform_data = ndev; dev->groups = groups; dev_set_name(dev, "%s", ndev->name); #ifdef CONFIG_SYSFS /* Allow for a device specific group */ if (*groups) groups++; *groups++ = &netstat_group; if (wireless_group_needed(ndev)) *groups++ = &wireless_group; #endif /* CONFIG_SYSFS */ error = device_add(dev); if (error) return error; error = register_queue_kobjects(ndev); if (error) { device_del(dev); return error; } pm_runtime_set_memalloc_noio(dev, true); return error; } /* Change owner for sysfs entries when moving network devices across network * namespaces owned by different user namespaces. */ int netdev_change_owner(struct net_device *ndev, const struct net *net_old, const struct net *net_new) { kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID; kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID; struct device *dev = &ndev->dev; int error; net_ns_get_ownership(net_old, &old_uid, &old_gid); net_ns_get_ownership(net_new, &new_uid, &new_gid); /* The network namespace was changed but the owning user namespace is * identical so there's no need to change the owner of sysfs entries. */ if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid)) return 0; error = device_change_owner(dev, new_uid, new_gid); if (error) return error; error = queue_change_owner(ndev, new_uid, new_gid); if (error) return error; return 0; } int netdev_class_create_file_ns(const struct class_attribute *class_attr, const void *ns) { return class_create_file_ns(&net_class, class_attr, ns); } EXPORT_SYMBOL(netdev_class_create_file_ns); void netdev_class_remove_file_ns(const struct class_attribute *class_attr, const void *ns) { class_remove_file_ns(&net_class, class_attr, ns); } EXPORT_SYMBOL(netdev_class_remove_file_ns); int __init netdev_kobject_init(void) { kobj_ns_type_register(&net_ns_type_operations); return class_register(&net_class); }
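/*
 * Illustrative userspace sketch (not part of the kernel file above): it shows
 * how the per-queue attributes registered by net-sysfs are consumed through
 * /sys. The interface name "eth0", queue "rx-0" and the mask value are example
 * assumptions only; on the kernel side such a write lands in store_rps_map(),
 * which parses the hex CPU bitmap with bitmap_parse() and requires
 * CAP_NET_ADMIN.
 */
#include <stdio.h>

int main(void)
{
	/* Hypothetical example: steer RPS for eth0 rx-0 to CPUs 0-3 (mask 0xf) */
	const char *path = "/sys/class/net/eth0/queues/rx-0/rps_cpus";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "f\n");	/* hex CPU bitmap, as parsed by bitmap_parse() */
	fclose(f);
	return 0;
}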
/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _INPUT_COMPAT_H #define _INPUT_COMPAT_H /* * 32bit compatibility wrappers for the input subsystem. * * Very heavily based on evdev.c - Copyright (c) 1999-2002 Vojtech Pavlik */ #include <linux/compiler.h> #include <linux/compat.h> #include <linux/input.h> #ifdef CONFIG_COMPAT struct input_event_compat { compat_ulong_t sec; compat_ulong_t usec; __u16 type; __u16 code; __s32 value; }; struct ff_periodic_effect_compat { __u16 waveform; __u16 period; __s16 magnitude; __s16 offset; __u16 phase; struct ff_envelope envelope; __u32 custom_len; compat_uptr_t custom_data; }; struct ff_effect_compat { __u16 type; __s16 id; __u16 direction; struct ff_trigger trigger; struct ff_replay replay; union { struct ff_constant_effect constant; struct ff_ramp_effect ramp; struct ff_periodic_effect_compat periodic; struct ff_condition_effect condition[2]; /* One for each axis */ struct ff_rumble_effect rumble; } u; }; static inline size_t input_event_size(void) { return (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) ? sizeof(struct input_event_compat) : sizeof(struct input_event); } #else static inline size_t input_event_size(void) { return sizeof(struct input_event); } #endif /* CONFIG_COMPAT */ int input_event_from_user(const char __user *buffer, struct input_event *event); int input_event_to_user(char __user *buffer, const struct input_event *event); int input_ff_effect_from_user(const char __user *buffer, size_t size, struct ff_effect *effect); #endif /* _INPUT_COMPAT_H */
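/*
 * Minimal sketch (an assumption, not the in-tree input-compat.c
 * implementation) of how the input_event_from_user() declaration above could
 * be backed: inside a 32-bit compat syscall the smaller input_event_compat
 * layout is copied from userspace and widened field by field, otherwise the
 * native layout is copied directly. Error handling is reduced to the
 * essentials.
 */
#include <linux/uaccess.h>

#ifdef CONFIG_COMPAT
static int input_event_from_user_sketch(const char __user *buffer,
					struct input_event *event)
{
	if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct input_event_compat compat_event;

		if (copy_from_user(&compat_event, buffer,
				   sizeof(struct input_event_compat)))
			return -EFAULT;

		/* Widen the 32-bit timestamp and copy the fixed-size fields */
		event->input_event_sec = compat_event.sec;
		event->input_event_usec = compat_event.usec;
		event->type = compat_event.type;
		event->code = compat_event.code;
		event->value = compat_event.value;
	} else {
		if (copy_from_user(event, buffer, sizeof(struct input_event)))
			return -EFAULT;
	}

	return 0;
}
#endif /* CONFIG_COMPAT */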
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for USB Windows Media Center Ed. eHome Infrared Transceivers * * Copyright (c) 2010-2011, Jarod Wilson <jarod@redhat.com> * * Based on the original lirc_mceusb and lirc_mceusb2 drivers, by Dan * Conti, Martin Blatter and Daniel Melander, the latter of which was * in turn also based on the lirc_atiusb driver by Paul Miller. The * two mce drivers were merged into one by Jarod Wilson, with transmit * support for the 1st-gen device added primarily by Patrick Calhoun, * with a bit of tweaks by Jarod. Debugging improvements and proper * support for what appears to be 3rd-gen hardware added by Jarod. * Initial port from lirc driver to ir-core driver by Jarod, based * partially on a port to an earlier proposed IR infrastructure by * Jon Smirl, which included enhancements and simplifications to the * incoming IR buffer parsing routines. * * Updated in July of 2011 with the aid of Microsoft's official * remote/transceiver requirements and specification document, found at * download.microsoft.com, title * Windows-Media-Center-RC-IR-Collection-Green-Button-Specification-03-08-2011-V2.pdf */ #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/usb.h> #include <linux/usb/input.h> #include <linux/pm_wakeup.h> #include <media/rc-core.h> #define DRIVER_VERSION "1.95" #define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>" #define DRIVER_DESC "Windows Media Center Ed.
eHome Infrared Transceiver " \ "device driver" #define DRIVER_NAME "mceusb" #define USB_TX_TIMEOUT 1000 /* in milliseconds */ #define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */ #define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */ /* MCE constants */ #define MCE_IRBUF_SIZE 128 /* TX IR buffer length */ #define MCE_TIME_UNIT 50 /* Approx 50us resolution */ #define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */ #define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1) /* Actual format is 0x80 + num_bytes */ #define MCE_IRDATA_TRAILER 0x80 /* End of IR data */ #define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */ #define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */ #define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */ #define MCE_PULSE_MASK 0x7f /* Pulse mask */ #define MCE_MAX_PULSE_LENGTH 0x7f /* Longest transmittable pulse symbol */ /* * The interface between the host and the IR hardware is command-response * based. All commands and responses have a consistent format, where a lead * byte always identifies the type of data following it. The lead byte has * a port value in the 3 highest bits and a length value in the 5 lowest * bits. * * The length field is overloaded, with a value of 11111 indicating that the * following byte is a command or response code, and the length of the entire * message is determined by the code. If the length field is not 11111, then * it specifies the number of bytes of port data that follow. */ #define MCE_CMD 0x1f #define MCE_PORT_IR 0x4 /* (0x4 << 5) | MCE_CMD = 0x9f */ #define MCE_PORT_SYS 0x7 /* (0x7 << 5) | MCE_CMD = 0xff */ #define MCE_PORT_SER 0x6 /* 0xc0 through 0xdf flush & 0x1f bytes */ #define MCE_PORT_MASK 0xe0 /* Mask out command bits */ /* Command port headers */ #define MCE_CMD_PORT_IR 0x9f /* IR-related cmd/rsp */ #define MCE_CMD_PORT_SYS 0xff /* System (non-IR) device cmd/rsp */ /* Commands that set device state (2-4 bytes in length) */ #define MCE_CMD_RESET 0xfe /* Reset device, 2 bytes */ #define MCE_CMD_RESUME 0xaa /* Resume device after error, 2 bytes */ #define MCE_CMD_SETIRCFS 0x06 /* Set tx carrier, 4 bytes */ #define MCE_CMD_SETIRTIMEOUT 0x0c /* Set timeout, 4 bytes */ #define MCE_CMD_SETIRTXPORTS 0x08 /* Set tx ports, 3 bytes */ #define MCE_CMD_SETIRRXPORTEN 0x14 /* Set rx ports, 3 bytes */ #define MCE_CMD_FLASHLED 0x23 /* Flash receiver LED, 2 bytes */ /* Commands that query device state (all 2 bytes, unless noted) */ #define MCE_CMD_GETIRCFS 0x07 /* Get carrier */ #define MCE_CMD_GETIRTIMEOUT 0x0d /* Get timeout */ #define MCE_CMD_GETIRTXPORTS 0x13 /* Get tx ports */ #define MCE_CMD_GETIRRXPORTEN 0x15 /* Get rx ports */ #define MCE_CMD_GETPORTSTATUS 0x11 /* Get tx port status, 3 bytes */ #define MCE_CMD_GETIRNUMPORTS 0x16 /* Get number of ports */ #define MCE_CMD_GETWAKESOURCE 0x17 /* Get wake source */ #define MCE_CMD_GETEMVER 0x22 /* Get emulator interface version */ #define MCE_CMD_GETDEVDETAILS 0x21 /* Get device details (em ver2 only) */ #define MCE_CMD_GETWAKESUPPORT 0x20 /* Get wake details (em ver2 only) */ #define MCE_CMD_GETWAKEVERSION 0x18 /* Get wake pattern (em ver2 only) */ /* Misc commands */ #define MCE_CMD_NOP 0xff /* No operation */ /* Responses to commands (non-error cases) */ #define MCE_RSP_EQIRCFS 0x06 /* tx carrier, 4 bytes */ #define MCE_RSP_EQIRTIMEOUT 0x0c /* rx timeout, 4 bytes */ #define MCE_RSP_GETWAKESOURCE 0x17 /* wake source, 3 bytes */ #define MCE_RSP_EQIRTXPORTS 0x08 /* tx port mask, 3 bytes */ #define 
MCE_RSP_EQIRRXPORTEN 0x14 /* rx port mask, 3 bytes */ #define MCE_RSP_GETPORTSTATUS 0x11 /* tx port status, 7 bytes */ #define MCE_RSP_EQIRRXCFCNT 0x15 /* rx carrier count, 4 bytes */ #define MCE_RSP_EQIRNUMPORTS 0x16 /* number of ports, 4 bytes */ #define MCE_RSP_EQWAKESUPPORT 0x20 /* wake capabilities, 3 bytes */ #define MCE_RSP_EQWAKEVERSION 0x18 /* wake pattern details, 6 bytes */ #define MCE_RSP_EQDEVDETAILS 0x21 /* device capabilities, 3 bytes */ #define MCE_RSP_EQEMVER 0x22 /* emulator interface ver, 3 bytes */ #define MCE_RSP_FLASHLED 0x23 /* success flashing LED, 2 bytes */ /* Responses to error cases, must send MCE_CMD_RESUME to clear them */ #define MCE_RSP_CMD_ILLEGAL 0xfe /* illegal command for port, 2 bytes */ #define MCE_RSP_TX_TIMEOUT 0x81 /* tx timed out, 2 bytes */ /* Misc commands/responses not defined in the MCE remote/transceiver spec */ #define MCE_CMD_SIG_END 0x01 /* End of signal */ #define MCE_CMD_PING 0x03 /* Ping device */ #define MCE_CMD_UNKNOWN 0x04 /* Unknown */ #define MCE_CMD_UNKNOWN2 0x05 /* Unknown */ #define MCE_CMD_UNKNOWN3 0x09 /* Unknown */ #define MCE_CMD_UNKNOWN4 0x0a /* Unknown */ #define MCE_CMD_G_REVISION 0x0b /* Get hw/sw revision */ #define MCE_CMD_UNKNOWN5 0x0e /* Unknown */ #define MCE_CMD_UNKNOWN6 0x0f /* Unknown */ #define MCE_CMD_UNKNOWN8 0x19 /* Unknown */ #define MCE_CMD_UNKNOWN9 0x1b /* Unknown */ #define MCE_CMD_NULL 0x00 /* These show up various places... */ /* if buf[i] & MCE_PORT_MASK == 0x80 and buf[i] != MCE_CMD_PORT_IR, * then we're looking at a raw IR data sample */ #define MCE_COMMAND_IRDATA 0x80 #define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */ #define VENDOR_PHILIPS 0x0471 #define VENDOR_SMK 0x0609 #define VENDOR_TATUNG 0x1460 #define VENDOR_GATEWAY 0x107b #define VENDOR_SHUTTLE 0x1308 #define VENDOR_SHUTTLE2 0x051c #define VENDOR_MITSUMI 0x03ee #define VENDOR_TOPSEED 0x1784 #define VENDOR_RICAVISION 0x179d #define VENDOR_ITRON 0x195d #define VENDOR_FIC 0x1509 #define VENDOR_LG 0x043e #define VENDOR_MICROSOFT 0x045e #define VENDOR_FORMOSA 0x147a #define VENDOR_FINTEK 0x1934 #define VENDOR_PINNACLE 0x2304 #define VENDOR_ECS 0x1019 #define VENDOR_WISTRON 0x0fb8 #define VENDOR_COMPRO 0x185b #define VENDOR_NORTHSTAR 0x04eb #define VENDOR_REALTEK 0x0bda #define VENDOR_TIVO 0x105a #define VENDOR_CONEXANT 0x0572 #define VENDOR_TWISTEDMELON 0x2596 #define VENDOR_HAUPPAUGE 0x2040 #define VENDOR_PCTV 0x2013 #define VENDOR_ADAPTEC 0x03f3 enum mceusb_model_type { MCE_GEN2 = 0, /* Most boards */ MCE_GEN1, MCE_GEN3, MCE_GEN3_BROKEN_IRTIMEOUT, MCE_GEN2_TX_INV, MCE_GEN2_TX_INV_RX_GOOD, POLARIS_EVK, CX_HYBRID_TV, MULTIFUNCTION, TIVO_KIT, MCE_GEN2_NO_TX, HAUPPAUGE_CX_HYBRID_TV, EVROMEDIA_FULL_HYBRID_FULLHD, ASTROMETA_T2HYBRID, }; struct mceusb_model { u32 mce_gen1:1; u32 mce_gen2:1; u32 mce_gen3:1; u32 tx_mask_normal:1; u32 no_tx:1; u32 broken_irtimeout:1; /* * 2nd IR receiver (short-range, wideband) for learning mode: * 0, absent 2nd receiver (rx2) * 1, rx2 present * 2, rx2 which under counts IR carrier cycles */ u32 rx2; int ir_intfnum; const char *rc_map; /* Allow specify a per-board map */ const char *name; /* per-board name */ }; static const struct mceusb_model mceusb_model[] = { [MCE_GEN1] = { .mce_gen1 = 1, .tx_mask_normal = 1, .rx2 = 2, }, [MCE_GEN2] = { .mce_gen2 = 1, .rx2 = 2, }, [MCE_GEN2_NO_TX] = { .mce_gen2 = 1, .no_tx = 1, }, [MCE_GEN2_TX_INV] = { .mce_gen2 = 1, .tx_mask_normal = 1, .rx2 = 1, }, [MCE_GEN2_TX_INV_RX_GOOD] = { .mce_gen2 = 1, .tx_mask_normal = 1, .rx2 = 2, }, [MCE_GEN3] = { .mce_gen3 = 1, .tx_mask_normal 
= 1, .rx2 = 2, }, [MCE_GEN3_BROKEN_IRTIMEOUT] = { .mce_gen3 = 1, .tx_mask_normal = 1, .rx2 = 2, .broken_irtimeout = 1 }, [POLARIS_EVK] = { /* * In fact, the EVK is shipped without * remotes, but we should have something handy, * to allow testing it */ .name = "Conexant Hybrid TV (cx231xx) MCE IR", .rx2 = 2, }, [CX_HYBRID_TV] = { .no_tx = 1, /* tx isn't wired up at all */ .name = "Conexant Hybrid TV (cx231xx) MCE IR", }, [HAUPPAUGE_CX_HYBRID_TV] = { .no_tx = 1, /* eeprom says it has no tx */ .name = "Conexant Hybrid TV (cx231xx) MCE IR no TX", }, [MULTIFUNCTION] = { .mce_gen2 = 1, .ir_intfnum = 2, .rx2 = 2, }, [TIVO_KIT] = { .mce_gen2 = 1, .rc_map = RC_MAP_TIVO, .rx2 = 2, }, [EVROMEDIA_FULL_HYBRID_FULLHD] = { .name = "Evromedia USB Full Hybrid Full HD", .no_tx = 1, .rc_map = RC_MAP_MSI_DIGIVOX_III, }, [ASTROMETA_T2HYBRID] = { .name = "Astrometa T2Hybrid", .no_tx = 1, .rc_map = RC_MAP_ASTROMETA_T2HYBRID, } }; static const struct usb_device_id mceusb_dev_table[] = { /* Original Microsoft MCE IR Transceiver (often HP-branded) */ { USB_DEVICE(VENDOR_MICROSOFT, 0x006d), .driver_info = MCE_GEN1 }, /* Philips Infrared Transceiver - Sahara branded */ { USB_DEVICE(VENDOR_PHILIPS, 0x0608) }, /* Philips Infrared Transceiver - HP branded */ { USB_DEVICE(VENDOR_PHILIPS, 0x060c), .driver_info = MCE_GEN2_TX_INV }, /* Philips SRM5100 */ { USB_DEVICE(VENDOR_PHILIPS, 0x060d) }, /* Philips Infrared Transceiver - Omaura */ { USB_DEVICE(VENDOR_PHILIPS, 0x060f) }, /* Philips Infrared Transceiver - Spinel plus */ { USB_DEVICE(VENDOR_PHILIPS, 0x0613) }, /* Philips eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_PHILIPS, 0x0815) }, /* Philips/Spinel plus IR transceiver for ASUS */ { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, /* Philips/Spinel plus IR transceiver for ASUS */ { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, /* Philips IR transceiver (Dell branded) */ { USB_DEVICE(VENDOR_PHILIPS, 0x2093), .driver_info = MCE_GEN2_TX_INV }, /* Realtek MCE IR Receiver and card reader */ { USB_DEVICE(VENDOR_REALTEK, 0x0161), .driver_info = MULTIFUNCTION }, /* SMK/Toshiba G83C0004D410 */ { USB_DEVICE(VENDOR_SMK, 0x031d), .driver_info = MCE_GEN2_TX_INV_RX_GOOD }, /* SMK eHome Infrared Transceiver (Sony VAIO) */ { USB_DEVICE(VENDOR_SMK, 0x0322), .driver_info = MCE_GEN2_TX_INV }, /* bundled with Hauppauge PVR-150 */ { USB_DEVICE(VENDOR_SMK, 0x0334), .driver_info = MCE_GEN2_TX_INV }, /* SMK eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_SMK, 0x0338) }, /* SMK/I-O Data GV-MC7/RCKIT Receiver */ { USB_DEVICE(VENDOR_SMK, 0x0353), .driver_info = MCE_GEN2_NO_TX }, /* SMK RXX6000 Infrared Receiver */ { USB_DEVICE(VENDOR_SMK, 0x0357), .driver_info = MCE_GEN2_NO_TX }, /* Tatung eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TATUNG, 0x9150) }, /* Shuttle eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_SHUTTLE, 0xc001) }, /* Shuttle eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_SHUTTLE2, 0xc001) }, /* Gateway eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_GATEWAY, 0x3009) }, /* Mitsumi */ { USB_DEVICE(VENDOR_MITSUMI, 0x2501) }, /* Topseed eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0001), .driver_info = MCE_GEN2_TX_INV }, /* Topseed HP eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0006), .driver_info = MCE_GEN2_TX_INV }, /* Topseed eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0007), .driver_info = MCE_GEN2_TX_INV }, /* Topseed eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0008), .driver_info = MCE_GEN3 }, /* Topseed eHome Infrared Transceiver */ { 
USB_DEVICE(VENDOR_TOPSEED, 0x000a), .driver_info = MCE_GEN2_TX_INV }, /* Topseed eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0011), .driver_info = MCE_GEN3_BROKEN_IRTIMEOUT }, /* Ricavision internal Infrared Transceiver */ { USB_DEVICE(VENDOR_RICAVISION, 0x0010) }, /* Itron ione Libra Q-11 */ { USB_DEVICE(VENDOR_ITRON, 0x7002) }, /* FIC eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_FIC, 0x9242) }, /* LG eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_LG, 0x9803) }, /* Microsoft MCE Infrared Transceiver */ { USB_DEVICE(VENDOR_MICROSOFT, 0x00a0) }, /* Formosa eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_FORMOSA, 0xe015) }, /* Formosa21 / eHome Infrared Receiver */ { USB_DEVICE(VENDOR_FORMOSA, 0xe016) }, /* Formosa aim / Trust MCE Infrared Receiver */ { USB_DEVICE(VENDOR_FORMOSA, 0xe017), .driver_info = MCE_GEN2_NO_TX }, /* Formosa Industrial Computing / Beanbag Emulation Device */ { USB_DEVICE(VENDOR_FORMOSA, 0xe018) }, /* Formosa21 / eHome Infrared Receiver */ { USB_DEVICE(VENDOR_FORMOSA, 0xe03a) }, /* Formosa Industrial Computing AIM IR605/A */ { USB_DEVICE(VENDOR_FORMOSA, 0xe03c) }, /* Formosa Industrial Computing */ { USB_DEVICE(VENDOR_FORMOSA, 0xe03e) }, /* Formosa Industrial Computing */ { USB_DEVICE(VENDOR_FORMOSA, 0xe042) }, /* Fintek eHome Infrared Transceiver (HP branded) */ { USB_DEVICE(VENDOR_FINTEK, 0x5168), .driver_info = MCE_GEN2_TX_INV }, /* Fintek eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_FINTEK, 0x0602) }, /* Fintek eHome Infrared Transceiver (in the AOpen MP45) */ { USB_DEVICE(VENDOR_FINTEK, 0x0702) }, /* Pinnacle Remote Kit */ { USB_DEVICE(VENDOR_PINNACLE, 0x0225), .driver_info = MCE_GEN3 }, /* Elitegroup Computer Systems IR */ { USB_DEVICE(VENDOR_ECS, 0x0f38) }, /* Wistron Corp. eHome Infrared Receiver */ { USB_DEVICE(VENDOR_WISTRON, 0x0002) }, /* Compro K100 */ { USB_DEVICE(VENDOR_COMPRO, 0x3020) }, /* Compro K100 v2 */ { USB_DEVICE(VENDOR_COMPRO, 0x3082) }, /* Northstar Systems, Inc. eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) }, /* TiVo PC IR Receiver */ { USB_DEVICE(VENDOR_TIVO, 0x2000), .driver_info = TIVO_KIT }, /* Conexant Hybrid TV "Shelby" Polaris SDK */ { USB_DEVICE(VENDOR_CONEXANT, 0x58a1), .driver_info = POLARIS_EVK }, /* Conexant Hybrid TV RDU253S Polaris */ { USB_DEVICE(VENDOR_CONEXANT, 0x58a5), .driver_info = CX_HYBRID_TV }, /* Twisted Melon Inc. - Manta Mini Receiver */ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8008) }, /* Twisted Melon Inc. - Manta Pico Receiver */ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8016) }, /* Twisted Melon Inc. 
- Manta Transceiver */ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8042) }, /* Hauppauge WINTV-HVR-HVR 930C-HD - based on cx231xx */ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb130), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb131), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb138), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb139), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, /* Hauppauge WinTV-HVR-935C - based on cx231xx */ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb151), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, /* Hauppauge WinTV-HVR-955Q - based on cx231xx */ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb123), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, /* Hauppauge WinTV-HVR-975 - based on cx231xx */ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb150), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_PCTV, 0x0259), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_PCTV, 0x025e), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, /* Adaptec / HP eHome Receiver */ { USB_DEVICE(VENDOR_ADAPTEC, 0x0094) }, /* Evromedia USB Full Hybrid Full HD */ { USB_DEVICE(0x1b80, 0xd3b2), .driver_info = EVROMEDIA_FULL_HYBRID_FULLHD }, /* Astrometa T2hybrid */ { USB_DEVICE(0x15f4, 0x0135), .driver_info = ASTROMETA_T2HYBRID }, /* Terminating entry */ { } }; /* data structure for each usb transceiver */ struct mceusb_dev { /* ir-core bits */ struct rc_dev *rc; /* optional features we can enable */ bool carrier_report_enabled; bool wideband_rx_enabled; /* aka learning mode, short-range rx */ /* core device bits */ struct device *dev; /* usb */ struct usb_device *usbdev; struct usb_interface *usbintf; struct urb *urb_in; unsigned int pipe_in; struct usb_endpoint_descriptor *usb_ep_out; unsigned int pipe_out; /* buffers and dma */ unsigned char *buf_in; unsigned int len_in; dma_addr_t dma_in; enum { CMD_HEADER = 0, SUBCMD, CMD_DATA, PARSE_IRDATA, } parser_state; u8 cmd, rem; /* Remaining IR data bytes in packet */ struct { u32 connected:1; u32 tx_mask_normal:1; u32 microsoft_gen1:1; u32 no_tx:1; u32 rx2; } flags; /* transmit support */ u32 carrier; unsigned char tx_mask; char name[128]; char phys[64]; enum mceusb_model_type model; bool need_reset; /* flag to issue a device resume cmd */ u8 emver; /* emulator interface version */ u8 num_txports; /* number of transmit ports */ u8 num_rxports; /* number of receive sensors */ u8 txports_cabled; /* bitmask of transmitters with cable */ u8 rxports_active; /* bitmask of active receive sensors */ bool learning_active; /* wideband rx is active */ /* receiver carrier frequency detection support */ u32 pulse_tunit; /* IR pulse "on" cumulative time units */ u32 pulse_count; /* pulse "on" count in measurement interval */ /* * support for async error handler mceusb_deferred_kevent() * where usb_clear_halt(), usb_reset_configuration(), * usb_reset_device(), etc. 
must be done in process context */ struct work_struct kevent; unsigned long kevent_flags; # define EVENT_TX_HALT 0 # define EVENT_RX_HALT 1 # define EVENT_RST_PEND 31 }; /* MCE Device Command Strings, generally a port and command pair */ static char DEVICE_RESUME[] = {MCE_CMD_NULL, MCE_CMD_PORT_SYS, MCE_CMD_RESUME}; static char GET_REVISION[] = {MCE_CMD_PORT_SYS, MCE_CMD_G_REVISION}; static char GET_EMVER[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETEMVER}; static char GET_WAKEVERSION[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETWAKEVERSION}; static char FLASH_LED[] = {MCE_CMD_PORT_SYS, MCE_CMD_FLASHLED}; static char GET_UNKNOWN2[] = {MCE_CMD_PORT_IR, MCE_CMD_UNKNOWN2}; static char GET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRCFS}; static char GET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTIMEOUT}; static char GET_NUM_PORTS[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRNUMPORTS}; static char GET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTXPORTS}; static char GET_RX_SENSOR[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRRXPORTEN}; /* sub in desired values in lower byte or bytes for full command */ /* FIXME: make use of these for transmit. static char SET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR, MCE_CMD_SETIRCFS, 0x00, 0x00}; static char SET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00}; static char SET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR, MCE_CMD_SETIRTIMEOUT, 0x00, 0x00}; static char SET_RX_SENSOR[] = {MCE_CMD_PORT_IR, MCE_RSP_EQIRRXPORTEN, 0x00}; */ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd) { int datasize = 0; switch (cmd) { case MCE_CMD_NULL: if (subcmd == MCE_CMD_PORT_SYS) datasize = 1; break; case MCE_CMD_PORT_SYS: switch (subcmd) { case MCE_RSP_GETPORTSTATUS: datasize = 5; break; case MCE_RSP_EQWAKEVERSION: datasize = 4; break; case MCE_CMD_G_REVISION: datasize = 4; break; case MCE_RSP_EQWAKESUPPORT: case MCE_RSP_GETWAKESOURCE: case MCE_RSP_EQDEVDETAILS: case MCE_RSP_EQEMVER: datasize = 1; break; } break; case MCE_CMD_PORT_IR: switch (subcmd) { case MCE_CMD_UNKNOWN: case MCE_RSP_EQIRCFS: case MCE_RSP_EQIRTIMEOUT: case MCE_RSP_EQIRRXCFCNT: case MCE_RSP_EQIRNUMPORTS: datasize = 2; break; case MCE_CMD_SIG_END: case MCE_RSP_EQIRTXPORTS: case MCE_RSP_EQIRRXPORTEN: datasize = 1; break; } } return datasize; } static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, int offset, int len, bool out) { #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) char *inout; u8 cmd, subcmd, *data; struct device *dev = ir->dev; u32 carrier, period; if (offset < 0 || offset >= buf_len) return; dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)", (out ? 't' : 'r'), offset, min(len, buf_len - offset), buf + offset, len, buf_len); inout = out ? "Request" : "Got"; cmd = buf[offset]; subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0; data = &buf[offset] + 2; /* Trace meaningless 0xb1 0x60 header bytes on original receiver */ if (ir->flags.microsoft_gen1 && !out && !offset) { dev_dbg(dev, "MCE gen 1 header"); return; } /* Trace IR data header or trailer */ if (cmd != MCE_CMD_PORT_IR && (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) { if (cmd == MCE_IRDATA_TRAILER) dev_dbg(dev, "End of raw IR data"); else dev_dbg(dev, "Raw IR data, %d pulse/space samples", cmd & MCE_PACKET_LENGTH_MASK); return; } /* Unexpected end of buffer? 
	 */
	if (offset + len > buf_len)
		return;

	/* Decode MCE command/response */
	switch (cmd) {
	case MCE_CMD_NULL:
		if (subcmd == MCE_CMD_NULL)
			break;
		if ((subcmd == MCE_CMD_PORT_SYS) &&
		    (data[0] == MCE_CMD_RESUME))
			dev_dbg(dev, "Device resume requested");
		else
			dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
				cmd, subcmd);
		break;
	case MCE_CMD_PORT_SYS:
		switch (subcmd) {
		case MCE_RSP_EQEMVER:
			if (!out)
				dev_dbg(dev, "Emulator interface version %x",
					data[0]);
			break;
		case MCE_CMD_G_REVISION:
			if (len == 2)
				dev_dbg(dev, "Get hw/sw rev?");
			else
				dev_dbg(dev, "hw/sw rev %*ph",
					4, &buf[offset + 2]);
			break;
		case MCE_CMD_RESUME:
			dev_dbg(dev, "Device resume requested");
			break;
		case MCE_RSP_CMD_ILLEGAL:
			dev_dbg(dev, "Illegal PORT_SYS command");
			break;
		case MCE_RSP_EQWAKEVERSION:
			if (!out)
				dev_dbg(dev, "Wake version, proto: 0x%02x, payload: 0x%02x, address: 0x%02x, version: 0x%02x",
					data[0], data[1], data[2], data[3]);
			break;
		case MCE_RSP_GETPORTSTATUS:
			if (!out)
				/* We use data1 + 1 here, to match hw labels */
				dev_dbg(dev, "TX port %d: blaster is%s connected",
					data[0] + 1, data[3] ? " not" : "");
			break;
		case MCE_CMD_FLASHLED:
			dev_dbg(dev, "Attempting to flash LED");
			break;
		default:
			dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
				cmd, subcmd);
			break;
		}
		break;
	case MCE_CMD_PORT_IR:
		switch (subcmd) {
		case MCE_CMD_SIG_END:
			dev_dbg(dev, "End of signal");
			break;
		case MCE_CMD_PING:
			dev_dbg(dev, "Ping");
			break;
		case MCE_CMD_UNKNOWN:
			dev_dbg(dev, "Resp to 9f 05 of 0x%02x 0x%02x",
				data[0], data[1]);
			break;
		case MCE_RSP_EQIRCFS:
			if (!data[0] && !data[1]) {
				dev_dbg(dev, "%s: no carrier", inout);
				break;
			}
			// prescaler should make sense
			if (data[0] > 8)
				break;
			period = DIV_ROUND_CLOSEST((1U << data[0] * 2) *
						   (data[1] + 1), 10);
			if (!period)
				break;
			carrier = USEC_PER_SEC / period;
			dev_dbg(dev, "%s carrier of %u Hz (period %uus)",
				inout, carrier, period);
			break;
		case MCE_CMD_GETIRCFS:
			dev_dbg(dev, "Get carrier mode and freq");
			break;
		case MCE_RSP_EQIRTXPORTS:
			dev_dbg(dev, "%s transmit blaster mask of 0x%02x",
				inout, data[0]);
			break;
		case MCE_RSP_EQIRTIMEOUT:
			/* value is in units of 50us, so x*50/1000 ms */
			period = ((data[0] << 8) | data[1]) *
				  MCE_TIME_UNIT / 1000;
			dev_dbg(dev, "%s receive timeout of %d ms",
				inout, period);
			break;
		case MCE_CMD_GETIRTIMEOUT:
			dev_dbg(dev, "Get receive timeout");
			break;
		case MCE_CMD_GETIRTXPORTS:
			dev_dbg(dev, "Get transmit blaster mask");
			break;
		case MCE_RSP_EQIRRXPORTEN:
			dev_dbg(dev, "%s %s-range receive sensor in use",
				inout, data[0] == 0x02 ? "short" : "long");
			break;
		case MCE_CMD_GETIRRXPORTEN:
		/* aka MCE_RSP_EQIRRXCFCNT */
			if (out)
				dev_dbg(dev, "Get receive sensor");
			else
				dev_dbg(dev, "RX carrier cycle count: %d",
					((data[0] << 8) | data[1]));
			break;
		case MCE_RSP_EQIRNUMPORTS:
			if (out)
				break;
			dev_dbg(dev, "Num TX ports: %x, num RX ports: %x",
				data[0], data[1]);
			break;
		case MCE_RSP_CMD_ILLEGAL:
			dev_dbg(dev, "Illegal PORT_IR command");
			break;
		case MCE_RSP_TX_TIMEOUT:
			dev_dbg(dev, "IR TX timeout (TX buffer underrun)");
			break;
		default:
			dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
				cmd, subcmd);
			break;
		}
		break;
	default:
		break;
	}
#endif
}

/*
 * Schedule work that can't be done in interrupt handlers
 * (mceusb_dev_recv() and mce_write_callback()) nor tasklets.
 * Invokes mceusb_deferred_kevent() for recovering from
 * error events specified by the kevent bit field.
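 *
 * For example (informational, see the callers below): mce_write() sees
 * urb->status == -EPIPE and calls mceusb_defer_kevent(ir, EVENT_TX_HALT)
 * so that recovery calls such as usb_clear_halt() can run later in
 * process context (see the kevent comment in struct mceusb_dev).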
 */
static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
{
	set_bit(kevent, &ir->kevent_flags);

	if (test_bit(EVENT_RST_PEND, &ir->kevent_flags)) {
		dev_dbg(ir->dev, "kevent %d dropped pending USB Reset Device",
			kevent);
		return;
	}

	if (!schedule_work(&ir->kevent))
		dev_dbg(ir->dev, "kevent %d already scheduled", kevent);
	else
		dev_dbg(ir->dev, "kevent %d scheduled", kevent);
}

static void mce_write_callback(struct urb *urb)
{
	if (!urb)
		return;

	complete(urb->context);
}

/*
 * Write (TX/send) data to MCE device USB endpoint out.
 * Used for IR blaster TX and MCE device commands.
 *
 * Return: The number of bytes written (> 0) or errno (< 0).
 */
static int mce_write(struct mceusb_dev *ir, u8 *data, int size)
{
	int ret;
	struct urb *urb;
	struct device *dev = ir->dev;
	unsigned char *buf_out;
	struct completion tx_done;
	unsigned long expire;
	unsigned long ret_wait;

	mceusb_dev_printdata(ir, data, size, 0, size, true);

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (unlikely(!urb)) {
		dev_err(dev, "Error: mce write couldn't allocate urb");
		return -ENOMEM;
	}

	buf_out = kmalloc(size, GFP_KERNEL);
	if (!buf_out) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	init_completion(&tx_done);

	/* outbound data */
	if (usb_endpoint_xfer_int(ir->usb_ep_out))
		usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out,
				 buf_out, size, mce_write_callback, &tx_done,
				 ir->usb_ep_out->bInterval);
	else
		usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out,
				  buf_out, size, mce_write_callback, &tx_done);
	memcpy(buf_out, data, size);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		dev_err(dev, "Error: mce write submit urb error = %d", ret);
		kfree(buf_out);
		usb_free_urb(urb);
		return ret;
	}

	expire = msecs_to_jiffies(USB_TX_TIMEOUT);
	ret_wait = wait_for_completion_timeout(&tx_done, expire);
	if (!ret_wait) {
		dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))",
			expire, USB_TX_TIMEOUT);
		usb_kill_urb(urb);
		ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status);
	} else {
		ret = urb->status;
	}
	if (ret >= 0)
		ret = urb->actual_length;	/* bytes written */

	switch (urb->status) {
	/* success */
	case 0:
		break;

	case -ECONNRESET:
	case -ENOENT:
	case -EILSEQ:
	case -ESHUTDOWN:
		break;

	case -EPIPE:
		dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)",
			urb->status);
		mceusb_defer_kevent(ir, EVENT_TX_HALT);
		break;

	default:
		dev_err(ir->dev, "Error: mce write urb status = %d",
			urb->status);
		break;
	}

	dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)",
		ret, ret_wait, expire, USB_TX_TIMEOUT,
		urb->actual_length, urb->status);

	kfree(buf_out);
	usb_free_urb(urb);

	return ret;
}

static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size)
{
	int rsize = sizeof(DEVICE_RESUME);

	if (ir->need_reset) {
		ir->need_reset = false;
		mce_write(ir, DEVICE_RESUME, rsize);
		msleep(10);
	}

	mce_write(ir, data, size);
	msleep(10);
}

/*
 * Transmit IR out the MCE device IR blaster port(s).
 *
 * Convert IR pulse/space sequence from LIRC to MCE format.
 * Break up a long IR sequence into multiple parts (MCE IR data packets).
 *
 * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec.
 * Pulses and spaces are implicit by their position.
 * The first IR sample, txbuf[0], is always a pulse.
 *
 * u8 irbuf[] consists of multiple IR data packets for the MCE device.
 * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples.
 * An IR sample is 1-bit pulse/space flag with 7-bit time
 * in MCE time units (50usec).
 *
 * Return: The number of IR samples sent (> 0) or errno (< 0).
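 *
 * Worked example (illustrative): a 900 us pulse is 900 / 50 = 18 time
 * units and is encoded as MCE_PULSE_BIT | 18 = 0x92; a 9000 us space is
 * 180 units and, exceeding the 7-bit maximum of 127 (MCE_MAX_PULSE_LENGTH),
 * is split into two samples, 0x7f then 0x35.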
 */
static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
	struct mceusb_dev *ir = dev->priv;
	u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 };
	u8 irbuf[MCE_IRBUF_SIZE];
	int ircount = 0;
	unsigned int irsample;
	int i, length, ret;

	/* Send the set TX ports command */
	cmdbuf[2] = ir->tx_mask;
	mce_command_out(ir, cmdbuf, sizeof(cmdbuf));

	/* Generate mce IR data packet */
	for (i = 0; i < count; i++) {
		irsample = txbuf[i] / MCE_TIME_UNIT;

		/* loop to support long pulses/spaces > 6350us (127*50us) */
		while (irsample > 0) {
			/* Insert IR header every 30th entry */
			if (ircount % MCE_PACKET_SIZE == 0) {
				/* Room for IR header and one IR sample? */
				if (ircount >= MCE_IRBUF_SIZE - 1) {
					/* Send near full buffer */
					ret = mce_write(ir, irbuf, ircount);
					if (ret < 0)
						return ret;
					ircount = 0;
				}
				irbuf[ircount++] = MCE_IRDATA_HEADER;
			}

			/* Insert IR sample */
			if (irsample <= MCE_MAX_PULSE_LENGTH) {
				irbuf[ircount] = irsample;
				irsample = 0;
			} else {
				irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
				irsample -= MCE_MAX_PULSE_LENGTH;
			}
			/*
			 * Even i = IR pulse
			 * Odd i = IR space
			 */
			irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT);
			ircount++;

			/* IR buffer full? */
			if (ircount >= MCE_IRBUF_SIZE) {
				/* Fix packet length in last header */
				length = ircount % MCE_PACKET_SIZE;
				if (length > 0)
					irbuf[ircount - length] -=
						MCE_PACKET_SIZE - length;
				/* Send full buffer */
				ret = mce_write(ir, irbuf, ircount);
				if (ret < 0)
					return ret;
				ircount = 0;
			}
		}
	} /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */

	/* Fix packet length in last header */
	length = ircount % MCE_PACKET_SIZE;
	if (length > 0)
		irbuf[ircount - length] -= MCE_PACKET_SIZE - length;

	/* Append IR trailer (0x80) to final partial (or empty) IR buffer */
	irbuf[ircount++] = MCE_IRDATA_TRAILER;

	/* Send final buffer */
	ret = mce_write(ir, irbuf, ircount);
	if (ret < 0)
		return ret;

	return count;
}

/* Sets active IR outputs -- mce devices typically have two */
static int mceusb_set_tx_mask(struct rc_dev *dev, u32 mask)
{
	struct mceusb_dev *ir = dev->priv;

	/* return number of transmitters */
	int emitters = ir->num_txports ? ir->num_txports : 2;

	if (mask >= (1 << emitters))
		return emitters;

	if (ir->flags.tx_mask_normal)
		ir->tx_mask = mask;
	else
		ir->tx_mask = (mask != MCE_DEFAULT_TX_MASK ?
				mask ^ MCE_DEFAULT_TX_MASK : mask) << 1;

	return 0;
}

/* Sets the send carrier frequency and mode */
static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
	struct mceusb_dev *ir = dev->priv;
	int clk = 10000000;
	int prescaler = 0, divisor = 0;
	unsigned char cmdbuf[4] = { MCE_CMD_PORT_IR,
				    MCE_CMD_SETIRCFS, 0x00, 0x00 };

	/* Carrier has changed */
	if (ir->carrier != carrier) {
		if (carrier == 0) {
			ir->carrier = carrier;
			cmdbuf[2] = MCE_CMD_SIG_END;
			cmdbuf[3] = MCE_IRDATA_TRAILER;
			dev_dbg(ir->dev, "disabling carrier modulation");
			mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
			return 0;
		}

		for (prescaler = 0; prescaler < 4; ++prescaler) {
			divisor = (clk >> (2 * prescaler)) / carrier;
			if (divisor <= 0xff) {
				ir->carrier = carrier;
				cmdbuf[2] = prescaler;
				cmdbuf[3] = divisor;
				dev_dbg(ir->dev, "requesting %u HZ carrier",
					carrier);

				/* Transmit new carrier to mce device */
				mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
				return 0;
			}
		}

		return -EINVAL;
	}

	return 0;
}

static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
{
	u8 cmdbuf[4] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTIMEOUT, 0, 0 };
	struct mceusb_dev *ir = dev->priv;
	unsigned int units;

	units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT);

	cmdbuf[2] = units >> 8;
	cmdbuf[3] = units;

	mce_command_out(ir, cmdbuf, sizeof(cmdbuf));

	/* get receiver timeout value */
	mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));

	return 0;
}

/*
 * Select or deselect the 2nd receiver port.
 * Second receiver is learning mode, wide-band, short-range receiver.
 * Only one receiver (long or short range) may be active at a time.
 */
static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable)
{
	struct mceusb_dev *ir = dev->priv;
	unsigned char cmdbuf[3] = { MCE_CMD_PORT_IR,
				    MCE_CMD_SETIRRXPORTEN, 0x00 };

	dev_dbg(ir->dev, "select %s-range receive sensor",
		enable ? "short" : "long");
	if (enable) {
		ir->wideband_rx_enabled = true;
		cmdbuf[2] = 2;	/* port 2 is short range receiver */
	} else {
		ir->wideband_rx_enabled = false;
		cmdbuf[2] = 1;	/* port 1 is long range receiver */
	}
	mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
	/* response from device sets ir->learning_active */

	return 0;
}

/*
 * Enable/disable receiver carrier frequency pass through reporting.
 * Only the short-range receiver has carrier frequency measuring capability.
 * Implicitly select this receiver when enabling carrier frequency reporting.
 */
static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
{
	struct mceusb_dev *ir = dev->priv;
	unsigned char cmdbuf[3] = { MCE_CMD_PORT_IR,
				    MCE_CMD_SETIRRXPORTEN, 0x00 };

	dev_dbg(ir->dev, "%s short-range receiver carrier reporting",
		enable ? "enable" : "disable");
	if (enable) {
		ir->carrier_report_enabled = true;
		if (!ir->learning_active) {
			cmdbuf[2] = 2;	/* port 2 is short range receiver */
			mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
		}
	} else {
		ir->carrier_report_enabled = false;
		/*
		 * Revert to normal (long-range) receiver only if the
		 * wideband (short-range) receiver wasn't explicitly
		 * enabled.
		 */
		if (ir->learning_active && !ir->wideband_rx_enabled) {
			cmdbuf[2] = 1;	/* port 1 is long range receiver */
			mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
		}
	}

	return 0;
}

/*
 * Handle PORT_SYS/IR command response received from the MCE device.
 *
 * Assumes single response with all its data (not truncated)
 * in buf_in[]. The response itself determines its total length
 * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[].
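 * For example, an MCE_RSP_EQIRNUMPORTS response carries 2 data bytes
 * (TX port count, then RX sensor count), so the full response is
 * cmd + subcmd + 2 data bytes = 4 bytes.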
 *
 * We don't do anything but print debug spew for many of the command bits
 * we receive from the hardware, but some of them are useful information
 * we want to store so that we can use them.
 */
static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
{
	u8 cmd = buf_in[0];
	u8 subcmd = buf_in[1];
	u8 *hi = &buf_in[2];		/* read only when required */
	u8 *lo = &buf_in[3];		/* read only when required */
	struct ir_raw_event rawir = {};
	u32 carrier_cycles;
	u32 cycles_fix;

	if (cmd == MCE_CMD_PORT_SYS) {
		switch (subcmd) {
		/* the one and only 5-byte return value command */
		case MCE_RSP_GETPORTSTATUS:
			if (buf_in[5] == 0 && *hi < 8)
				ir->txports_cabled |= 1 << *hi;
			break;

		/* 1-byte return value commands */
		case MCE_RSP_EQEMVER:
			ir->emver = *hi;
			break;

		/* No return value commands */
		case MCE_RSP_CMD_ILLEGAL:
			ir->need_reset = true;
			break;

		default:
			break;
		}

		return;
	}

	if (cmd != MCE_CMD_PORT_IR)
		return;

	switch (subcmd) {
	/* 2-byte return value commands */
	case MCE_RSP_EQIRTIMEOUT:
		ir->rc->timeout = (*hi << 8 | *lo) * MCE_TIME_UNIT;
		break;
	case MCE_RSP_EQIRNUMPORTS:
		ir->num_txports = *hi;
		ir->num_rxports = *lo;
		break;
	case MCE_RSP_EQIRRXCFCNT:
		/*
		 * The carrier cycle counter can overflow and wrap around
		 * without notice from the device. So frequency measurement
		 * will be inaccurate with long duration IR.
		 *
		 * The long-range (non learning) receiver always reports
		 * zero count so we always ignore its report.
		 */
		if (ir->carrier_report_enabled && ir->learning_active &&
		    ir->pulse_tunit > 0) {
			carrier_cycles = (*hi << 8 | *lo);
			/*
			 * Adjust carrier cycle count by adding
			 * 1 missed count per pulse "on"
			 */
			cycles_fix = ir->flags.rx2 == 2 ? ir->pulse_count : 0;
			rawir.carrier_report = 1;
			rawir.carrier = (1000000u / MCE_TIME_UNIT) *
					(carrier_cycles + cycles_fix) /
					ir->pulse_tunit;
			dev_dbg(ir->dev, "RX carrier frequency %u Hz (pulse count = %u, cycles = %u, duration = %u, rx2 = %u)",
				rawir.carrier, ir->pulse_count, carrier_cycles,
				ir->pulse_tunit, ir->flags.rx2);
			ir_raw_event_store(ir->rc, &rawir);
		}
		break;

	/* 1-byte return value commands */
	case MCE_RSP_EQIRTXPORTS:
		ir->tx_mask = *hi;
		break;
	case MCE_RSP_EQIRRXPORTEN:
		ir->learning_active = ((*hi & 0x02) == 0x02);
		if (ir->rxports_active != *hi) {
			dev_info(ir->dev, "%s-range (0x%x) receiver active",
				 ir->learning_active ? "short" : "long", *hi);
			ir->rxports_active = *hi;
		}
		break;

	/* No return value commands */
	case MCE_RSP_CMD_ILLEGAL:
	case MCE_RSP_TX_TIMEOUT:
		ir->need_reset = true;
		break;

	default:
		break;
	}
}

static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
{
	struct ir_raw_event rawir = {};
	bool event = false;
	int i = 0;

	/* skip meaningless 0xb1 0x60 header bytes on orig receiver */
	if (ir->flags.microsoft_gen1)
		i = 2;

	/* if there's no data, just return now */
	if (buf_len <= i)
		return;

	for (; i < buf_len; i++) {
		switch (ir->parser_state) {
		case SUBCMD:
			ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
			mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
					     ir->rem + 2, false);
			if (i + ir->rem < buf_len)
				mceusb_handle_command(ir, &ir->buf_in[i - 1]);
			ir->parser_state = CMD_DATA;
			break;
		case PARSE_IRDATA:
			ir->rem--;
			rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
			rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK);
			if (unlikely(!rawir.duration)) {
				dev_dbg(ir->dev, "nonsensical irdata %02x with duration 0",
					ir->buf_in[i]);
				break;
			}
			if (rawir.pulse) {
				ir->pulse_tunit += rawir.duration;
				ir->pulse_count++;
			}
			rawir.duration *= MCE_TIME_UNIT;

			dev_dbg(ir->dev, "Storing %s %u us (%02x)",
				rawir.pulse ?
"pulse" : "space", rawir.duration, ir->buf_in[i]); if (ir_raw_event_store_with_filter(ir->rc, &rawir)) event = true; break; case CMD_DATA: ir->rem--; break; case CMD_HEADER: ir->cmd = ir->buf_in[i]; if ((ir->cmd == MCE_CMD_PORT_IR) || ((ir->cmd & MCE_PORT_MASK) != MCE_COMMAND_IRDATA)) { /* * got PORT_SYS, PORT_IR, or unknown * command response prefix */ ir->parser_state = SUBCMD; continue; } /* * got IR data prefix (0x80 + num_bytes) * decode MCE packets of the form {0x83, AA, BB, CC} * IR data packets can span USB messages */ ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK); mceusb_dev_printdata(ir, ir->buf_in, buf_len, i, ir->rem + 1, false); if (ir->rem) { ir->parser_state = PARSE_IRDATA; } else { struct ir_raw_event ev = { .timeout = 1, .duration = ir->rc->timeout }; if (ir_raw_event_store_with_filter(ir->rc, &ev)) event = true; ir->pulse_tunit = 0; ir->pulse_count = 0;