Total coverage: 235570 of 2040239 (12%)
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_APIC_H
#define _ASM_X86_APIC_H

#include <linux/cpumask.h>
#include <linux/static_call.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#include <asm/posted_intr.h>

#define ARCH_APICTIMER_STOPS_ON_C3	1

/* Macros for apic_extnmi which controls external NMI masking */
#define APIC_EXTNMI_BSP		0 /* Default */
#define APIC_EXTNMI_ALL		1
#define APIC_EXTNMI_NONE	2

/*
 * Debugging macros
 */
#define APIC_QUIET   0
#define APIC_VERBOSE 1
#define APIC_DEBUG   2

/*
 * Define the default level of output to be very little. This can be turned
 * up by using apic=verbose for more information and apic=debug for _lots_
 * of information. apic_verbosity is defined in apic.c
 */
#define apic_printk(v, s, a...)			\
do {						\
	if ((v) <= apic_verbosity)		\
		printk(s, ##a);			\
} while (0)

#define apic_pr_verbose(s, a...)	apic_printk(APIC_VERBOSE, KERN_INFO s, ##a)
#define apic_pr_debug(s, a...)
apic_printk(APIC_DEBUG, KERN_DEBUG s, ##a) #define apic_pr_debug_cont(s, a...) apic_printk(APIC_DEBUG, KERN_CONT s, ##a) /* Unconditional debug prints for code which is guarded by apic_verbosity already */ #define apic_dbg(s, a...) printk(KERN_DEBUG s, ##a) #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) extern void x86_32_probe_apic(void); #else static inline void x86_32_probe_apic(void) { } #endif extern u32 cpuid_to_apicid[]; #define CPU_ACPIID_INVALID U32_MAX #ifdef CONFIG_X86_LOCAL_APIC extern int apic_verbosity; extern int local_apic_timer_c2_ok; extern bool apic_is_disabled; extern unsigned int lapic_timer_period; extern enum apic_intr_mode_id apic_intr_mode; enum apic_intr_mode_id { APIC_PIC, APIC_VIRTUAL_WIRE, APIC_VIRTUAL_WIRE_NO_CONFIG, APIC_SYMMETRIC_IO, APIC_SYMMETRIC_IO_NO_ROUTING }; /* * With 82489DX we can't rely on apic feature bit * retrieved via cpuid but still have to deal with * such an apic chip so we assume that SMP configuration * is found from MP table (64bit case uses ACPI mostly * which set smp presence flag as well so we are safe * to use this helper too). */ static inline bool apic_from_smp_config(void) { return smp_found_config && !apic_is_disabled; } /* * Basic functions accessing APICs. */ #ifdef CONFIG_PARAVIRT #include <asm/paravirt.h> #endif static inline void native_apic_mem_write(u32 reg, u32 v) { volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP, ASM_OUTPUT("=r" (v), "=m" (*addr)), ASM_INPUT("0" (v), "m" (*addr))); } static inline u32 native_apic_mem_read(u32 reg) { return readl((void __iomem *)(APIC_BASE + reg)); } static inline void native_apic_mem_eoi(void) { native_apic_mem_write(APIC_EOI, APIC_EOI_ACK); } extern void native_apic_icr_write(u32 low, u32 id); extern u64 native_apic_icr_read(void); static inline bool apic_is_x2apic_enabled(void) { u64 msr; if (rdmsrq_safe(MSR_IA32_APICBASE, &msr)) return false; return msr & X2APIC_ENABLE; } extern void enable_IR_x2apic(void); extern int lapic_get_maxlvt(void); extern void clear_local_APIC(void); extern void disconnect_bsp_APIC(int virt_wire_setup); extern void disable_local_APIC(void); extern void apic_soft_disable(void); extern void lapic_shutdown(void); extern void sync_Arb_IDs(void); extern void init_bsp_APIC(void); extern void apic_intr_mode_select(void); extern void apic_intr_mode_init(void); extern void init_apic_mappings(void); void register_lapic_address(unsigned long address); extern void setup_boot_APIC_clock(void); extern void setup_secondary_APIC_clock(void); extern void lapic_update_tsc_freq(void); #ifdef CONFIG_X86_64 static inline bool apic_force_enable(unsigned long addr) { return false; } #else extern bool apic_force_enable(unsigned long addr); #endif extern void apic_ap_setup(void); /* * On 32bit this is mach-xxx local */ #ifdef CONFIG_X86_64 extern int apic_is_clustered_box(void); #else static inline int apic_is_clustered_box(void) { return 0; } #endif extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); extern void lapic_assign_system_vectors(void); extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace); extern void lapic_update_legacy_vectors(void); extern void lapic_online(void); extern void lapic_offline(void); extern bool apic_needs_pit(void); extern void apic_send_IPI_allbutself(unsigned int vector); extern void topology_register_apic(u32 apic_id, u32 acpi_id, bool present); extern void topology_register_boot_apic(u32 apic_id); extern int 
topology_hotplug_apic(u32 apic_id, u32 acpi_id); extern void topology_hotunplug_apic(unsigned int cpu); extern void topology_apply_cmdline_limits_early(void); extern void topology_init_possible_cpus(void); extern void topology_reset_possible_cpus_up(void); #else /* !CONFIG_X86_LOCAL_APIC */ static inline void lapic_shutdown(void) { } #define local_apic_timer_c2_ok 1 static inline void init_apic_mappings(void) { } static inline void disable_local_APIC(void) { } # define setup_boot_APIC_clock x86_init_noop # define setup_secondary_APIC_clock x86_init_noop static inline void lapic_update_tsc_freq(void) { } static inline void init_bsp_APIC(void) { } static inline void apic_intr_mode_select(void) { } static inline void apic_intr_mode_init(void) { } static inline void lapic_assign_system_vectors(void) { } static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { } static inline bool apic_needs_pit(void) { return true; } static inline void topology_apply_cmdline_limits_early(void) { } static inline void topology_init_possible_cpus(void) { } #endif /* !CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_X2APIC static inline void native_apic_msr_write(u32 reg, u32 v) { if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || reg == APIC_LVR) return; wrmsrq(APIC_BASE_MSR + (reg >> 4), v); } static inline void native_apic_msr_eoi(void) { native_wrmsrq(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK); } static inline u32 native_apic_msr_read(u32 reg) { u64 msr; if (reg == APIC_DFR) return -1; rdmsrq(APIC_BASE_MSR + (reg >> 4), msr); return (u32)msr; } static inline void native_x2apic_icr_write(u32 low, u32 id) { wrmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); } static inline u64 native_x2apic_icr_read(void) { unsigned long val; rdmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), val); return val; } extern int x2apic_mode; extern int x2apic_phys; extern void __init x2apic_set_max_apicid(u32 apicid); extern void x2apic_setup(void); static inline int x2apic_enabled(void) { return boot_cpu_has(X86_FEATURE_X2APIC) && apic_is_x2apic_enabled(); } #define x2apic_supported() (boot_cpu_has(X86_FEATURE_X2APIC)) #else /* !CONFIG_X86_X2APIC */ static inline void x2apic_setup(void) { } static inline int x2apic_enabled(void) { return 0; } static inline u32 native_apic_msr_read(u32 reg) { BUG(); } #define x2apic_mode (0) #define x2apic_supported() (0) #endif /* !CONFIG_X86_X2APIC */ extern void __init check_x2apic(void); struct irq_data; /* * Copyright 2004 James Cleverdon, IBM. * * Generic APIC sub-arch data struct. * * Hacked for x86-64 by James Cleverdon from i386 architecture code by * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ struct apic { /* Hotpath functions first */ void (*eoi)(void); void (*native_eoi)(void); void (*write)(u32 reg, u32 v); u32 (*read)(u32 reg); /* IPI related functions */ void (*wait_icr_idle)(void); u32 (*safe_wait_icr_idle)(void); void (*send_IPI)(int cpu, int vector); void (*send_IPI_mask)(const struct cpumask *mask, int vector); void (*send_IPI_mask_allbutself)(const struct cpumask *msk, int vec); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); void (*send_IPI_self)(int vector); u32 disable_esr : 1, dest_mode_logical : 1, x2apic_set_max_apicid : 1, nmi_to_offline_cpu : 1; u32 (*calc_dest_apicid)(unsigned int cpu); /* ICR related functions */ u64 (*icr_read)(void); void (*icr_write)(u32 low, u32 high); /* The limit of the APIC ID space. 
*/ u32 max_apic_id; /* Probe, setup and smpboot functions */ int (*probe)(void); void (*setup)(void); void (*teardown)(void); int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); void (*init_apic_ldr)(void); u32 (*cpu_present_to_apicid)(int mps_cpu); u32 (*get_apic_id)(u32 id); /* wakeup_secondary_cpu */ int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip, unsigned int cpu); /* wakeup secondary CPU using 64-bit wakeup point */ int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip, unsigned int cpu); void (*update_vector)(unsigned int cpu, unsigned int vector, bool set); char *name; }; struct apic_override { void (*eoi)(void); void (*native_eoi)(void); void (*write)(u32 reg, u32 v); u32 (*read)(u32 reg); void (*send_IPI)(int cpu, int vector); void (*send_IPI_mask)(const struct cpumask *mask, int vector); void (*send_IPI_mask_allbutself)(const struct cpumask *msk, int vec); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); void (*send_IPI_self)(int vector); u64 (*icr_read)(void); void (*icr_write)(u32 low, u32 high); int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip, unsigned int cpu); int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip, unsigned int cpu); }; /* * Pointer to the local APIC driver in use on this system (there's * always just one such driver in use - the kernel decides via an * early probing process which one it picks - and then sticks to it): */ extern struct apic *apic; /* * APIC drivers are probed based on how they are listed in the .apicdrivers * section. So the order is important and enforced by the ordering * of different apic driver files in the Makefile. */ #define apic_driver(sym) \ static const struct apic *__apicdrivers_##sym __used \ __aligned(sizeof(struct apic *)) \ __section(".apicdrivers") = { &sym } extern struct apic *__apicdrivers[], *__apicdrivers_end[]; /* * APIC functionality to boot other CPUs - only used on SMP: */ #ifdef CONFIG_SMP extern int lapic_can_unplug_cpu(void); #endif #ifdef CONFIG_X86_LOCAL_APIC extern struct apic_override __x86_apic_override; void __init apic_setup_apic_calls(void); void __init apic_install_driver(struct apic *driver); #define apic_update_callback(_callback, _fn) { \ __x86_apic_override._callback = _fn; \ apic->_callback = _fn; \ static_call_update(apic_call_##_callback, _fn); \ pr_info("APIC: %s() replaced with %ps()\n", #_callback, _fn); \ } #define DECLARE_APIC_CALL(__cb) \ DECLARE_STATIC_CALL(apic_call_##__cb, *apic->__cb) DECLARE_APIC_CALL(eoi); DECLARE_APIC_CALL(native_eoi); DECLARE_APIC_CALL(icr_read); DECLARE_APIC_CALL(icr_write); DECLARE_APIC_CALL(read); DECLARE_APIC_CALL(send_IPI); DECLARE_APIC_CALL(send_IPI_mask); DECLARE_APIC_CALL(send_IPI_mask_allbutself); DECLARE_APIC_CALL(send_IPI_allbutself); DECLARE_APIC_CALL(send_IPI_all); DECLARE_APIC_CALL(send_IPI_self); DECLARE_APIC_CALL(wait_icr_idle); DECLARE_APIC_CALL(wakeup_secondary_cpu); DECLARE_APIC_CALL(wakeup_secondary_cpu_64); DECLARE_APIC_CALL(write); static __always_inline u32 apic_read(u32 reg) { return static_call(apic_call_read)(reg); } static __always_inline void apic_write(u32 reg, u32 val) { static_call(apic_call_write)(reg, val); } static __always_inline void apic_eoi(void) { static_call(apic_call_eoi)(); } static __always_inline void apic_native_eoi(void) { static_call(apic_call_native_eoi)(); } static __always_inline u64 apic_icr_read(void) { return static_call(apic_call_icr_read)(); } static __always_inline void apic_icr_write(u32 low, u32 high) { 
static_call(apic_call_icr_write)(low, high); } static __always_inline void __apic_send_IPI(int cpu, int vector) { static_call(apic_call_send_IPI)(cpu, vector); } static __always_inline void __apic_send_IPI_mask(const struct cpumask *mask, int vector) { static_call_mod(apic_call_send_IPI_mask)(mask, vector); } static __always_inline void __apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { static_call(apic_call_send_IPI_mask_allbutself)(mask, vector); } static __always_inline void __apic_send_IPI_allbutself(int vector) { static_call(apic_call_send_IPI_allbutself)(vector); } static __always_inline void __apic_send_IPI_all(int vector) { static_call(apic_call_send_IPI_all)(vector); } static __always_inline void __apic_send_IPI_self(int vector) { static_call_mod(apic_call_send_IPI_self)(vector); } static __always_inline void apic_wait_icr_idle(void) { static_call_cond(apic_call_wait_icr_idle)(); } static __always_inline u32 safe_apic_wait_icr_idle(void) { return apic->safe_wait_icr_idle ? apic->safe_wait_icr_idle() : 0; } static __always_inline bool apic_id_valid(u32 apic_id) { return apic_id <= apic->max_apic_id; } static __always_inline void apic_update_vector(unsigned int cpu, unsigned int vector, bool set) { if (apic->update_vector) apic->update_vector(cpu, vector, set); } #else /* CONFIG_X86_LOCAL_APIC */ static inline u32 apic_read(u32 reg) { return 0; } static inline void apic_write(u32 reg, u32 val) { } static inline void apic_eoi(void) { } static inline u64 apic_icr_read(void) { return 0; } static inline void apic_icr_write(u32 low, u32 high) { } static inline void apic_wait_icr_idle(void) { } static inline u32 safe_apic_wait_icr_idle(void) { return 0; } static inline void apic_native_eoi(void) { WARN_ON_ONCE(1); } static inline void apic_setup_apic_calls(void) { } static inline void apic_update_vector(unsigned int cpu, unsigned int vector, bool set) { } #define apic_update_callback(_callback, _fn) do { } while (0) #endif /* CONFIG_X86_LOCAL_APIC */ extern void apic_ack_irq(struct irq_data *data); #define APIC_VECTOR_TO_BIT_NUMBER(v) ((unsigned int)(v) % 32) #define APIC_VECTOR_TO_REG_OFFSET(v) ((unsigned int)(v) / 32 * 0x10) static inline bool lapic_vector_set_in_irr(unsigned int vector) { u32 irr = apic_read(APIC_IRR + APIC_VECTOR_TO_REG_OFFSET(vector)); return !!(irr & (1U << APIC_VECTOR_TO_BIT_NUMBER(vector))); } static inline bool is_vector_pending(unsigned int vector) { return lapic_vector_set_in_irr(vector) || pi_pending_this_cpu(vector); } #define MAX_APIC_VECTOR 256 #define APIC_VECTORS_PER_REG 32 /* * Vector states are maintained by APIC in 32-bit registers that are * 16 bytes aligned. The status of each vector is kept in a single * bit. 
*/ static inline int apic_find_highest_vector(void *bitmap) { int vec; u32 *reg; for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG; vec >= 0; vec -= APIC_VECTORS_PER_REG) { reg = bitmap + APIC_VECTOR_TO_REG_OFFSET(vec); if (*reg) return __fls(*reg) + vec; } return -1; } static inline u32 apic_get_reg(void *regs, int reg) { return *((u32 *) (regs + reg)); } static inline void apic_set_reg(void *regs, int reg, u32 val) { *((u32 *) (regs + reg)) = val; } static __always_inline u64 apic_get_reg64(void *regs, int reg) { BUILD_BUG_ON(reg != APIC_ICR); return *((u64 *) (regs + reg)); } static __always_inline void apic_set_reg64(void *regs, int reg, u64 val) { BUILD_BUG_ON(reg != APIC_ICR); *((u64 *) (regs + reg)) = val; } static inline void apic_clear_vector(int vec, void *bitmap) { clear_bit(APIC_VECTOR_TO_BIT_NUMBER(vec), bitmap + APIC_VECTOR_TO_REG_OFFSET(vec)); } static inline void apic_set_vector(int vec, void *bitmap) { set_bit(APIC_VECTOR_TO_BIT_NUMBER(vec), bitmap + APIC_VECTOR_TO_REG_OFFSET(vec)); } static inline int apic_test_vector(int vec, void *bitmap) { return test_bit(APIC_VECTOR_TO_BIT_NUMBER(vec), bitmap + APIC_VECTOR_TO_REG_OFFSET(vec)); } /* * Warm reset vector position: */ #define TRAMPOLINE_PHYS_LOW 0x467 #define TRAMPOLINE_PHYS_HIGH 0x469 #ifdef CONFIG_X86_LOCAL_APIC #include <asm/smp.h> extern struct apic apic_noop; static inline u32 read_apic_id(void) { u32 reg = apic_read(APIC_ID); return apic->get_apic_id(reg); } #ifdef CONFIG_X86_64 typedef int (*wakeup_cpu_handler)(int apicid, unsigned long start_eip); extern int default_acpi_madt_oem_check(char *, char *); extern void x86_64_probe_apic(void); #else static inline int default_acpi_madt_oem_check(char *a, char *b) { return 0; } static inline void x86_64_probe_apic(void) { } #endif extern u32 apic_default_calc_apicid(unsigned int cpu); extern u32 apic_flat_calc_apicid(unsigned int cpu); extern u32 default_cpu_present_to_apicid(int mps_cpu); void apic_send_nmi_to_offline_cpu(unsigned int cpu); #else /* CONFIG_X86_LOCAL_APIC */ static inline u32 read_apic_id(void) { return 0; } #endif /* !CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_SMP void apic_smt_update(void); #else static inline void apic_smt_update(void) { } #endif struct msi_msg; struct irq_cfg; extern void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg, bool dmar); extern void ioapic_zap_locks(void); #endif /* _ASM_X86_APIC_H */
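The IRR/ISR helpers above pack 256 vector bits into eight 32-bit registers spaced 0x10 bytes apart, so vector v lives at register offset (v / 32) * 0x10, bit v % 32. As a quick illustration of that addressing scheme, here is a small, self-contained user-space sketch (not kernel code; all names are local to the example) that mirrors apic_set_vector() and apic_find_highest_vector() on a plain buffer, using __builtin_clz() as a stand-in for the kernel's __fls():

/* Standalone sketch of the APIC vector bitmap layout; compile with any C compiler. */
#include <stdint.h>
#include <stdio.h>

#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32
#define VECTOR_TO_BIT_NUMBER(v)		((unsigned int)(v) % 32)
#define VECTOR_TO_REG_OFFSET(v)		((unsigned int)(v) / 32 * 0x10)

static void set_vector(int vec, uint8_t *bitmap)
{
	uint32_t *reg = (uint32_t *)(bitmap + VECTOR_TO_REG_OFFSET(vec));

	*reg |= 1u << VECTOR_TO_BIT_NUMBER(vec);
}

static int find_highest_vector(uint8_t *bitmap)
{
	for (int vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG; vec >= 0;
	     vec -= APIC_VECTORS_PER_REG) {
		uint32_t reg = *(uint32_t *)(bitmap + VECTOR_TO_REG_OFFSET(vec));

		/* 31 - clz(reg) is the user-space stand-in for __fls(reg). */
		if (reg)
			return (31 - __builtin_clz(reg)) + vec;
	}
	return -1;
}

int main(void)
{
	/* Eight "registers" spaced 0x10 bytes apart, as in the real layout. */
	uint32_t regs[MAX_APIC_VECTOR / APIC_VECTORS_PER_REG * 0x10 / 4] = { 0 };
	uint8_t *bitmap = (uint8_t *)regs;

	set_vector(0x31, bitmap);
	set_vector(0xec, bitmap);

	/* Prints 236 (0xec), the highest vector that is set. */
	printf("highest vector: %d\n", find_highest_vector(bitmap));
	return 0;
}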
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
#define _LINUX_CONTEXT_TRACKING_STATE_H

#include <linux/percpu.h>
#include <linux/static_key.h>
#include <linux/context_tracking_irq.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define CT_NESTING_IRQ_NONIDLE	((LONG_MAX / 2) + 1)

enum ctx_state {
	CT_STATE_DISABLED	= -1,	/* returned by ct_state() if unknown */
	CT_STATE_KERNEL		= 0,
	CT_STATE_IDLE		= 1,
	CT_STATE_USER		= 2,
	CT_STATE_GUEST		= 3,
	CT_STATE_MAX		= 4,
};

struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	int recursion;
#endif
#ifdef CONFIG_CONTEXT_TRACKING
	atomic_t state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
	long nesting;		/* Track process nesting level. */
	long nmi_nesting;	/* Track irq/NMI nesting level. */
#endif
};

/*
 * We cram two different things within the same atomic variable:
 *
 *	CT_RCU_WATCHING_START		CT_STATE_START
 *		|			|
 *		v			v
 *	MSB [ RCU watching counter ][ context_state ] LSB
 *		^			^
 *		|			|
 *	CT_RCU_WATCHING_END		CT_STATE_END
 *
 * Bits are used from the LSB upwards, so unused bits (if any) will always be in
 * upper bits of the variable.
 */
#ifdef CONFIG_CONTEXT_TRACKING
#define CT_SIZE		(sizeof(((struct context_tracking *)0)->state) * BITS_PER_BYTE)

#define CT_STATE_WIDTH	bits_per(CT_STATE_MAX - 1)
#define CT_STATE_START	0
#define CT_STATE_END	(CT_STATE_START + CT_STATE_WIDTH - 1)

#define CT_RCU_WATCHING_MAX_WIDTH	(CT_SIZE - CT_STATE_WIDTH)
#define CT_RCU_WATCHING_WIDTH		(IS_ENABLED(CONFIG_RCU_DYNTICKS_TORTURE) ?
2 : CT_RCU_WATCHING_MAX_WIDTH) #define CT_RCU_WATCHING_START (CT_STATE_END + 1) #define CT_RCU_WATCHING_END (CT_RCU_WATCHING_START + CT_RCU_WATCHING_WIDTH - 1) #define CT_RCU_WATCHING BIT(CT_RCU_WATCHING_START) #define CT_STATE_MASK GENMASK(CT_STATE_END, CT_STATE_START) #define CT_RCU_WATCHING_MASK GENMASK(CT_RCU_WATCHING_END, CT_RCU_WATCHING_START) #define CT_UNUSED_WIDTH (CT_RCU_WATCHING_MAX_WIDTH - CT_RCU_WATCHING_WIDTH) static_assert(CT_STATE_WIDTH + CT_RCU_WATCHING_WIDTH + CT_UNUSED_WIDTH == CT_SIZE); DECLARE_PER_CPU(struct context_tracking, context_tracking); #endif /* CONFIG_CONTEXT_TRACKING */ #ifdef CONFIG_CONTEXT_TRACKING_USER static __always_inline int __ct_state(void) { return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK; } #endif #ifdef CONFIG_CONTEXT_TRACKING_IDLE static __always_inline int ct_rcu_watching(void) { return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK; } static __always_inline int ct_rcu_watching_cpu(int cpu) { struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK; } static __always_inline int ct_rcu_watching_cpu_acquire(int cpu) { struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK; } static __always_inline long ct_nesting(void) { return __this_cpu_read(context_tracking.nesting); } static __always_inline long ct_nesting_cpu(int cpu) { struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); return ct->nesting; } static __always_inline long ct_nmi_nesting(void) { return __this_cpu_read(context_tracking.nmi_nesting); } static __always_inline long ct_nmi_nesting_cpu(int cpu) { struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); return ct->nmi_nesting; } #endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */ #ifdef CONFIG_CONTEXT_TRACKING_USER extern struct static_key_false context_tracking_key; static __always_inline bool context_tracking_enabled(void) { return static_branch_unlikely(&context_tracking_key); } static __always_inline bool context_tracking_enabled_cpu(int cpu) { return context_tracking_enabled() && per_cpu(context_tracking.active, cpu); } static __always_inline bool context_tracking_enabled_this_cpu(void) { return context_tracking_enabled() && __this_cpu_read(context_tracking.active); } /** * ct_state() - return the current context tracking state if known * * Returns the current cpu's context tracking state if context tracking * is enabled. If context tracking is disabled, returns * CT_STATE_DISABLED. This should be used primarily for debugging. */ static __always_inline int ct_state(void) { int ret; if (!context_tracking_enabled()) return CT_STATE_DISABLED; preempt_disable(); ret = __ct_state(); preempt_enable(); return ret; } #else static __always_inline bool context_tracking_enabled(void) { return false; } static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; } static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; } #endif /* CONFIG_CONTEXT_TRACKING_USER */ #endif
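Since the state/counter split described above is easy to misread when flattened, here is a minimal user-space sketch (not kernel code; constants are recomputed locally with CT_STATE_WIDTH hard-coded to 2 for CT_STATE_MAX == 4) showing how the low bits carry the context state while everything above them behaves as the RCU watching counter:

/* Standalone sketch of packing context state and RCU watching counter in one word. */
#include <stdint.h>
#include <stdio.h>

#define CT_STATE_WIDTH		2			/* bits_per(CT_STATE_MAX - 1) */
#define CT_STATE_MASK		((1u << CT_STATE_WIDTH) - 1)
#define CT_RCU_WATCHING		(1u << CT_STATE_WIDTH)	/* LSB of the counter */
#define CT_RCU_WATCHING_MASK	(~CT_STATE_MASK)

enum ctx_state { CT_STATE_KERNEL = 0, CT_STATE_IDLE = 1, CT_STATE_USER = 2, CT_STATE_GUEST = 3 };

int main(void)
{
	uint32_t ct = CT_STATE_KERNEL;	/* counter 0, state KERNEL */

	/* Enter user mode: replace the state bits and bump the counter once. */
	ct = (ct & CT_RCU_WATCHING_MASK) + CT_RCU_WATCHING + CT_STATE_USER;

	printf("state   = %u\n", ct & CT_STATE_MASK);				/* 2 (CT_STATE_USER) */
	printf("counter = %u\n", (ct & CT_RCU_WATCHING_MASK) >> CT_STATE_WIDTH); /* 1 */
	return 0;
}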
// SPDX-License-Identifier: GPL-2.0
/*
  Generic support for BUG()

  This respects the following config options:

  CONFIG_BUG - emit BUG traps. Nothing happens without this.
  CONFIG_GENERIC_BUG - enable this code.
  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit relative pointers
	for bug_addr and file
  CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG

  CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
  (though they're generally always on).

  CONFIG_GENERIC_BUG is set by each architecture using this code.

  To use this, your architecture must:

  1. Set up the config options:
     - Enable CONFIG_GENERIC_BUG if CONFIG_BUG

  2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
     - Define HAVE_ARCH_BUG
     - Implement BUG() to generate a faulting instruction
     - NOTE: struct bug_entry does not have "file" or "line" entries
       when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
       the values accordingly.

  3. Implement the trap
     - In the illegal instruction trap handler (typically), verify
       that the fault was in kernel mode, and call report_bug()
     - report_bug() will return whether it was a false alarm, a warning,
       or an actual bug.
     - You must implement the is_valid_bugaddr(bugaddr) callback which
       returns true if the eip is a real kernel address, and it points
       to the expected BUG trap instruction.
Jeremy Fitzhardinge <jeremy@goop.org> 2006 */ #define pr_fmt(fmt) fmt #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bug.h> #include <linux/sched.h> #include <linux/rculist.h> #include <linux/ftrace.h> #include <linux/context_tracking.h> extern struct bug_entry __start___bug_table[], __stop___bug_table[]; static inline unsigned long bug_addr(const struct bug_entry *bug) { #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp; #else return bug->bug_addr; #endif } #ifdef CONFIG_MODULES /* Updates are protected by module mutex */ static LIST_HEAD(module_bug_list); static struct bug_entry *module_find_bug(unsigned long bugaddr) { struct bug_entry *bug; struct module *mod; guard(rcu)(); list_for_each_entry_rcu(mod, &module_bug_list, bug_list) { unsigned i; bug = mod->bug_table; for (i = 0; i < mod->num_bugs; ++i, ++bug) if (bugaddr == bug_addr(bug)) return bug; } return NULL; } void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { char *secstrings; unsigned int i; mod->bug_table = NULL; mod->num_bugs = 0; /* Find the __bug_table section, if present */ secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (i = 1; i < hdr->e_shnum; i++) { if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table")) continue; mod->bug_table = (void *) sechdrs[i].sh_addr; mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry); break; } /* * Strictly speaking this should have a spinlock to protect against * traversals, but since we only traverse on BUG()s, a spinlock * could potentially lead to deadlock and thus be counter-productive. * Thus, this uses RCU to safely manipulate the bug list, since BUG * must run in non-interruptive state. */ list_add_rcu(&mod->bug_list, &module_bug_list); } void module_bug_cleanup(struct module *mod) { list_del_rcu(&mod->bug_list); } #else static inline struct bug_entry *module_find_bug(unsigned long bugaddr) { return NULL; } #endif void bug_get_file_line(struct bug_entry *bug, const char **file, unsigned int *line) { #ifdef CONFIG_DEBUG_BUGVERBOSE #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS *file = (const char *)&bug->file_disp + bug->file_disp; #else *file = bug->file; #endif *line = bug->line; #else *file = NULL; *line = 0; #endif } static const char *bug_get_format(struct bug_entry *bug) { const char *format = NULL; #ifdef HAVE_ARCH_BUG_FORMAT #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS /* * Allow an architecture to: * - relative encode NULL (difficult vs KASLR); * - use a literal 0 (there are no valid objects inside * the __bug_table itself to refer to after all); * - use an empty string. 
*/ if (bug->format_disp) format = (const char *)&bug->format_disp + bug->format_disp; if (format && format[0] == '\0') format = NULL; #else format = bug->format; #endif #endif return format; } struct bug_entry *find_bug(unsigned long bugaddr) { struct bug_entry *bug; for (bug = __start___bug_table; bug < __stop___bug_table; ++bug) if (bugaddr == bug_addr(bug)) return bug; return module_find_bug(bugaddr); } __diag_push(); __diag_ignore(GCC, all, "-Wsuggest-attribute=format", "Not a valid __printf() conversion candidate."); static void __warn_printf(const char *fmt, struct pt_regs *regs) { if (!fmt) return; #ifdef HAVE_ARCH_BUG_FORMAT_ARGS if (regs) { struct arch_va_list _args; va_list *args = __warn_args(&_args, regs); if (args) { vprintk(fmt, *args); return; } } #endif printk("%s", fmt); } __diag_pop(); static enum bug_trap_type __report_bug(struct bug_entry *bug, unsigned long bugaddr, struct pt_regs *regs) { bool warning, once, done, no_cut, has_args; const char *file, *fmt; unsigned line; if (!bug) { if (!is_valid_bugaddr(bugaddr)) return BUG_TRAP_TYPE_NONE; bug = find_bug(bugaddr); if (!bug) return BUG_TRAP_TYPE_NONE; } disable_trace_on_warning(); bug_get_file_line(bug, &file, &line); fmt = bug_get_format(bug); warning = bug->flags & BUGFLAG_WARNING; once = bug->flags & BUGFLAG_ONCE; done = bug->flags & BUGFLAG_DONE; no_cut = bug->flags & BUGFLAG_NO_CUT_HERE; has_args = bug->flags & BUGFLAG_ARGS; if (warning && once) { if (done) return BUG_TRAP_TYPE_WARN; /* * Since this is the only store, concurrency is not an issue. */ bug->flags |= BUGFLAG_DONE; } /* * BUG() and WARN_ON() families don't print a custom debug message * before triggering the exception handler, so we must add the * "cut here" line now. WARN() issues its own "cut here" before the * extra debugging message it writes before triggering the handler. */ if (!no_cut) { printk(KERN_DEFAULT CUT_HERE); __warn_printf(fmt, has_args ? regs : NULL); } if (warning) { /* this is a WARN_ON rather than BUG/BUG_ON */ __warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs, NULL); return BUG_TRAP_TYPE_WARN; } if (file) pr_crit("kernel BUG at %s:%u!\n", file, line); else pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n", (void *)bugaddr); return BUG_TRAP_TYPE_BUG; } enum bug_trap_type report_bug_entry(struct bug_entry *bug, struct pt_regs *regs) { enum bug_trap_type ret; bool rcu = false; rcu = warn_rcu_enter(); ret = __report_bug(bug, bug_addr(bug), regs); warn_rcu_exit(rcu); return ret; } enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) { enum bug_trap_type ret; bool rcu = false; rcu = warn_rcu_enter(); ret = __report_bug(NULL, bugaddr, regs); warn_rcu_exit(rcu); return ret; } static void clear_once_table(struct bug_entry *start, struct bug_entry *end) { struct bug_entry *bug; for (bug = start; bug < end; bug++) bug->flags &= ~BUGFLAG_DONE; } void generic_bug_clear_once(void) { #ifdef CONFIG_MODULES struct module *mod; scoped_guard(rcu) { list_for_each_entry_rcu(mod, &module_bug_list, bug_list) clear_once_table(mod->bug_table, mod->bug_table + mod->num_bugs); } #endif clear_once_table(__start___bug_table, __stop___bug_table); }
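The bug_addr() and bug_get_file_line() helpers above hinge on the relative-pointer encoding selected by CONFIG_GENERIC_BUG_RELATIVE_POINTERS: each entry stores a 32-bit displacement from the field itself rather than a full 64-bit absolute address. Here is a tiny self-contained sketch of that round trip (hypothetical names, not the kernel structures; it assumes the target lies within +/-2 GiB of the entry, as the kernel guarantees for its sections):

/* Standalone demonstration of the relative-pointer encode/decode trick. */
#include <stdint.h>
#include <stdio.h>

struct rel_entry {
	int32_t addr_disp;	/* signed offset from this field to the target */
};

static void rel_encode(struct rel_entry *e, const void *target)
{
	e->addr_disp = (int32_t)((intptr_t)target - (intptr_t)&e->addr_disp);
}

static const void *rel_decode(const struct rel_entry *e)
{
	/* Mirrors bug_addr(): absolute address = &field + field. */
	return (const void *)((intptr_t)&e->addr_disp + e->addr_disp);
}

static const char file_name[] = "lib/example.c";

int main(void)
{
	struct rel_entry e;

	rel_encode(&e, file_name);
	/* Round-trips back to the original pointer while storing only 4 bytes. */
	printf("%s\n", (const char *)rel_decode(&e));
	return 0;
}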
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * 9P Client Definitions
 *
 * Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
 */

#ifndef NET_9P_CLIENT_H
#define NET_9P_CLIENT_H

#include <linux/utsname.h>
#include <linux/idr.h>
#include <linux/tracepoint-defs.h>

/* Number of requests per row */
#define P9_ROW_MAXTAG 255

/* DEFAULT MSIZE = 32 pages worth of payload + P9_HDRSZ +
 * room for write (16 extra) or read (11 extra) operands.
 */
#define DEFAULT_MSIZE ((128 * 1024) + P9_IOHDRSZ)

/** enum p9_proto_versions - 9P protocol versions
 * @p9_proto_legacy: 9P Legacy mode, pre-9P2000.u
 * @p9_proto_2000u: 9P2000.u extension
 * @p9_proto_2000L: 9P2000.L extension
 */
enum p9_proto_versions {
	p9_proto_legacy,
	p9_proto_2000u,
	p9_proto_2000L,
};

/**
 * enum p9_trans_status - different states of underlying transports
 * @Connected: transport is connected and healthy
 * @Disconnected: transport has been disconnected
 * @Hung: transport is connected but wedged
 *
 * This enumeration details the various states a transport
 * instantiation can be in.
*/ enum p9_trans_status { Connected, BeginDisconnect, Disconnected, Hung, }; /** * enum p9_req_status_t - status of a request * @REQ_STATUS_ALLOC: request has been allocated but not sent * @REQ_STATUS_UNSENT: request waiting to be sent * @REQ_STATUS_SENT: request sent to server * @REQ_STATUS_RCVD: response received from server * @REQ_STATUS_FLSHD: request has been flushed * @REQ_STATUS_ERROR: request encountered an error on the client side */ enum p9_req_status_t { REQ_STATUS_ALLOC, REQ_STATUS_UNSENT, REQ_STATUS_SENT, REQ_STATUS_RCVD, REQ_STATUS_FLSHD, REQ_STATUS_ERROR, }; /** * struct p9_req_t - request slots * @status: status of this request slot * @t_err: transport error * @wq: wait_queue for the client to block on for this request * @tc: the request fcall structure * @rc: the response fcall structure * @req_list: link for higher level objects to chain requests */ struct p9_req_t { int status; int t_err; refcount_t refcount; wait_queue_head_t wq; struct p9_fcall tc; struct p9_fcall rc; struct list_head req_list; }; /** * struct p9_client - per client instance state * @lock: protect @fids and @reqs * @msize: maximum data size negotiated by protocol * @proto_version: 9P protocol version to use * @trans_mod: module API instantiated with this client * @status: connection state * @trans: tranport instance state and API * @fids: All active FID handles * @reqs: All active requests. * @name: node name used as client id * * The client structure is used to keep track of various per-client * state that has been instantiated. */ struct p9_client { spinlock_t lock; unsigned int msize; unsigned char proto_version; struct p9_trans_module *trans_mod; enum p9_trans_status status; void *trans; struct kmem_cache *fcall_cache; union { struct { int rfd; int wfd; } fd; struct { u16 port; bool privport; } tcp; } trans_opts; struct idr fids; struct idr reqs; char name[__NEW_UTS_LEN + 1]; }; /** * struct p9_fd_opts - holds client options during parsing * @msize: maximum data size negotiated by protocol * @prot-Oversion: 9P protocol version to use * @trans_mod: module API instantiated with this client * * These parsed options get transferred into client in * apply_client_options() */ struct p9_client_opts { unsigned int msize; unsigned char proto_version; struct p9_trans_module *trans_mod; }; /** * struct p9_fd_opts - per-transport options for fd transport * @rfd: file descriptor for reading (trans=fd) * @wfd: file descriptor for writing (trans=fd) * @port: port to connect to (trans=tcp) * @privport: port is privileged */ struct p9_fd_opts { int rfd; int wfd; u16 port; bool privport; }; /** * struct p9_rdma_opts - Collection of mount options for rdma transport * @port: port of connection * @privport: Whether a privileged port may be used * @sq_depth: The requested depth of the SQ. This really doesn't need * to be any deeper than the number of threads used in the client * @rq_depth: The depth of the RQ. 
Should be greater than or equal to SQ depth * @timeout: Time to wait in msecs for CM events */ struct p9_rdma_opts { short port; bool privport; int sq_depth; int rq_depth; long timeout; }; /** * struct p9_session_opts - holds parsed options for v9fs_session_info * @flags: session options of type &p9_session_flags * @nodev: set to 1 to disable device mapping * @debug: debug level * @afid: authentication handle * @cache: cache mode of type &p9_cache_bits * @cachetag: the tag of the cache associated with this session * @uname: string user name to mount hierarchy as * @aname: mount specifier for remote hierarchy * @dfltuid: default numeric userid to mount hierarchy as * @dfltgid: default numeric groupid to mount hierarchy as * @uid: if %V9FS_ACCESS_SINGLE, the numeric uid which mounted the hierarchy * @session_lock_timeout: retry interval for blocking locks * * This strucure holds options which are parsed and will be transferred * to the v9fs_session_info structure when mounted, and therefore largely * duplicates struct v9fs_session_info. */ struct p9_session_opts { unsigned int flags; unsigned char nodev; unsigned short debug; unsigned int afid; unsigned int cache; #ifdef CONFIG_9P_FSCACHE char *cachetag; #endif char *uname; char *aname; kuid_t dfltuid; kgid_t dfltgid; kuid_t uid; long session_lock_timeout; }; /* Used by mount API to store parsed mount options */ struct v9fs_context { struct p9_client_opts client_opts; struct p9_fd_opts fd_opts; struct p9_rdma_opts rdma_opts; struct p9_session_opts session_opts; }; /** * struct p9_fid - file system entity handle * @clnt: back pointer to instantiating &p9_client * @fid: numeric identifier for this handle * @mode: current mode of this fid (enum?) * @qid: the &p9_qid server identifier this handle points to * @iounit: the server reported maximum transaction size for this file * @uid: the numeric uid of the local user who owns this handle * @rdir: readdir accounting structure (allocated on demand) * @dlist: per-dentry fid tracking * * TODO: This needs lots of explanation. 
*/ enum fid_source { FID_FROM_OTHER, FID_FROM_INODE, FID_FROM_DENTRY, }; struct p9_fid { struct p9_client *clnt; u32 fid; refcount_t count; int mode; struct p9_qid qid; u32 iounit; kuid_t uid; void *rdir; struct hlist_node dlist; /* list of all fids attached to a dentry */ struct hlist_node ilist; }; /** * struct p9_dirent - directory entry structure * @qid: The p9 server qid for this dirent * @d_off: offset to the next dirent * @d_type: type of file * @d_name: file name */ struct p9_dirent { struct p9_qid qid; u64 d_off; unsigned char d_type; char d_name[256]; }; struct iov_iter; int p9_show_client_options(struct seq_file *m, struct p9_client *clnt); int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb); int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, const char *name); int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name, struct p9_fid *newdirfid, const char *new_name); struct p9_client *p9_client_create(struct fs_context *fc); void p9_client_destroy(struct p9_client *clnt); void p9_client_disconnect(struct p9_client *clnt); void p9_client_begin_disconnect(struct p9_client *clnt); struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, const char *uname, kuid_t n_uname, const char *aname); struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, const unsigned char * const *wnames, int clone); int p9_client_open(struct p9_fid *fid, int mode); int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode, char *extension); int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, const char *newname); int p9_client_symlink(struct p9_fid *fid, const char *name, const char *symname, kgid_t gid, struct p9_qid *qid); int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags, u32 mode, kgid_t gid, struct p9_qid *qid); int p9_client_clunk(struct p9_fid *fid); int p9_client_fsync(struct p9_fid *fid, int datasync); int p9_client_remove(struct p9_fid *fid); int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags); int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err); int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err); int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err); struct netfs_io_subrequest; void p9_client_write_subreq(struct netfs_io_subrequest *subreq); int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset); int p9dirent_read(struct p9_client *clnt, char *buf, int len, struct p9_dirent *dirent); struct p9_wstat *p9_client_stat(struct p9_fid *fid); int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst); int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr); struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, u64 request_mask); int p9_client_mknod_dotl(struct p9_fid *oldfid, const char *name, int mode, dev_t rdev, kgid_t gid, struct p9_qid *qid); int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode, kgid_t gid, struct p9_qid *qid); int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status); int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl); void p9_fcall_fini(struct p9_fcall *fc); struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag); static inline void p9_req_get(struct p9_req_t *r) { refcount_inc(&r->refcount); } static inline int p9_req_try_get(struct p9_req_t *r) { return refcount_inc_not_zero(&r->refcount); } int p9_req_put(struct 
p9_client *c, struct p9_req_t *r); /* We cannot have the real tracepoints in header files, * use a wrapper function */ DECLARE_TRACEPOINT(9p_fid_ref); void do_trace_9p_fid_get(struct p9_fid *fid); void do_trace_9p_fid_put(struct p9_fid *fid); /* fid reference counting helpers: * - fids used for any length of time should always be referenced through * p9_fid_get(), and released with p9_fid_put() * - v9fs_fid_lookup() or similar will automatically call get for you * and also require a put * - the *_fid_add() helpers will stash the fid in the inode, * at which point it is the responsibility of evict_inode() * to call the put * - the last put will automatically send a clunk to the server */ static inline struct p9_fid *p9_fid_get(struct p9_fid *fid) { if (tracepoint_enabled(9p_fid_ref)) do_trace_9p_fid_get(fid); refcount_inc(&fid->count); return fid; } static inline int p9_fid_put(struct p9_fid *fid) { if (!fid || IS_ERR(fid)) return 0; if (tracepoint_enabled(9p_fid_ref)) do_trace_9p_fid_put(fid); if (!refcount_dec_and_test(&fid->count)) return 0; return p9_client_clunk(fid); } void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status); int p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type, int16_t *tag, int rewind); int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st); void p9stat_free(struct p9_wstat *stbuf); int p9_is_proto_dotu(struct p9_client *clnt); int p9_is_proto_dotl(struct p9_client *clnt); struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid, const char *attr_name, u64 *attr_size); int p9_client_xattrcreate(struct p9_fid *fid, const char *name, u64 attr_size, int flags); int p9_client_readlink(struct p9_fid *fid, char **target); int p9_client_init(void); void p9_client_exit(void); #endif /* NET_9P_CLIENT_H */
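The fid reference-counting rules spelled out in the comment above reduce to a small pattern: every user takes a reference with p9_fid_get(), and only the final p9_fid_put() sends the clunk to the server. The following user-space sketch mirrors that pattern under assumed local names (fid_get()/fid_put() are stand-ins, not the real helpers, and the clunk is simulated by a printf and free):

/* Standalone sketch of the "last put clunks" reference-counting pattern. */
#include <stdio.h>
#include <stdlib.h>

struct fid {
	unsigned int id;
	unsigned int count;	/* stands in for refcount_t */
};

static struct fid *fid_get(struct fid *fid)
{
	fid->count++;
	return fid;
}

static void fid_put(struct fid *fid)
{
	if (!fid || --fid->count)
		return;
	/* Last reference dropped: this is where p9_client_clunk() would run. */
	printf("clunking fid %u\n", fid->id);
	free(fid);
}

int main(void)
{
	struct fid *fid = calloc(1, sizeof(*fid));

	if (!fid)
		return 1;
	fid->id = 42;
	fid->count = 1;		/* creator's reference */
	fid_get(fid);		/* e.g. stashed on an inode by a *_fid_add() helper */

	fid_put(fid);		/* creator done: count 2 -> 1 */
	fid_put(fid);		/* inode evicted: count 1 -> 0, clunk */
	return 0;
}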
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which manage clock event devices.
* * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner */ #include <linux/clockchips.h> #include <linux/hrtimer.h> #include <linux/init.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/device.h> #include "tick-internal.h" /* The registered clock event devices */ static LIST_HEAD(clockevent_devices); static LIST_HEAD(clockevents_released); /* Protection for the above */ static DEFINE_RAW_SPINLOCK(clockevents_lock); /* Protection for unbind operations */ static DEFINE_MUTEX(clockevents_mutex); struct ce_unbind { struct clock_event_device *ce; int res; }; static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt, bool ismax) { u64 clc = (u64) latch << evt->shift; u64 rnd; if (WARN_ON(!evt->mult)) evt->mult = 1; rnd = (u64) evt->mult - 1; /* * Upper bound sanity check. If the backwards conversion is * not equal latch, we know that the above shift overflowed. */ if ((clc >> evt->shift) != (u64)latch) clc = ~0ULL; /* * Scaled math oddities: * * For mult <= (1 << shift) we can safely add mult - 1 to * prevent integer rounding loss. So the backwards conversion * from nsec to device ticks will be correct. * * For mult > (1 << shift), i.e. device frequency is > 1GHz we * need to be careful. Adding mult - 1 will result in a value * which when converted back to device ticks can be larger * than latch by up to (mult - 1) >> shift. For the min_delta * calculation we still want to apply this in order to stay * above the minimum device ticks limit. For the upper limit * we would end up with a latch value larger than the upper * limit of the device, so we omit the add to stay below the * device upper boundary. * * Also omit the add if it would overflow the u64 boundary. */ if ((~0ULL - clc > rnd) && (!ismax || evt->mult <= (1ULL << evt->shift))) clc += rnd; do_div(clc, evt->mult); /* Deltas less than 1usec are pointless noise */ return clc > 1000 ? clc : 1000; } /** * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds * @latch: value to convert * @evt: pointer to clock event device descriptor * * Math helper, returns latch value converted to nanoseconds (bound checked) */ u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) { return cev_delta2ns(latch, evt, false); } EXPORT_SYMBOL_GPL(clockevent_delta2ns); static int __clockevents_switch_state(struct clock_event_device *dev, enum clock_event_state state) { if (dev->features & CLOCK_EVT_FEAT_DUMMY) return 0; /* Transition with new state-specific callbacks */ switch (state) { case CLOCK_EVT_STATE_DETACHED: /* The clockevent device is getting replaced. Shut it down. 
*/ case CLOCK_EVT_STATE_SHUTDOWN: if (dev->set_state_shutdown) return dev->set_state_shutdown(dev); return 0; case CLOCK_EVT_STATE_PERIODIC: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) return -ENOSYS; if (dev->set_state_periodic) return dev->set_state_periodic(dev); return 0; case CLOCK_EVT_STATE_ONESHOT: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return -ENOSYS; if (dev->set_state_oneshot) return dev->set_state_oneshot(dev); return 0; case CLOCK_EVT_STATE_ONESHOT_STOPPED: /* Core internal bug */ if (WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n", clockevent_get_state(dev))) return -EINVAL; if (dev->set_state_oneshot_stopped) return dev->set_state_oneshot_stopped(dev); else return -ENOSYS; default: return -ENOSYS; } } /** * clockevents_switch_state - set the operating state of a clock event device * @dev: device to modify * @state: new state * * Must be called with interrupts disabled ! */ void clockevents_switch_state(struct clock_event_device *dev, enum clock_event_state state) { if (clockevent_get_state(dev) != state) { if (__clockevents_switch_state(dev, state)) return; clockevent_set_state(dev, state); /* * A nsec2cyc multiplicator of 0 is invalid and we'd crash * on it, so fix it up and emit a warning: */ if (clockevent_state_oneshot(dev)) { if (WARN_ON(!dev->mult)) dev->mult = 1; } } } /** * clockevents_shutdown - shutdown the device and clear next_event * @dev: device to shutdown */ void clockevents_shutdown(struct clock_event_device *dev) { clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); dev->next_event = KTIME_MAX; } /** * clockevents_tick_resume - Resume the tick device before using it again * @dev: device to resume */ int clockevents_tick_resume(struct clock_event_device *dev) { int ret = 0; if (dev->tick_resume) ret = dev->tick_resume(dev); return ret; } #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST /* Limit min_delta to a jiffy */ #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ) /** * clockevents_increase_min_delta - raise minimum delta of a clock event device * @dev: device to increase the minimum delta * * Returns 0 on success, -ETIME when the minimum delta reached the limit. */ static int clockevents_increase_min_delta(struct clock_event_device *dev) { /* Nothing to do if we already reached the limit */ if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { printk_deferred(KERN_WARNING "CE: Reprogramming failure. Giving up\n"); dev->next_event = KTIME_MAX; return -ETIME; } if (dev->min_delta_ns < 5000) dev->min_delta_ns = 5000; else dev->min_delta_ns += dev->min_delta_ns >> 1; if (dev->min_delta_ns > MIN_DELTA_LIMIT) dev->min_delta_ns = MIN_DELTA_LIMIT; printk_deferred(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n", dev->name ? dev->name : "?", (unsigned long long) dev->min_delta_ns); return 0; } /** * clockevents_program_min_delta - Set clock event device to the minimum delay. * @dev: device to program * * Returns 0 on success, -ETIME when the retry loop failed. */ static int clockevents_program_min_delta(struct clock_event_device *dev) { unsigned long long clc; int64_t delta; int i; for (i = 0;;) { delta = dev->min_delta_ns; dev->next_event = ktime_add_ns(ktime_get(), delta); if (clockevent_state_shutdown(dev)) return 0; dev->retries++; clc = ((unsigned long long) delta * dev->mult) >> dev->shift; if (dev->set_next_event((unsigned long) clc, dev) == 0) return 0; if (++i > 2) { /* * We tried 3 times to program the device with the * given min_delta_ns. 
Try to increase the minimum * delta, if that fails as well get out of here. */ if (clockevents_increase_min_delta(dev)) return -ETIME; i = 0; } } } #else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ /** * clockevents_program_min_delta - Set clock event device to the minimum delay. * @dev: device to program * * Returns 0 on success, -ETIME when the retry loop failed. */ static int clockevents_program_min_delta(struct clock_event_device *dev) { unsigned long long clc; int64_t delta = 0; int i; for (i = 0; i < 10; i++) { delta += dev->min_delta_ns; dev->next_event = ktime_add_ns(ktime_get(), delta); if (clockevent_state_shutdown(dev)) return 0; dev->retries++; clc = ((unsigned long long) delta * dev->mult) >> dev->shift; if (dev->set_next_event((unsigned long) clc, dev) == 0) return 0; } return -ETIME; } #endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ /** * clockevents_program_event - Reprogram the clock event device. * @dev: device to program * @expires: absolute expiry time (monotonic clock) * @force: program minimum delay if expires can not be set * * Returns 0 on success, -ETIME when the event is in the past. */ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, bool force) { unsigned long long clc; int64_t delta; int rc; if (WARN_ON_ONCE(expires < 0)) return -ETIME; dev->next_event = expires; if (clockevent_state_shutdown(dev)) return 0; /* We must be in ONESHOT state here */ WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n", clockevent_get_state(dev)); /* Shortcut for clockevent devices that can deal with ktime. */ if (dev->features & CLOCK_EVT_FEAT_KTIME) return dev->set_next_ktime(expires, dev); delta = ktime_to_ns(ktime_sub(expires, ktime_get())); if (delta <= 0) return force ? clockevents_program_min_delta(dev) : -ETIME; delta = min(delta, (int64_t) dev->max_delta_ns); delta = max(delta, (int64_t) dev->min_delta_ns); clc = ((unsigned long long) delta * dev->mult) >> dev->shift; rc = dev->set_next_event((unsigned long) clc, dev); return (rc && force) ? clockevents_program_min_delta(dev) : rc; } /* * Called after a clockevent has been added which might * have replaced a current regular or broadcast device. A * released normal device might be a suitable replacement * for the current broadcast device. Similarly a released * broadcast device might be a suitable replacement for a * normal device. */ static void clockevents_notify_released(void) { struct clock_event_device *dev; /* * Keep iterating as long as tick_check_new_device() * replaces a device. */ while (!list_empty(&clockevents_released)) { dev = list_entry(clockevents_released.next, struct clock_event_device, list); list_move(&dev->list, &clockevent_devices); tick_check_new_device(dev); } } /* * Try to install a replacement clock event device */ static int clockevents_replace(struct clock_event_device *ced) { struct clock_event_device *dev, *newdev = NULL; list_for_each_entry(dev, &clockevent_devices, list) { if (dev == ced || !clockevent_state_detached(dev)) continue; if (!tick_check_replacement(newdev, dev)) continue; if (!try_module_get(dev->owner)) continue; if (newdev) module_put(newdev->owner); newdev = dev; } if (newdev) { tick_install_replacement(newdev); list_del_init(&ced->list); } return newdev ? 0 : -EBUSY; } /* * Called with clockevents_mutex and clockevents_lock held */ static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu) { /* Fast track. 
Device is unused */ if (clockevent_state_detached(ced)) { list_del_init(&ced->list); return 0; } return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY; } /* * SMP function call to unbind a device */ static void __clockevents_unbind(void *arg) { struct ce_unbind *cu = arg; int res; raw_spin_lock(&clockevents_lock); res = __clockevents_try_unbind(cu->ce, smp_processor_id()); if (res == -EAGAIN) res = clockevents_replace(cu->ce); cu->res = res; raw_spin_unlock(&clockevents_lock); } /* * Issues smp function call to unbind a per cpu device. Called with * clockevents_mutex held. */ static int clockevents_unbind(struct clock_event_device *ced, int cpu) { struct ce_unbind cu = { .ce = ced, .res = -ENODEV }; smp_call_function_single(cpu, __clockevents_unbind, &cu, 1); return cu.res; } /* * Unbind a clockevents device. */ int clockevents_unbind_device(struct clock_event_device *ced, int cpu) { int ret; mutex_lock(&clockevents_mutex); ret = clockevents_unbind(ced, cpu); mutex_unlock(&clockevents_mutex); return ret; } EXPORT_SYMBOL_GPL(clockevents_unbind_device); /** * clockevents_register_device - register a clock event device * @dev: device to register */ void clockevents_register_device(struct clock_event_device *dev) { unsigned long flags; /* Initialize state to DETACHED */ clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED); if (!dev->cpumask) { WARN_ON(num_possible_cpus() > 1); dev->cpumask = cpumask_of(smp_processor_id()); } if (dev->cpumask == cpu_all_mask) { WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n", dev->name); dev->cpumask = cpu_possible_mask; } raw_spin_lock_irqsave(&clockevents_lock, flags); list_add(&dev->list, &clockevent_devices); tick_check_new_device(dev); clockevents_notify_released(); raw_spin_unlock_irqrestore(&clockevents_lock, flags); } EXPORT_SYMBOL_GPL(clockevents_register_device); static void clockevents_config(struct clock_event_device *dev, u32 freq) { u64 sec; if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return; /* * Calculate the maximum number of seconds we can sleep. Limit * to 10 minutes for hardware which can program more than * 32bit ticks so we still get reasonable conversion values. */ sec = dev->max_delta_ticks; do_div(sec, freq); if (!sec) sec = 1; else if (sec > 600 && dev->max_delta_ticks > UINT_MAX) sec = 600; clockevents_calc_mult_shift(dev, freq, sec); dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false); dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true); } /** * clockevents_config_and_register - Configure and register a clock event device * @dev: device to register * @freq: The clock frequency * @min_delta: The minimum clock ticks to program in oneshot mode * @max_delta: The maximum clock ticks to program in oneshot mode * * min/max_delta can be 0 for devices which do not support oneshot mode. 
*/ void clockevents_config_and_register(struct clock_event_device *dev, u32 freq, unsigned long min_delta, unsigned long max_delta) { dev->min_delta_ticks = min_delta; dev->max_delta_ticks = max_delta; clockevents_config(dev, freq); clockevents_register_device(dev); } EXPORT_SYMBOL_GPL(clockevents_config_and_register); int __clockevents_update_freq(struct clock_event_device *dev, u32 freq) { clockevents_config(dev, freq); if (clockevent_state_oneshot(dev)) return clockevents_program_event(dev, dev->next_event, false); if (clockevent_state_periodic(dev)) return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC); return 0; } /** * clockevents_update_freq - Update frequency and reprogram a clock event device. * @dev: device to modify * @freq: new device frequency * * Reconfigure and reprogram a clock event device in oneshot * mode. Must be called on the cpu for which the device delivers per * cpu timer events. If called for the broadcast device the core takes * care of serialization. * * Returns 0 on success, -ETIME when the event is in the past. */ int clockevents_update_freq(struct clock_event_device *dev, u32 freq) { unsigned long flags; int ret; local_irq_save(flags); ret = tick_broadcast_update_freq(dev, freq); if (ret == -ENODEV) ret = __clockevents_update_freq(dev, freq); local_irq_restore(flags); return ret; } /* * Noop handler when we shut down an event device */ void clockevents_handle_noop(struct clock_event_device *dev) { } /** * clockevents_exchange_device - release and request clock devices * @old: device to release (can be NULL) * @new: device to request (can be NULL) * * Called from various tick functions with clockevents_lock held and * interrupts disabled. */ void clockevents_exchange_device(struct clock_event_device *old, struct clock_event_device *new) { /* * Caller releases a clock event device. We queue it into the * released list and do a notify add later. */ if (old) { module_put(old->owner); clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED); list_move(&old->list, &clockevents_released); } if (new) { BUG_ON(!clockevent_state_detached(new)); clockevents_shutdown(new); } } /** * clockevents_suspend - suspend clock devices */ void clockevents_suspend(void) { struct clock_event_device *dev; list_for_each_entry_reverse(dev, &clockevent_devices, list) if (dev->suspend && !clockevent_state_detached(dev)) dev->suspend(dev); } /** * clockevents_resume - resume clock devices */ void clockevents_resume(void) { struct clock_event_device *dev; list_for_each_entry(dev, &clockevent_devices, list) if (dev->resume && !clockevent_state_detached(dev)) dev->resume(dev); } #ifdef CONFIG_HOTPLUG_CPU /** * tick_offline_cpu - Shutdown all clock events related * to this CPU and take it out of the * broadcast mechanism. * @cpu: The outgoing CPU * * Called by the dying CPU during teardown. */ void tick_offline_cpu(unsigned int cpu) { struct clock_event_device *dev, *tmp; raw_spin_lock(&clockevents_lock); tick_broadcast_offline(cpu); tick_shutdown(); /* * Unregister the clock event devices which were * released above. 
*/ list_for_each_entry_safe(dev, tmp, &clockevents_released, list) list_del(&dev->list); /* * Now check whether the CPU has left unused per cpu devices */ list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { if (cpumask_test_cpu(cpu, dev->cpumask) && cpumask_weight(dev->cpumask) == 1 && !tick_is_broadcast_device(dev)) { BUG_ON(!clockevent_state_detached(dev)); list_del(&dev->list); } } raw_spin_unlock(&clockevents_lock); } #endif #ifdef CONFIG_SYSFS static const struct bus_type clockevents_subsys = { .name = "clockevents", .dev_name = "clockevent", }; static DEFINE_PER_CPU(struct device, tick_percpu_dev); static struct tick_device *tick_get_tick_dev(struct device *dev); static ssize_t current_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tick_device *td; ssize_t count = 0; raw_spin_lock_irq(&clockevents_lock); td = tick_get_tick_dev(dev); if (td && td->evtdev) count = sysfs_emit(buf, "%s\n", td->evtdev->name); raw_spin_unlock_irq(&clockevents_lock); return count; } static DEVICE_ATTR_RO(current_device); /* We don't support the abomination of removable broadcast devices */ static ssize_t unbind_device_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { char name[CS_NAME_LEN]; ssize_t ret = sysfs_get_uname(buf, name, count); struct clock_event_device *ce = NULL, *iter; if (ret < 0) return ret; ret = -ENODEV; mutex_lock(&clockevents_mutex); raw_spin_lock_irq(&clockevents_lock); list_for_each_entry(iter, &clockevent_devices, list) { if (!strcmp(iter->name, name)) { ret = __clockevents_try_unbind(iter, dev->id); ce = iter; break; } } raw_spin_unlock_irq(&clockevents_lock); /* * We hold clockevents_mutex, so ce can't go away */ if (ret == -EAGAIN) ret = clockevents_unbind(ce, dev->id); mutex_unlock(&clockevents_mutex); return ret ? ret : count; } static DEVICE_ATTR_WO(unbind_device); #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST static struct device tick_bc_dev = { .init_name = "broadcast", .id = 0, .bus = &clockevents_subsys, }; static struct tick_device *tick_get_tick_dev(struct device *dev) { return dev == &tick_bc_dev ? tick_get_broadcast_device() : &per_cpu(tick_cpu_device, dev->id); } static __init int tick_broadcast_init_sysfs(void) { int err = device_register(&tick_bc_dev); if (!err) err = device_create_file(&tick_bc_dev, &dev_attr_current_device); return err; } #else static struct tick_device *tick_get_tick_dev(struct device *dev) { return &per_cpu(tick_cpu_device, dev->id); } static inline int tick_broadcast_init_sysfs(void) { return 0; } #endif static int __init tick_init_sysfs(void) { int cpu; for_each_possible_cpu(cpu) { struct device *dev = &per_cpu(tick_percpu_dev, cpu); int err; dev->id = cpu; dev->bus = &clockevents_subsys; err = device_register(dev); if (!err) err = device_create_file(dev, &dev_attr_current_device); if (!err) err = device_create_file(dev, &dev_attr_unbind_device); if (err) return err; } return tick_broadcast_init_sysfs(); } static int __init clockevents_init_sysfs(void) { int err = subsys_system_register(&clockevents_subsys, NULL); if (!err) err = tick_init_sysfs(); return err; } device_initcall(clockevents_init_sysfs); #endif /* SYSFS */
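The clockevents core above is driven entirely by timer drivers calling clockevents_config_and_register(). The following is an illustrative sketch, not taken from any file in this document: a hypothetical one-shot timer (the hyp_timer_* hardware accessors and the 1 MHz rate are invented for the example) registering itself through the API documented above.

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp.h>

/* Hypothetical hardware accessors, assumed to exist only for this sketch. */
extern void hyp_timer_arm(unsigned long ticks);
extern void hyp_timer_stop(void);

static int hyp_timer_set_next_event(unsigned long ticks,
				    struct clock_event_device *ce)
{
	/* Program the comparator 'ticks' clock cycles into the future. */
	hyp_timer_arm(ticks);
	return 0;
}

static int hyp_timer_shutdown(struct clock_event_device *ce)
{
	hyp_timer_stop();
	return 0;
}

static struct clock_event_device hyp_clockevent = {
	.name			= "hyp-timer",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_next_event		= hyp_timer_set_next_event,
	.set_state_shutdown	= hyp_timer_shutdown,
};

static void __init hyp_timer_init(void)
{
	hyp_clockevent.cpumask = cpumask_of(smp_processor_id());
	/* 1 MHz input clock; allow deltas from 2 ticks up to the 32 bit range. */
	clockevents_config_and_register(&hyp_clockevent, 1000000, 2, 0xffffffff);
}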
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/uaccess.h> #include <linux/fs_struct.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/prefetch.h> #include "mount.h" #include "internal.h" struct prepend_buffer { char *buf; int len; }; #define DECLARE_BUFFER(__name, __buf, __len) \ struct prepend_buffer __name = {.buf = __buf + __len, .len = __len} static char *extract_string(struct prepend_buffer *p) { if (likely(p->len >= 0)) return p->buf; return ERR_PTR(-ENAMETOOLONG); } static bool prepend_char(struct prepend_buffer *p, unsigned char c) { if (likely(p->len > 0)) { p->len--; *--p->buf = c; return true; } p->len = -1; return false; } /* * The source of the prepend data can be an optimistic load * of a dentry name and length. And because we don't hold any * locks, the length and the pointer to the name may not be * in sync if a concurrent rename happens, and the kernel * copy might fault as a result. * * The end result will correct itself when we check the * rename sequence count, but we need to be able to handle * the fault gracefully. */ static bool prepend_copy(void *dst, const void *src, int len) { if (unlikely(copy_from_kernel_nofault(dst, src, len))) { memset(dst, 'x', len); return false; } return true; } static bool prepend(struct prepend_buffer *p, const char *str, int namelen) { // Already overflowed?
if (p->len < 0) return false; // Will overflow? if (p->len < namelen) { // Fill as much as possible from the end of the name str += namelen - p->len; p->buf -= p->len; prepend_copy(p->buf, str, p->len); p->len = -1; return false; } // Fits fully p->len -= namelen; p->buf -= namelen; return prepend_copy(p->buf, str, namelen); } /** * prepend_name - prepend a pathname in front of current buffer pointer * @p: prepend buffer which contains buffer pointer and allocated length * @name: name string and length qstr structure * * With RCU path tracing, it may race with d_move(). Use READ_ONCE() to * make sure that either the old or the new name pointer and length are * fetched. However, there may be mismatch between length and pointer. * But since the length cannot be trusted, we need to copy the name very * carefully when doing the prepend_copy(). It also prepends "/" at * the beginning of the name. The sequence number check at the caller will * retry it again when a d_move() does happen. So any garbage in the buffer * due to mismatched pointer and length will be discarded. * * Load acquire is needed to make sure that we see the new name data even * if we might get the length wrong. */ static bool prepend_name(struct prepend_buffer *p, const struct qstr *name) { const char *dname = smp_load_acquire(&name->name); /* ^^^ */ u32 dlen = READ_ONCE(name->len); return prepend(p, dname, dlen) && prepend_char(p, '/'); } static int __prepend_path(const struct dentry *dentry, const struct mount *mnt, const struct path *root, struct prepend_buffer *p) { while (dentry != root->dentry || &mnt->mnt != root->mnt) { const struct dentry *parent = READ_ONCE(dentry->d_parent); if (dentry == mnt->mnt.mnt_root) { struct mount *m = READ_ONCE(mnt->mnt_parent); struct mnt_namespace *mnt_ns; if (likely(mnt != m)) { dentry = READ_ONCE(mnt->mnt_mountpoint); mnt = m; continue; } /* Global root */ mnt_ns = READ_ONCE(mnt->mnt_ns); /* open-coded is_mounted() to use local mnt_ns */ if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns)) return 1; // absolute root else return 2; // detached or not attached yet } if (unlikely(dentry == parent)) /* Escaped? */ return 3; prefetch(parent); if (!prepend_name(p, &dentry->d_name)) break; dentry = parent; } return 0; } /** * prepend_path - Prepend path string to a buffer * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry * @p: prepend buffer which contains buffer pointer and allocated length * * The function will first try to write out the pathname without taking any * lock other than the RCU read lock to make sure that dentries won't go away. * It only checks the sequence number of the global rename_lock as any change * in the dentry's d_seq will be preceded by changes in the rename_lock * sequence number. If the sequence number had been changed, it will restart * the whole pathname back-tracing sequence again by taking the rename_lock. * In this case, there is no need to take the RCU read lock as the recursive * parent pointer references will keep the dentry chain alive as long as no * rename operation is performed. 
*/ static int prepend_path(const struct path *path, const struct path *root, struct prepend_buffer *p) { unsigned seq, m_seq = 0; struct prepend_buffer b; int error; rcu_read_lock(); restart_mnt: read_seqbegin_or_lock(&mount_lock, &m_seq); seq = 0; rcu_read_lock(); restart: b = *p; read_seqbegin_or_lock(&rename_lock, &seq); error = __prepend_path(path->dentry, real_mount(path->mnt), root, &b); if (!(seq & 1)) rcu_read_unlock(); if (need_seqretry(&rename_lock, seq)) { seq = 1; goto restart; } done_seqretry(&rename_lock, seq); if (!(m_seq & 1)) rcu_read_unlock(); if (need_seqretry(&mount_lock, m_seq)) { m_seq = 1; goto restart_mnt; } done_seqretry(&mount_lock, m_seq); if (unlikely(error == 3)) b = *p; if (b.len == p->len) prepend_char(&b, '/'); *p = b; return error; } /** * __d_path - return the path of a dentry * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry * @buf: buffer to return value in * @buflen: buffer length * * Convert a dentry into an ASCII path name. * * Returns a pointer into the buffer or an error code if the * path was too long. * * "buflen" should be positive. * * If the path is not reachable from the supplied root, return %NULL. */ char *__d_path(const struct path *path, const struct path *root, char *buf, int buflen) { DECLARE_BUFFER(b, buf, buflen); prepend_char(&b, 0); if (unlikely(prepend_path(path, root, &b) > 0)) return NULL; return extract_string(&b); } char *d_absolute_path(const struct path *path, char *buf, int buflen) { struct path root = {}; DECLARE_BUFFER(b, buf, buflen); prepend_char(&b, 0); if (unlikely(prepend_path(path, &root, &b) > 1)) return ERR_PTR(-EINVAL); return extract_string(&b); } static void get_fs_root_rcu(struct fs_struct *fs, struct path *root) { unsigned seq; do { seq = read_seqbegin(&fs->seq); *root = fs->root; } while (read_seqretry(&fs->seq, seq)); } /** * d_path - return the path of a dentry * @path: path to report * @buf: buffer to return value in * @buflen: buffer length * * Convert a dentry into an ASCII path name. If the entry has been deleted * the string " (deleted)" is appended. Note that this is ambiguous. * * Returns a pointer into the buffer or an error code if the path was * too long. Note: Callers should use the returned pointer, not the passed * in buffer, to use the name! The implementation often starts at an offset * into the buffer, and may leave 0 bytes at the start. * * "buflen" should be positive. */ char *d_path(const struct path *path, char *buf, int buflen) { DECLARE_BUFFER(b, buf, buflen); struct path root; /* * We have various synthetic filesystems that never get mounted. On * these filesystems dentries are never used for lookup purposes, and * thus don't need to be hashed. They also don't need a name until a * user wants to identify the object in /proc/pid/fd/. The little hack * below allows us to generate a name for these objects on demand: * * Some pseudo inodes are mountable. When they are mounted * path->dentry == path->mnt->mnt_root. In that case don't call d_dname * and instead have d_path return the mounted path. 
*/ if (path->dentry->d_op && path->dentry->d_op->d_dname && (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root)) return path->dentry->d_op->d_dname(path->dentry, buf, buflen); rcu_read_lock(); get_fs_root_rcu(current->fs, &root); if (unlikely(d_unlinked(path->dentry))) prepend(&b, " (deleted)", 11); else prepend_char(&b, 0); prepend_path(path, &root, &b); rcu_read_unlock(); return extract_string(&b); } EXPORT_SYMBOL(d_path); /* * Helper function for dentry_operations.d_dname() members */ char *dynamic_dname(char *buffer, int buflen, const char *fmt, ...) { va_list args; char temp[64]; int sz; va_start(args, fmt); sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; va_end(args); if (sz > sizeof(temp) || sz > buflen) return ERR_PTR(-ENAMETOOLONG); buffer += buflen - sz; return memcpy(buffer, temp, sz); } char *simple_dname(struct dentry *dentry, char *buffer, int buflen) { DECLARE_BUFFER(b, buffer, buflen); /* these dentries are never renamed, so d_lock is not needed */ prepend(&b, " (deleted)", 11); prepend(&b, dentry->d_name.name, dentry->d_name.len); prepend_char(&b, '/'); return extract_string(&b); } /* * Write full pathname from the root of the filesystem into the buffer. */ static char *__dentry_path(const struct dentry *d, struct prepend_buffer *p) { const struct dentry *dentry; struct prepend_buffer b; int seq = 0; rcu_read_lock(); restart: dentry = d; b = *p; read_seqbegin_or_lock(&rename_lock, &seq); while (!IS_ROOT(dentry)) { const struct dentry *parent = dentry->d_parent; prefetch(parent); if (!prepend_name(&b, &dentry->d_name)) break; dentry = parent; } if (!(seq & 1)) rcu_read_unlock(); if (need_seqretry(&rename_lock, seq)) { seq = 1; goto restart; } done_seqretry(&rename_lock, seq); if (b.len == p->len) prepend_char(&b, '/'); return extract_string(&b); } char *dentry_path_raw(const struct dentry *dentry, char *buf, int buflen) { DECLARE_BUFFER(b, buf, buflen); prepend_char(&b, 0); return __dentry_path(dentry, &b); } EXPORT_SYMBOL(dentry_path_raw); char *dentry_path(const struct dentry *dentry, char *buf, int buflen) { DECLARE_BUFFER(b, buf, buflen); if (unlikely(d_unlinked(dentry))) prepend(&b, "//deleted", 10); else prepend_char(&b, 0); return __dentry_path(dentry, &b); } static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root, struct path *pwd) { unsigned seq; do { seq = read_seqbegin(&fs->seq); *root = fs->root; *pwd = fs->pwd; } while (read_seqretry(&fs->seq, seq)); } /* * NOTE! The user-level library version returns a * character pointer. The kernel system call just * returns the length of the buffer filled (which * includes the ending '\0' character), or a negative * error value. 
So libc would do something like * * char *getcwd(char * buf, size_t size) * { * int retval; * * retval = sys_getcwd(buf, size); * if (retval >= 0) * return buf; * errno = -retval; * return NULL; * } */ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) { int error; struct path pwd, root; char *page = __getname(); if (!page) return -ENOMEM; rcu_read_lock(); get_fs_root_and_pwd_rcu(current->fs, &root, &pwd); if (unlikely(d_unlinked(pwd.dentry))) { rcu_read_unlock(); error = -ENOENT; } else { unsigned len; DECLARE_BUFFER(b, page, PATH_MAX); prepend_char(&b, 0); if (unlikely(prepend_path(&pwd, &root, &b) > 0)) prepend(&b, "(unreachable)", 13); rcu_read_unlock(); len = PATH_MAX - b.len; if (unlikely(len > PATH_MAX)) error = -ENAMETOOLONG; else if (unlikely(len > size)) error = -ERANGE; else if (copy_to_user(buf, b.buf, len)) error = -EFAULT; else error = len; } __putname(page); return error; }
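As the d_path() kernel-doc above stresses, callers must consume the returned pointer rather than the start of the buffer, and must be prepared for an ERR_PTR() when the name does not fit. A minimal, hypothetical in-kernel caller might look like the sketch below (hyp_print_path() is invented purely for illustration).

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/limits.h>
#include <linux/path.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical helper: print the path of an already-held struct path. */
static void hyp_print_path(const struct path *path)
{
	char *buf, *name;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return;

	/* The name is built from the end of the buffer backwards. */
	name = d_path(path, buf, PATH_MAX);
	if (IS_ERR(name))
		pr_info("path too long: %ld\n", PTR_ERR(name));
	else
		pr_info("path: %s\n", name);

	kfree(buf);
}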
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/string.h> #include <linux/skbuff.h> #include <linux/export.h> #include <net/caif/cfpkt.h> #define PKT_PREFIX 48 #define PKT_POSTFIX 2 #define PKT_LEN_WHEN_EXTENDING 128 #define PKT_ERROR(pkt, errmsg) \ do { \ cfpkt_priv(pkt)->erronous = true; \ skb_reset_tail_pointer(&pkt->skb); \ pr_warn(errmsg); \ } while (0) /* * net/caif/ is generic and does not * understand SKB, so we do this typecast */ struct cfpkt { struct sk_buff skb; }; /* Private data inside SKB */ struct cfpkt_priv_data { struct dev_info dev_info; bool erronous; }; static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) { return (struct cfpkt_priv_data *) pkt->skb.cb; } static inline bool is_erronous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) { return &pkt->skb; } static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) { return (struct cfpkt *) skb; } struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt) { struct cfpkt *pkt = skb_to_pkt(nativepkt); cfpkt_priv(pkt)->erronous = false; return pkt; } EXPORT_SYMBOL(cfpkt_fromnative); void *cfpkt_tonative(struct cfpkt *pkt) { return (void *) pkt; } EXPORT_SYMBOL(cfpkt_tonative); static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) { struct sk_buff *skb; skb = alloc_skb(len + pfx, GFP_ATOMIC); if (unlikely(skb == NULL)) return NULL; skb_reserve(skb, pfx); return skb_to_pkt(skb); } inline struct cfpkt *cfpkt_create(u16 len) { return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); } void cfpkt_destroy(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); kfree_skb(skb); } inline bool cfpkt_more(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len > 0; } int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); if (skb_headlen(skb) >= len) { memcpy(data, skb->data, len); return 0; } return !cfpkt_extr_head(pkt, data, len) && !cfpkt_add_head(pkt, data, len); } int
cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); u8 *from; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(len > skb->len)) { PKT_ERROR(pkt, "read beyond end of packet\n"); return -EPROTO; } if (unlikely(len > skb_headlen(skb))) { if (unlikely(skb_linearize(skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } } from = skb_pull(skb, len); from -= len; if (data) memcpy(data, from, len); return 0; } EXPORT_SYMBOL(cfpkt_extr_head); int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); u8 *data = dta; u8 *from; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_linearize(skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } if (unlikely(skb->data + len > skb_tail_pointer(skb))) { PKT_ERROR(pkt, "read beyond end of packet\n"); return -EPROTO; } from = skb_tail_pointer(skb) - len; skb_trim(skb, skb->len - len); memcpy(data, from, len); return 0; } int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) { return cfpkt_add_body(pkt, NULL, len); } int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); struct sk_buff *lastskb; u8 *to; u16 addlen = 0; if (unlikely(is_erronous(pkt))) return -EPROTO; lastskb = skb; /* Check whether we need to add space at the tail */ if (unlikely(skb_tailroom(skb) < len)) { if (likely(len < PKT_LEN_WHEN_EXTENDING)) addlen = PKT_LEN_WHEN_EXTENDING; else addlen = len; } /* Check whether we need to change the SKB before writing to the tail */ if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) { /* Make sure data is writable */ if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) { PKT_ERROR(pkt, "cow failed\n"); return -EPROTO; } } /* All set to put the last SKB and optionally write data there. */ to = pskb_put(skb, lastskb, len); if (likely(data)) memcpy(to, data, len); return 0; } inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data) { return cfpkt_add_body(pkt, &data, 1); } int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); struct sk_buff *lastskb; u8 *to; const u8 *data = data2; int ret; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_headroom(skb) < len)) { PKT_ERROR(pkt, "no headroom\n"); return -EPROTO; } /* Make sure data is writable */ ret = skb_cow_data(skb, 0, &lastskb); if (unlikely(ret < 0)) { PKT_ERROR(pkt, "cow failed\n"); return ret; } to = skb_push(skb, len); memcpy(to, data, len); return 0; } EXPORT_SYMBOL(cfpkt_add_head); inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) { return cfpkt_add_body(pkt, data, len); } inline u16 cfpkt_getlen(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len; } int cfpkt_iterate(struct cfpkt *pkt, u16 (*iter_func)(u16, void *, u16), u16 data) { /* * Don't care about the performance hit of linearizing, * Checksum should not be used on high-speed interfaces anyway. 
*/ if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_linearize(&pkt->skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); } int cfpkt_setlen(struct cfpkt *pkt, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); if (unlikely(is_erronous(pkt))) return -EPROTO; if (likely(len <= skb->len)) { if (unlikely(skb->data_len)) ___pskb_trim(skb, len); else skb_trim(skb, len); return cfpkt_getlen(pkt); } /* Need to expand SKB */ if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len))) PKT_ERROR(pkt, "skb_pad_trail failed\n"); return cfpkt_getlen(pkt); } struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt, u16 expectlen) { struct sk_buff *dst = pkt_to_skb(dstpkt); struct sk_buff *add = pkt_to_skb(addpkt); u16 addlen = skb_headlen(add); u16 neededtailspace; struct sk_buff *tmp; u16 dstlen; u16 createlen; if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) { return dstpkt; } neededtailspace = max(expectlen, addlen); if (dst->tail + neededtailspace > dst->end) { /* Create a dumplicate of 'dst' with more tail space */ struct cfpkt *tmppkt; dstlen = skb_headlen(dst); createlen = dstlen + neededtailspace; tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX); if (tmppkt == NULL) return NULL; tmp = pkt_to_skb(tmppkt); skb_put_data(tmp, dst->data, dstlen); cfpkt_destroy(dstpkt); dst = tmp; } skb_put_data(dst, add->data, skb_headlen(add)); cfpkt_destroy(addpkt); return skb_to_pkt(dst); } struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) { struct sk_buff *skb2; struct sk_buff *skb = pkt_to_skb(pkt); struct cfpkt *tmppkt; u8 *split = skb->data + pos; u16 len2nd = skb_tail_pointer(skb) - split; if (unlikely(is_erronous(pkt))) return NULL; if (skb->data + pos > skb_tail_pointer(skb)) { PKT_ERROR(pkt, "trying to split beyond end of packet\n"); return NULL; } /* Create a new packet for the second part of the data */ tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, PKT_PREFIX); if (tmppkt == NULL) return NULL; skb2 = pkt_to_skb(tmppkt); if (skb2 == NULL) return NULL; skb_put_data(skb2, split, len2nd); /* Reduce the length of the original packet */ skb_trim(skb, pos); skb2->priority = skb->priority; return skb_to_pkt(skb2); } bool cfpkt_erroneous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) { return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; } EXPORT_SYMBOL(cfpkt_info); void cfpkt_set_prio(struct cfpkt *pkt, int prio) { pkt_to_skb(pkt)->priority = prio; } EXPORT_SYMBOL(cfpkt_set_prio);
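The cfpkt helpers above are CAIF's thin wrapper around sk_buff: protocol layers push their header bytes onto the head of a packet on transmit and strip them again on receive. The sketch below is illustrative only (the one-byte channel-id header, the payload, and hyp_build_and_parse() are invented); it shows the build-then-parse pattern using the functions defined above.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <net/caif/cfpkt.h>

static int hyp_build_and_parse(void)
{
	u8 payload[4] = { 1, 2, 3, 4 };
	u8 chan = 0x05;		/* invented one-byte protocol header */
	u8 hdr;
	struct cfpkt *pkt;

	pkt = cfpkt_create(16);	/* headroom for headers is reserved internally */
	if (!pkt)
		return -ENOMEM;

	/* Transmit side: append the payload, then prepend the header byte. */
	if (cfpkt_add_body(pkt, payload, sizeof(payload)) < 0 ||
	    cfpkt_add_head(pkt, &chan, 1) < 0)
		goto err;

	/* Receive side: a layer strips its header again. */
	if (cfpkt_extr_head(pkt, &hdr, 1) < 0)
		goto err;

	pr_debug("hdr 0x%02x, %u payload bytes left\n", hdr, cfpkt_getlen(pkt));
	cfpkt_destroy(pkt);
	return 0;

err:
	cfpkt_destroy(pkt);
	return -EPROTO;
}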
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 1999-2002 Vojtech Pavlik */ #ifndef _SERIO_H #define _SERIO_H #include <linux/cleanup.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <uapi/linux/serio.h> extern const struct bus_type serio_bus; struct serio { void *port_data; char name[32]; char phys[32]; char firmware_id[128]; bool manual_bind; struct serio_device_id id; /* Protects critical sections from port's interrupt handler */ spinlock_t lock; int (*write)(struct serio *, unsigned char); int (*open)(struct serio *); void (*close)(struct serio *); int (*start)(struct serio *); void (*stop)(struct serio *); struct serio *parent; /* Entry in parent->children list */ struct list_head child_node; struct list_head children; /* Level of nesting in serio hierarchy */ unsigned int depth; /* * serio->drv is accessed from interrupt handlers; when modifying * caller should acquire serio->drv_mutex and serio->lock. */ struct serio_driver *drv; /* Protects serio->drv so attributes can pin current driver */ struct mutex drv_mutex; struct device dev; struct list_head node; /* * For use by PS/2 layer when several ports share hardware and * may get indigestion when exposed to concurrent access (i8042).
*/ struct mutex *ps2_cmd_mutex; }; #define to_serio_port(d) container_of(d, struct serio, dev) struct serio_driver { const char *description; const struct serio_device_id *id_table; bool manual_bind; void (*write_wakeup)(struct serio *); irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); int (*connect)(struct serio *, struct serio_driver *drv); int (*reconnect)(struct serio *); int (*fast_reconnect)(struct serio *); void (*disconnect)(struct serio *); void (*cleanup)(struct serio *); struct device_driver driver; }; #define to_serio_driver(d) container_of_const(d, struct serio_driver, driver) int serio_open(struct serio *serio, struct serio_driver *drv); void serio_close(struct serio *serio); void serio_rescan(struct serio *serio); void serio_reconnect(struct serio *serio); irqreturn_t serio_interrupt(struct serio *serio, unsigned char data, unsigned int flags); void __serio_register_port(struct serio *serio, struct module *owner); /* use a define to avoid include chaining to get THIS_MODULE */ #define serio_register_port(serio) \ __serio_register_port(serio, THIS_MODULE) void serio_unregister_port(struct serio *serio); void serio_unregister_child_port(struct serio *serio); int __must_check __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name); /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define serio_register_driver(drv) \ __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME) void serio_unregister_driver(struct serio_driver *drv); /** * module_serio_driver() - Helper macro for registering a serio driver * @__serio_driver: serio_driver struct * * Helper macro for serio drivers which do not do anything special in * module init/exit. This eliminates a lot of boilerplate. Each module * may only use this macro once, and calling it replaces module_init() * and module_exit(). */ #define module_serio_driver(__serio_driver) \ module_driver(__serio_driver, serio_register_driver, \ serio_unregister_driver) static inline int serio_write(struct serio *serio, unsigned char data) { if (serio->write) return serio->write(serio, data); else return -1; } static inline void serio_drv_write_wakeup(struct serio *serio) { if (serio->drv && serio->drv->write_wakeup) serio->drv->write_wakeup(serio); } /* * Use the following functions to manipulate serio's per-port * driver-specific data. */ static inline void *serio_get_drvdata(struct serio *serio) { return dev_get_drvdata(&serio->dev); } static inline void serio_set_drvdata(struct serio *serio, void *data) { dev_set_drvdata(&serio->dev, data); } /* * Use the following functions to protect critical sections in * driver code from port's interrupt handler */ static inline void serio_pause_rx(struct serio *serio) { spin_lock_irq(&serio->lock); } static inline void serio_continue_rx(struct serio *serio) { spin_unlock_irq(&serio->lock); } DEFINE_GUARD(serio_pause_rx, struct serio *, serio_pause_rx(_T), serio_continue_rx(_T)) #endif
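Since the header above only declares the bus interface, a minimal client sketch may help make the connect/disconnect/interrupt plumbing and the module_serio_driver() helper concrete. The driver below is hypothetical and invented for illustration (SERIO_RS232, SERIO_MSC and SERIO_ANY are existing id values; everything else is made up).

#include <linux/module.h>
#include <linux/serio.h>

static irqreturn_t hyp_serio_interrupt(struct serio *serio,
				       unsigned char data, unsigned int flags)
{
	dev_dbg(&serio->dev, "received byte 0x%02x\n", data);
	return IRQ_HANDLED;
}

static int hyp_serio_connect(struct serio *serio, struct serio_driver *drv)
{
	/* Bind to the port; bytes then arrive via ->interrupt(). */
	return serio_open(serio, drv);
}

static void hyp_serio_disconnect(struct serio *serio)
{
	serio_close(serio);
}

static const struct serio_device_id hyp_serio_ids[] = {
	{
		.type	= SERIO_RS232,
		.proto	= SERIO_MSC,
		.id	= SERIO_ANY,
		.extra	= SERIO_ANY,
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(serio, hyp_serio_ids);

static struct serio_driver hyp_serio_drv = {
	.driver		= { .name = "hyp_serio" },
	.description	= "hypothetical serio client (illustrative only)",
	.id_table	= hyp_serio_ids,
	.interrupt	= hyp_serio_interrupt,
	.connect	= hyp_serio_connect,
	.disconnect	= hyp_serio_disconnect,
};
module_serio_driver(hyp_serio_drv);

MODULE_DESCRIPTION("Illustrative serio client sketch");
MODULE_LICENSE("GPL");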
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. */ #ifndef LINUX_DMAENGINE_H #define LINUX_DMAENGINE_H #include <linux/device.h> #include <linux/err.h> #include <linux/uio.h> #include <linux/bug.h> #include <linux/scatterlist.h> #include <linux/bitmap.h> #include <linux/types.h> #include <asm/page.h> /** * typedef dma_cookie_t - an opaque DMA cookie * * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code */ typedef s32 dma_cookie_t; #define DMA_MIN_COOKIE 1 static inline int dma_submit_error(dma_cookie_t cookie) { return cookie < 0 ? cookie : 0; } /** * enum dma_status - DMA transaction status * @DMA_COMPLETE: transaction completed * @DMA_IN_PROGRESS: transaction not yet processed * @DMA_PAUSED: transaction is paused * @DMA_ERROR: transaction failed */ enum dma_status { DMA_COMPLETE, DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR, DMA_OUT_OF_ORDER, }; /** * enum dma_transaction_type - DMA transaction types/indexes * * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is * automatically set as dma devices are registered. */ enum dma_transaction_type { DMA_MEMCPY, DMA_XOR, DMA_PQ, DMA_XOR_VAL, DMA_PQ_VAL, DMA_MEMSET, DMA_MEMSET_SG, DMA_INTERRUPT, DMA_PRIVATE, DMA_ASYNC_TX, DMA_SLAVE, DMA_CYCLIC, DMA_INTERLEAVE, DMA_COMPLETION_NO_ORDER, DMA_REPEAT, DMA_LOAD_EOT, /* last transaction type for creation of the capabilities mask */ DMA_TX_TYPE_END, }; /** * enum dma_transfer_direction - dma transfer mode and direction indicator * @DMA_MEM_TO_MEM: Async/Memcpy mode * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory * @DMA_DEV_TO_DEV: Slave mode & From Device to Device */ enum dma_transfer_direction { DMA_MEM_TO_MEM, DMA_MEM_TO_DEV, DMA_DEV_TO_MEM, DMA_DEV_TO_DEV, DMA_TRANS_NONE, }; /* * Interleaved Transfer Request * ---------------------------- * A chunk is a collection of contiguous bytes to be transferred. * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG). * ICGs may or may not change between chunks. * A FRAME is the smallest series of contiguous {chunk,icg} pairs, * that when repeated an integral number of times, specifies the transfer. * A transfer template is a specification of a Frame, the number of times * it is to be repeated and other per-transfer attributes. * * Practically, a client driver would have ready a template for each * type of transfer it is going to need during its lifetime and * set only 'src_start' and 'dst_start' before submitting the requests. * * * | Frame-1 | Frame-2 | ~ | Frame-'numf' | * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...| * * == Chunk size * ... ICG */ /** * struct data_chunk - Element of scatter-gather list that makes a frame. * @size: Number of bytes to read from source. * size_dst := fn(op, size_src), so doesn't mean much for destination. * @icg: Number of bytes to jump after last src/dst address of this * chunk and before first src/dst address for next chunk. * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false. * Ignored for src(assumed 0), if src_inc is true and src_sgl is false. * @dst_icg: Number of bytes to jump after last dst address of this * chunk and before the first dst address for next chunk. * Ignored if dst_inc is true and dst_sgl is false.
* @src_icg: Number of bytes to jump after last src address of this * chunk and before the first src address for next chunk. * Ignored if src_inc is true and src_sgl is false. */ struct data_chunk { size_t size; size_t icg; size_t dst_icg; size_t src_icg; }; /** * struct dma_interleaved_template - Template to convey DMAC the transfer pattern * and attributes. * @src_start: Bus address of source for the first chunk. * @dst_start: Bus address of destination for the first chunk. * @dir: Specifies the type of Source and Destination. * @src_inc: If the source address increments after reading from it. * @dst_inc: If the destination address increments after writing to it. * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read). * Otherwise, source is read contiguously (icg ignored). * Ignored if src_inc is false. * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write). * Otherwise, destination is filled contiguously (icg ignored). * Ignored if dst_inc is false. * @numf: Number of frames in this template. * @frame_size: Number of chunks in a frame i.e, size of sgl[]. * @sgl: Array of {chunk,icg} pairs that make up a frame. */ struct dma_interleaved_template { dma_addr_t src_start; dma_addr_t dst_start; enum dma_transfer_direction dir; bool src_inc; bool dst_inc; bool src_sgl; bool dst_sgl; size_t numf; size_t frame_size; struct data_chunk sgl[]; }; /** * struct dma_vec - DMA vector * @addr: Bus address of the start of the vector * @len: Length in bytes of the DMA vector */ struct dma_vec { dma_addr_t addr; size_t len; }; /** * enum dma_ctrl_flags - DMA flags to augment operation preparation, * control completion, and communicate status. * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of * this transaction * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client * acknowledges receipt, i.e. has a chance to establish any dependency * chains * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as * sources that were the result of a previous operation, in the case of a PQ * operation it continues the calculation with new sources * @DMA_PREP_FENCE - tell the driver that subsequent operations depend * on the result of this operation * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till * cleared or freed * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command * data and the descriptor should be in different format from normal * data descriptors. * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically * repeated when it ends until a transaction is issued on the same channel * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to * interleaved transactions and is ignored for all other transaction types. * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the * repeated transaction ends. Not setting this flag when the previously queued * transaction is marked with DMA_PREP_REPEAT will cause the new transaction * to never be processed and stay in the issued queue forever. The flag is * ignored if the previous transaction is not a repeated transaction. 
*/ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), DMA_PREP_PQ_DISABLE_P = (1 << 2), DMA_PREP_PQ_DISABLE_Q = (1 << 3), DMA_PREP_CONTINUE = (1 << 4), DMA_PREP_FENCE = (1 << 5), DMA_CTRL_REUSE = (1 << 6), DMA_PREP_CMD = (1 << 7), DMA_PREP_REPEAT = (1 << 8), DMA_PREP_LOAD_EOT = (1 << 9), }; /** * enum sum_check_bits - bit position of pq_check_flags */ enum sum_check_bits { SUM_CHECK_P = 0, SUM_CHECK_Q = 1, }; /** * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise */ enum sum_check_flags { SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P), SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q), }; /** * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. * See linux/cpumask.h */ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; /** * enum dma_desc_metadata_mode - per descriptor metadata mode types supported * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the * client driver and it is attached (via the dmaengine_desc_attach_metadata() * helper) to the descriptor. * * Client drivers interested to use this mode can follow: * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: * 1. prepare the descriptor (dmaengine_prep_*) * construct the metadata in the client's buffer * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the * descriptor * 3. submit the transfer * - DMA_DEV_TO_MEM: * 1. prepare the descriptor (dmaengine_prep_*) * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the * descriptor * 3. submit the transfer * 4. when the transfer is completed, the metadata should be available in the * attached buffer * * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA * driver. The client driver can ask for the pointer, maximum size and the * currently used size of the metadata and can directly update or read it. * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() is * provided as helper functions. * * Note: the metadata area for the descriptor is no longer valid after the * transfer has been completed (valid up to the point when the completion * callback returns if used). * * Client drivers interested to use this mode can follow: * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: * 1. prepare the descriptor (dmaengine_prep_*) * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's * metadata area * 3. update the metadata at the pointer * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount * of data the client has placed into the metadata buffer * 5. submit the transfer * - DMA_DEV_TO_MEM: * 1. prepare the descriptor (dmaengine_prep_*) * 2. submit the transfer * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the * pointer to the engine's metadata area * 4. Read out the metadata from the pointer * * Warning: the two modes are not compatible and clients must use one mode for a * descriptor. 
*/ enum dma_desc_metadata_mode { DESC_METADATA_NONE = 0, DESC_METADATA_CLIENT = BIT(0), DESC_METADATA_ENGINE = BIT(1), }; /** * struct dma_chan_percpu - the per-CPU part of struct dma_chan * @memcpy_count: transaction counter * @bytes_transferred: byte counter */ struct dma_chan_percpu { /* stats */ unsigned long memcpy_count; unsigned long bytes_transferred; }; /** * struct dma_router - DMA router structure * @dev: pointer to the DMA router device * @route_free: function to be called when the route can be disconnected */ struct dma_router { struct device *dev; void (*route_free)(struct device *dev, void *route_data); }; /** * struct dma_chan - devices supply DMA channels, clients use them * @device: ptr to the dma device who supplies this channel, always !%NULL * @slave: ptr to the device using this channel * @cookie: last cookie value returned to client * @completed_cookie: last completed cookie for this channel * @chan_id: channel ID for sysfs * @dev: class device for sysfs * @name: backlink name for sysfs * @dbg_client_name: slave name for debugfs in format: * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx" * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client_count: how many clients are using this channel * @table_count: number of appearances in the mem-to-mem allocation table * @router: pointer to the DMA router structure * @route_data: channel specific data for the router * @private: private data for certain client-channel associations */ struct dma_chan { struct dma_device *device; struct device *slave; dma_cookie_t cookie; dma_cookie_t completed_cookie; /* sysfs */ int chan_id; struct dma_chan_dev *dev; const char *name; #ifdef CONFIG_DEBUG_FS char *dbg_client_name; #endif struct list_head device_node; struct dma_chan_percpu __percpu *local; int client_count; int table_count; /* DMA router */ struct dma_router *router; void *route_data; void *private; }; /** * struct dma_chan_dev - relate sysfs device node to backing channel device * @chan: driver channel device * @device: sysfs device * @dev_id: parent dma_device dev_id * @chan_dma_dev: The channel is using custom/different dma-mapping * compared to the parent dma_device */ struct dma_chan_dev { struct dma_chan *chan; struct device device; int dev_id; bool chan_dma_dev; }; /** * enum dma_slave_buswidth - defines bus width of the DMA slave * device, source or target buses */ enum dma_slave_buswidth { DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, DMA_SLAVE_BUSWIDTH_1_BYTE = 1, DMA_SLAVE_BUSWIDTH_2_BYTES = 2, DMA_SLAVE_BUSWIDTH_3_BYTES = 3, DMA_SLAVE_BUSWIDTH_4_BYTES = 4, DMA_SLAVE_BUSWIDTH_8_BYTES = 8, DMA_SLAVE_BUSWIDTH_16_BYTES = 16, DMA_SLAVE_BUSWIDTH_32_BYTES = 32, DMA_SLAVE_BUSWIDTH_64_BYTES = 64, DMA_SLAVE_BUSWIDTH_128_BYTES = 128, }; /** * struct dma_slave_config - dma slave channel runtime config * @direction: whether the data shall go in or out on this slave * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are * legal values. DEPRECATED, drivers should use the direction argument * to the device_prep_slave_sg and device_prep_dma_cyclic functions or * the dir field in the dma_interleaved_template structure. * @src_addr: this is the physical address where DMA slave data * should be read (RX), if the source is memory this argument is * ignored. * @dst_addr: this is the physical address where DMA slave data * should be written (TX), if the destination is memory this argument * is ignored. 
* @src_addr_width: this is the width in bytes of the source (RX) * register where DMA data shall be read. If the source * is memory this may be ignored depending on architecture. * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128. * @dst_addr_width: same as src_addr_width but for destination * target (TX) mutatis mutandis. * @src_maxburst: the maximum number of words (note: words, as in * units of the src_addr_width member, not bytes) that can be sent * in one burst to the device. Typically something like half the * FIFO depth on I/O peripherals so you don't overflow it. This * may or may not be applicable on memory sources. * @dst_maxburst: same as src_maxburst but for destination target * mutatis mutandis. * @src_port_window_size: The length of the register area, in words, through which the data * needs to be accessed on the device side. It is only used for devices which use * an area instead of a single register to receive the data. Typically the DMA * loops in this area in order to transfer the data. * @dst_port_window_size: same as src_port_window_size but for the destination * port. * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill * with 'true' if peripheral should be flow controller. Direction will be * selected at runtime. * @peripheral_config: peripheral configuration for programming peripheral * for dmaengine transfer * @peripheral_size: peripheral configuration buffer size * * This struct is passed in as configuration data to a DMA engine * in order to set up a certain channel for DMA transport at runtime. * The DMA device/engine has to provide support for an additional * callback in the dma_device structure, device_config, and this struct * will then be passed in as an argument to the function. * * The rationale for adding configuration information to this struct is as * follows: if it is likely that more than one DMA slave controller in * the world will support the configuration option, then make it generic. * If not: if it is fixed so that it can be sent statically from the platform * data, then prefer to do that. */ struct dma_slave_config { enum dma_transfer_direction direction; phys_addr_t src_addr; phys_addr_t dst_addr; enum dma_slave_buswidth src_addr_width; enum dma_slave_buswidth dst_addr_width; u32 src_maxburst; u32 dst_maxburst; u32 src_port_window_size; u32 dst_port_window_size; bool device_fc; void *peripheral_config; size_t peripheral_size; }; /** * enum dma_residue_granularity - Granularity of the reported transfer residue * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The * DMA channel is only able to tell whether a descriptor has been completed or * not, which means residue reporting is not supported by this channel. The * residue field of the dma_tx_state structure will always be 0. * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully * completed segment of the transfer (for cyclic transfers this is after each * period). This is typically implemented by having the hardware generate an * interrupt after each transferred segment and then the driver updates the * outstanding residue by the size of the segment. Another possibility is if * the hardware supports scatter-gather and the segment descriptor has a field * which gets set after the segment has been completed. The driver then counts * the number of segments without the flag set to compute the residue. * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred * burst.
This is typically only supported if the hardware has a progress * register of some sort (e.g. a register with the current read/write address * or a register with the amount of bursts/beats/bytes that have been * transferred or still need to be transferred). */ enum dma_residue_granularity { DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, DMA_RESIDUE_GRANULARITY_SEGMENT = 1, DMA_RESIDUE_GRANULARITY_BURST = 2, }; /** * struct dma_slave_caps - expose capabilities of a slave channel only * @src_addr_widths: bit mask of src addr widths the channel supports. * Width is specified in bytes, e.g. for a channel supporting * a width of 4 the mask should have BIT(4) set. * @dst_addr_widths: bit mask of dst addr widths the channel supports * @directions: bit mask of slave directions the channel supports. * Since the enum dma_transfer_direction is not defined as bit flag for * each type, the dma controller should set BIT(<TYPE>) and the same * should be checked by the controller as well * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer * @max_sg_burst: max number of SG list entries executed in a single burst * DMA transaction with no software intervention for reinitialization. * Zero value means unlimited number of entries. * @cmd_pause: true, if pause is supported (i.e. for reading residue or * for resume later) * @cmd_resume: true, if resume is supported * @cmd_terminate: true, if terminate cmd is supported * @residue_granularity: granularity of the reported transfer residue * @descriptor_reuse: if a descriptor can be reused by client and * resubmitted multiple times */ struct dma_slave_caps { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; u32 min_burst; u32 max_burst; u32 max_sg_burst; bool cmd_pause; bool cmd_resume; bool cmd_terminate; enum dma_residue_granularity residue_granularity; bool descriptor_reuse; }; static inline const char *dma_chan_name(struct dma_chan *chan) { return dev_name(&chan->dev->device); } /** * typedef dma_filter_fn - callback filter for dma_request_channel * @chan: channel to be reviewed * @filter_param: opaque parameter passed through dma_request_channel * * When this optional parameter is specified in a call to dma_request_channel a * suitable channel is passed to this routine for further dispositioning before * being returned, where 'suitable' indicates a non-busy channel that * satisfies the given capability mask. It returns 'true' to indicate that the * channel is suitable.
*/ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); typedef void (*dma_async_tx_callback)(void *dma_async_param); enum dmaengine_tx_result { DMA_TRANS_NOERROR = 0, /* SUCCESS */ DMA_TRANS_READ_FAILED, /* Source DMA read failed */ DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */ DMA_TRANS_ABORTED, /* Op never submitted / aborted */ }; struct dmaengine_result { enum dmaengine_tx_result result; u32 residue; }; typedef void (*dma_async_tx_callback_result)(void *dma_async_param, const struct dmaengine_result *result); struct dmaengine_unmap_data { #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) u16 map_cnt; #else u8 map_cnt; #endif u8 to_cnt; u8 from_cnt; u8 bidi_cnt; struct device *dev; struct kref kref; size_t len; dma_addr_t addr[]; }; struct dma_async_tx_descriptor; struct dma_descriptor_metadata_ops { int (*attach)(struct dma_async_tx_descriptor *desc, void *data, size_t len); void *(*get_ptr)(struct dma_async_tx_descriptor *desc, size_t *payload_len, size_t *max_len); int (*set_len)(struct dma_async_tx_descriptor *desc, size_t payload_len); }; /** * struct dma_async_tx_descriptor - async transaction descriptor * ---dma generic offload fields--- * @cookie: tracking cookie for this transaction, set to -EBUSY if * this tx is sitting on a dependency list * @flags: flags to augment operation preparation, control completion, and * communicate status * @phys: physical address of the descriptor * @chan: target channel for this operation * @tx_submit: accept the descriptor, assign ordered cookie and mark the * descriptor pending. To be pushed on .issue_pending() call * @desc_free: driver's callback function to free a resusable descriptor * after completion * @callback: routine to call after this operation is complete * @callback_result: error result from a DMA transaction * @callback_param: general parameter to pass to the callback routine * @unmap: hook for generic DMA unmap data * @desc_metadata_mode: core managed metadata mode to protect mixed use of * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. 
Otherwise * DESC_METADATA_NONE * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the * DMA driver if metadata mode is supported with the descriptor * ---async_tx api specific fields--- * @next: at completion submit this descriptor * @parent: pointer to the next level up in the dependency chain * @lock: protect the parent and next pointers */ struct dma_async_tx_descriptor { dma_cookie_t cookie; enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ dma_addr_t phys; struct dma_chan *chan; dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); int (*desc_free)(struct dma_async_tx_descriptor *tx); dma_async_tx_callback callback; dma_async_tx_callback_result callback_result; void *callback_param; struct dmaengine_unmap_data *unmap; enum dma_desc_metadata_mode desc_metadata_mode; struct dma_descriptor_metadata_ops *metadata_ops; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; spinlock_t lock; #endif }; #ifdef CONFIG_DMA_ENGINE static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, struct dmaengine_unmap_data *unmap) { kref_get(&unmap->kref); tx->unmap = unmap; } struct dmaengine_unmap_data * dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags); void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap); #else static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, struct dmaengine_unmap_data *unmap) { } static inline struct dmaengine_unmap_data * dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) { return NULL; } static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) { } #endif static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) { if (!tx->unmap) return; dmaengine_unmap_put(tx->unmap); tx->unmap = NULL; } #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH static inline void txd_lock(struct dma_async_tx_descriptor *txd) { } static inline void txd_unlock(struct dma_async_tx_descriptor *txd) { } static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) { BUG(); } static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) { } static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) { } static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) { return NULL; } static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) { return NULL; } #else static inline void txd_lock(struct dma_async_tx_descriptor *txd) { spin_lock_bh(&txd->lock); } static inline void txd_unlock(struct dma_async_tx_descriptor *txd) { spin_unlock_bh(&txd->lock); } static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) { txd->next = next; next->parent = txd; } static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) { txd->parent = NULL; } static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) { txd->next = NULL; } static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) { return txd->parent; } static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) { return txd->next; } #endif /** * struct dma_tx_state - filled in to report the status of * a transfer. * @last: last completed DMA cookie * @used: last issued DMA cookie (i.e. 
the one in progress) * @residue: the remaining number of bytes left to transmit * on the selected transfer for states DMA_IN_PROGRESS and * DMA_PAUSED if this is implemented in the driver, else 0 * @in_flight_bytes: amount of data in bytes cached by the DMA. */ struct dma_tx_state { dma_cookie_t last; dma_cookie_t used; u32 residue; u32 in_flight_bytes; }; /** * enum dmaengine_alignment - defines alignment of the DMA async tx * buffers */ enum dmaengine_alignment { DMAENGINE_ALIGN_1_BYTE = 0, DMAENGINE_ALIGN_2_BYTES = 1, DMAENGINE_ALIGN_4_BYTES = 2, DMAENGINE_ALIGN_8_BYTES = 3, DMAENGINE_ALIGN_16_BYTES = 4, DMAENGINE_ALIGN_32_BYTES = 5, DMAENGINE_ALIGN_64_BYTES = 6, DMAENGINE_ALIGN_128_BYTES = 7, DMAENGINE_ALIGN_256_BYTES = 8, }; /** * struct dma_slave_map - associates slave device and its slave channel with * parameter to be used by a filter function * @devname: name of the device * @slave: slave channel name * @param: opaque parameter to pass to struct dma_filter.fn */ struct dma_slave_map { const char *devname; const char *slave; void *param; }; /** * struct dma_filter - information for slave device/channel to filter_fn/param * mapping * @fn: filter function callback * @mapcnt: number of slave device/channel in the map * @map: array of channel to filter mapping data */ struct dma_filter { dma_filter_fn fn; int mapcnt; const struct dma_slave_map *map; }; /** * struct dma_device - info on the entity supplying DMA services * @ref: reference is taken and put every time a channel is allocated or freed * @chancnt: how many DMA channels are supported * @privatecnt: how many DMA channels are requested by dma_request_channel * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list * @filter: information for device/slave to filter function/param mapping * @cap_mask: one or more dma_capability flags * @desc_metadata_modes: supported metadata modes by the DMA device * @max_xor: maximum number of xor sources, 0 if no capability * @max_pq: maximum number of PQ sources and PQ-continue capability * @copy_align: alignment shift for memcpy operations * @xor_align: alignment shift for xor operations * @pq_align: alignment shift for pq operations * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api * @owner: owner module (automatically set based on the provided dev) * @chan_ida: unique channel ID * @src_addr_widths: bit mask of src addr widths the device supports * Width is specified in bytes, e.g. for a device supporting * a width of 4 the mask should have BIT(4) set. * @dst_addr_widths: bit mask of dst addr widths the device supports * @directions: bit mask of slave directions the device supports. * Since the enum dma_transfer_direction is not defined as bit flag for * each type, the dma controller should set BIT(<TYPE>) and the same * should be checked by the controller as well * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer * @max_sg_burst: max number of SG list entries executed in a single burst * DMA transaction with no software intervention for reinitialization. * Zero value means unlimited number of entries.
* @descriptor_reuse: a submitted transfer can be resubmitted after completion * @residue_granularity: granularity of the transfer residue reported * by tx_status * @device_alloc_chan_resources: allocate resources and return the * number of allocated descriptors * @device_router_config: optional callback for DMA router configuration * @device_free_chan_resources: release DMA channel's resources * @device_prep_dma_memcpy: prepares a memcpy operation * @device_prep_dma_xor: prepares a xor operation * @device_prep_dma_xor_val: prepares a xor validation operation * @device_prep_dma_pq: prepares a pq operation * @device_prep_dma_pq_val: prepares a pqzero_sum operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list * @device_prep_dma_interrupt: prepares an end of chain interrupt operation * @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer, * where the address and size of each segment is located in one entry of * the dma_vec array. * @device_prep_slave_sg: prepares a slave dma operation * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. * The function takes a buffer of size buf_len. The callback function will * be called after period_len bytes have been transferred. * @device_prep_interleaved_dma: prepares an interleaved transfer described * in a generic way by a dma_interleaved_template. * @device_caps: May be used to override the generic DMA slave capabilities * with per-channel specific ones * @device_config: Pushes a new configuration to a channel, return 0 or an error * code * @device_pause: Pauses any transfer happening on a channel. Returns * 0 or an error code * @device_resume: Resumes any transfer on a channel previously * paused. Returns 0 or an error code * @device_terminate_all: Aborts all transfers on a channel. Returns 0 * or an error code * @device_synchronize: Synchronizes the termination of transfers to the * current context. * @device_tx_status: poll for transaction completion, the optional * txstate parameter can be supplied with a pointer to get a * struct with auxiliary transfer status information, otherwise the call * will just return a simple status code * @device_issue_pending: push pending transactions to hardware * @device_release: called sometime after dma_async_device_unregister() is * called and there are no further references to this structure. This * must be implemented to free resources; however, many existing drivers * do not and are therefore not safe to unbind while in use. * @dbg_summary_show: optional routine to show contents in debugfs; default code * will be used when this is omitted, but custom code can show extra, * controller-specific information.
* @dbg_dev_root: the root folder in debugfs for this device */ struct dma_device { struct kref ref; unsigned int chancnt; unsigned int privatecnt; struct list_head channels; struct list_head global_node; struct dma_filter filter; dma_cap_mask_t cap_mask; enum dma_desc_metadata_mode desc_metadata_modes; unsigned short max_xor; unsigned short max_pq; enum dmaengine_alignment copy_align; enum dmaengine_alignment xor_align; enum dmaengine_alignment pq_align; enum dmaengine_alignment fill_align; #define DMA_HAS_PQ_CONTINUE (1 << 15) int dev_id; struct device *dev; struct module *owner; struct ida chan_ida; u32 src_addr_widths; u32 dst_addr_widths; u32 directions; u32 min_burst; u32 max_burst; u32 max_sg_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *chan); int (*device_router_config)(struct dma_chan *chan); void (*device_free_chan_resources)(struct dma_chan *chan); struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_xor)( struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_pq)( struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_memset)( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)( struct dma_chan *chan, struct scatterlist *sg, unsigned int nents, int value, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( struct dma_chan *chan, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_peripheral_dma_vec)( struct dma_chan *chan, const struct dma_vec *vecs, size_t nents, enum dma_transfer_direction direction, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_slave_sg)( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context); struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags); void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps); int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config); int (*device_pause)(struct dma_chan *chan); int (*device_resume)(struct dma_chan *chan); int (*device_terminate_all)(struct dma_chan *chan); void (*device_synchronize)(struct dma_chan *chan); enum dma_status (*device_tx_status)(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); void 
(*device_release)(struct dma_device *dev); /* debugfs support */ void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev); struct dentry *dbg_dev_root; }; static inline int dmaengine_slave_config(struct dma_chan *chan, struct dma_slave_config *config) { if (chan->device->device_config) return chan->device->device_config(chan, config); return -ENOSYS; } static inline bool is_slave_direction(enum dma_transfer_direction direction) { return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) || (direction == DMA_DEV_TO_DEV); } static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( struct dma_chan *chan, dma_addr_t buf, size_t len, enum dma_transfer_direction dir, unsigned long flags) { struct scatterlist sg; sg_init_table(&sg, 1); sg_dma_address(&sg) = buf; sg_dma_len(&sg) = len; if (!chan || !chan->device || !chan->device->device_prep_slave_sg) return NULL; return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags, NULL); } /** * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor * @chan: The channel to be used for this descriptor * @vecs: The array of DMA vectors that should be transferred * @nents: The number of DMA vectors in the array * @dir: Specifies the direction of the data transfer * @flags: DMA engine flags */ static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec( struct dma_chan *chan, const struct dma_vec *vecs, size_t nents, enum dma_transfer_direction dir, unsigned long flags) { if (!chan || !chan->device || !chan->device->device_prep_peripheral_dma_vec) return NULL; return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents, dir, flags); } static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags) { if (!chan || !chan->device || !chan->device->device_prep_slave_sg) return NULL; return chan->device->device_prep_slave_sg(chan, sgl, sg_len, dir, flags, NULL); } #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct rio_dma_ext; static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags, struct rio_dma_ext *rio_ext) { if (!chan || !chan->device || !chan->device->device_prep_slave_sg) return NULL; return chan->device->device_prep_slave_sg(chan, sgl, sg_len, dir, flags, rio_ext); } #endif static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) return NULL; return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, period_len, dir, flags); } static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags) { if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) return NULL; if (flags & DMA_PREP_REPEAT && !test_bit(DMA_REPEAT, chan->device->cap_mask.bits)) return NULL; return chan->device->device_prep_interleaved_dma(chan, xt, flags); } /** * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor. 
* @chan: The channel to be used for this descriptor * @dest: Address of buffer to be set * @value: Treated as a single byte value that fills the destination buffer * @len: The total size of dest * @flags: DMA engine flags */ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { if (!chan || !chan->device || !chan->device->device_prep_dma_memset) return NULL; return chan->device->device_prep_dma_memset(chan, dest, value, len, flags); } static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy) return NULL; return chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags); } static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan, enum dma_desc_metadata_mode mode) { if (!chan) return false; return !!(chan->device->desc_metadata_modes & mode); } #ifdef CONFIG_DMA_ENGINE int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, void *data, size_t len); void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, size_t *payload_len, size_t *max_len); int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, size_t payload_len); #else /* CONFIG_DMA_ENGINE */ static inline int dmaengine_desc_attach_metadata( struct dma_async_tx_descriptor *desc, void *data, size_t len) { return -EINVAL; } static inline void *dmaengine_desc_get_metadata_ptr( struct dma_async_tx_descriptor *desc, size_t *payload_len, size_t *max_len) { return NULL; } static inline int dmaengine_desc_set_metadata_len( struct dma_async_tx_descriptor *desc, size_t payload_len) { return -EINVAL; } #endif /* CONFIG_DMA_ENGINE */ /** * dmaengine_terminate_all() - Terminate all active DMA transfers * @chan: The channel for which to terminate the transfers * * This function is DEPRECATED use either dmaengine_terminate_sync() or * dmaengine_terminate_async() instead. */ static inline int dmaengine_terminate_all(struct dma_chan *chan) { if (chan->device->device_terminate_all) return chan->device->device_terminate_all(chan); return -ENOSYS; } /** * dmaengine_terminate_async() - Terminate all active DMA transfers * @chan: The channel for which to terminate the transfers * * Calling this function will terminate all active and pending descriptors * that have previously been submitted to the channel. It is not guaranteed * though that the transfer for the active descriptor has stopped when the * function returns. Furthermore it is possible the complete callback of a * submitted transfer is still running when this function returns. * * dmaengine_synchronize() needs to be called before it is safe to free * any memory that is accessed by previously submitted descriptors or before * freeing any resources accessed from within the completion callback of any * previously submitted descriptors. * * This function can be called from atomic context as well as from within a * complete callback of a descriptor submitted on the same channel. * * If none of the two conditions above apply consider using * dmaengine_terminate_sync() instead. 
*/ static inline int dmaengine_terminate_async(struct dma_chan *chan) { if (chan->device->device_terminate_all) return chan->device->device_terminate_all(chan); return -EINVAL; } /** * dmaengine_synchronize() - Synchronize DMA channel termination * @chan: The channel to synchronize * * Synchronizes to the DMA channel termination to the current context. When this * function returns it is guaranteed that all transfers for previously issued * descriptors have stopped and it is safe to free the memory associated * with them. Furthermore it is guaranteed that all complete callback functions * for a previously submitted descriptor have finished running and it is safe to * free resources accessed from within the complete callbacks. * * The behavior of this function is undefined if dma_async_issue_pending() has * been called between dmaengine_terminate_async() and this function. * * This function must only be called from non-atomic context and must not be * called from within a complete callback of a descriptor submitted on the same * channel. */ static inline void dmaengine_synchronize(struct dma_chan *chan) { might_sleep(); if (chan->device->device_synchronize) chan->device->device_synchronize(chan); } /** * dmaengine_terminate_sync() - Terminate all active DMA transfers * @chan: The channel for which to terminate the transfers * * Calling this function will terminate all active and pending transfers * that have previously been submitted to the channel. It is similar to * dmaengine_terminate_async() but guarantees that the DMA transfer has actually * stopped and that all complete callbacks have finished running when the * function returns. * * This function must only be called from non-atomic context and must not be * called from within a complete callback of a descriptor submitted on the same * channel. 
*/ static inline int dmaengine_terminate_sync(struct dma_chan *chan) { int ret; ret = dmaengine_terminate_async(chan); if (ret) return ret; dmaengine_synchronize(chan); return 0; } static inline int dmaengine_pause(struct dma_chan *chan) { if (chan->device->device_pause) return chan->device->device_pause(chan); return -ENOSYS; } static inline int dmaengine_resume(struct dma_chan *chan) { if (chan->device->device_resume) return chan->device->device_resume(chan); return -ENOSYS; } static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *state) { return chan->device->device_tx_status(chan, cookie, state); } static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc) { return desc->tx_submit(desc); } static inline bool dmaengine_check_align(enum dmaengine_alignment align, size_t off1, size_t off2, size_t len) { return !(((1 << align) - 1) & (off1 | off2 | len)); } static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, size_t off2, size_t len) { return dmaengine_check_align(dev->copy_align, off1, off2, len); } static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, size_t off2, size_t len) { return dmaengine_check_align(dev->xor_align, off1, off2, len); } static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, size_t off2, size_t len) { return dmaengine_check_align(dev->pq_align, off1, off2, len); } static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, size_t off2, size_t len) { return dmaengine_check_align(dev->fill_align, off1, off2, len); } static inline void dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) { dma->max_pq = maxpq; if (has_pq_continue) dma->max_pq |= DMA_HAS_PQ_CONTINUE; } static inline bool dmaf_continue(enum dma_ctrl_flags flags) { return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; } static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) { enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; return (flags & mask) == mask; } static inline bool dma_dev_has_pq_continue(struct dma_device *dma) { return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; } static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) { return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; } /* dma_maxpq - reduce maxpq in the face of continued operations * @dma - dma device with PQ capability * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set * * When an engine does not support native continuation we need 3 extra * source slots to reuse P and Q with the following coefficients: * 1/ {00} * P : remove P from Q', but use it as a source for P' * 2/ {01} * Q : use Q to continue Q' calculation * 3/ {00} * Q : subtract Q from P' to cancel (2) * * In the case where P is disabled we only need 1 extra source: * 1/ {01} * Q : use Q to continue Q' calculation */ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) { if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) return dma_dev_to_maxpq(dma); if (dmaf_p_disabled_continue(flags)) return dma_dev_to_maxpq(dma) - 1; if (dmaf_continue(flags)) return dma_dev_to_maxpq(dma) - 3; BUG(); } static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg, size_t dir_icg) { if (inc) { if (dir_icg) return dir_icg; if (sgl) return icg; } return 0; } static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt, struct data_chunk *chunk) { return 
dmaengine_get_icg(xt->dst_inc, xt->dst_sgl, chunk->icg, chunk->dst_icg); } static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt, struct data_chunk *chunk) { return dmaengine_get_icg(xt->src_inc, xt->src_sgl, chunk->icg, chunk->src_icg); } /* --- public DMA engine API --- */ #ifdef CONFIG_DMA_ENGINE void dmaengine_get(void); void dmaengine_put(void); #else static inline void dmaengine_get(void) { } static inline void dmaengine_put(void) { } #endif #ifdef CONFIG_ASYNC_TX_DMA #define async_dmaengine_get() dmaengine_get() #define async_dmaengine_put() dmaengine_put() #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) #else #define async_dma_find_channel(type) dma_find_channel(type) #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */ #else static inline void async_dmaengine_get(void) { } static inline void async_dmaengine_put(void) { } static inline struct dma_chan * async_dma_find_channel(enum dma_transaction_type type) { return NULL; } #endif /* CONFIG_ASYNC_TX_DMA */ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan); static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) { tx->flags |= DMA_CTRL_ACK; } static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) { tx->flags &= ~DMA_CTRL_ACK; } static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) { return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; } #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) static inline void __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) { set_bit(tx_type, dstp->bits); } #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) static inline void __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) { clear_bit(tx_type, dstp->bits); } #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) static inline void __dma_cap_zero(dma_cap_mask_t *dstp) { bitmap_zero(dstp->bits, DMA_TX_TYPE_END); } #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) static inline int __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) { return test_bit(tx_type, srcp->bits); } #define for_each_dma_cap_mask(cap, mask) \ for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END) /** * dma_async_issue_pending - flush pending transactions to HW * @chan: target DMA channel * * This allows drivers to push copies to HW in batches, * reducing MMIO writes where possible. */ static inline void dma_async_issue_pending(struct dma_chan *chan) { chan->device->device_issue_pending(chan); } /** * dma_async_is_tx_complete - poll for transaction completion * @chan: DMA channel * @cookie: transaction identifier to check status of * @last: returns last completed cookie, can be NULL * @used: returns last issued cookie, can be NULL * * If @last and @used are passed in, upon return they reflect the driver * internal state and can be used with dma_async_is_complete() to check * the status of multiple cookies without re-checking hardware state. 
*/ static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) { struct dma_tx_state state; enum dma_status status; status = chan->device->device_tx_status(chan, cookie, &state); if (last) *last = state.last; if (used) *used = state.used; return status; } /** * dma_async_is_complete - test a cookie against chan state * @cookie: transaction identifier to test status of * @last_complete: last know completed transaction * @last_used: last cookie value handed out * * dma_async_is_complete() is used in dma_async_is_tx_complete() * the test logic is separated for lightweight testing of multiple cookies */ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, dma_cookie_t last_complete, dma_cookie_t last_used) { if (last_complete <= last_used) { if ((cookie <= last_complete) || (cookie > last_used)) return DMA_COMPLETE; } else { if ((cookie <= last_complete) && (cookie > last_used)) return DMA_COMPLETE; } return DMA_IN_PROGRESS; } static inline void dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) { if (!st) return; st->last = last; st->used = used; st->residue = residue; } #ifdef CONFIG_DMA_ENGINE struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); void dma_issue_pending_all(void); struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param, struct device_node *np); struct dma_chan *dma_request_chan(struct device *dev, const char *name); struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name); void dma_release_channel(struct dma_chan *chan); int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); #else static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) { return NULL; } static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) { return DMA_COMPLETE; } static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) { return DMA_COMPLETE; } static inline void dma_issue_pending_all(void) { } static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param, struct device_node *np) { return NULL; } static inline struct dma_chan *dma_request_chan(struct device *dev, const char *name) { return ERR_PTR(-ENODEV); } static inline struct dma_chan *dma_request_chan_by_mask( const dma_cap_mask_t *mask) { return ERR_PTR(-ENODEV); } static inline struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name) { return ERR_PTR(-ENODEV); } static inline void dma_release_channel(struct dma_chan *chan) { } static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) { return -ENXIO; } #endif static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) { struct dma_slave_caps caps; int ret; ret = dma_get_slave_caps(tx->chan, &caps); if (ret) return ret; if (!caps.descriptor_reuse) return -EPERM; tx->flags |= DMA_CTRL_REUSE; return 0; } static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx) { tx->flags &= ~DMA_CTRL_REUSE; } static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx) { return (tx->flags & DMA_CTRL_REUSE) == 
DMA_CTRL_REUSE; } static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) { /* this is supported for reusable desc, so check that */ if (!dmaengine_desc_test_reuse(desc)) return -EPERM; return desc->desc_free(desc); } /* --- DMA device --- */ int dma_async_device_register(struct dma_device *device); int dmaenginem_async_device_register(struct dma_device *device); void dma_async_device_unregister(struct dma_device *device); int dma_async_device_channel_register(struct dma_device *device, struct dma_chan *chan, const char *name); void dma_async_device_channel_unregister(struct dma_device *device, struct dma_chan *chan); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); #define dma_request_channel(mask, x, y) \ __dma_request_channel(&(mask), x, y, NULL) /* Deprecated, please use dma_request_chan() directly */ static inline struct dma_chan * __deprecated dma_request_slave_channel(struct device *dev, const char *name) { struct dma_chan *ch = dma_request_chan(dev, name); return IS_ERR(ch) ? NULL : ch; } static inline struct dma_chan *dma_request_slave_channel_compat(const dma_cap_mask_t mask, dma_filter_fn fn, void *fn_param, struct device *dev, const char *name) { struct dma_chan *chan; chan = dma_request_chan(dev, name); if (!IS_ERR(chan)) return chan; if (!fn || !fn_param) return NULL; return dma_request_channel(mask, fn, fn_param); } static inline char * dmaengine_get_direction_text(enum dma_transfer_direction dir) { switch (dir) { case DMA_DEV_TO_MEM: return "DEV_TO_MEM"; case DMA_MEM_TO_DEV: return "MEM_TO_DEV"; case DMA_MEM_TO_MEM: return "MEM_TO_MEM"; case DMA_DEV_TO_DEV: return "DEV_TO_DEV"; default: return "invalid"; } } static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan) { if (chan->dev->chan_dma_dev) return &chan->dev->device; return chan->device->dev; } #endif /* DMAENGINE_H */
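/*
 * Illustrative usage sketch (not part of dmaengine.h): one plausible way a
 * client driver could drive the slave API declared above. The "tx" channel
 * name, the FIFO address and the burst/width values are assumptions made
 * for the example; error handling is abbreviated.
 */
#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_dma_tx(struct device *dev, dma_addr_t buf, size_t len,
			  phys_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "tx");	/* hypothetical channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EINVAL;
		goto out;
	}

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	/* Nothing runs until pending descriptors are flushed to hardware */
	dma_async_issue_pending(chan);

	/* Polling helper; real drivers normally use a completion callback */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}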
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: psopinfo - AML opcode information functions and dispatch tables * * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #include "acopcode.h" #include "amlcode.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psopinfo") static const u8 acpi_gbl_argument_count[] = { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 }; /******************************************************************************* * * FUNCTION: acpi_ps_get_opcode_info * * PARAMETERS: opcode - The AML opcode * * RETURN: A pointer to the info about the opcode. * * DESCRIPTION: Find AML opcode description based on the opcode. * NOTE: This procedure must ALWAYS return a valid pointer! * ******************************************************************************/ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode) { #if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT const char *opcode_name = "Unknown AML opcode"; #endif ACPI_FUNCTION_NAME(ps_get_opcode_info); /* * Detect normal 8-bit opcode or extended 16-bit opcode */ if (!(opcode & 0xFF00)) { /* Simple (8-bit) opcode: 0-255, can't index beyond table */ return (&acpi_gbl_aml_op_info [acpi_gbl_short_op_index[(u8)opcode]]); } if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) && (((u8)opcode) <= MAX_EXTENDED_OPCODE)) { /* Valid extended (16-bit) opcode */ return (&acpi_gbl_aml_op_info [acpi_gbl_long_op_index[(u8)opcode]]); } #if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT #include "asldefine.h" switch (opcode) { case AML_RAW_DATA_BYTE: opcode_name = "-Raw Data Byte-"; break; case AML_RAW_DATA_WORD: opcode_name = "-Raw Data Word-"; break; case AML_RAW_DATA_DWORD: opcode_name = "-Raw Data Dword-"; break; case AML_RAW_DATA_QWORD: opcode_name = "-Raw Data Qword-"; break; case AML_RAW_DATA_BUFFER: opcode_name = "-Raw Data Buffer-"; break; case AML_RAW_DATA_CHAIN: opcode_name = "-Raw Data Buffer Chain-"; break; case AML_PACKAGE_LENGTH: opcode_name = "-Package Length-"; break; case AML_UNASSIGNED_OPCODE: opcode_name = "-Unassigned Opcode-"; break; case AML_DEFAULT_ARG_OP: opcode_name = "-Default Arg-"; break; default: break; } /* Unknown AML opcode */ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode)); #endif return (&acpi_gbl_aml_op_info[_UNK]); } /******************************************************************************* * * FUNCTION: acpi_ps_get_opcode_name * * PARAMETERS: opcode - The AML opcode * * RETURN: A pointer to the name of the
opcode (ASCII String) * Note: Never returns NULL. * * DESCRIPTION: Translate an opcode into a human-readable string * ******************************************************************************/ const char *acpi_ps_get_opcode_name(u16 opcode) { #if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT) const struct acpi_opcode_info *op; op = acpi_ps_get_opcode_info(opcode); /* Always guaranteed to return a valid pointer */ return (op->name); #else return ("OpcodeName unavailable"); #endif } /******************************************************************************* * * FUNCTION: acpi_ps_get_argument_count * * PARAMETERS: op_type - Type associated with the AML opcode * * RETURN: Argument count * * DESCRIPTION: Obtain the number of expected arguments for an AML opcode * ******************************************************************************/ u8 acpi_ps_get_argument_count(u32 op_type) { if (op_type <= AML_TYPE_EXEC_6A_0T_1R) { return (acpi_gbl_argument_count[op_type]); } return (0); } /* * This table is directly indexed by the opcodes It returns * an index into the opcode table (acpi_gbl_aml_op_info) */ const u8 acpi_gbl_short_op_index[256] = { /* 0 1 2 3 4 5 6 7 */ /* 8 9 A B C D E F */ /* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK, /* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK, /* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, 0x81, _UNK, _UNK, /* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX, /* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D, /* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, /* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, /* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, /* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC, /* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, /* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK, /* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, /* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, /* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30, /* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72, /* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74, /* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A, /* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61, /* 0xA8 */ 0x62, 0x82, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK, /* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45, }; /* * This table is indexed by the second opcode of the extended opcode * pair. 
It returns an index into the opcode table (acpi_gbl_aml_op_info) */ const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = { /* 0 1 2 3 4 5 6 7 */ /* 8 9 A B C D E F */ /* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK, /* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B, /* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, /* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK, /* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x88 */ 0x7C, };
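/*
 * Illustrative sketch (not part of psopinfo.c): how the dispatch tables above
 * are consulted. An 8-bit opcode such as 0x70 (StoreOp) is resolved through
 * acpi_gbl_short_op_index, while a 16-bit opcode with 0x5B in the high byte,
 * e.g. 0x5B80 (OpRegionOp), goes through acpi_gbl_long_op_index. The opcode
 * values here are only examples.
 */
static void example_dump_opcode(u16 aml_opcode)
{
	const struct acpi_opcode_info *op_info;

	/* Always returns a valid pointer; unknown opcodes map to the _UNK entry */
	op_info = acpi_ps_get_opcode_info(aml_opcode);

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Opcode %4.4X [%s] takes %u fixed argument(s)\n",
			  aml_opcode, acpi_ps_get_opcode_name(aml_opcode),
			  acpi_ps_get_argument_count(op_info->type)));
}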
#undef TRACE_SYSTEM #define TRACE_SYSTEM qdisc #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_QDISC_H #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/tracepoint.h> #include <linux/ftrace.h> #include <linux/pkt_sched.h> #include <net/sch_generic.h> TRACE_EVENT(qdisc_dequeue, TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, int packets, struct sk_buff *skb), TP_ARGS(qdisc, txq, packets, skb), TP_STRUCT__entry( __field( struct Qdisc *, qdisc ) __field(const struct netdev_queue *, txq ) __field( int, packets ) __field( void *, skbaddr ) __field( int, ifindex ) __field( u32, handle ) __field( u32, parent ) __field( unsigned long, txq_state) ), /* skb==NULL indicates that packets dequeued was 0, even when packets==1 */ TP_fast_assign( __entry->qdisc = qdisc; __entry->txq = txq; __entry->packets = skb ? packets : 0; __entry->skbaddr = skb; __entry->ifindex = txq->dev ? txq->dev->ifindex : 0; __entry->handle = qdisc->handle; __entry->parent = qdisc->parent; __entry->txq_state = txq->state; ), TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p", __entry->ifindex, __entry->handle, __entry->parent, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); TRACE_EVENT(qdisc_enqueue, TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb), TP_ARGS(qdisc, txq, skb), TP_STRUCT__entry( __field(struct Qdisc *, qdisc) __field(const struct netdev_queue *, txq) __field(void *, skbaddr) __field(int, ifindex) __field(u32, handle) __field(u32, parent) ), TP_fast_assign( __entry->qdisc = qdisc; __entry->txq = txq; __entry->skbaddr = skb; __entry->ifindex = txq->dev ? txq->dev->ifindex : 0; __entry->handle = qdisc->handle; __entry->parent = qdisc->parent; ), TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%p", __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr) ); TRACE_EVENT(qdisc_reset, TP_PROTO(struct Qdisc *q), TP_ARGS(q), TP_STRUCT__entry( __string( dev, qdisc_dev(q) ?
qdisc_dev(q)->name : "(null)" ) __string( kind, q->ops->id ) __field( u32, parent ) __field( u32, handle ) ), TP_fast_assign( __assign_str(dev); __assign_str(kind); __entry->parent = q->parent; __entry->handle = q->handle; ), TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x", __get_str(dev), __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent), TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle)) ); TRACE_EVENT(qdisc_destroy, TP_PROTO(struct Qdisc *q), TP_ARGS(q), TP_STRUCT__entry( __string( dev, qdisc_dev(q)->name ) __string( kind, q->ops->id ) __field( u32, parent ) __field( u32, handle ) ), TP_fast_assign( __assign_str(dev); __assign_str(kind); __entry->parent = q->parent; __entry->handle = q->handle; ), TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x", __get_str(dev), __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent), TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle)) ); TRACE_EVENT(qdisc_create, TP_PROTO(const struct Qdisc_ops *ops, struct net_device *dev, u32 parent), TP_ARGS(ops, dev, parent), TP_STRUCT__entry( __string( dev, dev->name ) __string( kind, ops->id ) __field( u32, parent ) ), TP_fast_assign( __assign_str(dev); __assign_str(kind); __entry->parent = parent; ), TP_printk("dev=%s kind=%s parent=%x:%x", __get_str(dev), __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent)) ); #endif /* _TRACE_QDISC_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
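/*
 * Illustrative sketch (not part of this header): each TRACE_EVENT() above
 * generates a trace_<name>() call that the stack invokes at the matching
 * point. A dequeue path could fire the qdisc_dequeue event roughly like
 * this; the function below is a simplified stand-in for the real call site
 * in the packet scheduler core, not a copy of it.
 */
static struct sk_buff *example_dequeue_one(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = q->dequeue(q);

	/* packets==1 here; skb may still be NULL if the queue was empty */
	trace_qdisc_dequeue(q, txq, 1, skb);
	return skb;
}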
// SPDX-License-Identifier: GPL-2.0-or-later /* Signature verification with an asymmetric key * * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #define pr_fmt(fmt) "SIG: "fmt #include <keys/asymmetric-subtype.h> #include <linux/export.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/keyctl.h> #include <crypto/public_key.h> #include <keys/user-type.h> #include "asymmetric_keys.h" /* * Destroy a public key signature. */ void public_key_signature_free(struct public_key_signature *sig) { int i; if (sig) { for (i = 0; i < ARRAY_SIZE(sig->auth_ids); i++) kfree(sig->auth_ids[i]); kfree(sig->s); kfree(sig->digest); kfree(sig); } } EXPORT_SYMBOL_GPL(public_key_signature_free); /** * query_asymmetric_key - Get information about an asymmetric key. * @params: Various parameters. * @info: Where to put the information. */ int query_asymmetric_key(const struct kernel_pkey_params *params, struct kernel_pkey_query *info) { const struct asymmetric_key_subtype *subtype; struct key *key = params->key; int ret; pr_devel("==>%s()\n", __func__); if (key->type != &key_type_asymmetric) return -EINVAL; subtype = asymmetric_key_subtype(key); if (!subtype || !key->payload.data[0]) return -EINVAL; if (!subtype->query) return -ENOTSUPP; ret = subtype->query(params, info); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } EXPORT_SYMBOL_GPL(query_asymmetric_key); /** * verify_signature - Initiate the use of an asymmetric key to verify a signature * @key: The asymmetric key to verify against * @sig: The signature to check * * Returns 0 if successful or else an error. */ int verify_signature(const struct key *key, const struct public_key_signature *sig) { const struct asymmetric_key_subtype *subtype; int ret; pr_devel("==>%s()\n", __func__); if (key->type != &key_type_asymmetric) return -EINVAL; subtype = asymmetric_key_subtype(key); if (!subtype || !key->payload.data[0]) return -EINVAL; if (!subtype->verify_signature) return -ENOTSUPP; ret = subtype->verify_signature(key, sig); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } EXPORT_SYMBOL_GPL(verify_signature);
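/*
 * Illustrative sketch (not part of signature.c): a caller holding an
 * asymmetric key and a detached signature would typically fill in a
 * struct public_key_signature and hand it to verify_signature(). The
 * algorithm/encoding strings and the precomputed digest are assumptions
 * made for this example; the stack-allocated sig is not passed to
 * public_key_signature_free() because its buffers are caller-owned.
 */
#include <crypto/public_key.h>
#include <linux/key.h>

static int example_check_detached_sig(struct key *key,
				      u8 *digest, u32 digest_size,
				      u8 *raw_sig, u32 sig_size)
{
	struct public_key_signature sig = {
		.pkey_algo	= "rsa",
		.hash_algo	= "sha256",
		.encoding	= "pkcs1",
		.digest		= digest,
		.digest_size	= digest_size,
		.s		= raw_sig,
		.s_size		= sig_size,
	};

	/* Dispatches to the key subtype's ->verify_signature() hook */
	return verify_signature(key, &sig);
}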
// SPDX-License-Identifier: GPL-2.0 /* * Functions related to generic helper functions */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/scatterlist.h> #include "blk.h" static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector) { unsigned int discard_granularity = bdev_discard_granularity(bdev); sector_t granularity_aligned_sector; if (bdev_is_partition(bdev)) sector += bdev->bd_start_sect; granularity_aligned_sector = round_up(sector, discard_granularity >> SECTOR_SHIFT); /* * Make sure subsequent bios start aligned to the discard granularity if * it needs to be split. */ if (granularity_aligned_sector != sector) return granularity_aligned_sector - sector; /* * Align the bio size to the discard granularity to make splitting the bio * at discard granularity boundaries easier in the driver if needed. */ return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT; } struct bio *blk_alloc_discard_bio(struct block_device *bdev, sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask) { sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector)); struct bio *bio; if (!bio_sects) return NULL; bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask); if (!bio) return NULL; bio->bi_iter.bi_sector = *sector; bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT; *sector += bio_sects; *nr_sects -= bio_sects; /* * We can loop for a long time in here if someone does full device * discards (like mkfs). Be nice and allow us to schedule out to avoid * softlocking if preempt is disabled.
*/ cond_resched(); return bio; } int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) { struct bio *bio; while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp_mask))) *biop = bio_chain_and_submit(*biop, bio); return 0; } EXPORT_SYMBOL(__blkdev_issue_discard); /** * blkdev_issue_discard - queue a discard * @bdev: blockdev to issue discard for * @sector: start sector * @nr_sects: number of sectors to discard * @gfp_mask: memory allocation flags (for bio_alloc) * * Description: * Issue a discard request for the sectors in question. */ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask) { struct bio *bio = NULL; struct blk_plug plug; int ret = 0; blk_start_plug(&plug); __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio); if (bio) { ret = submit_bio_wait(bio); if (ret == -EOPNOTSUPP) ret = 0; bio_put(bio); } blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL(blkdev_issue_discard); static sector_t bio_write_zeroes_limit(struct block_device *bdev) { sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; return min(bdev_write_zeroes_sectors(bdev), (UINT_MAX >> SECTOR_SHIFT) & ~bs_mask); } /* * There is no reliable way for the SCSI subsystem to determine whether a * device supports a WRITE SAME operation without actually performing a write * to media. As a result, write_zeroes is enabled by default and will be * disabled if a zeroing operation subsequently fails. This means that this * queue limit is likely to change at runtime. */ static void __blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags, sector_t limit) { while (nr_sects) { unsigned int len = min(nr_sects, limit); struct bio *bio; if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) break; bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask); bio->bi_iter.bi_sector = sector; if (flags & BLKDEV_ZERO_NOUNMAP) bio->bi_opf |= REQ_NOUNMAP; bio->bi_iter.bi_size = len << SECTOR_SHIFT; *biop = bio_chain_and_submit(*biop, bio); nr_sects -= len; sector += len; cond_resched(); } } static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp, unsigned flags) { sector_t limit = bio_write_zeroes_limit(bdev); struct bio *bio = NULL; struct blk_plug plug; int ret = 0; blk_start_plug(&plug); __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, flags, limit); if (bio) { if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) { bio_await_chain(bio); blk_finish_plug(&plug); return -EINTR; } ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); /* * For some devices there is no non-destructive way to verify whether * WRITE ZEROES is actually supported. These will clear the capability * on an I/O error, in which case we'll turn any error into * "not supported" here. */ if (ret && !bdev_write_zeroes_sectors(bdev)) return -EOPNOTSUPP; return ret; } /* * Convert a number of 512B sectors to a number of pages. * The result is limited to a number of pages that can fit into a BIO. * Also make sure that the result is always at least 1 (page) for the cases * where nr_sects is lower than the number of sectors in a page. 
*/ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) { sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512); return min(pages, (sector_t)BIO_MAX_VECS); } static void __blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned int flags) { struct folio *zero_folio = largest_zero_folio(); while (nr_sects) { unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects); struct bio *bio; if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) break; bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask); bio->bi_iter.bi_sector = sector; do { unsigned int len; len = min_t(sector_t, folio_size(zero_folio), nr_sects << SECTOR_SHIFT); if (!bio_add_folio(bio, zero_folio, len, 0)) break; nr_sects -= len >> SECTOR_SHIFT; sector += len >> SECTOR_SHIFT; } while (nr_sects); *biop = bio_chain_and_submit(*biop, bio); cond_resched(); } } static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp, unsigned flags) { struct bio *bio = NULL; struct blk_plug plug; int ret = 0; if (flags & BLKDEV_ZERO_NOFALLBACK) return -EOPNOTSUPP; blk_start_plug(&plug); __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags); if (bio) { if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) { bio_await_chain(bio); blk_finish_plug(&plug); return -EINTR; } ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); return ret; } /** * __blkdev_issue_zeroout - generate number of zero filed write bios * @bdev: blockdev to issue * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @biop: pointer to anchor bio * @flags: controls detailed behavior * * Description: * Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. * * If a device is using logical block provisioning, the underlying space will * not be released if %flags contains BLKDEV_ZERO_NOUNMAP. * * If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return * -EOPNOTSUPP if no explicit hardware offload for zeroing is provided. */ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags) { sector_t limit = bio_write_zeroes_limit(bdev); if (bdev_read_only(bdev)) return -EPERM; if (limit) { __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, biop, flags, limit); } else { if (flags & BLKDEV_ZERO_NOFALLBACK) return -EOPNOTSUPP; __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, biop, flags); } return 0; } EXPORT_SYMBOL(__blkdev_issue_zeroout); /** * blkdev_issue_zeroout - zero-fill a block range * @bdev: blockdev to write * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @flags: controls detailed behavior * * Description: * Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. See __blkdev_issue_zeroout() for the * valid values for %flags. 
*/ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned flags) { int ret; if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1)) return -EINVAL; if (bdev_read_only(bdev)) return -EPERM; if (bdev_write_zeroes_sectors(bdev)) { ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, flags); if (ret != -EOPNOTSUPP) return ret; } return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags); } EXPORT_SYMBOL(blkdev_issue_zeroout); int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp) { sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev); struct bio *bio = NULL; struct blk_plug plug; int ret = 0; /* make sure that "len << SECTOR_SHIFT" doesn't overflow */ if (max_sectors > UINT_MAX >> SECTOR_SHIFT) max_sectors = UINT_MAX >> SECTOR_SHIFT; max_sectors &= ~bs_mask; if (max_sectors == 0) return -EOPNOTSUPP; if ((sector | nr_sects) & bs_mask) return -EINVAL; if (bdev_read_only(bdev)) return -EPERM; blk_start_plug(&plug); while (nr_sects) { unsigned int len = min_t(sector_t, nr_sects, max_sectors); bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp); bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_size = len << SECTOR_SHIFT; sector += len; nr_sects -= len; cond_resched(); } if (bio) { ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL(blkdev_issue_secure_erase);
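To show how the discard and zeroing helpers above are intended to be combined by a caller, here is a hedged sketch of a hypothetical kernel function (example_trim_and_zero() is a made-up name) that discards a sector range and then forces it to read back as zeroes; it only uses the exported interfaces defined above.

/*
 * Hypothetical caller of the helpers above: discard a range as a hint
 * (discarded contents are undefined), then explicitly zero it so that
 * subsequent reads are guaranteed to return zeroes. Sectors are 512-byte
 * units, and the range must be aligned to the logical block size.
 */
static int example_trim_and_zero(struct block_device *bdev,
				 sector_t sector, sector_t nr_sects)
{
	int ret;

	ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
	if (ret)
		return ret;

	/*
	 * flags == 0: fall back to writing zero pages if the device has no
	 * usable WRITE ZEROES support.
	 */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, 0);
}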
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006 IBM Corporation
 *
 * Author: Serge Hallyn <serue@us.ibm.com>
 *
 * Jun 2006 - namespaces support
 *            OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org> */ #include <linux/slab.h> #include <linux/export.h> #include <linux/nsproxy.h> #include <linux/init_task.h> #include <linux/mnt_namespace.h> #include <linux/utsname.h> #include <linux/pid_namespace.h> #include <net/net_namespace.h> #include <linux/ipc_namespace.h> #include <linux/time_namespace.h> #include <linux/fs_struct.h> #include <linux/proc_fs.h> #include <linux/proc_ns.h> #include <linux/file.h> #include <linux/syscalls.h> #include <linux/cgroup.h> #include <linux/perf_event.h> #include <linux/nstree.h> static struct kmem_cache *nsproxy_cachep; struct nsproxy init_nsproxy = { .count = REFCOUNT_INIT(1), .uts_ns = &init_uts_ns, #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) .ipc_ns = &init_ipc_ns, #endif .mnt_ns = NULL, .pid_ns_for_children = &init_pid_ns, #ifdef CONFIG_NET .net_ns = &init_net, #endif #ifdef CONFIG_CGROUPS .cgroup_ns = &init_cgroup_ns, #endif #ifdef CONFIG_TIME_NS .time_ns = &init_time_ns, .time_ns_for_children = &init_time_ns, #endif }; static inline struct nsproxy *create_nsproxy(void) { struct nsproxy *nsproxy; nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL); if (nsproxy) refcount_set(&nsproxy->count, 1); return nsproxy; } static inline void nsproxy_free(struct nsproxy *ns) { put_mnt_ns(ns->mnt_ns); put_uts_ns(ns->uts_ns); put_ipc_ns(ns->ipc_ns); put_pid_ns(ns->pid_ns_for_children); put_time_ns(ns->time_ns); put_time_ns(ns->time_ns_for_children); put_cgroup_ns(ns->cgroup_ns); put_net(ns->net_ns); kmem_cache_free(nsproxy_cachep, ns); } void deactivate_nsproxy(struct nsproxy *ns) { nsproxy_ns_active_put(ns); nsproxy_free(ns); } /* * Create new nsproxy and all of its the associated namespaces. * Return the newly created nsproxy. Do not attach this to the task, * leave it to the caller to do proper locking and attach it to task. 
*/ static struct nsproxy *create_new_namespaces(u64 flags, struct task_struct *tsk, struct user_namespace *user_ns, struct fs_struct *new_fs) { struct nsproxy *new_nsp; int err; new_nsp = create_nsproxy(); if (!new_nsp) return ERR_PTR(-ENOMEM); new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, user_ns, new_fs); if (IS_ERR(new_nsp->mnt_ns)) { err = PTR_ERR(new_nsp->mnt_ns); goto out_ns; } new_nsp->uts_ns = copy_utsname(flags, user_ns, tsk->nsproxy->uts_ns); if (IS_ERR(new_nsp->uts_ns)) { err = PTR_ERR(new_nsp->uts_ns); goto out_uts; } new_nsp->ipc_ns = copy_ipcs(flags, user_ns, tsk->nsproxy->ipc_ns); if (IS_ERR(new_nsp->ipc_ns)) { err = PTR_ERR(new_nsp->ipc_ns); goto out_ipc; } new_nsp->pid_ns_for_children = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children); if (IS_ERR(new_nsp->pid_ns_for_children)) { err = PTR_ERR(new_nsp->pid_ns_for_children); goto out_pid; } new_nsp->cgroup_ns = copy_cgroup_ns(flags, user_ns, tsk->nsproxy->cgroup_ns); if (IS_ERR(new_nsp->cgroup_ns)) { err = PTR_ERR(new_nsp->cgroup_ns); goto out_cgroup; } new_nsp->net_ns = copy_net_ns(flags, user_ns, tsk->nsproxy->net_ns); if (IS_ERR(new_nsp->net_ns)) { err = PTR_ERR(new_nsp->net_ns); goto out_net; } new_nsp->time_ns_for_children = copy_time_ns(flags, user_ns, tsk->nsproxy->time_ns_for_children); if (IS_ERR(new_nsp->time_ns_for_children)) { err = PTR_ERR(new_nsp->time_ns_for_children); goto out_time; } new_nsp->time_ns = get_time_ns(tsk->nsproxy->time_ns); return new_nsp; out_time: put_net(new_nsp->net_ns); out_net: put_cgroup_ns(new_nsp->cgroup_ns); out_cgroup: put_pid_ns(new_nsp->pid_ns_for_children); out_pid: put_ipc_ns(new_nsp->ipc_ns); out_ipc: put_uts_ns(new_nsp->uts_ns); out_uts: put_mnt_ns(new_nsp->mnt_ns); out_ns: kmem_cache_free(nsproxy_cachep, new_nsp); return ERR_PTR(err); } /* * called from clone. This now handles copy for nsproxy and all * namespaces therein. */ int copy_namespaces(u64 flags, struct task_struct *tsk) { struct nsproxy *old_ns = tsk->nsproxy; struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns); struct nsproxy *new_ns; if (likely(!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWPID | CLONE_NEWNET | CLONE_NEWCGROUP | CLONE_NEWTIME)))) { if ((flags & CLONE_VM) || likely(old_ns->time_ns_for_children == old_ns->time_ns)) { get_nsproxy(old_ns); return 0; } } else if (!ns_capable(user_ns, CAP_SYS_ADMIN)) return -EPERM; /* * CLONE_NEWIPC must detach from the undolist: after switching * to a new ipc namespace, the semaphore arrays from the old * namespace are unreachable. In clone parlance, CLONE_SYSVSEM * means share undolist with parent, so we must forbid using * it along with CLONE_NEWIPC. */ if ((flags & (CLONE_NEWIPC | CLONE_SYSVSEM)) == (CLONE_NEWIPC | CLONE_SYSVSEM)) return -EINVAL; new_ns = create_new_namespaces(flags, tsk, user_ns, tsk->fs); if (IS_ERR(new_ns)) return PTR_ERR(new_ns); if ((flags & CLONE_VM) == 0) timens_on_fork(new_ns, tsk); nsproxy_ns_active_get(new_ns); tsk->nsproxy = new_ns; return 0; } /* * Called from unshare. Unshare all the namespaces part of nsproxy. * On success, returns the new nsproxy. */ int unshare_nsproxy_namespaces(unsigned long unshare_flags, struct nsproxy **new_nsp, struct cred *new_cred, struct fs_struct *new_fs) { struct user_namespace *user_ns; int err = 0; if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWNET | CLONE_NEWPID | CLONE_NEWCGROUP | CLONE_NEWTIME))) return 0; user_ns = new_cred ? 
new_cred->user_ns : current_user_ns(); if (!ns_capable(user_ns, CAP_SYS_ADMIN)) return -EPERM; *new_nsp = create_new_namespaces(unshare_flags, current, user_ns, new_fs ? new_fs : current->fs); if (IS_ERR(*new_nsp)) { err = PTR_ERR(*new_nsp); goto out; } out: return err; } void switch_task_namespaces(struct task_struct *p, struct nsproxy *new) { struct nsproxy *ns; might_sleep(); if (new) nsproxy_ns_active_get(new); task_lock(p); ns = p->nsproxy; p->nsproxy = new; task_unlock(p); if (ns) put_nsproxy(ns); } void exit_nsproxy_namespaces(struct task_struct *p) { switch_task_namespaces(p, NULL); } void switch_cred_namespaces(const struct cred *old, const struct cred *new) { ns_ref_active_get(new->user_ns); ns_ref_active_put(old->user_ns); } void get_cred_namespaces(struct task_struct *tsk) { ns_ref_active_get(tsk->real_cred->user_ns); } void exit_cred_namespaces(struct task_struct *tsk) { ns_ref_active_put(tsk->real_cred->user_ns); } int exec_task_namespaces(void) { struct task_struct *tsk = current; struct nsproxy *new; if (tsk->nsproxy->time_ns_for_children == tsk->nsproxy->time_ns) return 0; new = create_new_namespaces(0, tsk, current_user_ns(), tsk->fs); if (IS_ERR(new)) return PTR_ERR(new); timens_on_fork(new, tsk); switch_task_namespaces(tsk, new); return 0; } static int check_setns_flags(unsigned long flags) { if (!flags || (flags & ~(CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWNET | CLONE_NEWTIME | CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWCGROUP))) return -EINVAL; #ifndef CONFIG_USER_NS if (flags & CLONE_NEWUSER) return -EINVAL; #endif #ifndef CONFIG_PID_NS if (flags & CLONE_NEWPID) return -EINVAL; #endif #ifndef CONFIG_UTS_NS if (flags & CLONE_NEWUTS) return -EINVAL; #endif #ifndef CONFIG_IPC_NS if (flags & CLONE_NEWIPC) return -EINVAL; #endif #ifndef CONFIG_CGROUPS if (flags & CLONE_NEWCGROUP) return -EINVAL; #endif #ifndef CONFIG_NET_NS if (flags & CLONE_NEWNET) return -EINVAL; #endif #ifndef CONFIG_TIME_NS if (flags & CLONE_NEWTIME) return -EINVAL; #endif return 0; } static void put_nsset(struct nsset *nsset) { unsigned flags = nsset->flags; if (flags & CLONE_NEWUSER) put_cred(nsset_cred(nsset)); /* * We only created a temporary copy if we attached to more than just * the mount namespace. */ if (nsset->fs && (flags & CLONE_NEWNS) && (flags & ~CLONE_NEWNS)) free_fs_struct(nsset->fs); if (nsset->nsproxy) nsproxy_free(nsset->nsproxy); } static int prepare_nsset(unsigned flags, struct nsset *nsset) { struct task_struct *me = current; nsset->nsproxy = create_new_namespaces(0, me, current_user_ns(), me->fs); if (IS_ERR(nsset->nsproxy)) return PTR_ERR(nsset->nsproxy); if (flags & CLONE_NEWUSER) nsset->cred = prepare_creds(); else nsset->cred = current_cred(); if (!nsset->cred) goto out; /* Only create a temporary copy of fs_struct if we really need to. */ if (flags == CLONE_NEWNS) { nsset->fs = me->fs; } else if (flags & CLONE_NEWNS) { nsset->fs = copy_fs_struct(me->fs); if (!nsset->fs) goto out; } nsset->flags = flags; return 0; out: put_nsset(nsset); return -ENOMEM; } static inline int validate_ns(struct nsset *nsset, struct ns_common *ns) { return ns->ops->install(nsset, ns); } /* * This is the inverse operation to unshare(). * Ordering is equivalent to the standard ordering used everywhere else * during unshare and process creation. The switch to the new set of * namespaces occurs at the point of no return after installation of * all requested namespaces was successful in commit_nsset(). 
*/ static int validate_nsset(struct nsset *nsset, struct pid *pid) { int ret = 0; unsigned flags = nsset->flags; struct user_namespace *user_ns = NULL; struct pid_namespace *pid_ns = NULL; struct nsproxy *nsp; struct task_struct *tsk; /* Take a "snapshot" of the target task's namespaces. */ rcu_read_lock(); tsk = pid_task(pid, PIDTYPE_PID); if (!tsk) { rcu_read_unlock(); return -ESRCH; } if (!ptrace_may_access(tsk, PTRACE_MODE_READ_REALCREDS)) { rcu_read_unlock(); return -EPERM; } task_lock(tsk); nsp = tsk->nsproxy; if (nsp) get_nsproxy(nsp); task_unlock(tsk); if (!nsp) { rcu_read_unlock(); return -ESRCH; } #ifdef CONFIG_PID_NS if (flags & CLONE_NEWPID) { pid_ns = task_active_pid_ns(tsk); if (unlikely(!pid_ns)) { rcu_read_unlock(); ret = -ESRCH; goto out; } get_pid_ns(pid_ns); } #endif #ifdef CONFIG_USER_NS if (flags & CLONE_NEWUSER) user_ns = get_user_ns(__task_cred(tsk)->user_ns); #endif rcu_read_unlock(); /* * Install requested namespaces. The caller will have * verified earlier that the requested namespaces are * supported on this kernel. We don't report errors here * if a namespace is requested that isn't supported. */ #ifdef CONFIG_USER_NS if (flags & CLONE_NEWUSER) { ret = validate_ns(nsset, &user_ns->ns); if (ret) goto out; } #endif if (flags & CLONE_NEWNS) { ret = validate_ns(nsset, from_mnt_ns(nsp->mnt_ns)); if (ret) goto out; } #ifdef CONFIG_UTS_NS if (flags & CLONE_NEWUTS) { ret = validate_ns(nsset, &nsp->uts_ns->ns); if (ret) goto out; } #endif #ifdef CONFIG_IPC_NS if (flags & CLONE_NEWIPC) { ret = validate_ns(nsset, &nsp->ipc_ns->ns); if (ret) goto out; } #endif #ifdef CONFIG_PID_NS if (flags & CLONE_NEWPID) { ret = validate_ns(nsset, &pid_ns->ns); if (ret) goto out; } #endif #ifdef CONFIG_CGROUPS if (flags & CLONE_NEWCGROUP) { ret = validate_ns(nsset, &nsp->cgroup_ns->ns); if (ret) goto out; } #endif #ifdef CONFIG_NET_NS if (flags & CLONE_NEWNET) { ret = validate_ns(nsset, &nsp->net_ns->ns); if (ret) goto out; } #endif #ifdef CONFIG_TIME_NS if (flags & CLONE_NEWTIME) { ret = validate_ns(nsset, &nsp->time_ns->ns); if (ret) goto out; } #endif out: if (pid_ns) put_pid_ns(pid_ns); if (nsp) put_nsproxy(nsp); put_user_ns(user_ns); return ret; } /* * This is the point of no return. There are just a few namespaces * that do some actual work here and it's sufficiently minimal that * a separate ns_common operation seems unnecessary for now. * Unshare is doing the same thing. If we'll end up needing to do * more in a given namespace or a helper here is ultimately not * exported anymore a simple commit handler for each namespace * should be added to ns_common. */ static void commit_nsset(struct nsset *nsset) { unsigned flags = nsset->flags; struct task_struct *me = current; #ifdef CONFIG_USER_NS if (flags & CLONE_NEWUSER) { /* transfer ownership */ commit_creds(nsset_cred(nsset)); nsset->cred = NULL; } #endif /* We only need to commit if we have used a temporary fs_struct. 
*/ if ((flags & CLONE_NEWNS) && (flags & ~CLONE_NEWNS)) { set_fs_root(me->fs, &nsset->fs->root); set_fs_pwd(me->fs, &nsset->fs->pwd); } #ifdef CONFIG_IPC_NS if (flags & CLONE_NEWIPC) exit_sem(me); #endif #ifdef CONFIG_TIME_NS if (flags & CLONE_NEWTIME) timens_commit(me, nsset->nsproxy->time_ns); #endif /* transfer ownership */ switch_task_namespaces(me, nsset->nsproxy); nsset->nsproxy = NULL; } SYSCALL_DEFINE2(setns, int, fd, int, flags) { CLASS(fd, f)(fd); struct ns_common *ns = NULL; struct nsset nsset = {}; int err = 0; if (fd_empty(f)) return -EBADF; if (proc_ns_file(fd_file(f))) { ns = get_proc_ns(file_inode(fd_file(f))); if (flags && (ns->ns_type != flags)) err = -EINVAL; flags = ns->ns_type; } else if (!IS_ERR(pidfd_pid(fd_file(f)))) { err = check_setns_flags(flags); } else { err = -EINVAL; } if (err) goto out; err = prepare_nsset(flags, &nsset); if (err) goto out; if (proc_ns_file(fd_file(f))) err = validate_ns(&nsset, ns); else err = validate_nsset(&nsset, pidfd_pid(fd_file(f))); if (!err) { commit_nsset(&nsset); perf_event_namespaces(current); } put_nsset(&nsset); out: return err; } int __init nsproxy_cache_init(void) { nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC|SLAB_ACCOUNT); return 0; }
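The setns() implementation above accepts either a namespace file descriptor or a pidfd, and for an nsfs descriptor the flags argument may be zero or must name the matching namespace type. As a hedged userspace illustration (not part of the kernel sources in this report), joining another task's network namespace through its /proc entry could look like this; join_net_ns_of() is a hypothetical helper name.

/* Hypothetical userspace sketch: join another task's network namespace. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int join_net_ns_of(pid_t pid)
{
	char path[64];
	int fd, ret;

	snprintf(path, sizeof(path), "/proc/%d/ns/net", (int)pid);
	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -1;

	/*
	 * For an nsfs file descriptor, flags may be 0 or must match the
	 * namespace type; CLONE_NEWNET makes the intent explicit and lets
	 * the kernel reject the wrong kind of file descriptor.
	 */
	ret = setns(fd, CLONE_NEWNET);
	close(fd);
	return ret;
}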
// SPDX-License-Identifier: GPL-2.0
/*
 * Provide access to virtual console memory.
* /dev/vcs: the screen as it is being viewed right now (possibly scrolled) * /dev/vcsN: the screen of /dev/ttyN (1 <= N <= 63) * [minor: N] * * /dev/vcsaN: idem, but including attributes, and prefixed with * the 4 bytes lines,columns,x,y (as screendump used to give). * Attribute/character pair is in native endianity. * [minor: N+128] * * /dev/vcsuN: similar to /dev/vcsaN but using 4-byte unicode values * instead of 1-byte screen glyph values. * [minor: N+64] * * /dev/vcsuaN: same idea as /dev/vcsaN for unicode (not yet implemented). * * This replaces screendump and part of selection, so that the system * administrator can control access using file system permissions. * * aeb@cwi.nl - efter Friedas begravelse - 950211 * * machek@k332.feld.cvut.cz - modified not to send characters to wrong console * - fixed some fatal off-by-one bugs (0-- no longer == -1 -> looping and looping and looping...) * - making it shorter - scr_readw are macros which expand in PRETTY long code */ #include <linux/kernel.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/tty.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/vt_kern.h> #include <linux/selection.h> #include <linux/kbd_kern.h> #include <linux/console.h> #include <linux/device.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/notifier.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #include <linux/unaligned.h> #define HEADER_SIZE 4u #define CON_BUF_SIZE (IS_ENABLED(CONFIG_BASE_SMALL) ? 256 : PAGE_SIZE) DEFINE_FREE(free_page_ptr, void *, if (_T) free_page((unsigned long)_T)); /* * Our minor space: * * 0 ... 63 glyph mode without attributes * 64 ... 127 unicode mode without attributes * 128 ... 191 glyph mode with attributes * 192 ... 255 unused (reserved for unicode with attributes) * * This relies on MAX_NR_CONSOLES being <= 63, meaning 63 actual consoles * with minors 0, 64, 128 and 192 being proxies for the foreground console. 
*/ #if MAX_NR_CONSOLES > 63 #warning "/dev/vcs* devices may not accommodate more than 63 consoles" #endif #define console(inode) (iminor(inode) & 63) #define use_unicode(inode) (iminor(inode) & 64) #define use_attributes(inode) (iminor(inode) & 128) struct vcs_poll_data { struct notifier_block notifier; unsigned int cons_num; int event; wait_queue_head_t waitq; struct fasync_struct *fasync; }; static int vcs_notifier(struct notifier_block *nb, unsigned long code, void *_param) { struct vt_notifier_param *param = _param; struct vc_data *vc = param->vc; struct vcs_poll_data *poll = container_of(nb, struct vcs_poll_data, notifier); int currcons = poll->cons_num; int fa_band; switch (code) { case VT_UPDATE: fa_band = POLL_PRI; break; case VT_DEALLOCATE: fa_band = POLL_HUP; break; default: return NOTIFY_DONE; } if (currcons == 0) currcons = fg_console; else currcons--; if (currcons != vc->vc_num) return NOTIFY_DONE; poll->event = code; wake_up_interruptible(&poll->waitq); kill_fasync(&poll->fasync, SIGIO, fa_band); return NOTIFY_OK; } static void vcs_poll_data_free(struct vcs_poll_data *poll) { unregister_vt_notifier(&poll->notifier); kfree(poll); } static struct vcs_poll_data * vcs_poll_data_get(struct file *file) { struct vcs_poll_data *poll = file->private_data, *kill = NULL; if (poll) return poll; poll = kzalloc(sizeof(*poll), GFP_KERNEL); if (!poll) return NULL; poll->cons_num = console(file_inode(file)); init_waitqueue_head(&poll->waitq); poll->notifier.notifier_call = vcs_notifier; /* * In order not to lose any update event, we must pretend one might * have occurred before we have a chance to register our notifier. * This is also how user space has come to detect which kernels * support POLLPRI on /dev/vcs* devices i.e. using poll() with * POLLPRI and a zero timeout. */ poll->event = VT_UPDATE; if (register_vt_notifier(&poll->notifier) != 0) { kfree(poll); return NULL; } /* * This code may be called either through ->poll() or ->fasync(). * If we have two threads using the same file descriptor, they could * both enter this function, both notice that the structure hasn't * been allocated yet and go ahead allocating it in parallel, but * only one of them must survive and be shared otherwise we'd leak * memory with a dangling notifier callback. */ spin_lock(&file->f_lock); if (!file->private_data) { file->private_data = poll; } else { /* someone else raced ahead of us */ kill = poll; poll = file->private_data; } spin_unlock(&file->f_lock); if (kill) vcs_poll_data_free(kill); return poll; } /** * vcs_vc - return VC for @inode * @inode: inode for which to return a VC * @viewed: returns whether this console is currently foreground (viewed) * * Must be called with console_lock. */ static struct vc_data *vcs_vc(struct inode *inode, bool *viewed) { unsigned int currcons = console(inode); WARN_CONSOLE_UNLOCKED(); if (currcons == 0) { currcons = fg_console; if (viewed) *viewed = true; } else { currcons--; if (viewed) *viewed = false; } return vc_cons[currcons].d; } /** * vcs_size - return size for a VC in @vc * @vc: which VC * @attr: does it use attributes? * @unicode: is it unicode? * * Must be called with console_lock. 
*/ static int vcs_size(const struct vc_data *vc, bool attr, bool unicode) { int size; WARN_CONSOLE_UNLOCKED(); size = vc->vc_rows * vc->vc_cols; if (attr) { if (unicode) return -EOPNOTSUPP; size = 2 * size + HEADER_SIZE; } else if (unicode) size *= 4; return size; } static loff_t vcs_lseek(struct file *file, loff_t offset, int orig) { struct inode *inode = file_inode(file); struct vc_data *vc; int size; scoped_guard(console_lock) { vc = vcs_vc(inode, NULL); if (!vc) return -ENXIO; size = vcs_size(vc, use_attributes(inode), use_unicode(inode)); } if (size < 0) return size; return fixed_size_llseek(file, offset, orig, size); } static int vcs_read_buf_uni(struct vc_data *vc, char *con_buf, unsigned int pos, unsigned int count, bool viewed) { unsigned int nr, row, col, maxcol = vc->vc_cols; int ret; ret = vc_uniscr_check(vc); if (ret) return ret; pos /= 4; row = pos / maxcol; col = pos % maxcol; nr = maxcol - col; do { if (nr > count / 4) nr = count / 4; vc_uniscr_copy_line(vc, con_buf, viewed, row, col, nr); con_buf += nr * 4; count -= nr * 4; row++; col = 0; nr = maxcol; } while (count); return 0; } static void vcs_read_buf_noattr(const struct vc_data *vc, char *con_buf, unsigned int pos, unsigned int count, bool viewed) { u16 *org; unsigned int col, maxcol = vc->vc_cols; org = screen_pos(vc, pos, viewed); col = pos % maxcol; pos += maxcol - col; while (count-- > 0) { *con_buf++ = (vcs_scr_readw(vc, org++) & 0xff); if (++col == maxcol) { org = screen_pos(vc, pos, viewed); col = 0; pos += maxcol; } } } static unsigned int vcs_read_buf(const struct vc_data *vc, char *con_buf, unsigned int pos, unsigned int count, bool viewed, unsigned int *skip) { u16 *org, *con_buf16; unsigned int col, maxcol = vc->vc_cols; unsigned int filled = count; if (pos < HEADER_SIZE) { /* clamp header values if they don't fit */ con_buf[0] = min(vc->vc_rows, 0xFFu); con_buf[1] = min(vc->vc_cols, 0xFFu); getconsxy(vc, con_buf + 2); *skip += pos; count += pos; if (count > CON_BUF_SIZE) { count = CON_BUF_SIZE; filled = count - pos; } /* Advance state pointers and move on. */ count -= min(HEADER_SIZE, count); pos = HEADER_SIZE; con_buf += HEADER_SIZE; /* If count >= 0, then pos is even... */ } else if (pos & 1) { /* * Skip first byte for output if start address is odd. Update * region sizes up/down depending on free space in buffer. */ (*skip)++; if (count < CON_BUF_SIZE) count++; else filled--; } if (!count) return filled; pos -= HEADER_SIZE; pos /= 2; col = pos % maxcol; org = screen_pos(vc, pos, viewed); pos += maxcol - col; /* * Buffer has even length, so we can always copy character + attribute. * We do not copy last byte to userspace if count is odd. */ count = (count + 1) / 2; con_buf16 = (u16 *)con_buf; while (count) { *con_buf16++ = vcs_scr_readw(vc, org++); count--; if (++col == maxcol) { org = screen_pos(vc, pos, viewed); col = 0; pos += maxcol; } } return filled; } static ssize_t vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(file); struct vc_data *vc; struct vcs_poll_data *poll; unsigned int read; ssize_t ret; loff_t pos; bool viewed, attr, uni_mode; char *con_buf __free(free_page_ptr) = (char *)__get_free_page(GFP_KERNEL); if (!con_buf) return -ENOMEM; pos = *ppos; /* Select the proper current console and verify * sanity of the situation under the console lock. 
*/ guard(console_lock)(); uni_mode = use_unicode(inode); attr = use_attributes(inode); if (pos < 0) return -EINVAL; /* we enforce 32-bit alignment for pos and count in unicode mode */ if (uni_mode && (pos | count) & 3) return -EINVAL; poll = file->private_data; if (count && poll) poll->event = 0; read = 0; ret = 0; while (count) { unsigned int this_round, skip = 0; int size; vc = vcs_vc(inode, &viewed); if (!vc) { ret = -ENXIO; break; } /* Check whether we are above size each round, * as copy_to_user at the end of this loop * could sleep. */ size = vcs_size(vc, attr, uni_mode); if (size < 0) { ret = size; break; } if (pos >= size) break; if (count > size - pos) count = size - pos; this_round = count; if (this_round > CON_BUF_SIZE) this_round = CON_BUF_SIZE; /* Perform the whole read into the local con_buf. * Then we can drop the console spinlock and safely * attempt to move it to userspace. */ if (uni_mode) { ret = vcs_read_buf_uni(vc, con_buf, pos, this_round, viewed); if (ret) break; } else if (!attr) { vcs_read_buf_noattr(vc, con_buf, pos, this_round, viewed); } else { this_round = vcs_read_buf(vc, con_buf, pos, this_round, viewed, &skip); } /* Finally, release the console semaphore while we push * all the data to userspace from our temporary buffer. * * AKPM: Even though it's a semaphore, we should drop it because * the pagefault handling code may want to call printk(). */ console_unlock(); ret = copy_to_user(buf, con_buf + skip, this_round); console_lock(); if (ret) { read += this_round - ret; ret = -EFAULT; break; } buf += this_round; pos += this_round; read += this_round; count -= this_round; } *ppos += read; if (read) return read; return ret; } static u16 *vcs_write_buf_noattr(struct vc_data *vc, const char *con_buf, unsigned int pos, unsigned int count, bool viewed, u16 **org0) { u16 *org; unsigned int col, maxcol = vc->vc_cols; *org0 = org = screen_pos(vc, pos, viewed); col = pos % maxcol; pos += maxcol - col; while (count > 0) { unsigned char c = *con_buf++; count--; vcs_scr_writew(vc, (vcs_scr_readw(vc, org) & 0xff00) | c, org); org++; if (++col == maxcol) { org = screen_pos(vc, pos, viewed); col = 0; pos += maxcol; } } return org; } /* * Compilers (gcc 10) are unable to optimize the swap in cpu_to_le16. So do it * the poor man way. 
*/ static inline u16 vc_compile_le16(u8 hi, u8 lo) { #ifdef __BIG_ENDIAN return (lo << 8u) | hi; #else return (hi << 8u) | lo; #endif } static u16 *vcs_write_buf(struct vc_data *vc, const char *con_buf, unsigned int pos, unsigned int count, bool viewed, u16 **org0) { u16 *org; unsigned int col, maxcol = vc->vc_cols; unsigned char c; /* header */ if (pos < HEADER_SIZE) { char header[HEADER_SIZE]; getconsxy(vc, header + 2); while (pos < HEADER_SIZE && count > 0) { count--; header[pos++] = *con_buf++; } if (!viewed) putconsxy(vc, header + 2); } if (!count) return NULL; pos -= HEADER_SIZE; col = (pos/2) % maxcol; *org0 = org = screen_pos(vc, pos/2, viewed); /* odd pos -- the first single character */ if (pos & 1) { count--; c = *con_buf++; vcs_scr_writew(vc, vc_compile_le16(c, vcs_scr_readw(vc, org)), org); org++; pos++; if (++col == maxcol) { org = screen_pos(vc, pos/2, viewed); col = 0; } } pos /= 2; pos += maxcol - col; /* even pos -- handle attr+character pairs */ while (count > 1) { unsigned short w; w = get_unaligned(((unsigned short *)con_buf)); vcs_scr_writew(vc, w, org++); con_buf += 2; count -= 2; if (++col == maxcol) { org = screen_pos(vc, pos, viewed); col = 0; pos += maxcol; } } if (!count) return org; /* odd pos -- the remaining character */ c = *con_buf++; vcs_scr_writew(vc, vc_compile_le16(vcs_scr_readw(vc, org) >> 8, c), org); return org; } static ssize_t vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(file); struct vc_data *vc; u16 *org0, *org; unsigned int written; int size; ssize_t ret; loff_t pos; bool viewed, attr; if (use_unicode(inode)) return -EOPNOTSUPP; char *con_buf __free(free_page_ptr) = (char *)__get_free_page(GFP_KERNEL); if (!con_buf) return -ENOMEM; pos = *ppos; /* Select the proper current console and verify * sanity of the situation under the console lock. */ guard(console_lock)(); attr = use_attributes(inode); vc = vcs_vc(inode, &viewed); if (!vc) return -ENXIO; size = vcs_size(vc, attr, false); if (size < 0) return size; if (pos < 0 || pos > size) return -EINVAL; if (count > size - pos) count = size - pos; written = 0; while (count) { unsigned int this_round = count; if (this_round > CON_BUF_SIZE) this_round = CON_BUF_SIZE; /* Temporarily drop the console lock so that we can read * in the write data from userspace safely. */ console_unlock(); ret = copy_from_user(con_buf, buf, this_round); console_lock(); if (ret) { this_round -= ret; if (!this_round) { /* Abort loop if no data were copied. Otherwise * fail with -EFAULT. */ if (written) break; return -EFAULT; } } /* The vc might have been freed or vcs_size might have changed * while we slept to grab the user buffer, so recheck. * Return data written up to now on failure. */ vc = vcs_vc(inode, &viewed); if (!vc) { if (written) break; return -ENXIO; } size = vcs_size(vc, attr, false); if (size < 0) { if (written) break; return size; } if (pos >= size) break; if (this_round > size - pos) this_round = size - pos; /* OK, now actually push the write to the console * under the lock using the local kernel buffer. 
*/ if (attr) org = vcs_write_buf(vc, con_buf, pos, this_round, viewed, &org0); else org = vcs_write_buf_noattr(vc, con_buf, pos, this_round, viewed, &org0); count -= this_round; written += this_round; buf += this_round; pos += this_round; if (org) update_region(vc, (unsigned long)(org0), org - org0); } *ppos += written; ret = written; if (written) vcs_scr_updated(vc); return ret; } static __poll_t vcs_poll(struct file *file, poll_table *wait) { struct vcs_poll_data *poll = vcs_poll_data_get(file); __poll_t ret = DEFAULT_POLLMASK|EPOLLERR; if (poll) { poll_wait(file, &poll->waitq, wait); switch (poll->event) { case VT_UPDATE: ret = DEFAULT_POLLMASK|EPOLLPRI; break; case VT_DEALLOCATE: ret = DEFAULT_POLLMASK|EPOLLHUP|EPOLLERR; break; case 0: ret = DEFAULT_POLLMASK; break; } } return ret; } static int vcs_fasync(int fd, struct file *file, int on) { struct vcs_poll_data *poll = file->private_data; if (!poll) { /* don't allocate anything if all we want is disable fasync */ if (!on) return 0; poll = vcs_poll_data_get(file); if (!poll) return -ENOMEM; } return fasync_helper(fd, file, on, &poll->fasync); } static int vcs_open(struct inode *inode, struct file *filp) { unsigned int currcons = console(inode); bool attr = use_attributes(inode); bool uni_mode = use_unicode(inode); /* we currently don't support attributes in unicode mode */ if (attr && uni_mode) return -EOPNOTSUPP; guard(console_lock)(); if (currcons && !vc_cons_allocated(currcons - 1)) return -ENXIO; return 0; } static int vcs_release(struct inode *inode, struct file *file) { struct vcs_poll_data *poll = file->private_data; if (poll) vcs_poll_data_free(poll); return 0; } static const struct file_operations vcs_fops = { .llseek = vcs_lseek, .read = vcs_read, .write = vcs_write, .poll = vcs_poll, .fasync = vcs_fasync, .open = vcs_open, .release = vcs_release, }; static const struct class vc_class = { .name = "vc", }; void vcs_make_sysfs(int index) { device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, index + 1), NULL, "vcs%u", index + 1); device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, index + 65), NULL, "vcsu%u", index + 1); device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, index + 129), NULL, "vcsa%u", index + 1); } void vcs_remove_sysfs(int index) { device_destroy(&vc_class, MKDEV(VCS_MAJOR, index + 1)); device_destroy(&vc_class, MKDEV(VCS_MAJOR, index + 65)); device_destroy(&vc_class, MKDEV(VCS_MAJOR, index + 129)); } int __init vcs_init(void) { unsigned int i; if (register_chrdev(VCS_MAJOR, "vcs", &vcs_fops)) panic("unable to get major %d for vcs device", VCS_MAJOR); if (class_register(&vc_class)) panic("unable to create vc_class"); device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs"); device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, 64), NULL, "vcsu"); device_create(&vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa"); for (i = 0; i < MIN_NR_CONSOLES; i++) vcs_make_sysfs(i); return 0; }
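To make the header and minor-number layout described above concrete, here is a hedged userspace sketch (not part of the driver) that dumps the text of /dev/tty1 through /dev/vcsa1. It assumes a little-endian host, where the glyph byte of each character/attribute pair precedes the attribute byte.

/* Hypothetical userspace sketch: print the contents of /dev/tty1 via /dev/vcsa1. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/dev/vcsa1", "rb");
	unsigned char hdr[4];	/* lines, columns, cursor x, cursor y */
	unsigned int rows, cols, r, c;

	if (!f || fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr))
		return 1;
	rows = hdr[0];
	cols = hdr[1];

	for (r = 0; r < rows; r++) {
		for (c = 0; c < cols; c++) {
			int glyph = fgetc(f);	/* low byte on a little-endian host */
			int attr = fgetc(f);	/* attribute byte */

			if (glyph == EOF || attr == EOF)
				return 1;
			putchar(glyph);
		}
		putchar('\n');
	}
	fclose(f);
	return 0;
}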
3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 
4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 
4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 
5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 
6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 
6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 
7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 8035 8036 8037 8038 8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052 8053 8054 8055 8056 8057 8058 8059 8060 8061 8062 8063 8064 8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092 8093 8094 8095 8096 8097 8098 8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111 8112 8113 8114 8115 8116 8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162 8163 8164 8165 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 
8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 8729 8730 8731 8732 8733 8734 8735 8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 
9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 9447 9448 9449 9450 9451 9452 9453 9454 9455 9456 9457 9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613 9614 9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625 9626 9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648 9649 9650 9651 9652 9653 9654 9655 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711 9712 9713 9714 9715 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 
9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816 9817 9818 9819 9820 9821 9822 9823 9824 9825 9826 9827 9828 9829 9830 9831 9832 9833 9834 9835 9836 9837 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 9868 9869 9870 9871 9872 9873 9874 9875 9876 9877 9878 9879 9880 9881 9882 9883 9884 9885 9886 9887 9888 9889 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 10140 10141 10142 10143 10144 10145 10146 10147 10148 10149 10150 10151 10152 10153 10154 10155 10156 10157 10158 10159 10160 10161 10162 10163 10164 10165 10166 10167 10168 10169 10170 10171 10172 10173 10174 10175 10176 10177 10178 10179 10180 10181 10182 10183 10184 10185 10186 10187 10188 10189 10190 10191 10192 10193 10194 10195 10196 10197 10198 10199 10200 10201 10202 10203 10204 10205 10206 10207 10208 10209 10210 10211 10212 10213 10214 10215 10216 10217 10218 10219 10220 10221 10222 10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234 10235 10236 10237 10238 10239 10240 10241 10242 10243 10244 10245 10246 10247 10248 10249 10250 10251 10252 10253 10254 10255 10256 10257 10258 10259 10260 10261 10262 10263 10264 10265 10266 10267 10268 10269 10270 10271 10272 10273 10274 10275 10276 10277 10278 10279 10280 10281 10282 10283 10284 10285 10286 10287 10288 10289 10290 10291 10292 10293 10294 10295 10296 10297 10298 10299 10300 10301 10302 10303 10304 10305 10306 10307 10308 10309 10310 10311 10312 10313 10314 10315 10316 10317 10318 10319 10320 10321 10322 10323 10324 10325 10326 10327 10328 10329 10330 10331 10332 10333 10334 10335 10336 10337 10338 10339 10340 10341 10342 10343 10344 10345 10346 10347 10348 10349 10350 10351 10352 10353 10354 10355 10356 10357 10358 10359 10360 10361 10362 10363 10364 10365 10366 10367 10368 10369 10370 10371 10372 10373 10374 10375 10376 10377 10378 10379 10380 10381 10382 10383 10384 10385 10386 10387 10388 10389 10390 10391 10392 10393 10394 10395 
10396 10397 10398 10399 10400 10401 10402 10403 10404 10405 10406 10407 10408 10409 10410 10411 10412 10413 10414 10415 10416 10417 10418 10419 10420 10421 10422 10423 10424 10425 10426 10427 10428 10429 10430 10431 10432 10433 10434 10435 10436 10437 10438 10439 10440 10441 10442 10443 10444 10445 10446 10447 10448 10449 10450 10451 10452 10453 10454 10455 10456 10457 10458 10459 10460 10461 10462 10463 10464 10465 10466 10467 10468 10469 10470 10471 10472 10473 10474 10475 10476 10477 10478 10479 10480 10481 10482 10483 10484 10485 10486 10487 10488 10489 10490 10491 10492 10493 10494 10495 10496 10497 10498 10499 10500 10501 10502 10503 10504 10505 10506 10507 10508 10509 10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522 10523 10524 10525 10526 10527 10528 10529 10530 10531 10532 10533 10534 10535 10536 10537 10538 10539 10540 10541 10542 10543 10544 10545 10546 10547 10548 10549 10550 10551 10552 10553 10554 10555 10556 10557 10558 10559 10560 10561 10562 10563 10564 10565 10566 10567 10568 10569 10570 10571 10572 10573 10574 10575 10576 10577 10578 10579 10580 10581 10582 10583 10584 10585 10586 10587 10588 10589 10590 10591 10592 10593 10594 10595 10596 10597 10598 10599 10600 10601 10602 10603 10604 10605 10606 10607 10608 10609 10610 10611 10612 10613 10614 10615 10616 10617 10618 10619 10620 10621 10622 10623 10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639 10640 10641 10642 10643 10644 10645 10646 10647 10648 10649 10650 10651 10652 10653 10654 10655 10656 10657 10658 10659 10660 10661 10662 10663 10664 10665 10666 10667 10668 10669 10670 10671 10672 10673 10674 10675 10676 10677 10678 10679 10680 10681 10682 10683 10684 10685 10686 10687 10688 10689 10690 10691 10692 10693 10694 10695 10696 10697 10698 10699 10700 10701 10702 10703 10704 10705 10706 10707 10708 10709 10710 10711 10712 10713 10714 10715 10716 10717 10718 10719 10720 10721 10722 10723 10724 10725 10726 10727 10728 10729 10730 10731 10732 10733 10734 10735 10736 10737 10738 10739 10740 10741 10742 10743 10744 10745 10746 10747 10748 10749 10750 10751 10752 10753 10754 10755 10756 10757 10758 10759 10760 10761 10762 10763 10764 10765 10766 10767 10768 10769 10770 10771 10772 10773 10774 10775 10776 10777 10778 10779 10780 10781 10782 10783 10784 10785 10786 10787 10788 10789 10790 10791 10792 10793 10794 10795 10796 10797 10798 10799 10800 10801 10802 10803 10804 10805 10806 10807 10808 10809 10810 10811 10812 10813 10814 10815 10816 10817 10818 10819 10820 10821 10822 10823 10824 10825 10826 10827 10828 10829 10830 10831 10832 10833 10834 10835 10836 10837 10838 10839 10840 10841 10842 10843 10844 10845 10846 10847 10848 10849 10850 10851 10852 10853 10854 10855 10856 10857 10858 10859 10860 10861 10862 10863 10864 10865 10866 10867 10868 10869 10870 10871 10872 10873 10874 10875 10876 10877 10878 10879 10880 10881 10882 10883 10884 10885 10886 10887 10888 10889 10890 10891 10892 10893 10894 10895 10896 10897 10898 10899 10900 10901 10902 10903 10904 10905 10906 10907 10908 10909 10910 10911 10912 10913 10914 10915 10916 10917 10918 10919 10920 10921 10922 10923 10924 10925 10926 10927 10928 10929 10930 10931 10932 10933 10934 10935 10936 10937 10938 10939 10940 10941 10942 10943 10944 10945 10946 10947 10948 10949 10950 10951 10952 10953 10954 10955 10956 10957 10958 10959 10960 10961 10962 10963 10964 10965 10966 10967 10968 10969 10970 10971 10972 10973 10974 10975 10976 10977 10978 10979 10980 10981 10982 10983 10984 10985 10986 10987 
10988 10989 10990 10991 10992 10993 10994 10995 10996 10997 10998 10999 11000 11001 11002 11003 11004 11005 11006 11007 11008 11009 11010 11011 11012 11013 11014 11015 11016 11017 11018 11019 11020 11021 11022 11023 11024 11025 11026 11027 11028 11029 11030 11031 11032 11033 11034 11035 11036 11037 11038 11039 11040 11041 11042 11043 11044 11045 11046 11047 11048 11049 11050 11051 11052 11053 11054 11055 11056 11057 11058 11059 11060 11061 11062 11063 11064 11065 11066 11067 11068 11069 11070 11071 11072 11073 11074 11075 11076 11077 11078 11079 11080 11081 11082 11083 11084 11085 11086 11087 11088 11089 11090 11091 11092 11093 11094 11095 11096 11097 11098 11099 11100 11101 11102 11103 11104 11105 11106 11107 11108 11109 11110 11111 11112 11113 11114 11115 11116 11117 11118 11119 11120 11121 11122 11123 11124 11125 11126 11127 11128 11129 11130 11131 11132 11133 11134 11135 11136 11137 11138 11139 11140 11141 11142 11143 11144 11145 11146 11147 11148 11149 11150 11151 11152 11153 11154 11155 11156 11157 11158 11159 11160 11161 11162 11163 11164 11165 11166 11167 11168 11169 11170 11171 11172 11173 11174 11175 11176 11177 11178 11179 11180 11181 11182 11183 11184 11185 11186 11187 11188 11189 11190 11191 11192 11193 11194 11195 11196 11197 11198 11199 11200 11201 11202 11203 11204 11205 11206 11207 11208 11209 11210 11211 11212 11213 11214 11215 11216 11217 11218 11219 11220 11221 11222 11223 11224 11225 11226 11227 11228 11229 11230 11231 11232 11233 11234 11235 11236 11237 11238 11239 11240 11241 11242 11243 11244 11245 11246 11247 11248 11249 11250 11251 11252 11253 11254 11255 11256 11257 11258 11259 11260 11261 11262 11263 11264 11265 11266 11267 11268 11269 11270 11271 11272 11273 11274 11275 11276 11277 11278 11279 11280 11281 11282 11283 11284 11285 11286 11287 11288 11289 11290 11291 11292 11293 11294 11295 11296 11297 11298 11299 11300 11301 11302 11303 11304 11305 11306 11307 11308 11309 11310 11311 11312 11313 11314 11315 11316 11317 11318 11319 11320 11321 11322 11323 11324 11325 11326 11327 11328 11329 11330 11331 11332 11333 11334 11335 11336 11337 11338 11339 11340 11341 11342 11343 11344 11345 11346 11347 11348 11349 11350 11351 11352 11353 11354 11355 11356 11357 11358 11359 11360 11361 11362 11363 11364 11365 11366 11367 11368 11369 11370 11371 11372 11373 11374 11375 11376 11377 11378 11379 11380 11381 11382 11383 11384 11385 11386 11387 11388 11389 11390 11391 11392 11393 11394 11395 11396 11397 11398 11399 11400 11401 11402 11403 11404 11405 11406 11407 11408 11409 11410 11411 11412 11413 11414 11415 11416 11417 11418 11419 11420 11421 11422 11423 11424 11425 11426 11427 11428 11429 11430 11431 11432 11433 11434 11435 11436 11437 11438 11439 11440 11441 11442 11443 11444 11445 11446 11447 11448 11449 11450 11451 11452 11453 11454 11455 11456 11457 11458 11459 11460 11461 11462 11463 11464 11465 11466 11467 11468 11469 11470 11471 11472 11473 11474 11475 11476 11477 11478 11479 11480 11481 11482 11483 11484 11485 11486 11487 11488 11489 11490 11491 11492 11493 11494 11495 11496 11497 11498 11499 11500 11501 11502 11503 11504 11505 11506 11507 11508 11509 11510 11511 11512 11513 11514 11515 11516 11517 11518 11519 11520 11521 11522 11523 11524 11525 11526 11527 11528 11529 11530 11531 11532 11533 11534 11535 11536 11537 11538 11539 11540 11541 11542 11543 11544 11545 11546 11547 11548 11549 11550 11551 11552 11553 11554 11555 11556 11557 11558 11559 11560 11561 11562 11563 11564 11565 11566 11567 11568 11569 11570 11571 11572 11573 11574 11575 11576 11577 11578 11579 
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/syscalls.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/cleanup.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/sort.h>
#include <linux/io.h> /* vmap_page_range() */
#include <linux/fs_context.h>

#include <asm/setup.h> /* COMMAND_LINE_SIZE */

#include "trace.h"
#include "trace_output.h"

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#else
#define tracing_selftest_running	0
#define tracing_selftest_disabled	0
#endif

/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static bool traceoff_after_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* Store tracers and their flags per instance */
struct tracers {
	struct list_head	list;
	struct tracer		*tracer;
	struct tracer_flags	*flags;
};

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

#define MAX_TRACER_SIZE		100
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 if you want to dump the buffers of all CPUs.
 * Set it to 2 if you want to dump the buffer of the CPU that triggered the oops.
 * Set it to an instance name if you want to dump a specific trace instance.
 * Multiple instance dump is also supported, and instances are separated
 * by commas.
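 *
 * For example (illustrative): booting with "ftrace_dump_on_oops=2" dumps
 * only the oopsing CPU's buffer, while
 * "echo 1 > /proc/sys/kernel/ftrace_dump_on_oops" turns on a dump of all
 * CPUs at run time.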
 */

/* Set to the string "0" to disable by default */
char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";

/* When set, tracing will stop when a WARN*() is hit */
static int __disable_trace_on_warning;

int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

static const struct ctl_table trace_sysctl_table[] = {
	{
		.procname	= "ftrace_dump_on_oops",
		.data		= &ftrace_dump_on_oops,
		.maxlen		= MAX_TRACER_SIZE,
		.mode		= 0644,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "traceoff_on_warning",
		.data		= &__disable_trace_on_warning,
		.maxlen		= sizeof(__disable_trace_on_warning),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "tracepoint_printk",
		.data		= &tracepoint_printk,
		.maxlen		= sizeof(tracepoint_printk),
		.mode		= 0644,
		.proc_handler	= tracepoint_printk_sysctl,
	},
};

static int __init init_trace_sysctls(void)
{
	register_sysctl_init("kernel", trace_sysctl_table);
	return 0;
}
subsys_initcall(init_trace_sysctls);

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;

static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;

static int __init set_cmdline_ftrace(char *str)
{
	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	trace_set_ring_buffer_expanded(NULL);
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

int ftrace_dump_on_oops_enabled(void)
{
	if (!strcmp("0", ftrace_dump_on_oops))
		return 0;
	else
		return 1;
}

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (!*str) {
		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
		return 1;
	}

	if (*str == ',') {
		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
		strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
		return 1;
	}

	if (*str++ == '=') {
		strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
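
/*
 * Illustrative example (not taken from this file): the boot parameters
 * parsed above can be combined on the kernel command line, e.g.
 *
 *	ftrace=function ftrace_dump_on_oops traceoff_on_warning
 *
 * which selects the "function" tracer at boot (assuming it is built in),
 * dumps all CPU buffers on an oops, and stops tracing on the first WARN*().
 */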

static int __init boot_alloc_snapshot(char *str)
{
	char *slot = boot_snapshot_info + boot_snapshot_index;
	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
	int ret;

	if (str[0] == '=') {
		str++;
		if (strlen(str) >= left)
			return -1;

		ret = snprintf(slot, left, "%s\t", str);
		boot_snapshot_index += ret;
	} else {
		allocate_snapshot = true;
		/* We also need the main ring buffer expanded */
		trace_set_ring_buffer_expanded(NULL);
	}
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static int __init boot_snapshot(char *str)
{
	snapshot_at_boot = true;
	boot_alloc_snapshot(str);
	return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);

static int __init boot_instance(char *str)
{
	char *slot = boot_instance_info + boot_instance_index;
	int left = sizeof(boot_instance_info) - boot_instance_index;
	int ret;

	if (strlen(str) >= left)
		return -1;

	ret = snprintf(slot, left, "%s\t", str);
	boot_instance_index += ret;

	return 1;
}
__setup("trace_instance=", boot_instance);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	/* Ignore the "tp_printk_stop_on_boot" param */
	if (*str == '_')
		return 0;

	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
	tracepoint_printk_stop_on_boot = true;
	return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);

static int __init set_traceoff_after_boot(char *str)
{
	traceoff_after_boot = true;
	return 1;
}
__setup("traceoff_after_boot", set_traceoff_after_boot);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}
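
/*
 * The static keys above exist so that the export walk in ftrace_exports()
 * below can be skipped cheaply while no export of a given class (function,
 * event or marker) is registered; ftrace_exports_enable()/_disable() flip
 * them as exports are added and removed.
 */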
*export; guard(preempt_notrace)(); export = rcu_dereference_raw_check(ftrace_exports_list); while (export) { trace_process_export(export, event, flag); export = rcu_dereference_raw_check(export->next); } } static inline void add_trace_export(struct trace_export **list, struct trace_export *export) { rcu_assign_pointer(export->next, *list); /* * We are entering export into the list but another * CPU might be walking that list. We need to make sure * the export->next pointer is valid before another CPU sees * the export pointer included into the list. */ rcu_assign_pointer(*list, export); } static inline int rm_trace_export(struct trace_export **list, struct trace_export *export) { struct trace_export **p; for (p = list; *p != NULL; p = &(*p)->next) if (*p == export) break; if (*p != export) return -1; rcu_assign_pointer(*p, (*p)->next); return 0; } static inline void add_ftrace_export(struct trace_export **list, struct trace_export *export) { ftrace_exports_enable(export); add_trace_export(list, export); } static inline int rm_ftrace_export(struct trace_export **list, struct trace_export *export) { int ret; ret = rm_trace_export(list, export); ftrace_exports_disable(export); return ret; } int register_ftrace_export(struct trace_export *export) { if (WARN_ON_ONCE(!export->write)) return -1; guard(mutex)(&ftrace_export_lock); add_ftrace_export(&ftrace_exports_list, export); return 0; } EXPORT_SYMBOL_GPL(register_ftrace_export); int unregister_ftrace_export(struct trace_export *export) { guard(mutex)(&ftrace_export_lock); return rm_ftrace_export(&ftrace_exports_list, export); } EXPORT_SYMBOL_GPL(unregister_ftrace_export); /* trace_flags holds trace_options default values */ #define TRACE_DEFAULT_FLAGS \ (FUNCTION_DEFAULT_FLAGS | FPROFILE_DEFAULT_FLAGS | \ TRACE_ITER(PRINT_PARENT) | TRACE_ITER(PRINTK) | \ TRACE_ITER(ANNOTATE) | TRACE_ITER(CONTEXT_INFO) | \ TRACE_ITER(RECORD_CMD) | TRACE_ITER(OVERWRITE) | \ TRACE_ITER(IRQ_INFO) | TRACE_ITER(MARKERS) | \ TRACE_ITER(HASH_PTR) | TRACE_ITER(TRACE_PRINTK) | \ TRACE_ITER(COPY_MARKER)) /* trace_options that are only supported by global_trace */ #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER(PRINTK) | \ TRACE_ITER(PRINTK_MSGONLY) | TRACE_ITER(RECORD_CMD) | \ TRACE_ITER(PROF_TEXT_OFFSET) | FPROFILE_DEFAULT_FLAGS) /* trace_flags that are default zero for instances */ #define ZEROED_TRACE_FLAGS \ (TRACE_ITER(EVENT_FORK) | TRACE_ITER(FUNC_FORK) | TRACE_ITER(TRACE_PRINTK) | \ TRACE_ITER(COPY_MARKER)) /* * The global_trace is the descriptor that holds the top-level tracing * buffers for the live tracing. */ static struct trace_array global_trace = { .trace_flags = TRACE_DEFAULT_FLAGS, }; static struct trace_array *printk_trace = &global_trace; /* List of trace_arrays interested in the top level trace_marker */ static LIST_HEAD(marker_copies); static __always_inline bool printk_binsafe(struct trace_array *tr) { /* * The binary format of traceprintk can cause a crash if used * by a buffer from another boot. Force the use of the * non binary version of trace_printk if the trace_printk * buffer is a boot mapped ring buffer. 
*/ return !(tr->flags & TRACE_ARRAY_FL_BOOT); } static void update_printk_trace(struct trace_array *tr) { if (printk_trace == tr) return; printk_trace->trace_flags &= ~TRACE_ITER(TRACE_PRINTK); printk_trace = tr; tr->trace_flags |= TRACE_ITER(TRACE_PRINTK); } /* Returns true if the status of tr changed */ static bool update_marker_trace(struct trace_array *tr, int enabled) { lockdep_assert_held(&event_mutex); if (enabled) { if (!list_empty(&tr->marker_list)) return false; list_add_rcu(&tr->marker_list, &marker_copies); tr->trace_flags |= TRACE_ITER(COPY_MARKER); return true; } if (list_empty(&tr->marker_list)) return false; list_del_init(&tr->marker_list); tr->trace_flags &= ~TRACE_ITER(COPY_MARKER); return true; } void trace_set_ring_buffer_expanded(struct trace_array *tr) { if (!tr) tr = &global_trace; tr->ring_buffer_expanded = true; } LIST_HEAD(ftrace_trace_arrays); int trace_array_get(struct trace_array *this_tr) { struct trace_array *tr; guard(mutex)(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr == this_tr) { tr->ref++; return 0; } } return -ENODEV; } static void __trace_array_put(struct trace_array *this_tr) { WARN_ON(!this_tr->ref); this_tr->ref--; } /** * trace_array_put - Decrement the reference counter for this trace array. * @this_tr : pointer to the trace array * * NOTE: Use this when we no longer need the trace array returned by * trace_array_get_by_name(). This ensures the trace array can be later * destroyed. * */ void trace_array_put(struct trace_array *this_tr) { if (!this_tr) return; guard(mutex)(&trace_types_lock); __trace_array_put(this_tr); } EXPORT_SYMBOL_GPL(trace_array_put); int tracing_check_open_get_tr(struct trace_array *tr) { int ret; ret = security_locked_down(LOCKDOWN_TRACEFS); if (ret) return ret; if (tracing_disabled) return -ENODEV; if (tr && trace_array_get(tr) < 0) return -ENODEV; return 0; } /** * trace_find_filtered_pid - check if a pid exists in a filtered_pid list * @filtered_pids: The list of pids to check * @search_pid: The PID to find in @filtered_pids * * Returns true if @search_pid is found in @filtered_pids, and false otherwise. */ bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid) { return trace_pid_list_is_set(filtered_pids, search_pid); } /** * trace_ignore_this_task - should a task be ignored for tracing * @filtered_pids: The list of pids to check * @filtered_no_pids: The list of pids not to be traced * @task: The task that should be ignored if not filtered * * Checks if @task should be traced or not from @filtered_pids. * Returns true if @task should *NOT* be traced. * Returns false if @task should be traced. */ bool trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct trace_pid_list *filtered_no_pids, struct task_struct *task) { /* * If filtered_no_pids is not empty, and the task's pid is listed * in filtered_no_pids, then return true. * Otherwise, if filtered_pids is empty, that means we can * trace all tasks. If it has content, then only trace pids * within filtered_pids. */ return (filtered_pids && !trace_find_filtered_pid(filtered_pids, task->pid)) || (filtered_no_pids && trace_find_filtered_pid(filtered_no_pids, task->pid)); } /** * trace_filter_add_remove_task - Add or remove a task from a pid_list * @pid_list: The list to modify * @self: The current task for fork or NULL for exit * @task: The task to add or remove * * If adding a task, if @self is defined, the task is only added if @self * is also included in @pid_list. 
This happens on fork and tasks should * only be added when the parent is listed. If @self is NULL, then the * @task pid will be removed from the list, which would happen on exit * of a task. */ void trace_filter_add_remove_task(struct trace_pid_list *pid_list, struct task_struct *self, struct task_struct *task) { if (!pid_list) return; /* For forks, we only add if the forking task is listed */ if (self) { if (!trace_find_filtered_pid(pid_list, self->pid)) return; } /* "self" is set for forks, and NULL for exits */ if (self) trace_pid_list_set(pid_list, task->pid); else trace_pid_list_clear(pid_list, task->pid); } /** * trace_pid_next - Used for seq_file to get to the next pid of a pid_list * @pid_list: The pid list to show * @v: The last pid that was shown (+1 the actual pid to let zero be displayed) * @pos: The position of the file * * This is used by the seq_file "next" operation to iterate the pids * listed in a trace_pid_list structure. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) { long pid = (unsigned long)v; unsigned int next; (*pos)++; /* pid already is +1 of the actual previous bit */ if (trace_pid_list_next(pid_list, pid, &next) < 0) return NULL; pid = next; /* Return pid + 1 to allow zero to be represented */ return (void *)(pid + 1); } /** * trace_pid_start - Used for seq_file to start reading pid lists * @pid_list: The pid list to show * @pos: The position of the file * * This is used by seq_file "start" operation to start the iteration * of listing pids. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos) { unsigned long pid; unsigned int first; loff_t l = 0; if (trace_pid_list_first(pid_list, &first) < 0) return NULL; pid = first; /* Return pid + 1 so that zero can be the exit value */ for (pid++; pid && l < *pos; pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) ; return (void *)pid; } /** * trace_pid_show - show the current pid in seq_file processing * @m: The seq_file structure to write into * @v: A void pointer of the pid (+1) value to display * * Can be directly used by seq_file operations to display the current * pid value. */ int trace_pid_show(struct seq_file *m, void *v) { unsigned long pid = (unsigned long)v - 1; seq_printf(m, "%lu\n", pid); return 0; } /* 128 should be much more than enough */ #define PID_BUF_SIZE 127 int trace_pid_write(struct trace_pid_list *filtered_pids, struct trace_pid_list **new_pid_list, const char __user *ubuf, size_t cnt) { struct trace_pid_list *pid_list; struct trace_parser parser; unsigned long val; int nr_pids = 0; ssize_t read = 0; ssize_t ret; loff_t pos; pid_t pid; if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1)) return -ENOMEM; /* * Always recreate a new array. The write is an all or nothing * operation. Always create a new array when adding new pids by * the user. If the operation fails, then the current list is * not modified. 
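 *
 * A minimal illustrative sketch of a caller (names are hypothetical;
 * publishing the new list and freeing the old one remain the caller's
 * responsibility):
 *
 *	struct trace_pid_list *new_list;
 *
 *	ret = trace_pid_write(old_list, &new_list, ubuf, cnt);
 *	if (ret < 0)
 *		return ret;	/* old_list is left untouched */
 *	/* publish new_list (e.g. via rcu_assign_pointer()), then free old_list */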
*/ pid_list = trace_pid_list_alloc(); if (!pid_list) { trace_parser_put(&parser); return -ENOMEM; } if (filtered_pids) { /* copy the current bits to the new max */ ret = trace_pid_list_first(filtered_pids, &pid); while (!ret) { ret = trace_pid_list_set(pid_list, pid); if (ret < 0) goto out; ret = trace_pid_list_next(filtered_pids, pid + 1, &pid); nr_pids++; } } ret = 0; while (cnt > 0) { pos = 0; ret = trace_get_user(&parser, ubuf, cnt, &pos); if (ret < 0) break; read += ret; ubuf += ret; cnt -= ret; if (!trace_parser_loaded(&parser)) break; ret = -EINVAL; if (kstrtoul(parser.buffer, 0, &val)) break; pid = (pid_t)val; if (trace_pid_list_set(pid_list, pid) < 0) { ret = -1; break; } nr_pids++; trace_parser_clear(&parser); ret = 0; } out: trace_parser_put(&parser); if (ret < 0) { trace_pid_list_free(pid_list); return ret; } if (!nr_pids) { /* Cleared the list of pids */ trace_pid_list_free(pid_list); pid_list = NULL; } *new_pid_list = pid_list; return read; } static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu) { u64 ts; /* Early boot up does not have a buffer yet */ if (!buf->buffer) return trace_clock_local(); ts = ring_buffer_time_stamp(buf->buffer); ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); return ts; } u64 ftrace_now(int cpu) { return buffer_ftrace_now(&global_trace.array_buffer, cpu); } /** * tracing_is_enabled - Show if global_trace has been enabled * * Shows if the global trace has been enabled or not. It uses the * mirror flag "buffer_disabled" to be used in fast paths such as for * the irqsoff tracer. But it may be inaccurate due to races. If you * need to know the accurate state, use tracing_is_on() which is a little * slower, but accurate. */ int tracing_is_enabled(void) { /* * For quick access (irqsoff uses this in fast path), just * return the mirror variable of the state of the ring buffer. * It's a little racy, but we don't really care. */ return !global_trace.buffer_disabled; } /* * trace_buf_size is the size in bytes that is allocated * for a buffer. Note, the number of bytes is always rounded * to page size. * * This number is purposely set to a low number of 16384. * If the dump on oops happens, it will be much appreciated * to not have to wait for all that output. Anyway this can be * boot time and run time configurable. */ #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; /* trace_types holds a link list of available tracers. */ static struct tracer *trace_types __read_mostly; /* * trace_types_lock is used to protect the trace_types list. */ DEFINE_MUTEX(trace_types_lock); /* * serialize the access of the ring buffer * * ring buffer serializes readers, but it is low level protection. * The validity of the events (which returns by ring_buffer_peek() ..etc) * are not protected by ring buffer. * * The content of events may become garbage if we allow other process consumes * these events concurrently: * A) the page of the consumed events may become a normal page * (not reader page) in ring buffer, and this page will be rewritten * by events producer. * B) The page of the consumed events may become a page for splice_read, * and this page will be returned to system. * * These primitives allow multi process access to different cpu ring buffer * concurrently. * * These primitives don't distinguish read-only and read-consume access. * Multi read-only access are also serialized. 
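 *
 * Illustrative usage sketch, assuming @cpu is either a CPU number or
 * RING_BUFFER_ALL_CPUS:
 *
 *	trace_access_lock(cpu);
 *	/* ... consume events from that cpu (or from all cpus) ... */
 *	trace_access_unlock(cpu);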
*/ #ifdef CONFIG_SMP static DECLARE_RWSEM(all_cpu_access_lock); static DEFINE_PER_CPU(struct mutex, cpu_access_lock); static inline void trace_access_lock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { /* gain it for accessing the whole ring buffer. */ down_write(&all_cpu_access_lock); } else { /* gain it for accessing a cpu ring buffer. */ /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */ down_read(&all_cpu_access_lock); /* Secondly block other access to this @cpu ring buffer. */ mutex_lock(&per_cpu(cpu_access_lock, cpu)); } } static inline void trace_access_unlock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { up_write(&all_cpu_access_lock); } else { mutex_unlock(&per_cpu(cpu_access_lock, cpu)); up_read(&all_cpu_access_lock); } } static inline void trace_access_lock_init(void) { int cpu; for_each_possible_cpu(cpu) mutex_init(&per_cpu(cpu_access_lock, cpu)); } #else static DEFINE_MUTEX(access_lock); static inline void trace_access_lock(int cpu) { (void)cpu; mutex_lock(&access_lock); } static inline void trace_access_unlock(int cpu) { (void)cpu; mutex_unlock(&access_lock); } static inline void trace_access_lock_init(void) { } #endif #ifdef CONFIG_STACKTRACE static void __ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs); static inline void ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs); #else static inline void __ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs) { } static inline void ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned long trace_ctx, int skip, struct pt_regs *regs) { } #endif static __always_inline void trace_event_setup(struct ring_buffer_event *event, int type, unsigned int trace_ctx) { struct trace_entry *ent = ring_buffer_event_data(event); tracing_generic_entry_update(ent, type, trace_ctx); } static __always_inline struct ring_buffer_event * __trace_buffer_lock_reserve(struct trace_buffer *buffer, int type, unsigned long len, unsigned int trace_ctx) { struct ring_buffer_event *event; event = ring_buffer_lock_reserve(buffer, len); if (event != NULL) trace_event_setup(event, type, trace_ctx); return event; } void tracer_tracing_on(struct trace_array *tr) { if (tr->array_buffer.buffer) ring_buffer_record_on(tr->array_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to * know if the ring buffer has been disabled, but it can handle * races of where it gets disabled but we still do a record. * As the check is in the fast path of the tracers, it is more * important to be fast than accurate. */ tr->buffer_disabled = 0; } /** * tracing_on - enable tracing buffers * * This function enables tracing buffers that may have been * disabled with tracing_off. 
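 *
 * A minimal sketch of pairing it with tracing_off(), e.g. to freeze the
 * buffers around an interesting event and resume recording afterwards:
 *
 *	tracing_off();
 *	/* ... read or dump the frozen trace ... */
 *	tracing_on();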
*/ void tracing_on(void) { tracer_tracing_on(&global_trace); } EXPORT_SYMBOL_GPL(tracing_on); static __always_inline void __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event) { __this_cpu_write(trace_taskinfo_save, true); /* If this is the temp buffer, we need to commit fully */ if (this_cpu_read(trace_buffered_event) == event) { /* Length is in event->array[0] */ ring_buffer_write(buffer, event->array[0], &event->array[1]); /* Release the temp buffer */ this_cpu_dec(trace_buffered_event_cnt); /* ring_buffer_unlock_commit() enables preemption */ preempt_enable_notrace(); } else ring_buffer_unlock_commit(buffer); } int __trace_array_puts(struct trace_array *tr, unsigned long ip, const char *str, int size) { struct ring_buffer_event *event; struct trace_buffer *buffer; struct print_entry *entry; unsigned int trace_ctx; int alloc; if (!(tr->trace_flags & TRACE_ITER(PRINTK))) return 0; if (unlikely(tracing_selftest_running && tr == &global_trace)) return 0; if (unlikely(tracing_disabled)) return 0; alloc = sizeof(*entry) + size + 2; /* possible \n added */ trace_ctx = tracing_gen_ctx(); buffer = tr->array_buffer.buffer; guard(ring_buffer_nest)(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, trace_ctx); if (!event) return 0; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, str, size); /* Add a newline if necessary */ if (entry->buf[size - 1] != '\n') { entry->buf[size] = '\n'; entry->buf[size + 1] = '\0'; } else entry->buf[size] = '\0'; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL); return size; } EXPORT_SYMBOL_GPL(__trace_array_puts); /** * __trace_puts - write a constant string into the trace buffer. * @ip: The address of the caller * @str: The constant string to write * @size: The size of the string. */ int __trace_puts(unsigned long ip, const char *str, int size) { return __trace_array_puts(printk_trace, ip, str, size); } EXPORT_SYMBOL_GPL(__trace_puts); /** * __trace_bputs - write the pointer to a constant string into trace buffer * @ip: The address of the caller * @str: The constant string to write to the buffer to */ int __trace_bputs(unsigned long ip, const char *str) { struct trace_array *tr = READ_ONCE(printk_trace); struct ring_buffer_event *event; struct trace_buffer *buffer; struct bputs_entry *entry; unsigned int trace_ctx; int size = sizeof(struct bputs_entry); if (!printk_binsafe(tr)) return __trace_puts(ip, str, strlen(str)); if (!(tr->trace_flags & TRACE_ITER(PRINTK))) return 0; if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; trace_ctx = tracing_gen_ctx(); buffer = tr->array_buffer.buffer; guard(ring_buffer_nest)(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, trace_ctx); if (!event) return 0; entry = ring_buffer_event_data(event); entry->ip = ip; entry->str = str; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL); return 1; } EXPORT_SYMBOL_GPL(__trace_bputs); #ifdef CONFIG_TRACER_SNAPSHOT static void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data) { struct tracer *tracer = tr->current_trace; unsigned long flags; if (in_nmi()) { trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); trace_array_puts(tr, "*** snapshot is being ignored ***\n"); return; } if (!tr->allocated_snapshot) { trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n"); trace_array_puts(tr, "*** stopping trace here! 
***\n"); tracer_tracing_off(tr); return; } /* Note, snapshot can not be used when the tracer uses it */ if (tracer->use_max_tr) { trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n"); trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); return; } if (tr->mapped) { trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n"); trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); return; } local_irq_save(flags); update_max_tr(tr, current, smp_processor_id(), cond_data); local_irq_restore(flags); } void tracing_snapshot_instance(struct trace_array *tr) { tracing_snapshot_instance_cond(tr, NULL); } /** * tracing_snapshot - take a snapshot of the current buffer. * * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. * * Note, make sure to allocate the snapshot with either * a tracing_snapshot_alloc(), or by doing it manually * with: echo 1 > /sys/kernel/tracing/snapshot * * If the snapshot buffer is not allocated, it will stop tracing. * Basically making a permanent snapshot. */ void tracing_snapshot(void) { struct trace_array *tr = &global_trace; tracing_snapshot_instance(tr); } EXPORT_SYMBOL_GPL(tracing_snapshot); /** * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. * @tr: The tracing instance to snapshot * @cond_data: The data to be tested conditionally, and possibly saved * * This is the same as tracing_snapshot() except that the snapshot is * conditional - the snapshot will only happen if the * cond_snapshot.update() implementation receiving the cond_data * returns true, which means that the trace array's cond_snapshot * update() operation used the cond_data to determine whether the * snapshot should be taken, and if it was, presumably saved it along * with the snapshot. */ void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) { tracing_snapshot_instance_cond(tr, cond_data); } EXPORT_SYMBOL_GPL(tracing_snapshot_cond); /** * tracing_cond_snapshot_data - get the user data associated with a snapshot * @tr: The tracing instance * * When the user enables a conditional snapshot using * tracing_snapshot_cond_enable(), the user-defined cond_data is saved * with the snapshot. This accessor is used to retrieve it. * * Should not be called from cond_snapshot.update(), since it takes * the tr->max_lock lock, which the code calling * cond_snapshot.update() has already done. * * Returns the cond_data associated with the trace array's snapshot. 
*/ void *tracing_cond_snapshot_data(struct trace_array *tr) { void *cond_data = NULL; local_irq_disable(); arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) cond_data = tr->cond_snapshot->cond_data; arch_spin_unlock(&tr->max_lock); local_irq_enable(); return cond_data; } EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, struct array_buffer *size_buf, int cpu_id); static void set_buffer_entries(struct array_buffer *buf, unsigned long val); int tracing_alloc_snapshot_instance(struct trace_array *tr) { int order; int ret; if (!tr->allocated_snapshot) { /* Make the snapshot buffer have the same order as main buffer */ order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order); if (ret < 0) return ret; /* allocate spare buffer */ ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->array_buffer, RING_BUFFER_ALL_CPUS); if (ret < 0) return ret; tr->allocated_snapshot = true; } return 0; } static void free_snapshot(struct trace_array *tr) { /* * We don't free the ring buffer. instead, resize it because * The max_tr ring buffer has some state (e.g. ring->clock) and * we want preserve it. */ ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0); ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); set_buffer_entries(&tr->max_buffer, 1); tracing_reset_online_cpus(&tr->max_buffer); tr->allocated_snapshot = false; } static int tracing_arm_snapshot_locked(struct trace_array *tr) { int ret; lockdep_assert_held(&trace_types_lock); spin_lock(&tr->snapshot_trigger_lock); if (tr->snapshot == UINT_MAX || tr->mapped) { spin_unlock(&tr->snapshot_trigger_lock); return -EBUSY; } tr->snapshot++; spin_unlock(&tr->snapshot_trigger_lock); ret = tracing_alloc_snapshot_instance(tr); if (ret) { spin_lock(&tr->snapshot_trigger_lock); tr->snapshot--; spin_unlock(&tr->snapshot_trigger_lock); } return ret; } int tracing_arm_snapshot(struct trace_array *tr) { guard(mutex)(&trace_types_lock); return tracing_arm_snapshot_locked(tr); } void tracing_disarm_snapshot(struct trace_array *tr) { spin_lock(&tr->snapshot_trigger_lock); if (!WARN_ON(!tr->snapshot)) tr->snapshot--; spin_unlock(&tr->snapshot_trigger_lock); } /** * tracing_alloc_snapshot - allocate snapshot buffer. * * This only allocates the snapshot buffer if it isn't already * allocated - it doesn't also take a snapshot. * * This is meant to be used in cases where the snapshot buffer needs * to be set up for events that can't sleep but need to be able to * trigger a snapshot. */ int tracing_alloc_snapshot(void) { struct trace_array *tr = &global_trace; int ret; ret = tracing_alloc_snapshot_instance(tr); WARN_ON(ret < 0); return ret; } EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); /** * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer. * * This is similar to tracing_snapshot(), but it will allocate the * snapshot buffer if it isn't already allocated. Use this only * where it is safe to sleep, as the allocation may sleep. * * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. 
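 *
 * Minimal illustrative sketch contrasting the two call patterns:
 *
 *	tracing_snapshot_alloc();	/* may sleep: allocate, then snapshot */
 *
 *	/* or pre-allocate for a later snapshot from atomic context: */
 *	tracing_alloc_snapshot();
 *	...
 *	tracing_snapshot();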
*/ void tracing_snapshot_alloc(void) { int ret; ret = tracing_alloc_snapshot(); if (ret < 0) return; tracing_snapshot(); } EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); /** * tracing_snapshot_cond_enable - enable conditional snapshot for an instance * @tr: The tracing instance * @cond_data: User data to associate with the snapshot * @update: Implementation of the cond_snapshot update function * * Check whether the conditional snapshot for the given instance has * already been enabled, or if the current tracer is already using a * snapshot; if so, return -EBUSY, else create a cond_snapshot and * save the cond_data and update function inside. * * Returns 0 if successful, error otherwise. */ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) { struct cond_snapshot *cond_snapshot __free(kfree) = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); int ret; if (!cond_snapshot) return -ENOMEM; cond_snapshot->cond_data = cond_data; cond_snapshot->update = update; guard(mutex)(&trace_types_lock); if (tr->current_trace->use_max_tr) return -EBUSY; /* * The cond_snapshot can only change to NULL without the * trace_types_lock. We don't care if we race with it going * to NULL, but we want to make sure that it's not set to * something other than NULL when we get here, which we can * do safely with only holding the trace_types_lock and not * having to take the max_lock. */ if (tr->cond_snapshot) return -EBUSY; ret = tracing_arm_snapshot_locked(tr); if (ret) return ret; local_irq_disable(); arch_spin_lock(&tr->max_lock); tr->cond_snapshot = no_free_ptr(cond_snapshot); arch_spin_unlock(&tr->max_lock); local_irq_enable(); return 0; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); /** * tracing_snapshot_cond_disable - disable conditional snapshot for an instance * @tr: The tracing instance * * Check whether the conditional snapshot for the given instance is * enabled; if so, free the cond_snapshot associated with it, * otherwise return -EINVAL. * * Returns 0 if successful, error otherwise. 
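 *
 * Illustrative sketch; my_update() and my_data are hypothetical, and the
 * update callback is assumed to return true when the snapshot should be
 * taken:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return true;	/* decide based on cond_data */
 *	}
 *	...
 *	ret = tracing_snapshot_cond_enable(tr, my_data, my_update);
 *	if (!ret)
 *		tracing_snapshot_cond(tr, my_data);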
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	tracing_disarm_snapshot(tr);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot(tr)	do { } while (0)
#define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
}

/**
 * tracer_tracing_disable() - temporarily disable writes to the buffer
 * @tr: The trace array to disable its buffer for
 *
 * Expects tracer_tracing_enable() to re-enable tracing.
 * The difference between this and tracer_tracing_off() is that this
 * is a counter and can nest, whereas tracer_tracing_off() can
 * be called multiple times and a single tracer_tracing_on() will
 * enable it.
 */
void tracer_tracing_disable(struct trace_array *tr)
{
	if (WARN_ON_ONCE(!tr->array_buffer.buffer))
		return;

	ring_buffer_record_disable(tr->array_buffer.buffer);
}

/**
 * tracer_tracing_enable() - counterpart of tracer_tracing_disable()
 * @tr: The trace array that had tracer_tracing_disable() called on it
 *
 * This is called after tracer_tracing_disable() has been called on @tr,
 * when it's safe to re-enable tracing.
 */
void tracer_tracing_enable(struct trace_array *tr)
{
	if (WARN_ON_ONCE(!tr->array_buffer.buffer))
		return;

	ring_buffer_record_enable(tr->array_buffer.buffer);
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
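 *
 * A common illustrative pattern is to stop recording as soon as a problem
 * is detected, so the buffer keeps the events leading up to it:
 *
 *	if (something_went_wrong)	/* hypothetical condition */
 *		tracing_off();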
*/ void tracing_off(void) { tracer_tracing_off(&global_trace); } EXPORT_SYMBOL_GPL(tracing_off); void disable_trace_on_warning(void) { if (__disable_trace_on_warning) { trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_, "Disabling tracing due to warning\n"); tracing_off(); } } /** * tracer_tracing_is_on - show real state of ring buffer enabled * @tr : the trace array to know if ring buffer is enabled * * Shows real state of the ring buffer if it is enabled or not. */ bool tracer_tracing_is_on(struct trace_array *tr) { if (tr->array_buffer.buffer) return ring_buffer_record_is_set_on(tr->array_buffer.buffer); return !tr->buffer_disabled; } /** * tracing_is_on - show state of ring buffers enabled */ int tracing_is_on(void) { return tracer_tracing_is_on(&global_trace); } EXPORT_SYMBOL_GPL(tracing_is_on); static int __init set_buf_size(char *str) { unsigned long buf_size; if (!str) return 0; buf_size = memparse(str, &str); /* * nr_entries can not be zero and the startup * tests require some buffer space. Therefore * ensure we have at least 4096 bytes of buffer. */ trace_buf_size = max(4096UL, buf_size); return 1; } __setup("trace_buf_size=", set_buf_size); static int __init set_tracing_thresh(char *str) { unsigned long threshold; int ret; if (!str) return 0; ret = kstrtoul(str, 0, &threshold); if (ret < 0) return 0; tracing_thresh = threshold * 1000; return 1; } __setup("tracing_thresh=", set_tracing_thresh); unsigned long nsecs_to_usecs(unsigned long nsecs) { return nsecs / 1000; } /* * TRACE_FLAGS is defined as a tuple matching bit masks with strings. * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list * of strings in the order that the evals (enum) were defined. */ #undef C #define C(a, b) b /* These must match the bit positions in trace_iterator_flags */ static const char *trace_options[] = { TRACE_FLAGS NULL }; static struct { u64 (*func)(void); const char *name; int in_ns; /* is this clock in nanoseconds? */ } trace_clocks[] = { { trace_clock_local, "local", 1 }, { trace_clock_global, "global", 1 }, { trace_clock_counter, "counter", 0 }, { trace_clock_jiffies, "uptime", 0 }, { trace_clock, "perf", 1 }, { ktime_get_mono_fast_ns, "mono", 1 }, { ktime_get_raw_fast_ns, "mono_raw", 1 }, { ktime_get_boot_fast_ns, "boot", 1 }, { ktime_get_tai_fast_ns, "tai", 1 }, ARCH_TRACE_CLOCKS }; bool trace_clock_in_ns(struct trace_array *tr) { if (trace_clocks[tr->clock_id].in_ns) return true; return false; } /* * trace_parser_get_init - gets the buffer for trace parser */ int trace_parser_get_init(struct trace_parser *parser, int size) { memset(parser, 0, sizeof(*parser)); parser->buffer = kmalloc(size, GFP_KERNEL); if (!parser->buffer) return 1; parser->size = size; return 0; } /* * trace_parser_put - frees the buffer for trace parser */ void trace_parser_put(struct trace_parser *parser) { kfree(parser->buffer); parser->buffer = NULL; } /* * trace_get_user - reads the user input string separated by space * (matched by isspace(ch)) * * For each string found the 'struct trace_parser' is updated, * and the function returns. * * Returns number of bytes read. * * See kernel/trace/trace.h for 'struct trace_parser' details. 
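 *
 * Illustrative sketch of a write handler using it (mirrors trace_pid_write()
 * above; error handling trimmed, PARSER_SIZE is a caller-chosen bound):
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PARSER_SIZE))
 *		return -ENOMEM;
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret > 0 && trace_parser_loaded(&parser)) {
 *		/* parser.buffer holds one NUL-terminated word */
 *	}
 *	trace_parser_put(&parser);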
*/ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, size_t cnt, loff_t *ppos) { char ch; size_t read = 0; ssize_t ret; if (!*ppos) trace_parser_clear(parser); ret = get_user(ch, ubuf++); if (ret) goto fail; read++; cnt--; /* * The parser is not finished with the last write, * continue reading the user input without skipping spaces. */ if (!parser->cont) { /* skip white space */ while (cnt && isspace(ch)) { ret = get_user(ch, ubuf++); if (ret) goto fail; read++; cnt--; } parser->idx = 0; /* only spaces were written */ if (isspace(ch) || !ch) { *ppos += read; return read; } } /* read the non-space input */ while (cnt && !isspace(ch) && ch) { if (parser->idx < parser->size - 1) parser->buffer[parser->idx++] = ch; else { ret = -EINVAL; goto fail; } ret = get_user(ch, ubuf++); if (ret) goto fail; read++; cnt--; } /* We either got finished input or we have to wait for another call. */ if (isspace(ch) || !ch) { parser->buffer[parser->idx] = 0; parser->cont = false; } else if (parser->idx < parser->size - 1) { parser->cont = true; parser->buffer[parser->idx++] = ch; /* Make sure the parsed string always terminates with '\0'. */ parser->buffer[parser->idx] = 0; } else { ret = -EINVAL; goto fail; } *ppos += read; return read; fail: trace_parser_fail(parser); return ret; } /* TODO add a seq_buf_to_buffer() */ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) { int len; if (trace_seq_used(s) <= s->readpos) return -EBUSY; len = trace_seq_used(s) - s->readpos; if (cnt > len) cnt = len; memcpy(buf, s->buffer + s->readpos, cnt); s->readpos += cnt; return cnt; } unsigned long __read_mostly tracing_thresh; #ifdef CONFIG_TRACER_MAX_TRACE static const struct file_operations tracing_max_lat_fops; #ifdef LATENCY_FS_NOTIFY static struct workqueue_struct *fsnotify_wq; static void latency_fsnotify_workfn(struct work_struct *work) { struct trace_array *tr = container_of(work, struct trace_array, fsnotify_work); fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); } static void latency_fsnotify_workfn_irq(struct irq_work *iwork) { struct trace_array *tr = container_of(iwork, struct trace_array, fsnotify_irqwork); queue_work(fsnotify_wq, &tr->fsnotify_work); } static void trace_create_maxlat_file(struct trace_array *tr, struct dentry *d_tracer) { INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); tr->d_max_latency = trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, d_tracer, tr, &tracing_max_lat_fops); } __init static int latency_fsnotify_init(void) { fsnotify_wq = alloc_workqueue("tr_max_lat_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); if (!fsnotify_wq) { pr_err("Unable to allocate tr_max_lat_wq\n"); return -ENOMEM; } return 0; } late_initcall_sync(latency_fsnotify_init); void latency_fsnotify(struct trace_array *tr) { if (!fsnotify_wq) return; /* * We cannot call queue_work(&tr->fsnotify_work) from here because it's * possible that we are called from __schedule() or do_idle(), which * could cause a deadlock. */ irq_work_queue(&tr->fsnotify_irqwork); } #else /* !LATENCY_FS_NOTIFY */ #define trace_create_maxlat_file(tr, d_tracer) \ trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \ d_tracer, tr, &tracing_max_lat_fops) #endif /* * Copy the new maximum trace into the separate maximum-trace * structure. 
(this way the maximum trace is permanently saved, * for later retrieval via /sys/kernel/tracing/tracing_max_latency) */ static void __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { struct array_buffer *trace_buf = &tr->array_buffer; struct array_buffer *max_buf = &tr->max_buffer; struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); max_buf->cpu = cpu; max_buf->time_start = data->preempt_timestamp; max_data->saved_latency = tr->max_latency; max_data->critical_start = data->critical_start; max_data->critical_end = data->critical_end; strscpy(max_data->comm, tsk->comm); max_data->pid = tsk->pid; /* * If tsk == current, then use current_uid(), as that does not use * RCU. The irq tracer can be called out of RCU scope. */ if (tsk == current) max_data->uid = current_uid(); else max_data->uid = task_uid(tsk); max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; max_data->policy = tsk->policy; max_data->rt_priority = tsk->rt_priority; /* record this tasks comm */ tracing_record_cmdline(tsk); latency_fsnotify(tr); } /** * update_max_tr - snapshot all trace buffers from global_trace to max_tr * @tr: tracer * @tsk: the task with the latency * @cpu: The cpu that initiated the trace. * @cond_data: User data associated with a conditional snapshot * * Flip the buffers between the @tr and the max_tr and record information * about which task was the cause of this latency. */ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, void *cond_data) { if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); /* Inherit the recordable setting from array_buffer */ if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) ring_buffer_record_on(tr->max_buffer.buffer); else ring_buffer_record_off(tr->max_buffer.buffer); #ifdef CONFIG_TRACER_SNAPSHOT if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) { arch_spin_unlock(&tr->max_lock); return; } #endif swap(tr->array_buffer.buffer, tr->max_buffer.buffer); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); /* Any waiters on the old snapshot buffer need to wake up */ ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS); } /** * update_max_tr_single - only copy one trace over, and reset the rest * @tr: tracer * @tsk: task with the latency * @cpu: the cpu of the buffer to copy. * * Flip the trace of a single CPU buffer between the @tr and the max_tr. */ void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) { int ret; if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); if (ret == -EBUSY) { /* * We failed to swap the buffer due to a commit taking * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. * Another reason is resize is in progress. 
*/ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, "Failed to swap buffers due to commit or resize in progress\n"); } WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); } #endif /* CONFIG_TRACER_MAX_TRACE */ struct pipe_wait { struct trace_iterator *iter; int wait_index; }; static bool wait_pipe_cond(void *data) { struct pipe_wait *pwait = data; struct trace_iterator *iter = pwait->iter; if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index) return true; return iter->closed; } static int wait_on_pipe(struct trace_iterator *iter, int full) { struct pipe_wait pwait; int ret; /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return 0; pwait.wait_index = atomic_read_acquire(&iter->wait_index); pwait.iter = iter; ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full, wait_pipe_cond, &pwait); #ifdef CONFIG_TRACER_MAX_TRACE /* * Make sure this is still the snapshot buffer, as if a snapshot were * to happen, this would now be the main buffer. */ if (iter->snapshot) iter->array_buffer = &iter->tr->max_buffer; #endif return ret; } #ifdef CONFIG_FTRACE_STARTUP_TEST static bool selftests_can_run; struct trace_selftests { struct list_head list; struct tracer *type; }; static LIST_HEAD(postponed_selftests); static int save_selftest(struct tracer *type) { struct trace_selftests *selftest; selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); if (!selftest) return -ENOMEM; selftest->type = type; list_add(&selftest->list, &postponed_selftests); return 0; } static int run_tracer_selftest(struct tracer *type) { struct trace_array *tr = &global_trace; struct tracer_flags *saved_flags = tr->current_trace_flags; struct tracer *saved_tracer = tr->current_trace; int ret; if (!type->selftest || tracing_selftest_disabled) return 0; /* * If a tracer registers early in boot up (before scheduling is * initialized and such), then do not run its selftests yet. * Instead, run it a little later in the boot process. */ if (!selftests_can_run) return save_selftest(type); if (!tracing_is_on()) { pr_warn("Selftest for tracer %s skipped due to tracing disabled\n", type->name); return 0; } /* * Run a selftest on this tracer. * Here we reset the trace buffer, and set the current * tracer to be this tracer. The tracer can then run some * internal tracing to verify that everything is in order. * If we fail, we do not register this tracer. */ tracing_reset_online_cpus(&tr->array_buffer); tr->current_trace = type; tr->current_trace_flags = type->flags ? 
: type->default_flags; #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { /* If we expanded the buffers, make sure the max is expanded too */ if (tr->ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, RING_BUFFER_ALL_CPUS); tr->allocated_snapshot = true; } #endif /* the test is responsible for initializing and enabling */ pr_info("Testing tracer %s: ", type->name); ret = type->selftest(type, tr); /* the test is responsible for resetting too */ tr->current_trace = saved_tracer; tr->current_trace_flags = saved_flags; if (ret) { printk(KERN_CONT "FAILED!\n"); /* Add the warning after printing 'FAILED' */ WARN_ON(1); return -1; } /* Only reset on passing, to avoid touching corrupted buffers */ tracing_reset_online_cpus(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { tr->allocated_snapshot = false; /* Shrink the max buffer again */ if (tr->ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); } #endif printk(KERN_CONT "PASSED\n"); return 0; } static int do_run_tracer_selftest(struct tracer *type) { int ret; /* * Tests can take a long time, especially if they are run one after the * other, as does happen during bootup when all the tracers are * registered. This could cause the soft lockup watchdog to trigger. */ cond_resched(); tracing_selftest_running = true; ret = run_tracer_selftest(type); tracing_selftest_running = false; return ret; } static __init int init_trace_selftests(void) { struct trace_selftests *p, *n; struct tracer *t, **last; int ret; selftests_can_run = true; guard(mutex)(&trace_types_lock); if (list_empty(&postponed_selftests)) return 0; pr_info("Running postponed tracer tests:\n"); tracing_selftest_running = true; list_for_each_entry_safe(p, n, &postponed_selftests, list) { /* This loop can take minutes when sanitizers are enabled, so * lets make sure we allow RCU processing. */ cond_resched(); ret = run_tracer_selftest(p->type); /* If the test fails, then warn and remove from available_tracers */ if (ret < 0) { WARN(1, "tracer: %s failed selftest, disabling\n", p->type->name); last = &trace_types; for (t = trace_types; t; t = t->next) { if (t == p->type) { *last = t->next; break; } last = &t->next; } } list_del(&p->list); kfree(p); } tracing_selftest_running = false; return 0; } core_initcall(init_trace_selftests); #else static inline int do_run_tracer_selftest(struct tracer *type) { return 0; } #endif /* CONFIG_FTRACE_STARTUP_TEST */ static int add_tracer(struct trace_array *tr, struct tracer *t); static void __init apply_trace_boot_options(void); static void free_tracers(struct trace_array *tr) { struct tracers *t, *n; lockdep_assert_held(&trace_types_lock); list_for_each_entry_safe(t, n, &tr->tracers, list) { list_del(&t->list); kfree(t->flags); kfree(t); } } /** * register_tracer - register a tracer with the ftrace system. * @type: the plugin for the tracer * * Register a new plugin tracer. 
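 *
 * Minimal illustrative sketch (my_tracer and its fields are hypothetical;
 * real tracers fill in many more callbacks):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *	};
 *
 *	static int __init init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}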
*/ int __init register_tracer(struct tracer *type) { struct trace_array *tr; struct tracer *t; int ret = 0; if (!type->name) { pr_info("Tracer must have a name\n"); return -1; } if (strlen(type->name) >= MAX_TRACER_SIZE) { pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); return -1; } if (security_locked_down(LOCKDOWN_TRACEFS)) { pr_warn("Can not register tracer %s due to lockdown\n", type->name); return -EPERM; } mutex_lock(&trace_types_lock); for (t = trace_types; t; t = t->next) { if (strcmp(type->name, t->name) == 0) { /* already found */ pr_info("Tracer %s already registered\n", type->name); ret = -1; goto out; } } /* store the tracer for __set_tracer_option */ if (type->flags) type->flags->trace = type; ret = do_run_tracer_selftest(type); if (ret < 0) goto out; list_for_each_entry(tr, &ftrace_trace_arrays, list) { ret = add_tracer(tr, type); if (ret < 0) { /* The tracer will still exist but without options */ pr_warn("Failed to create tracer options for %s\n", type->name); break; } } type->next = trace_types; trace_types = type; out: mutex_unlock(&trace_types_lock); if (ret || !default_bootup_tracer) return ret; if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) return 0; printk(KERN_INFO "Starting tracer '%s'\n", type->name); /* Do we want this tracer to start on bootup? */ WARN_ON(tracing_set_tracer(&global_trace, type->name) < 0); default_bootup_tracer = NULL; apply_trace_boot_options(); /* disable other selftests, since this will break it. */ disable_tracing_selftest("running a tracer"); return 0; } static void tracing_reset_cpu(struct array_buffer *buf, int cpu) { struct trace_buffer *buffer = buf->buffer; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); ring_buffer_reset_cpu(buffer, cpu); ring_buffer_record_enable(buffer); } void tracing_reset_online_cpus(struct array_buffer *buf) { struct trace_buffer *buffer = buf->buffer; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); buf->time_start = buffer_ftrace_now(buf, buf->cpu); ring_buffer_reset_online_cpus(buffer); ring_buffer_record_enable(buffer); } static void tracing_reset_all_cpus(struct array_buffer *buf) { struct trace_buffer *buffer = buf->buffer; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); buf->time_start = buffer_ftrace_now(buf, buf->cpu); ring_buffer_reset(buffer); ring_buffer_record_enable(buffer); } /* Must have trace_types_lock held */ void tracing_reset_all_online_cpus_unlocked(void) { struct trace_array *tr; lockdep_assert_held(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->clear_trace) continue; tr->clear_trace = false; tracing_reset_online_cpus(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE tracing_reset_online_cpus(&tr->max_buffer); #endif } } void tracing_reset_all_online_cpus(void) { guard(mutex)(&trace_types_lock); tracing_reset_all_online_cpus_unlocked(); } int is_tracing_stopped(void) { return global_trace.stop_count; } static void tracing_start_tr(struct trace_array *tr) { struct trace_buffer *buffer; if (tracing_disabled) return; guard(raw_spinlock_irqsave)(&tr->start_lock); if (--tr->stop_count) { if (WARN_ON_ONCE(tr->stop_count < 0)) { /* Someone screwed up their debugging */ tr->stop_count = 0; } return; } /* Prevent the buffers from switching */ arch_spin_lock(&tr->max_lock); buffer = tr->array_buffer.buffer; if (buffer) 
ring_buffer_record_enable(buffer); #ifdef CONFIG_TRACER_MAX_TRACE buffer = tr->max_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); #endif arch_spin_unlock(&tr->max_lock); } /** * tracing_start - quick start of the tracer * * If tracing is enabled but was stopped by tracing_stop, * this will start the tracer back up. */ void tracing_start(void) { return tracing_start_tr(&global_trace); } static void tracing_stop_tr(struct trace_array *tr) { struct trace_buffer *buffer; guard(raw_spinlock_irqsave)(&tr->start_lock); if (tr->stop_count++) return; /* Prevent the buffers from switching */ arch_spin_lock(&tr->max_lock); buffer = tr->array_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #ifdef CONFIG_TRACER_MAX_TRACE buffer = tr->max_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #endif arch_spin_unlock(&tr->max_lock); } /** * tracing_stop - quick stop of the tracer * * Light weight way to stop tracing. Use in conjunction with * tracing_start. */ void tracing_stop(void) { return tracing_stop_tr(&global_trace); } /* * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function * simplifies those functions and keeps them in sync. */ enum print_line_t trace_handle_return(struct trace_seq *s) { return trace_seq_has_overflowed(s) ? TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; } EXPORT_SYMBOL_GPL(trace_handle_return); static unsigned short migration_disable_value(void) { #if defined(CONFIG_SMP) return current->migration_disabled; #else return 0; #endif } unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) { unsigned int trace_flags = irqs_status; unsigned int pc; pc = preempt_count(); if (pc & NMI_MASK) trace_flags |= TRACE_FLAG_NMI; if (pc & HARDIRQ_MASK) trace_flags |= TRACE_FLAG_HARDIRQ; if (in_serving_softirq()) trace_flags |= TRACE_FLAG_SOFTIRQ; if (softirq_count() >> (SOFTIRQ_SHIFT + 1)) trace_flags |= TRACE_FLAG_BH_OFF; if (tif_need_resched()) trace_flags |= TRACE_FLAG_NEED_RESCHED; if (test_preempt_need_resched()) trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY)) trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY; return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) | (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; } struct ring_buffer_event * trace_buffer_lock_reserve(struct trace_buffer *buffer, int type, unsigned long len, unsigned int trace_ctx) { return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx); } DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); DEFINE_PER_CPU(int, trace_buffered_event_cnt); static int trace_buffered_event_ref; /** * trace_buffered_event_enable - enable buffering events * * When events are being filtered, it is quicker to use a temporary * buffer to write the event data into if there's a likely chance * that it will not be committed. The discard of the ring buffer * is not as fast as committing, and is much slower than copying * a commit. * * When an event is to be filtered, allocate per cpu buffers to * write the event data into, and if the event is filtered and discarded * it is simply dropped, otherwise, the entire data is to be committed * in one shot. 
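 *
 * Illustrative sketch: calls are reference counted, expect event_mutex to be
 * held, and each enable is paired with a later disable when the filter is
 * removed:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		/* when a filter is added */
 *	mutex_unlock(&event_mutex);
 *
 *	/* ... later ... */
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();		/* when the filter is removed */
 *	mutex_unlock(&event_mutex);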
*/ void trace_buffered_event_enable(void) { struct ring_buffer_event *event; struct page *page; int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (trace_buffered_event_ref++) return; for_each_tracing_cpu(cpu) { page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY, 0); /* This is just an optimization and can handle failures */ if (!page) { pr_err("Failed to allocate event buffer\n"); break; } event = page_address(page); memset(event, 0, sizeof(*event)); per_cpu(trace_buffered_event, cpu) = event; scoped_guard(preempt,) { if (cpu == smp_processor_id() && __this_cpu_read(trace_buffered_event) != per_cpu(trace_buffered_event, cpu)) WARN_ON_ONCE(1); } } } static void enable_trace_buffered_event(void *data) { this_cpu_dec(trace_buffered_event_cnt); } static void disable_trace_buffered_event(void *data) { this_cpu_inc(trace_buffered_event_cnt); } /** * trace_buffered_event_disable - disable buffering events * * When a filter is removed, it is faster to not use the buffered * events, and to commit directly into the ring buffer. Free up * the temp buffers when there are no more users. This requires * special synchronization with current events. */ void trace_buffered_event_disable(void) { int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (WARN_ON_ONCE(!trace_buffered_event_ref)) return; if (--trace_buffered_event_ref) return; /* For each CPU, set the buffer as used. */ on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event, NULL, true); /* Wait for all current users to finish */ synchronize_rcu(); for_each_tracing_cpu(cpu) { free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); per_cpu(trace_buffered_event, cpu) = NULL; } /* * Wait for all CPUs that potentially started checking if they can use * their event buffer only after the previous synchronize_rcu() call and * they still read a valid pointer from trace_buffered_event. It must be * ensured they don't see cleared trace_buffered_event_cnt else they * could wrongly decide to use the pointed-to buffer which is now freed. */ synchronize_rcu(); /* For each CPU, relinquish the buffer */ on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL, true); } static struct trace_buffer *temp_buffer; struct ring_buffer_event * trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, struct trace_event_file *trace_file, int type, unsigned long len, unsigned int trace_ctx) { struct ring_buffer_event *entry; struct trace_array *tr = trace_file->tr; int val; *current_rb = tr->array_buffer.buffer; if (!tr->no_filter_buffering_ref && (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) { preempt_disable_notrace(); /* * Filtering is on, so try to use the per cpu buffer first. * This buffer will simulate a ring_buffer_event, * where the type_len is zero and the array[0] will * hold the full length. * (see include/linux/ring-buffer.h for details on * how the ring_buffer_event is structured). * * Using a temp buffer during filtering and copying it * on a matched filter is quicker than writing directly * into the ring buffer and then discarding it when * it doesn't match. That is because the discard * requires several atomic operations to get right. * Copying on match and doing nothing on a failed match * is still quicker than no copy on match, but having * to discard out of the ring buffer on a failed match. 
*/ if ((entry = __this_cpu_read(trace_buffered_event))) { int max_len = PAGE_SIZE - struct_size(entry, array, 1); val = this_cpu_inc_return(trace_buffered_event_cnt); /* * Preemption is disabled, but interrupts and NMIs * can still come in now. If that happens after * the above increment, then it will have to go * back to the old method of allocating the event * on the ring buffer, and if the filter fails, it * will have to call ring_buffer_discard_commit() * to remove it. * * Need to also check the unlikely case that the * length is bigger than the temp buffer size. * If that happens, then the reserve is pretty much * guaranteed to fail, as the ring buffer currently * only allows events less than a page. But that may * change in the future, so let the ring buffer reserve * handle the failure in that case. */ if (val == 1 && likely(len <= max_len)) { trace_event_setup(entry, type, trace_ctx); entry->array[0] = len; /* Return with preemption disabled */ return entry; } this_cpu_dec(trace_buffered_event_cnt); } /* __trace_buffer_lock_reserve() disables preemption */ preempt_enable_notrace(); } entry = __trace_buffer_lock_reserve(*current_rb, type, len, trace_ctx); /* * If tracing is off, but we have triggers enabled * we still need to look at the event data. Use the temp_buffer * to store the trace event for the trigger to use. It's recursive * safe and will not be recorded anywhere. */ if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { *current_rb = temp_buffer; entry = __trace_buffer_lock_reserve(*current_rb, type, len, trace_ctx); } return entry; } EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock); static DEFINE_MUTEX(tracepoint_printk_mutex); static void output_printk(struct trace_event_buffer *fbuffer) { struct trace_event_call *event_call; struct trace_event_file *file; struct trace_event *event; unsigned long flags; struct trace_iterator *iter = tracepoint_print_iter; /* We should never get here if iter is NULL */ if (WARN_ON_ONCE(!iter)) return; event_call = fbuffer->trace_file->event_call; if (!event_call || !event_call->event.funcs || !event_call->event.funcs->trace) return; file = fbuffer->trace_file; if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && !filter_match_preds(file->filter, fbuffer->entry))) return; event = &fbuffer->trace_file->event_call->event; raw_spin_lock_irqsave(&tracepoint_iter_lock, flags); trace_seq_init(&iter->seq); iter->ent = fbuffer->entry; event_call->event.funcs->trace(iter, 0, event); trace_seq_putc(&iter->seq, 0); printk("%s", iter->seq.buffer); raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); } int tracepoint_printk_sysctl(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int save_tracepoint_printk; int ret; guard(mutex)(&tracepoint_printk_mutex); save_tracepoint_printk = tracepoint_printk; ret = proc_dointvec(table, write, buffer, lenp, ppos); /* * This will force exiting early, as tracepoint_printk * is always zero when tracepoint_printk_iter is not allocated */ if (!tracepoint_print_iter) tracepoint_printk = 0; if (save_tracepoint_printk == tracepoint_printk) return ret; if (tracepoint_printk) static_key_enable(&tracepoint_printk_key.key); else static_key_disable(&tracepoint_printk_key.key); return ret; } void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) { enum event_trigger_type tt = ETT_NONE; struct trace_event_file *file = fbuffer->trace_file; if 
(__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, fbuffer->entry, &tt)) goto discard; if (static_key_false(&tracepoint_printk_key.key)) output_printk(fbuffer); if (static_branch_unlikely(&trace_event_exports_enabled)) ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); discard: if (tt) event_triggers_post_call(file, tt); } EXPORT_SYMBOL_GPL(trace_event_buffer_commit); /* * Skip 3: * * trace_buffer_unlock_commit_regs() * trace_event_buffer_commit() * trace_event_raw_event_xxx() */ # define STACK_SKIP 3 void trace_buffer_unlock_commit_regs(struct trace_array *tr, struct trace_buffer *buffer, struct ring_buffer_event *event, unsigned int trace_ctx, struct pt_regs *regs) { __buffer_unlock_commit(buffer, event); /* * If regs is not set, then skip the necessary functions. * Note, we can still get here via blktrace, wakeup tracer * and mmiotrace, but that's ok if they lose a function or * two. They are not that meaningful. */ ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs); ftrace_trace_userstack(tr, buffer, trace_ctx); } /* * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. */ void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, struct ring_buffer_event *event) { __buffer_unlock_commit(buffer, event); } void trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned int trace_ctx, struct ftrace_regs *fregs) { struct trace_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct ftrace_entry *entry; int size = sizeof(*entry); size += FTRACE_REGS_MAX_ARGS * !!fregs * sizeof(long); event = __trace_buffer_lock_reserve(buffer, TRACE_FN, size, trace_ctx); if (!event) return; entry = ring_buffer_event_data(event); entry->ip = ip; entry->parent_ip = parent_ip; #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API if (fregs) { for (int i = 0; i < FTRACE_REGS_MAX_ARGS; i++) entry->args[i] = ftrace_regs_get_argument(fregs, i); } #endif if (static_branch_unlikely(&trace_function_exports_enabled)) ftrace_exports(event, TRACE_EXPORT_FUNCTION); __buffer_unlock_commit(buffer, event); } #ifdef CONFIG_STACKTRACE /* Allow 4 levels of nesting: normal, softirq, irq, NMI */ #define FTRACE_KSTACK_NESTING 4 #define FTRACE_KSTACK_ENTRIES (SZ_4K / FTRACE_KSTACK_NESTING) struct ftrace_stack { unsigned long calls[FTRACE_KSTACK_ENTRIES]; }; struct ftrace_stacks { struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; }; static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs) { struct ring_buffer_event *event; unsigned int size, nr_entries; struct ftrace_stack *fstack; struct stack_entry *entry; int stackidx; /* * Add one, for this function and the call to save_stack_trace() * If regs is set, then these functions will not be in the way. */ #ifndef CONFIG_UNWINDER_ORC if (!regs) skip++; #endif guard(preempt_notrace)(); stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; /* This should never happen. If it does, yell once and skip */ if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) goto out; /* * The above __this_cpu_inc_return() is 'atomic' cpu local. An * interrupt will either see the value pre increment or post * increment. 
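 *
 * (As a sketch of why FTRACE_KSTACK_NESTING slots are enough: each
 *  context that can interrupt the one below it -- task, softirq,
 *  hardirq, NMI -- runs this same sequence on the same CPU:
 *
 *	idx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
 *	fstack = this_cpu_ptr(ftrace_stacks.stacks) + idx;
 *	...
 *	__this_cpu_dec(ftrace_stack_reserve);
 *
 *  so every nesting level sees its own private ftrace_stack slot.)
 *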
If the interrupt happens pre increment it will have * restored the counter when it returns. We just need a barrier to * keep gcc from moving things around. */ barrier(); fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx; size = ARRAY_SIZE(fstack->calls); if (regs) { nr_entries = stack_trace_save_regs(regs, fstack->calls, size, skip); } else { nr_entries = stack_trace_save(fstack->calls, size, skip); } #ifdef CONFIG_DYNAMIC_FTRACE /* Mark entry of stack trace as trampoline code */ if (tr->ops && tr->ops->trampoline) { unsigned long tramp_start = tr->ops->trampoline; unsigned long tramp_end = tramp_start + tr->ops->trampoline_size; unsigned long *calls = fstack->calls; for (int i = 0; i < nr_entries; i++) { if (calls[i] >= tramp_start && calls[i] < tramp_end) calls[i] = FTRACE_TRAMPOLINE_MARKER; } } #endif event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, struct_size(entry, caller, nr_entries), trace_ctx); if (!event) goto out; entry = ring_buffer_event_data(event); entry->size = nr_entries; memcpy(&entry->caller, fstack->calls, flex_array_size(entry, caller, nr_entries)); __buffer_unlock_commit(buffer, event); out: /* Again, don't let gcc optimize things here */ barrier(); __this_cpu_dec(ftrace_stack_reserve); } static inline void ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs) { if (!(tr->trace_flags & TRACE_ITER(STACKTRACE))) return; __ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs); } void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip) { struct trace_buffer *buffer = tr->array_buffer.buffer; if (rcu_is_watching()) { __ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL); return; } if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY))) return; /* * When an NMI triggers, RCU is enabled via ct_nmi_enter(), * but if the above rcu_is_watching() failed, then the NMI * triggered someplace critical, and ct_irq_enter() should * not be called from NMI. */ if (unlikely(in_nmi())) return; ct_irq_enter_irqson(); __ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL); ct_irq_exit_irqson(); } /** * trace_dump_stack - record a stack back trace in the trace buffer * @skip: Number of functions to skip (helper handlers) */ void trace_dump_stack(int skip) { if (tracing_disabled || tracing_selftest_running) return; #ifndef CONFIG_UNWINDER_ORC /* Skip 1 to skip this function. */ skip++; #endif __ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer, tracing_gen_ctx(), skip, NULL); } EXPORT_SYMBOL_GPL(trace_dump_stack); #ifdef CONFIG_USER_STACKTRACE_SUPPORT static DEFINE_PER_CPU(int, user_stack_count); static void ftrace_trace_userstack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx) { struct ring_buffer_event *event; struct userstack_entry *entry; if (!(tr->trace_flags & TRACE_ITER(USERSTACKTRACE))) return; /* * NMIs can not handle page faults, even with fix ups. * The save user stack can (and often does) fault. */ if (unlikely(in_nmi())) return; /* * prevent recursion, since the user stack tracing may * trigger other kernel events. 
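 *
 * The guard below is a simple per-CPU latch; in outline:
 *
 *	if (__this_cpu_read(user_stack_count))	// already dumping here
 *		return;
 *	__this_cpu_inc(user_stack_count);
 *	... reserve, save the user stack, commit ...
 *	__this_cpu_dec(user_stack_count);
 *
 * so a recursive entry on the same CPU bails out instead of recursing.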
*/ guard(preempt)(); if (__this_cpu_read(user_stack_count)) return; __this_cpu_inc(user_stack_count); event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, sizeof(*entry), trace_ctx); if (!event) goto out_drop_count; entry = ring_buffer_event_data(event); entry->tgid = current->tgid; memset(&entry->caller, 0, sizeof(entry->caller)); stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); __buffer_unlock_commit(buffer, event); out_drop_count: __this_cpu_dec(user_stack_count); } #else /* CONFIG_USER_STACKTRACE_SUPPORT */ static void ftrace_trace_userstack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx) { } #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ #endif /* CONFIG_STACKTRACE */ static inline void func_repeats_set_delta_ts(struct func_repeats_entry *entry, unsigned long long delta) { entry->bottom_delta_ts = delta & U32_MAX; entry->top_delta_ts = (delta >> 32); } void trace_last_func_repeats(struct trace_array *tr, struct trace_func_repeats *last_info, unsigned int trace_ctx) { struct trace_buffer *buffer = tr->array_buffer.buffer; struct func_repeats_entry *entry; struct ring_buffer_event *event; u64 delta; event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS, sizeof(*entry), trace_ctx); if (!event) return; delta = ring_buffer_event_time_stamp(buffer, event) - last_info->ts_last_call; entry = ring_buffer_event_data(event); entry->ip = last_info->ip; entry->parent_ip = last_info->parent_ip; entry->count = last_info->count; func_repeats_set_delta_ts(entry, delta); __buffer_unlock_commit(buffer, event); } /* created for use with alloc_percpu */ struct trace_buffer_struct { int nesting; char buffer[4][TRACE_BUF_SIZE]; }; static struct trace_buffer_struct __percpu *trace_percpu_buffer; /* * This allows for lockless recording. If we're nested too deeply, then * this returns NULL. */ static char *get_trace_buf(void) { struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); if (!trace_percpu_buffer || buffer->nesting >= 4) return NULL; buffer->nesting++; /* Interrupts must see nesting incremented before we use the buffer */ barrier(); return &buffer->buffer[buffer->nesting - 1][0]; } static void put_trace_buf(void) { /* Don't let the decrement of nesting leak before this */ barrier(); this_cpu_dec(trace_percpu_buffer->nesting); } static int alloc_percpu_trace_buffer(void) { struct trace_buffer_struct __percpu *buffers; if (trace_percpu_buffer) return 0; buffers = alloc_percpu(struct trace_buffer_struct); if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) return -ENOMEM; trace_percpu_buffer = buffers; return 0; } static int buffers_allocated; void trace_printk_init_buffers(void) { if (buffers_allocated) return; if (alloc_percpu_trace_buffer()) return; /* trace_printk() is for debug use only. Don't use it in production. */ pr_warn("\n"); pr_warn("**********************************************************\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("** **\n"); pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); pr_warn("** **\n"); pr_warn("** This means that this is a DEBUG kernel and it is **\n"); pr_warn("** unsafe for production use. **\n"); pr_warn("** **\n"); pr_warn("** If you see this message and you are not debugging **\n"); pr_warn("** the kernel, report this immediately to your vendor! 
**\n"); pr_warn("** **\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("**********************************************************\n"); /* Expand the buffers to set size */ tracing_update_buffers(&global_trace); buffers_allocated = 1; /* * trace_printk_init_buffers() can be called by modules. * If that happens, then we need to start cmdline recording * directly here. If the global_trace.buffer is already * allocated here, then this was called by module code. */ if (global_trace.array_buffer.buffer) tracing_start_cmdline_record(); } EXPORT_SYMBOL_GPL(trace_printk_init_buffers); void trace_printk_start_comm(void) { /* Start tracing comms if trace printk is set */ if (!buffers_allocated) return; tracing_start_cmdline_record(); } static void trace_printk_start_stop_comm(int enabled) { if (!buffers_allocated) return; if (enabled) tracing_start_cmdline_record(); else tracing_stop_cmdline_record(); } /** * trace_vbprintk - write binary msg to tracing buffer * @ip: The address of the caller * @fmt: The string format to write to the buffer * @args: Arguments for @fmt */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { struct ring_buffer_event *event; struct trace_buffer *buffer; struct trace_array *tr = READ_ONCE(printk_trace); struct bprint_entry *entry; unsigned int trace_ctx; char *tbuffer; int len = 0, size; if (!printk_binsafe(tr)) return trace_vprintk(ip, fmt, args); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); trace_ctx = tracing_gen_ctx(); guard(preempt_notrace)(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) goto out_put; size = sizeof(*entry) + sizeof(u32) * len; buffer = tr->array_buffer.buffer; scoped_guard(ring_buffer_nest, buffer) { event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, trace_ctx); if (!event) goto out_put; entry = ring_buffer_event_data(event); entry->ip = ip; entry->fmt = fmt; memcpy(entry->buf, tbuffer, sizeof(u32) * len); __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL); } out_put: put_trace_buf(); out_nobuffer: unpause_graph_tracing(); return len; } EXPORT_SYMBOL_GPL(trace_vbprintk); static __printf(3, 0) int __trace_array_vprintk(struct trace_buffer *buffer, unsigned long ip, const char *fmt, va_list args) { struct ring_buffer_event *event; int len = 0, size; struct print_entry *entry; unsigned int trace_ctx; char *tbuffer; if (tracing_disabled) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); trace_ctx = tracing_gen_ctx(); guard(preempt_notrace)(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); size = sizeof(*entry) + len + 1; scoped_guard(ring_buffer_nest, buffer) { event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, trace_ctx); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, tbuffer, len + 1); __buffer_unlock_commit(buffer, event); ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL); } out: put_trace_buf(); out_nobuffer: unpause_graph_tracing(); return len; } int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { if (tracing_selftest_running && tr == &global_trace) 
return 0; return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); } /** * trace_array_printk - Print a message to a specific instance * @tr: The instance trace_array descriptor * @ip: The instruction pointer that this is called from. * @fmt: The format to print (printf format) * * If a subsystem sets up its own instance, they have the right to * printk strings into their tracing instance buffer using this * function. Note, this function will not write into the top level * buffer (use trace_printk() for that), as writing into the top level * buffer should only have events that can be individually disabled. * trace_printk() is only used for debugging a kernel, and should not * be ever incorporated in normal use. * * trace_array_printk() can be used, as it will not add noise to the * top level tracing buffer. * * Note, trace_array_init_printk() must be called on @tr before this * can be used. */ int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!tr) return -ENOENT; /* This is only allowed for created instances */ if (tr == &global_trace) return 0; if (!(tr->trace_flags & TRACE_ITER(PRINTK))) return 0; va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); return ret; } EXPORT_SYMBOL_GPL(trace_array_printk); /** * trace_array_init_printk - Initialize buffers for trace_array_printk() * @tr: The trace array to initialize the buffers for * * As trace_array_printk() only writes into instances, they are OK to * have in the kernel (unlike trace_printk()). This needs to be called * before trace_array_printk() can be used on a trace_array. */ int trace_array_init_printk(struct trace_array *tr) { if (!tr) return -ENOENT; /* This is only allowed for created instances */ if (tr == &global_trace) return -EINVAL; return alloc_percpu_trace_buffer(); } EXPORT_SYMBOL_GPL(trace_array_init_printk); int trace_array_printk_buf(struct trace_buffer *buffer, unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!(printk_trace->trace_flags & TRACE_ITER(PRINTK))) return 0; va_start(ap, fmt); ret = __trace_array_vprintk(buffer, ip, fmt, ap); va_end(ap); return ret; } int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(printk_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); static void trace_iterator_increment(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); iter->idx++; if (buf_iter) ring_buffer_iter_advance(buf_iter); } static struct trace_entry * peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { event = ring_buffer_iter_peek(buf_iter, ts); if (lost_events) *lost_events = ring_buffer_iter_dropped(buf_iter) ? 
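/*
 * Usage sketch for the instance-printk API documented above (the
 * surrounding context -- a module that owns its own instance -- is
 * assumed, and "tr" would come from e.g. trace_array_get_by_name()):
 *
 *	if (!trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);
 *
 * trace_array_printk() only writes into created instances, so this
 * never adds noise to the top-level buffer.
 */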
(unsigned long)-1 : 0; } else { event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, lost_events); } if (event) { iter->ent_size = ring_buffer_event_length(event); return ring_buffer_event_data(event); } iter->ent_size = 0; return NULL; } static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { struct trace_buffer *buffer = iter->array_buffer->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; u64 next_ts = 0, ts; int next_cpu = -1; int next_size = 0; int cpu; /* * If we are in a per_cpu trace file, don't bother by iterating over * all cpu and peek directly. */ if (cpu_file > RING_BUFFER_ALL_CPUS) { if (ring_buffer_empty_cpu(buffer, cpu_file)) return NULL; ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); if (ent_cpu) *ent_cpu = cpu_file; return ent; } for_each_tracing_cpu(cpu) { if (ring_buffer_empty_cpu(buffer, cpu)) continue; ent = peek_next_entry(iter, cpu, &ts, &lost_events); /* * Pick the entry with the smallest timestamp: */ if (ent && (!next || ts < next_ts)) { next = ent; next_cpu = cpu; next_ts = ts; next_lost = lost_events; next_size = iter->ent_size; } } iter->ent_size = next_size; if (ent_cpu) *ent_cpu = next_cpu; if (ent_ts) *ent_ts = next_ts; if (missing_events) *missing_events = next_lost; return next; } #define STATIC_FMT_BUF_SIZE 128 static char static_fmt_buf[STATIC_FMT_BUF_SIZE]; char *trace_iter_expand_format(struct trace_iterator *iter) { char *tmp; /* * iter->tr is NULL when used with tp_printk, which makes * this get called where it is not safe to call krealloc(). */ if (!iter->tr || iter->fmt == static_fmt_buf) return NULL; tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE, GFP_KERNEL); if (tmp) { iter->fmt_size += STATIC_FMT_BUF_SIZE; iter->fmt = tmp; } return tmp; } /* Returns true if the string is safe to dereference from an event */ static bool trace_safe_str(struct trace_iterator *iter, const char *str) { unsigned long addr = (unsigned long)str; struct trace_event *trace_event; struct trace_event_call *event; /* OK if part of the event data */ if ((addr >= (unsigned long)iter->ent) && (addr < (unsigned long)iter->ent + iter->ent_size)) return true; /* OK if part of the temp seq buffer */ if ((addr >= (unsigned long)iter->tmp_seq.buffer) && (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE)) return true; /* Core rodata can not be freed */ if (is_kernel_rodata(addr)) return true; if (trace_is_tracepoint_string(str)) return true; /* * Now this could be a module event, referencing core module * data, which is OK. */ if (!iter->ent) return false; trace_event = ftrace_find_event(iter->ent->type); if (!trace_event) return false; event = container_of(trace_event, struct trace_event_call, event); if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module) return false; /* Would rather have rodata, but this will suffice */ if (within_module_core(addr, event->module)) return true; return false; } /** * ignore_event - Check dereferenced fields while writing to the seq buffer * @iter: The iterator that holds the seq buffer and the event being printed * * At boot up, test_event_printk() will flag any event that dereferences * a string with "%s" that does exist in the ring buffer. It may still * be valid, as the string may point to a static string in the kernel * rodata that never gets freed. 
But if the string pointer is pointing * to something that was allocated, there's a chance that it can be freed * by the time the user reads the trace. This would cause a bad memory * access by the kernel and possibly crash the system. * * This function will check if the event has any fields flagged as needing * to be checked at runtime and perform those checks. * * If it is found that a field is unsafe, it will write into the @iter->seq * a message stating what was found to be unsafe. * * @return: true if the event is unsafe and should be ignored, * false otherwise. */ bool ignore_event(struct trace_iterator *iter) { struct ftrace_event_field *field; struct trace_event *trace_event; struct trace_event_call *event; struct list_head *head; struct trace_seq *seq; const void *ptr; trace_event = ftrace_find_event(iter->ent->type); seq = &iter->seq; if (!trace_event) { trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type); return true; } event = container_of(trace_event, struct trace_event_call, event); if (!(event->flags & TRACE_EVENT_FL_TEST_STR)) return false; head = trace_get_fields(event); if (!head) { trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n", trace_event_name(event)); return true; } /* Offsets are from the iter->ent that points to the raw event */ ptr = iter->ent; list_for_each_entry(field, head, link) { const char *str; bool good; if (!field->needs_test) continue; str = *(const char **)(ptr + field->offset); good = trace_safe_str(iter, str); /* * If you hit this warning, it is likely that the * trace event in question used %s on a string that * was saved at the time of the event, but may not be * around when the trace is read. Use __string(), * __assign_str() and __get_str() helpers in the TRACE_EVENT() * instead. See samples/trace_events/trace-events-sample.h * for reference. */ if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'", trace_event_name(event), field->name)) { trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n", trace_event_name(event), field->name); return true; } } return false; } const char *trace_event_format(struct trace_iterator *iter, const char *fmt) { const char *p, *new_fmt; char *q; if (WARN_ON_ONCE(!fmt)) return fmt; if (!iter->tr || iter->tr->trace_flags & TRACE_ITER(HASH_PTR)) return fmt; p = fmt; new_fmt = q = iter->fmt; while (*p) { if (unlikely(q - new_fmt + 3 > iter->fmt_size)) { if (!trace_iter_expand_format(iter)) return fmt; q += iter->fmt - new_fmt; new_fmt = iter->fmt; } *q++ = *p++; /* Replace %p with %px */ if (p[-1] == '%') { if (p[0] == '%') { *q++ = *p++; } else if (p[0] == 'p' && !isalnum(p[1])) { *q++ = *p++; *q++ = 'x'; } } } *q = '\0'; return new_fmt; } #define STATIC_TEMP_BUF_SIZE 128 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4); /* Find the next real entry, without updating the iterator itself */ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { /* __find_next_entry will reset ent_size */ int ent_size = iter->ent_size; struct trace_entry *entry; /* * If called from ftrace_dump(), then the iter->temp buffer * will be the static_temp_buf and not created from kmalloc. * If the entry size is greater than the buffer, we can * not save it. Just return NULL in that case. This is only * used to add markers when two consecutive events' time * stamps have a large delta. 
See trace_print_lat_context() */ if (iter->temp == static_temp_buf && STATIC_TEMP_BUF_SIZE < ent_size) return NULL; /* * The __find_next_entry() may call peek_next_entry(), which may * call ring_buffer_peek() that may make the contents of iter->ent * undefined. Need to copy iter->ent now. */ if (iter->ent && iter->ent != iter->temp) { if ((!iter->temp || iter->temp_size < iter->ent_size) && !WARN_ON_ONCE(iter->temp == static_temp_buf)) { void *temp; temp = kmalloc(iter->ent_size, GFP_KERNEL); if (!temp) return NULL; kfree(iter->temp); iter->temp = temp; iter->temp_size = iter->ent_size; } memcpy(iter->temp, iter->ent, iter->ent_size); iter->ent = iter->temp; } entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); /* Put back the original ent_size */ iter->ent_size = ent_size; return entry; } /* Find the next real entry, and increment the iterator to the next entry */ void *trace_find_next_entry_inc(struct trace_iterator *iter) { iter->ent = __find_next_entry(iter, &iter->cpu, &iter->lost_events, &iter->ts); if (iter->ent) trace_iterator_increment(iter); return iter->ent ? iter : NULL; } static void trace_consume(struct trace_iterator *iter) { ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, &iter->lost_events); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_iterator *iter = m->private; int i = (int)*pos; void *ent; WARN_ON_ONCE(iter->leftover); (*pos)++; /* can't go backwards */ if (iter->idx > i) return NULL; if (iter->idx < 0) ent = trace_find_next_entry_inc(iter); else ent = iter; while (ent && iter->idx < i) ent = trace_find_next_entry_inc(iter); iter->pos = *pos; return ent; } void tracing_iter_reset(struct trace_iterator *iter, int cpu) { struct ring_buffer_iter *buf_iter; unsigned long entries = 0; u64 ts; per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; buf_iter = trace_buffer_iter(iter, cpu); if (!buf_iter) return; ring_buffer_iter_reset(buf_iter); /* * We could have the case with the max latency tracers * that a reset never took place on a cpu. This is evident * by the timestamp being before the start of the buffer. */ while (ring_buffer_iter_peek(buf_iter, &ts)) { if (ts >= iter->array_buffer->time_start) break; entries++; ring_buffer_iter_advance(buf_iter); /* This could be a big loop */ cond_resched(); } per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; } /* * The current tracer is copied to avoid a global locking * all around. */ static void *s_start(struct seq_file *m, loff_t *pos) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; int cpu_file = iter->cpu_file; void *p = NULL; loff_t l = 0; int cpu; mutex_lock(&trace_types_lock); if (unlikely(tr->current_trace != iter->trace)) { /* Close iter->trace before switching to the new current tracer */ if (iter->trace->close) iter->trace->close(iter); iter->trace = tr->current_trace; /* Reopen the new current tracer */ if (iter->trace->open) iter->trace->open(iter); } mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return ERR_PTR(-EBUSY); #endif if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; iter->idx = -1; if (cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) tracing_iter_reset(iter, cpu); } else tracing_iter_reset(iter, cpu_file); iter->leftover = 0; for (p = iter; p && l < *pos; p = s_next(m, p, &l)) ; } else { /* * If we overflowed the seq_file before, then we want * to just reuse the trace_seq buffer again. 
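 *
 * (The "leftover" handshake with s_show(): trace_print_seq() returns
 *  non-zero when the seq_file buffer could not take the whole line,
 *  s_show() stores that in iter->leftover, and on the next pass
 *  s_start() lands here and hands the same iterator back so the
 *  pending trace_seq text is emitted before any new entry is read.)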
*/ if (iter->leftover) p = iter; else { l = *pos - 1; p = s_next(m, p, &l); } } trace_event_read_lock(); trace_access_lock(cpu_file); return p; } static void s_stop(struct seq_file *m, void *p) { struct trace_iterator *iter = m->private; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return; #endif trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } static void get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, unsigned long *entries, int cpu) { unsigned long count; count = ring_buffer_entries_cpu(buf->buffer, cpu); /* * If this buffer has skipped entries, then we hold all * entries for the trace and we need to ignore the * ones before the time stamp. */ if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; /* total is the same as the entries */ *total = count; } else *total = count + ring_buffer_overrun_cpu(buf->buffer, cpu); *entries = count; } static void get_total_entries(struct array_buffer *buf, unsigned long *total, unsigned long *entries) { unsigned long t, e; int cpu; *total = 0; *entries = 0; for_each_tracing_cpu(cpu) { get_total_entries_cpu(buf, &t, &e, cpu); *total += t; *entries += e; } } unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) { unsigned long total, entries; if (!tr) tr = &global_trace; get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); return entries; } unsigned long trace_total_entries(struct trace_array *tr) { unsigned long total, entries; if (!tr) tr = &global_trace; get_total_entries(&tr->array_buffer, &total, &entries); return entries; } static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n" "# / _-----=> irqs-off/BH-disabled\n" "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" "# |||| / _-=> migrate-disable \n" "# ||||| / delay \n" "# cmd pid |||||| time | caller \n" "# \\ / |||||| \\ | / \n"); } static void print_event_info(struct array_buffer *buf, struct seq_file *m) { unsigned long total; unsigned long entries; get_total_entries(buf, &total, &entries); seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", entries, total, num_online_cpus()); seq_puts(m, "#\n"); } static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER(RECORD_TGID); print_event_info(buf, m); seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER(RECORD_TGID); static const char space[] = " "; int prec = tgid ? 
12 : 2; print_event_info(buf, m); seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); seq_printf(m, "# %.*s|||| / delay\n", prec, space); seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); } void print_trace_header(struct seq_file *m, struct trace_iterator *iter) { unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); struct array_buffer *buf = iter->array_buffer; struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); struct tracer *type = iter->trace; unsigned long entries; unsigned long total; const char *name = type->name; get_total_entries(buf, &total, &entries); seq_printf(m, "# %s latency trace v1.1.5 on %s\n", name, init_utsname()->release); seq_puts(m, "# -----------------------------------" "---------------------------------\n"); seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" " (M:%s VP:%d, KP:%d, SP:%d HP:%d", nsecs_to_usecs(data->saved_latency), entries, total, buf->cpu, preempt_model_str(), /* These are reserved for later use */ 0, 0, 0, 0); #ifdef CONFIG_SMP seq_printf(m, " #P:%d)\n", num_online_cpus()); #else seq_puts(m, ")\n"); #endif seq_puts(m, "# -----------------\n"); seq_printf(m, "# | task: %.16s-%d " "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", data->comm, data->pid, from_kuid_munged(seq_user_ns(m), data->uid), data->nice, data->policy, data->rt_priority); seq_puts(m, "# -----------------\n"); if (data->critical_start) { seq_puts(m, "# => started at: "); seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n# => ended at: "); seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n#\n"); } seq_puts(m, "#\n"); } static void test_cpu_buff_start(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_array *tr = iter->tr; if (!(tr->trace_flags & TRACE_ITER(ANNOTATE))) return; if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) return; if (cpumask_available(iter->started) && cpumask_test_cpu(iter->cpu, iter->started)) return; if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) return; if (cpumask_available(iter->started)) cpumask_set_cpu(iter->cpu, iter->started); /* Don't print started cpu buffer for the first entry of the trace */ if (iter->idx > 1) trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); } #ifdef CONFIG_FTRACE_SYSCALLS static bool is_syscall_event(struct trace_event *event) { return (event->funcs == &enter_syscall_print_funcs) || (event->funcs == &exit_syscall_print_funcs); } #define syscall_buf_size CONFIG_TRACE_SYSCALL_BUF_SIZE_DEFAULT #else static inline bool is_syscall_event(struct trace_event *event) { return false; } #define syscall_buf_size 0 #endif /* CONFIG_FTRACE_SYSCALLS */ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); struct trace_entry *entry; struct trace_event *event; entry = iter->ent; test_cpu_buff_start(iter); event = ftrace_find_event(entry->type); if (tr->trace_flags & TRACE_ITER(CONTEXT_INFO)) { if (iter->iter_flags & 
TRACE_FILE_LAT_FMT) trace_print_lat_context(iter); else trace_print_context(iter); } if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; if (event) { if (tr->trace_flags & TRACE_ITER(FIELDS)) return print_event_fields(iter, event); /* * For TRACE_EVENT() events, the print_fmt is not * safe to use if the array has delta offsets * Force printing via the fields. */ if ((tr->text_delta)) { /* ftrace and system call events are still OK */ if ((event->type > __TRACE_LAST_TYPE) && !is_syscall_event(event)) return print_event_fields(iter, event); } return event->funcs->trace(iter, sym_flags, event); } trace_seq_printf(s, "Unknown type %d\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_raw_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER(CONTEXT_INFO)) trace_seq_printf(s, "%d %d %llu ", entry->pid, iter->cpu, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; event = ftrace_find_event(entry->type); if (event) return event->funcs->raw(iter, 0, event); trace_seq_printf(s, "%d ?\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_hex_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER(CONTEXT_INFO)) { SEQ_PUT_HEX_FIELD(s, entry->pid); SEQ_PUT_HEX_FIELD(s, iter->cpu); SEQ_PUT_HEX_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); if (event) { enum print_line_t ret = event->funcs->hex(iter, 0, event); if (ret != TRACE_TYPE_HANDLED) return ret; } SEQ_PUT_FIELD(s, newline); return trace_handle_return(s); } static enum print_line_t print_bin_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER(CONTEXT_INFO)) { SEQ_PUT_FIELD(s, entry->pid); SEQ_PUT_FIELD(s, iter->cpu); SEQ_PUT_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); return event ? event->funcs->binary(iter, 0, event) : TRACE_TYPE_HANDLED; } int trace_empty(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter; int cpu; /* If we are looking at one CPU buffer, only check that one */ if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { cpu = iter->cpu_file; buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) return 0; } return 1; } for_each_tracing_cpu(cpu) { buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) return 0; } } return 1; } /* Called with trace_event_read_lock() held. 
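 *
 * A tracer can take over formatting here through its ->print_line()
 * callback. As an illustrative sketch only (no such tracer exists in
 * the tree; the name example_print_line is made up), a callback
 * returns TRACE_TYPE_UNHANDLED for entries it does not care about so
 * that the default formatters below are used instead:
 */
static __maybe_unused enum print_line_t example_print_line(struct trace_iterator *iter)
{
	/* Only claim TRACE_PRINT entries; everything else falls through. */
	if (iter->ent->type != TRACE_PRINT)
		return TRACE_TYPE_UNHANDLED;

	trace_seq_puts(&iter->seq, "example line\n");
	return trace_handle_return(&iter->seq);
}

/* print_trace_line() itself; as noted above, it runs with trace_event_read_lock() held.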
*/ enum print_line_t print_trace_line(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; unsigned long trace_flags = tr->trace_flags; enum print_line_t ret; if (iter->lost_events) { if (iter->lost_events == (unsigned long)-1) trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", iter->cpu); else trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", iter->cpu, iter->lost_events); if (trace_seq_has_overflowed(&iter->seq)) return TRACE_TYPE_PARTIAL_LINE; } if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); if (ret != TRACE_TYPE_UNHANDLED) return ret; } if (iter->ent->type == TRACE_BPUTS && trace_flags & TRACE_ITER(PRINTK) && trace_flags & TRACE_ITER(PRINTK_MSGONLY)) return trace_print_bputs_msg_only(iter); if (iter->ent->type == TRACE_BPRINT && trace_flags & TRACE_ITER(PRINTK) && trace_flags & TRACE_ITER(PRINTK_MSGONLY)) return trace_print_bprintk_msg_only(iter); if (iter->ent->type == TRACE_PRINT && trace_flags & TRACE_ITER(PRINTK) && trace_flags & TRACE_ITER(PRINTK_MSGONLY)) return trace_print_printk_msg_only(iter); if (trace_flags & TRACE_ITER(BIN)) return print_bin_fmt(iter); if (trace_flags & TRACE_ITER(HEX)) return print_hex_fmt(iter); if (trace_flags & TRACE_ITER(RAW)) return print_raw_fmt(iter); return print_trace_fmt(iter); } void trace_latency_header(struct seq_file *m) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) print_trace_header(m, iter); if (!(tr->trace_flags & TRACE_ITER(VERBOSE))) print_lat_help_header(m); } void trace_default_header(struct seq_file *m) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long trace_flags = tr->trace_flags; if (!(trace_flags & TRACE_ITER(CONTEXT_INFO))) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) { /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; print_trace_header(m, iter); if (!(trace_flags & TRACE_ITER(VERBOSE))) print_lat_help_header(m); } else { if (!(trace_flags & TRACE_ITER(VERBOSE))) { if (trace_flags & TRACE_ITER(IRQ_INFO)) print_func_help_header_irq(iter->array_buffer, m, trace_flags); else print_func_help_header(iter->array_buffer, m, trace_flags); } } } static void test_ftrace_alive(struct seq_file *m) { if (!ftrace_is_dead()) return; seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" "# MAY BE MISSING FUNCTION EVENTS\n"); } #ifdef CONFIG_TRACER_MAX_TRACE static void show_snapshot_main_help(struct seq_file *m) { seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" "# Takes a snapshot of the main buffer.\n" "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" "# (Doesn't have to be '2' works with any number that\n" "# is not a '0' or '1')\n"); } static void show_snapshot_percpu_help(struct seq_file *m) { seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" "# Takes a snapshot of the main buffer for this cpu.\n"); #else seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" "# Must use main snapshot file to allocate.\n"); #endif seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" "# (Doesn't have to be '2' works with any number 
that\n" "# is not a '0' or '1')\n"); } static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { if (iter->tr->allocated_snapshot) seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); else seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); seq_puts(m, "# Snapshot commands:\n"); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) show_snapshot_main_help(m); else show_snapshot_percpu_help(m); } #else /* Should never be called */ static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } #endif static int s_show(struct seq_file *m, void *v) { struct trace_iterator *iter = v; int ret; if (iter->ent == NULL) { if (iter->tr) { seq_printf(m, "# tracer: %s\n", iter->trace->name); seq_puts(m, "#\n"); test_ftrace_alive(m); } if (iter->snapshot && trace_empty(iter)) print_snapshot_help(m, iter); else if (iter->trace && iter->trace->print_header) iter->trace->print_header(m); else trace_default_header(m); } else if (iter->leftover) { /* * If we filled the seq_file buffer earlier, we * want to just show it now. */ ret = trace_print_seq(m, &iter->seq); /* ret should this time be zero, but you never know */ iter->leftover = ret; } else { ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { iter->seq.full = 0; trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); } ret = trace_print_seq(m, &iter->seq); /* * If we overflow the seq_file buffer, then it will * ask us for this data again at start up. * Use that instead. * ret is 0 if seq_file write succeeded. * -1 otherwise. */ iter->leftover = ret; } return 0; } /* * Should be used after trace_array_get(), trace_types_lock * ensures that i_cdev was already initialized. */ static inline int tracing_get_cpu(struct inode *inode) { if (inode->i_cdev) /* See trace_create_cpu_file() */ return (long)inode->i_cdev - 1; return RING_BUFFER_ALL_CPUS; } static const struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; /* * Note, as iter itself can be allocated and freed in different * ways, this function is only used to free its content, and not * the iterator itself. The only requirement to all the allocations * is that it must zero all fields (kzalloc), as freeing works with * ethier allocated content or NULL. */ static void free_trace_iter_content(struct trace_iterator *iter) { /* The fmt is either NULL, allocated or points to static_fmt_buf */ if (iter->fmt != static_fmt_buf) kfree(iter->fmt); kfree(iter->temp); kfree(iter->buffer_iter); mutex_destroy(&iter->mutex); free_cpumask_var(iter->started); } static struct trace_iterator * __tracing_open(struct inode *inode, struct file *file, bool snapshot) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int cpu; if (tracing_disabled) return ERR_PTR(-ENODEV); iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); if (!iter) return ERR_PTR(-ENOMEM); iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), GFP_KERNEL); if (!iter->buffer_iter) goto release; /* * trace_find_next_entry() may need to save off iter->ent. * It will place it into the iter->temp buffer. As most * events are less than 128, allocate a buffer of that size. * If one is greater, then trace_find_next_entry() will * allocate a new buffer to adjust for the bigger iter->ent. * It's not critical if it fails to get allocated here. 
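 *
 * (For instance, a kernel stack-trace entry with a long caller array
 *  can exceed 128 bytes; trace_find_next_entry() above copes by
 *  kmalloc()ing a larger iter->temp when it meets such an entry, and
 *  only gives up on that entry if even that allocation fails.)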
*/ iter->temp = kmalloc(128, GFP_KERNEL); if (iter->temp) iter->temp_size = 128; /* * trace_event_printf() may need to modify given format * string to replace %p with %px so that it shows real address * instead of hash value. However, that is only for the event * tracing, other tracer may not need. Defer the allocation * until it is needed. */ iter->fmt = NULL; iter->fmt_size = 0; mutex_lock(&trace_types_lock); iter->trace = tr->current_trace; if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) goto fail; iter->tr = tr; #ifdef CONFIG_TRACER_MAX_TRACE /* Currently only the top directory has a snapshot */ if (tr->current_trace->print_max || snapshot) iter->array_buffer = &tr->max_buffer; else #endif iter->array_buffer = &tr->array_buffer; iter->snapshot = snapshot; iter->pos = -1; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); /* Notify the tracer early; before we stop tracing. */ if (iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->array_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; /* * If pause-on-trace is enabled, then stop the trace while * dumping, unless this is the "snapshot" file */ if (!iter->snapshot && (tr->trace_flags & TRACE_ITER(PAUSE_ON_TRACE))) { iter->iter_flags |= TRACE_FILE_PAUSE; tracing_stop_tr(tr); } if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { iter->buffer_iter[cpu] = ring_buffer_read_start(iter->array_buffer->buffer, cpu, GFP_KERNEL); tracing_iter_reset(iter, cpu); } } else { cpu = iter->cpu_file; iter->buffer_iter[cpu] = ring_buffer_read_start(iter->array_buffer->buffer, cpu, GFP_KERNEL); tracing_iter_reset(iter, cpu); } mutex_unlock(&trace_types_lock); return iter; fail: mutex_unlock(&trace_types_lock); free_trace_iter_content(iter); release: seq_release_private(inode, file); return ERR_PTR(-ENOMEM); } int tracing_open_generic(struct inode *inode, struct file *filp) { int ret; ret = tracing_check_open_get_tr(NULL); if (ret) return ret; filp->private_data = inode->i_private; return 0; } bool tracing_is_disabled(void) { return (tracing_disabled) ? true: false; } /* * Open and update trace_array ref count. * Must have the current trace_array passed to it. */ int tracing_open_generic_tr(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; filp->private_data = inode->i_private; return 0; } /* * The private pointer of the inode is the trace_event_file. * Update the tr ref count associated to it. 
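 *
 * As an illustrative sketch (example_event_fops is hypothetical, not a
 * file registered by this code): callers pair this open helper with
 * tracing_release_file_tr() so that both the trace_array and the
 * trace_event_file stay pinned while the file is open:
 */
static const struct file_operations example_event_fops __maybe_unused = {
	.open		= tracing_open_file_tr,
	.release	= tracing_release_file_tr,
	.llseek		= default_llseek,
	/* .read/.write would be the file's own handlers */
};

/* The open side: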
*/ int tracing_open_file_tr(struct inode *inode, struct file *filp) { struct trace_event_file *file = inode->i_private; int ret; ret = tracing_check_open_get_tr(file->tr); if (ret) return ret; guard(mutex)(&event_mutex); /* Fail if the file is marked for removal */ if (file->flags & EVENT_FILE_FL_FREED) { trace_array_put(file->tr); return -ENODEV; } else { event_file_get(file); } filp->private_data = inode->i_private; return 0; } int tracing_release_file_tr(struct inode *inode, struct file *filp) { struct trace_event_file *file = inode->i_private; trace_array_put(file->tr); event_file_put(file); return 0; } int tracing_single_release_file_tr(struct inode *inode, struct file *filp) { tracing_release_file_tr(inode, filp); return single_release(inode, filp); } static int tracing_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m = file->private_data; struct trace_iterator *iter; int cpu; if (!(file->f_mode & FMODE_READ)) { trace_array_put(tr); return 0; } /* Writes do not use seq_file */ iter = m->private; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } if (iter->trace && iter->trace->close) iter->trace->close(iter); if (iter->iter_flags & TRACE_FILE_PAUSE) /* reenable tracing if it was previously enabled */ tracing_start_tr(tr); __trace_array_put(tr); mutex_unlock(&trace_types_lock); free_trace_iter_content(iter); seq_release_private(inode, file); return 0; } int tracing_release_generic_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return 0; } static int tracing_single_release_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return single_release(inode, file); } static int tracing_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; /* If this file was open for write, then erase contents */ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { int cpu = tracing_get_cpu(inode); struct array_buffer *trace_buf = &tr->array_buffer; #ifdef CONFIG_TRACER_MAX_TRACE if (tr->current_trace->print_max) trace_buf = &tr->max_buffer; #endif if (cpu == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(trace_buf); else tracing_reset_cpu(trace_buf, cpu); } if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, false); if (IS_ERR(iter)) ret = PTR_ERR(iter); else if (tr->trace_flags & TRACE_ITER(LATENCY_FMT)) iter->iter_flags |= TRACE_FILE_LAT_FMT; } if (ret < 0) trace_array_put(tr); return ret; } /* * Some tracers are not suitable for instance buffers. * A tracer is always available for the global array (toplevel) * or if it explicitly states that it is. 
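 *
 * As a sketch (example_tracer is hypothetical): a tracer advertises
 * that it may be used inside instances/<name> by setting
 * ->allow_instances in its struct tracer; otherwise it is offered only
 * on the top-level (global) trace array:
 */
static struct tracer example_tracer __maybe_unused = {
	.name			= "example",
	.allow_instances	= true,
};

/* The check itself: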
*/ static bool trace_ok_for_array(struct tracer *t, struct trace_array *tr) { #ifdef CONFIG_TRACER_SNAPSHOT /* arrays with mapped buffer range do not have snapshots */ if (tr->range_addr_start && t->use_max_tr) return false; #endif return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; } /* Find the next tracer that this trace array may use */ static struct tracer * get_tracer_for_array(struct trace_array *tr, struct tracer *t) { while (t && !trace_ok_for_array(t, tr)) t = t->next; return t; } static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_array *tr = m->private; struct tracer *t = v; (*pos)++; if (t) t = get_tracer_for_array(tr, t->next); return t; } static void *t_start(struct seq_file *m, loff_t *pos) { struct trace_array *tr = m->private; struct tracer *t; loff_t l = 0; mutex_lock(&trace_types_lock); t = get_tracer_for_array(tr, trace_types); for (; t && l < *pos; t = t_next(m, t, &l)) ; return t; } static void t_stop(struct seq_file *m, void *p) { mutex_unlock(&trace_types_lock); } static int t_show(struct seq_file *m, void *v) { struct tracer *t = v; if (!t) return 0; seq_puts(m, t->name); if (t->next) seq_putc(m, ' '); else seq_putc(m, '\n'); return 0; } static const struct seq_operations show_traces_seq_ops = { .start = t_start, .next = t_next, .stop = t_stop, .show = t_show, }; static int show_traces_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = seq_open(file, &show_traces_seq_ops); if (ret) { trace_array_put(tr); return ret; } m = file->private_data; m->private = tr; return 0; } static int tracing_seq_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return seq_release(inode, file); } static ssize_t tracing_write_stub(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { return count; } loff_t tracing_lseek(struct file *file, loff_t offset, int whence) { int ret; if (file->f_mode & FMODE_READ) ret = seq_lseek(file, offset, whence); else file->f_pos = ret = 0; return ret; } static const struct file_operations tracing_fops = { .open = tracing_open, .read = seq_read, .read_iter = seq_read_iter, .splice_read = copy_splice_read, .write = tracing_write_stub, .llseek = tracing_lseek, .release = tracing_release, }; static const struct file_operations show_traces_fops = { .open = show_traces_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_seq_release, }; static ssize_t tracing_cpumask_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct trace_array *tr = file_inode(filp)->i_private; char *mask_str __free(kfree) = NULL; int len; len = snprintf(NULL, 0, "%*pb\n", cpumask_pr_args(tr->tracing_cpumask)) + 1; mask_str = kmalloc(len, GFP_KERNEL); if (!mask_str) return -ENOMEM; len = snprintf(mask_str, len, "%*pb\n", cpumask_pr_args(tr->tracing_cpumask)); if (len >= count) return -EINVAL; return simple_read_from_buffer(ubuf, count, ppos, mask_str, len); } int tracing_set_cpumask(struct trace_array *tr, cpumask_var_t tracing_cpumask_new) { int cpu; if (!tr) return -EINVAL; local_irq_disable(); arch_spin_lock(&tr->max_lock); for_each_tracing_cpu(cpu) { /* * Increase/decrease the disabled counter if we are * about to flip a bit in the cpumask: */ if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && !cpumask_test_cpu(cpu, tracing_cpumask_new)) { ring_buffer_record_disable_cpu(tr->array_buffer.buffer, 
cpu); #ifdef CONFIG_TRACER_MAX_TRACE ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); #endif } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); #ifdef CONFIG_TRACER_MAX_TRACE ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); #endif } } arch_spin_unlock(&tr->max_lock); local_irq_enable(); cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); return 0; } static ssize_t tracing_cpumask_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { struct trace_array *tr = file_inode(filp)->i_private; cpumask_var_t tracing_cpumask_new; int err; if (count == 0 || count > KMALLOC_MAX_SIZE) return -EINVAL; if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) return -ENOMEM; err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); if (err) goto err_free; err = tracing_set_cpumask(tr, tracing_cpumask_new); if (err) goto err_free; free_cpumask_var(tracing_cpumask_new); return count; err_free: free_cpumask_var(tracing_cpumask_new); return err; } static const struct file_operations tracing_cpumask_fops = { .open = tracing_open_generic_tr, .read = tracing_cpumask_read, .write = tracing_cpumask_write, .release = tracing_release_generic_tr, .llseek = generic_file_llseek, }; static int tracing_trace_options_show(struct seq_file *m, void *v) { struct tracer_opt *trace_opts; struct trace_array *tr = m->private; struct tracer_flags *flags; u32 tracer_flags; int i; guard(mutex)(&trace_types_lock); for (i = 0; trace_options[i]; i++) { if (tr->trace_flags & (1ULL << i)) seq_printf(m, "%s\n", trace_options[i]); else seq_printf(m, "no%s\n", trace_options[i]); } flags = tr->current_trace_flags; if (!flags || !flags->opts) return 0; tracer_flags = flags->val; trace_opts = flags->opts; for (i = 0; trace_opts[i].name; i++) { if (tracer_flags & trace_opts[i].bit) seq_printf(m, "%s\n", trace_opts[i].name); else seq_printf(m, "no%s\n", trace_opts[i].name); } return 0; } static int __set_tracer_option(struct trace_array *tr, struct tracer_flags *tracer_flags, struct tracer_opt *opts, int neg) { struct tracer *trace = tracer_flags->trace; int ret = 0; if (trace->set_flag) ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); if (ret) return ret; if (neg) tracer_flags->val &= ~opts->bit; else tracer_flags->val |= opts->bit; return 0; } /* Try to assign a tracer specific option */ static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) { struct tracer_flags *tracer_flags = tr->current_trace_flags; struct tracer_opt *opts = NULL; int i; if (!tracer_flags || !tracer_flags->opts) return 0; for (i = 0; tracer_flags->opts[i].name; i++) { opts = &tracer_flags->opts[i]; if (strcmp(cmp, opts->name) == 0) return __set_tracer_option(tr, tracer_flags, opts, neg); } return -EINVAL; } /* Some tracers require overwrite to stay enabled */ int trace_keep_overwrite(struct tracer *tracer, u64 mask, int set) { if (tracer->enabled && (mask & TRACE_ITER(OVERWRITE)) && !set) return -1; return 0; } int set_tracer_flag(struct trace_array *tr, u64 mask, int enabled) { switch (mask) { case TRACE_ITER(RECORD_TGID): case TRACE_ITER(RECORD_CMD): case TRACE_ITER(TRACE_PRINTK): case TRACE_ITER(COPY_MARKER): lockdep_assert_held(&event_mutex); } /* do nothing if flag is already set */ if (!!(tr->trace_flags & mask) == !!enabled) return 0; /* Give the tracer a chance to approve the change */ if (tr->current_trace->flag_changed) if (tr->current_trace->flag_changed(tr, mask, 
!!enabled)) return -EINVAL; switch (mask) { case TRACE_ITER(TRACE_PRINTK): if (enabled) { update_printk_trace(tr); } else { /* * The global_trace cannot clear this. * It's flag only gets cleared if another instance sets it. */ if (printk_trace == &global_trace) return -EINVAL; /* * An instance must always have it set. * by default, that's the global_trace instance. */ if (printk_trace == tr) update_printk_trace(&global_trace); } break; case TRACE_ITER(COPY_MARKER): update_marker_trace(tr, enabled); /* update_marker_trace updates the tr->trace_flags */ return 0; } if (enabled) tr->trace_flags |= mask; else tr->trace_flags &= ~mask; switch (mask) { case TRACE_ITER(RECORD_CMD): trace_event_enable_cmd_record(enabled); break; case TRACE_ITER(RECORD_TGID): if (trace_alloc_tgid_map() < 0) { tr->trace_flags &= ~TRACE_ITER(RECORD_TGID); return -ENOMEM; } trace_event_enable_tgid_record(enabled); break; case TRACE_ITER(EVENT_FORK): trace_event_follow_fork(tr, enabled); break; case TRACE_ITER(FUNC_FORK): ftrace_pid_follow_fork(tr, enabled); break; case TRACE_ITER(OVERWRITE): ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); #ifdef CONFIG_TRACER_MAX_TRACE ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); #endif break; case TRACE_ITER(PRINTK): trace_printk_start_stop_comm(enabled); trace_printk_control(enabled); break; #if defined(CONFIG_FUNCTION_PROFILER) && defined(CONFIG_FUNCTION_GRAPH_TRACER) case TRACE_GRAPH_GRAPH_TIME: ftrace_graph_graph_time_control(enabled); break; #endif } return 0; } int trace_set_options(struct trace_array *tr, char *option) { char *cmp; int neg = 0; int ret; size_t orig_len = strlen(option); int len; cmp = strstrip(option); len = str_has_prefix(cmp, "no"); if (len) neg = 1; cmp += len; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = match_string(trace_options, -1, cmp); /* If no option could be set, test the specific tracer options */ if (ret < 0) ret = set_tracer_option(tr, cmp, neg); else ret = set_tracer_flag(tr, 1ULL << ret, !neg); mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); /* * If the first trailing whitespace is replaced with '\0' by strstrip, * turn it back into a space. 
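 *
 * (User-visible usage sketch: writing "sym-offset" to the trace_options
 *  file sets that flag, writing "nosym-offset" clears it, and a name
 *  that matches no core option is retried as a tracer-specific option
 *  before -EINVAL is returned.)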
*/ if (orig_len > strlen(option)) option[strlen(option)] = ' '; return ret; } static void __init apply_trace_boot_options(void) { char *buf = trace_boot_options_buf; char *option; while (true) { option = strsep(&buf, ","); if (!option) break; if (*option) trace_set_options(&global_trace, option); /* Put back the comma to allow this to be called again */ if (buf) *(buf - 1) = ','; } } static ssize_t tracing_trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = trace_set_options(tr, buf); if (ret < 0) return ret; *ppos += cnt; return cnt; } static int tracing_trace_options_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = single_open(file, tracing_trace_options_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static const struct file_operations tracing_iter_fops = { .open = tracing_trace_options_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_trace_options_write, }; static const char readme_msg[] = "tracing mini-HOWTO:\n\n" "By default tracefs removes all OTH file permission bits.\n" "When mounting tracefs an optional group id can be specified\n" "which adds the group to every directory and file in tracefs:\n\n" "\t e.g. mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n" "# echo 0 > tracing_on : quick way to disable tracing\n" "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" " Important files:\n" " trace\t\t\t- The static contents of the buffer\n" "\t\t\t To clear the buffer write into this file: echo > trace\n" " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" " current_tracer\t- function and latency tracers\n" " available_tracers\t- list of configured tracers for current_tracer\n" " error_log\t- error log for failed commands (that support it)\n" " buffer_size_kb\t- view and modify size of per cpu buffer\n" " buffer_total_size_kb - view total size of all cpu buffers\n\n" " trace_clock\t\t- change the clock used to order events\n" " local: Per cpu clock but may not be synced across CPUs\n" " global: Synced across CPUs but slows tracing down.\n" " counter: Not a clock, but just an increment\n" " uptime: Jiffy counter from time of boot\n" " perf: Same clock that perf events use\n" #ifdef CONFIG_X86_64 " x86-tsc: TSC cycle counter\n" #endif "\n timestamp_mode\t- view the mode used to timestamp events\n" " delta: Delta difference against a buffer-wide timestamp\n" " absolute: Absolute (standalone) timestamp\n" "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" " tracing_cpumask\t- Limit which CPUs to trace\n" " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" "\t\t\t Remove sub-buffer with rmdir\n" " trace_options\t\t- Set format or modify how tracing happens\n" "\t\t\t Disable an option by prefixing 'no' to the\n" "\t\t\t option name\n" " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" #ifdef CONFIG_DYNAMIC_FTRACE "\n available_filter_functions - list of functions that can be filtered on\n" " set_ftrace_filter\t- echo function name in here to only trace 
these\n" "\t\t\t functions\n" "\t accepts: func_full_name or glob-matching-pattern\n" "\t modules: Can select a group via module\n" "\t Format: :mod:<module-name>\n" "\t example: echo :mod:ext3 > set_ftrace_filter\n" "\t triggers: a command to perform when function is hit\n" "\t Format: <function>:<trigger>[:count]\n" "\t trigger: traceon, traceoff\n" "\t\t enable_event:<system>:<event>\n" "\t\t disable_event:<system>:<event>\n" #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif "\t\t dump\n" "\t\t cpudump\n" "\t example: echo do_fault:traceoff > set_ftrace_filter\n" "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" "\t The first one will disable tracing every time do_fault is hit\n" "\t The second will disable tracing at most 3 times when do_trap is hit\n" "\t The first time do trap is hit and it disables tracing, the\n" "\t counter will decrement to 2. If tracing is already disabled,\n" "\t the counter will not decrement. It only decrements when the\n" "\t trigger did work\n" "\t To remove trigger without count:\n" "\t echo '!<function>:<trigger> > set_ftrace_filter\n" "\t To remove trigger with a count:\n" "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" " set_ftrace_notrace\t- echo function name in here to never trace.\n" "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" "\t modules: Can select a group via module command :mod:\n" "\t Does not accept triggers\n" #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_TRACER " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" "\t\t (function)\n" " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n" "\t\t (function)\n" #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" "\t\t\t snapshot buffer. 
Read the contents for more\n" "\t\t\t information\n" #endif #ifdef CONFIG_STACK_TRACER " stack_trace\t\t- Shows the max stack trace when active\n" " stack_max_size\t- Shows current max stack size that was traced\n" "\t\t\t Write into this file to reset the max size (trigger a\n" "\t\t\t new trace)\n" #ifdef CONFIG_DYNAMIC_FTRACE " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" "\t\t\t traces\n" #endif #endif /* CONFIG_STACK_TRACER */ #ifdef CONFIG_DYNAMIC_EVENTS " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_KPROBE_EVENTS " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_UPROBE_EVENTS " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \ defined(CONFIG_FPROBE_EVENTS) "\t accepts: event-definitions (one definition per line)\n" #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n" "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n" #endif #ifdef CONFIG_FPROBE_EVENTS "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n" "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n" #endif #ifdef CONFIG_HIST_TRIGGERS "\t s:[synthetic/]<event> <field> [<field>]\n" #endif "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n" "\t -:[<group>/][<event>]\n" #ifdef CONFIG_KPROBE_EVENTS "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n" #endif #ifdef CONFIG_UPROBE_EVENTS " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n" #endif "\t args: <name>=fetcharg[:type]\n" "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n" #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n" #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS "\t <argname>[->field[->field|.field...]],\n" #endif #else "\t $stack<index>, $stack, $retval, $comm,\n" #endif "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n" "\t kernel return probes support: $retval, $arg<N>, $comm\n" "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n" "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n" "\t symstr, %pd/%pD, <type>\\[<array-size>\\]\n" #ifdef CONFIG_HIST_TRIGGERS "\t field: <stype> <name>;\n" "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" "\t [unsigned] char/int/long\n" #endif "\t efield: For event probes ('e' types), the field is on of the fields\n" "\t of the <attached-group>/<attached-event>.\n" #endif " set_event\t\t- Enables events by name written into it\n" "\t\t\t Can enable module events via: :mod:<module>\n" " events/\t\t- Directory containing all trace event subsystems:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" " events/<system>/\t- Directory containing all trace events for <system>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" "\t\t\t events\n" " filter\t\t- If set, only events passing filter are traced\n" " events/<system>/<event>/\t- Directory containing control files for\n" "\t\t\t <event>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" " 
filter\t\t- If set, only events passing filter are traced\n" " trigger\t\t- If set, a command to perform when event is hit\n" "\t Format: <trigger>[:count][if <filter>]\n" "\t trigger: traceon, traceoff\n" "\t enable_event:<system>:<event>\n" "\t disable_event:<system>:<event>\n" #ifdef CONFIG_HIST_TRIGGERS "\t enable_hist:<system>:<event>\n" "\t disable_hist:<system>:<event>\n" #endif #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif #ifdef CONFIG_HIST_TRIGGERS "\t\t hist (see below)\n" #endif "\t example: echo traceoff > events/block/block_unplug/trigger\n" "\t echo traceoff:3 > events/block/block_unplug/trigger\n" "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" "\t events/block/block_unplug/trigger\n" "\t The first disables tracing every time block_unplug is hit.\n" "\t The second disables tracing the first 3 times block_unplug is hit.\n" "\t The third enables the kmalloc event the first 3 times block_unplug\n" "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" "\t Like function triggers, the counter is only decremented if it\n" "\t enabled or disabled tracing.\n" "\t To remove a trigger without a count:\n" "\t echo '!<trigger> > <system>/<event>/trigger\n" "\t To remove a trigger with a count:\n" "\t echo '!<trigger>:0 > <system>/<event>/trigger\n" "\t Filters can be ignored when removing a trigger.\n" #ifdef CONFIG_HIST_TRIGGERS " hist trigger\t- If set, event hits are aggregated into a hash table\n" "\t Format: hist:keys=<field1[,field2,...]>\n" "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n" "\t [:values=<field1[,field2,...]>]\n" "\t [:sort=<field1[,field2,...]>]\n" "\t [:size=#entries]\n" "\t [:pause][:continue][:clear]\n" "\t [:name=histname1]\n" "\t [:nohitcount]\n" "\t [:<handler>.<action>]\n" "\t [if <filter>]\n\n" "\t Note, special fields can be used as well:\n" "\t common_timestamp - to record current timestamp\n" "\t common_cpu - to record the CPU the event happened on\n" "\n" "\t A hist trigger variable can be:\n" "\t - a reference to a field e.g. x=current_timestamp,\n" "\t - a reference to another variable e.g. y=$x,\n" "\t - a numeric literal: e.g. ms_per_sec=1000,\n" "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n" "\n" "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n" "\t multiplication(*) and division(/) operators. An operand can be either a\n" "\t variable reference, field or numeric literal.\n" "\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" "\t correspond to fields in the event's format description. Keys\n" "\t can be any field, or the special string 'common_stacktrace'.\n" "\t Compound keys consisting of up to two fields can be specified\n" "\t by the 'keys' keyword. Values must correspond to numeric\n" "\t fields. Sort keys consisting of up to two fields can be\n" "\t specified using the 'sort' keyword. The sort direction can\n" "\t be modified by appending '.descending' or '.ascending' to a\n" "\t sort field. 
The 'size' parameter can be used to specify more\n" "\t or fewer than the default 2048 entries for the hashtable size.\n" "\t If a hist trigger is given a name using the 'name' parameter,\n" "\t its histogram data will be shared with other triggers of the\n" "\t same name, and trigger hits will update this common data.\n\n" "\t Reading the 'hist' file for the event will dump the hash\n" "\t table in its entirety to stdout. If there are multiple hist\n" "\t triggers attached to an event, there will be a table for each\n" "\t trigger in the output. The table displayed for a named\n" "\t trigger will be the same as any other instance having the\n" "\t same name. The default format used to display a given field\n" "\t can be modified by appending any of the following modifiers\n" "\t to the field name, as applicable:\n\n" "\t .hex display a number as a hex value\n" "\t .sym display an address as a symbol\n" "\t .sym-offset display an address as a symbol and offset\n" "\t .execname display a common_pid as a program name\n" "\t .syscall display a syscall id as a syscall name\n" "\t .log2 display log2 value rather than raw number\n" "\t .buckets=size display values in groups of size rather than raw number\n" "\t .usecs display a common_timestamp in microseconds\n" "\t .percent display a number of percentage value\n" "\t .graph display a bar-graph of a value\n\n" "\t The 'pause' parameter can be used to pause an existing hist\n" "\t trigger or to start a hist trigger but not log any events\n" "\t until told to do so. 'continue' can be used to start or\n" "\t restart a paused hist trigger.\n\n" "\t The 'clear' parameter will clear the contents of a running\n" "\t hist trigger and leave its current paused/active state\n" "\t unchanged.\n\n" "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n" "\t raw hitcount in the histogram.\n\n" "\t The enable_hist and disable_hist triggers can be used to\n" "\t have one event conditionally start and stop another event's\n" "\t already-attached hist trigger. The syntax is analogous to\n" "\t the enable_event and disable_event triggers.\n\n" "\t Hist trigger handlers and actions are executed whenever a\n" "\t a histogram entry is added or updated. They take the form:\n\n" "\t <handler>.<action>\n\n" "\t The available handlers are:\n\n" "\t onmatch(matching.event) - invoke on addition or update\n" "\t onmax(var) - invoke if var exceeds current max\n" "\t onchange(var) - invoke action if var changes\n\n" "\t The available actions are:\n\n" "\t trace(<synthetic_event>,param list) - generate synthetic event\n" "\t save(field,...) 
- save current event fields\n" #ifdef CONFIG_TRACER_SNAPSHOT "\t snapshot() - snapshot the trace buffer\n\n" #endif #ifdef CONFIG_SYNTH_EVENTS " events/synthetic_events\t- Create/append/remove/show synthetic events\n" "\t Write into this file to define/undefine new synthetic events.\n" "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n" #endif #endif ; static ssize_t tracing_readme_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return simple_read_from_buffer(ubuf, cnt, ppos, readme_msg, strlen(readme_msg)); } static const struct file_operations tracing_readme_fops = { .open = tracing_open_generic, .read = tracing_readme_read, .llseek = generic_file_llseek, }; #ifdef CONFIG_TRACE_EVAL_MAP_FILE static union trace_eval_map_item * update_eval_map(union trace_eval_map_item *ptr) { if (!ptr->map.eval_string) { if (ptr->tail.next) { ptr = ptr->tail.next; /* Set ptr to the next real item (skip head) */ ptr++; } else return NULL; } return ptr; } static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) { union trace_eval_map_item *ptr = v; /* * Paranoid! If ptr points to end, we don't want to increment past it. * This really should never happen. */ (*pos)++; ptr = update_eval_map(ptr); if (WARN_ON_ONCE(!ptr)) return NULL; ptr++; ptr = update_eval_map(ptr); return ptr; } static void *eval_map_start(struct seq_file *m, loff_t *pos) { union trace_eval_map_item *v; loff_t l = 0; mutex_lock(&trace_eval_mutex); v = trace_eval_maps; if (v) v++; while (v && l < *pos) { v = eval_map_next(m, v, &l); } return v; } static void eval_map_stop(struct seq_file *m, void *v) { mutex_unlock(&trace_eval_mutex); } static int eval_map_show(struct seq_file *m, void *v) { union trace_eval_map_item *ptr = v; seq_printf(m, "%s %ld (%s)\n", ptr->map.eval_string, ptr->map.eval_value, ptr->map.system); return 0; } static const struct seq_operations tracing_eval_map_seq_ops = { .start = eval_map_start, .next = eval_map_next, .stop = eval_map_stop, .show = eval_map_show, }; static int tracing_eval_map_open(struct inode *inode, struct file *filp) { int ret; ret = tracing_check_open_get_tr(NULL); if (ret) return ret; return seq_open(filp, &tracing_eval_map_seq_ops); } static const struct file_operations tracing_eval_map_fops = { .open = tracing_eval_map_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static inline union trace_eval_map_item * trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) { /* Return tail of array given the head */ return ptr + ptr->head.length + 1; } static void trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, int len) { struct trace_eval_map **stop; struct trace_eval_map **map; union trace_eval_map_item *map_array; union trace_eval_map_item *ptr; stop = start + len; /* * The trace_eval_maps contains the map plus a head and tail item, * where the head holds the module and length of array, and the * tail holds a pointer to the next list. 
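 *
 * A rough sketch of the resulting layout (illustrative, for a module
 * contributing len = 3 maps):
 *
 *   map_array[0]  head  (head.mod = mod, head.length = 3)
 *   map_array[1]  map   (copy of *start[0])
 *   map_array[2]  map   (copy of *start[1])
 *   map_array[3]  map   (copy of *start[2])
 *   map_array[4]  tail  (zeroed; tail.next links to the next block)
 *
 * which is why len + 2 items are allocated below and why
 * trace_eval_jmp_to_tail() steps over head.length + 1 entries.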
*/ map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); if (!map_array) { pr_warn("Unable to allocate trace eval mapping\n"); return; } guard(mutex)(&trace_eval_mutex); if (!trace_eval_maps) trace_eval_maps = map_array; else { ptr = trace_eval_maps; for (;;) { ptr = trace_eval_jmp_to_tail(ptr); if (!ptr->tail.next) break; ptr = ptr->tail.next; } ptr->tail.next = map_array; } map_array->head.mod = mod; map_array->head.length = len; map_array++; for (map = start; (unsigned long)map < (unsigned long)stop; map++) { map_array->map = **map; map_array++; } memset(map_array, 0, sizeof(*map_array)); } static void trace_create_eval_file(struct dentry *d_tracer) { trace_create_file("eval_map", TRACE_MODE_READ, d_tracer, NULL, &tracing_eval_map_fops); } #else /* CONFIG_TRACE_EVAL_MAP_FILE */ static inline void trace_create_eval_file(struct dentry *d_tracer) { } static inline void trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, int len) { } #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ static void trace_event_update_with_eval_map(struct module *mod, struct trace_eval_map **start, int len) { struct trace_eval_map **map; /* Always run sanitizer only if btf_type_tag attr exists. */ if (len <= 0) { if (!(IS_ENABLED(CONFIG_DEBUG_INFO_BTF) && IS_ENABLED(CONFIG_PAHOLE_HAS_BTF_TAG) && __has_attribute(btf_type_tag))) return; } map = start; trace_event_update_all(map, len); if (len <= 0) return; trace_insert_eval_map_file(mod, start, len); } static ssize_t tracing_set_trace_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[MAX_TRACER_SIZE+2]; int r; scoped_guard(mutex, &trace_types_lock) { r = sprintf(buf, "%s\n", tr->current_trace->name); } return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } int tracer_init(struct tracer *t, struct trace_array *tr) { tracing_reset_online_cpus(&tr->array_buffer); return t->init(tr); } static void set_buffer_entries(struct array_buffer *buf, unsigned long val) { int cpu; for_each_tracing_cpu(cpu) per_cpu_ptr(buf->data, cpu)->entries = val; } static void update_buffer_entries(struct array_buffer *buf, int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); } else { per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu); } } #ifdef CONFIG_TRACER_MAX_TRACE /* resize @tr's buffer to the size of @size_tr's entries */ static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, struct array_buffer *size_buf, int cpu_id) { int cpu, ret = 0; if (cpu_id == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { ret = ring_buffer_resize(trace_buf->buffer, per_cpu_ptr(size_buf->data, cpu)->entries, cpu); if (ret < 0) break; per_cpu_ptr(trace_buf->data, cpu)->entries = per_cpu_ptr(size_buf->data, cpu)->entries; } } else { ret = ring_buffer_resize(trace_buf->buffer, per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); if (ret == 0) per_cpu_ptr(trace_buf->data, cpu_id)->entries = per_cpu_ptr(size_buf->data, cpu_id)->entries; } return ret; } #endif /* CONFIG_TRACER_MAX_TRACE */ static int __tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu) { int ret; /* * If kernel or user changes the size of the ring buffer * we use the size that was given, and we can forget about * expanding it later. 
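 *
 * As a usage sketch (the file name comes from the readme text above,
 * not from this function): writing a new size in KB to buffer_size_kb
 * lands here via tracing_entries_write() and
 * tracing_resize_ring_buffer(), after which the "(expanded: ...)"
 * hint is no longer reported because ring_buffer_expanded is set.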
*/ trace_set_ring_buffer_expanded(tr); /* May be called before buffers are initialized */ if (!tr->array_buffer.buffer) return 0; /* Do not allow tracing while resizing ring buffer */ tracing_stop_tr(tr); ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); if (ret < 0) goto out_start; #ifdef CONFIG_TRACER_MAX_TRACE if (!tr->allocated_snapshot) goto out; ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); if (ret < 0) { int r = resize_buffer_duplicate_size(&tr->array_buffer, &tr->array_buffer, cpu); if (r < 0) { /* * AARGH! We are left with different * size max buffer!!!! * The max buffer is our "snapshot" buffer. * When a tracer needs a snapshot (one of the * latency tracers), it swaps the max buffer * with the saved snap shot. We succeeded to * update the size of the main buffer, but failed to * update the size of the max buffer. But when we tried * to reset the main buffer to the original size, we * failed there too. This is very unlikely to * happen, but if it does, warn and kill all * tracing. */ WARN_ON(1); tracing_disabled = 1; } goto out_start; } update_buffer_entries(&tr->max_buffer, cpu); out: #endif /* CONFIG_TRACER_MAX_TRACE */ update_buffer_entries(&tr->array_buffer, cpu); out_start: tracing_start_tr(tr); return ret; } ssize_t tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu_id) { guard(mutex)(&trace_types_lock); if (cpu_id != RING_BUFFER_ALL_CPUS) { /* make sure, this cpu is enabled in the mask */ if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) return -EINVAL; } return __tracing_resize_ring_buffer(tr, size, cpu_id); } struct trace_mod_entry { unsigned long mod_addr; char mod_name[MODULE_NAME_LEN]; }; struct trace_scratch { unsigned int clock_id; unsigned long text_addr; unsigned long nr_entries; struct trace_mod_entry entries[]; }; static DEFINE_MUTEX(scratch_mutex); static int cmp_mod_entry(const void *key, const void *pivot) { unsigned long addr = (unsigned long)key; const struct trace_mod_entry *ent = pivot; if (addr >= ent[0].mod_addr && addr < ent[1].mod_addr) return 0; else return addr - ent->mod_addr; } /** * trace_adjust_address() - Adjust prev boot address to current address. * @tr: Persistent ring buffer's trace_array. * @addr: Address in @tr which is adjusted. */ unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr) { struct trace_module_delta *module_delta; struct trace_scratch *tscratch; struct trace_mod_entry *entry; unsigned long raddr; int idx = 0, nr_entries; /* If we don't have last boot delta, return the address */ if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) return addr; /* tr->module_delta must be protected by rcu. */ guard(rcu)(); tscratch = tr->scratch; /* if there is no tscrach, module_delta must be NULL. */ module_delta = READ_ONCE(tr->module_delta); if (!module_delta || !tscratch->nr_entries || tscratch->entries[0].mod_addr > addr) { raddr = addr + tr->text_delta; return __is_kernel(raddr) || is_kernel_core_data(raddr) || is_kernel_rodata(raddr) ? raddr : addr; } /* Note that entries must be sorted. 
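 * A small worked example with made-up addresses: for sorted
 * entries[].mod_addr = { 0xA000, 0xC000, 0xF000 }, an addr of 0xC800
 * satisfies cmp_mod_entry() at the middle slot (0xC000 <= addr <
 * 0xF000), so idx = 1 and delta[1] is applied. The bsearch below only
 * spans nr_entries - 1 slots, the last entry acting as an upper
 * bound, and any addr beyond the last mod_addr is assigned the last
 * index by the explicit check before the bsearch.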
*/ nr_entries = tscratch->nr_entries; if (nr_entries == 1 || tscratch->entries[nr_entries - 1].mod_addr < addr) idx = nr_entries - 1; else { entry = __inline_bsearch((void *)addr, tscratch->entries, nr_entries - 1, sizeof(tscratch->entries[0]), cmp_mod_entry); if (entry) idx = entry - tscratch->entries; } return addr + module_delta->delta[idx]; } #ifdef CONFIG_MODULES static int save_mod(struct module *mod, void *data) { struct trace_array *tr = data; struct trace_scratch *tscratch; struct trace_mod_entry *entry; unsigned int size; tscratch = tr->scratch; if (!tscratch) return -1; size = tr->scratch_size; if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size) return -1; entry = &tscratch->entries[tscratch->nr_entries]; tscratch->nr_entries++; entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base; strscpy(entry->mod_name, mod->name); return 0; } #else static int save_mod(struct module *mod, void *data) { return 0; } #endif static void update_last_data(struct trace_array *tr) { struct trace_module_delta *module_delta; struct trace_scratch *tscratch; if (!(tr->flags & TRACE_ARRAY_FL_BOOT)) return; if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) return; /* Only if the buffer has previous boot data clear and update it. */ tr->flags &= ~TRACE_ARRAY_FL_LAST_BOOT; /* Reset the module list and reload them */ if (tr->scratch) { struct trace_scratch *tscratch = tr->scratch; tscratch->clock_id = tr->clock_id; memset(tscratch->entries, 0, flex_array_size(tscratch, entries, tscratch->nr_entries)); tscratch->nr_entries = 0; guard(mutex)(&scratch_mutex); module_for_each_mod(save_mod, tr); } /* * Need to clear all CPU buffers as there cannot be events * from the previous boot mixed with events with this boot * as that will cause a confusing trace. Need to clear all * CPU buffers, even for those that may currently be offline. */ tracing_reset_all_cpus(&tr->array_buffer); /* Using current data now */ tr->text_delta = 0; if (!tr->scratch) return; tscratch = tr->scratch; module_delta = READ_ONCE(tr->module_delta); WRITE_ONCE(tr->module_delta, NULL); kfree_rcu(module_delta, rcu); /* Set the persistent ring buffer meta data to this address */ tscratch->text_addr = (unsigned long)_text; } /** * tracing_update_buffers - used by tracing facility to expand ring buffers * @tr: The tracing instance * * To save on memory when the tracing is never used on a system with it * configured in. The ring buffers are set to a minimum size. But once * a user starts to use the tracing facility, then they need to grow * to their default size. * * This function is to be called when a tracer is about to be used. */ int tracing_update_buffers(struct trace_array *tr) { int ret = 0; guard(mutex)(&trace_types_lock); update_last_data(tr); if (!tr->ring_buffer_expanded) ret = __tracing_resize_ring_buffer(tr, trace_buf_size, RING_BUFFER_ALL_CPUS); return ret; } /* * Used to clear out the tracer before deletion of an instance. * Must have trace_types_lock held. 
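 *
 * A minimal caller sketch (illustrative only):
 *
 *   guard(mutex)(&trace_types_lock);
 *   tracing_set_nop(tr);    // tr now runs the nop tracer
 *   ... tear down the rest of the instance ...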
*/ static void tracing_set_nop(struct trace_array *tr) { if (tr->current_trace == &nop_trace) return; tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); tr->current_trace = &nop_trace; tr->current_trace_flags = nop_trace.flags; } static bool tracer_options_updated; int tracing_set_tracer(struct trace_array *tr, const char *buf) { struct tracer *trace = NULL; struct tracers *t; #ifdef CONFIG_TRACER_MAX_TRACE bool had_max_tr; #endif int ret; guard(mutex)(&trace_types_lock); update_last_data(tr); if (!tr->ring_buffer_expanded) { ret = __tracing_resize_ring_buffer(tr, trace_buf_size, RING_BUFFER_ALL_CPUS); if (ret < 0) return ret; ret = 0; } list_for_each_entry(t, &tr->tracers, list) { if (strcmp(t->tracer->name, buf) == 0) { trace = t->tracer; break; } } if (!trace) return -EINVAL; if (trace == tr->current_trace) return 0; #ifdef CONFIG_TRACER_SNAPSHOT if (trace->use_max_tr) { local_irq_disable(); arch_spin_lock(&tr->max_lock); ret = tr->cond_snapshot ? -EBUSY : 0; arch_spin_unlock(&tr->max_lock); local_irq_enable(); if (ret) return ret; } #endif /* Some tracers won't work on kernel command line */ if (system_state < SYSTEM_RUNNING && trace->noboot) { pr_warn("Tracer '%s' is not allowed on command line, ignored\n", trace->name); return -EINVAL; } /* Some tracers are only allowed for the top level buffer */ if (!trace_ok_for_array(trace, tr)) return -EINVAL; /* If trace pipe files are being read, we can't change the tracer */ if (tr->trace_ref) return -EBUSY; trace_branch_disable(); tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); #ifdef CONFIG_TRACER_MAX_TRACE had_max_tr = tr->current_trace->use_max_tr; /* Current trace needs to be nop_trace before synchronize_rcu */ tr->current_trace = &nop_trace; tr->current_trace_flags = nop_trace.flags; if (had_max_tr && !trace->use_max_tr) { /* * We need to make sure that the update_max_tr sees that * current_trace changed to nop_trace to keep it from * swapping the buffers after we resize it. * The update_max_tr is called from interrupts disabled * so a synchronized_sched() is sufficient. */ synchronize_rcu(); free_snapshot(tr); tracing_disarm_snapshot(tr); } if (!had_max_tr && trace->use_max_tr) { ret = tracing_arm_snapshot_locked(tr); if (ret) return ret; } #else tr->current_trace = &nop_trace; #endif tr->current_trace_flags = t->flags ? : t->tracer->flags; if (trace->init) { ret = tracer_init(trace, tr); if (ret) { #ifdef CONFIG_TRACER_MAX_TRACE if (trace->use_max_tr) tracing_disarm_snapshot(tr); #endif tr->current_trace_flags = nop_trace.flags; return ret; } } tr->current_trace = trace; tr->current_trace->enabled++; trace_branch_enable(tr); return 0; } static ssize_t tracing_set_trace_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[MAX_TRACER_SIZE+1]; char *name; size_t ret; int err; ret = cnt; if (cnt > MAX_TRACER_SIZE) cnt = MAX_TRACER_SIZE; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; name = strim(buf); err = tracing_set_tracer(tr, name); if (err) return err; *ppos += ret; return ret; } static ssize_t tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; int r; r = snprintf(buf, sizeof(buf), "%ld\n", *ptr == (unsigned long)-1 ? 
-1 : nsecs_to_usecs(*ptr)); if (r > sizeof(buf)) r = sizeof(buf); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; *ptr = val * 1000; return cnt; } static ssize_t tracing_thresh_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); } static ssize_t tracing_thresh_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; int ret; guard(mutex)(&trace_types_lock); ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); if (ret < 0) return ret; if (tr->current_trace->update_thresh) { ret = tr->current_trace->update_thresh(tr); if (ret < 0) return ret; } return cnt; } #ifdef CONFIG_TRACER_MAX_TRACE static ssize_t tracing_max_lat_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); } static ssize_t tracing_max_lat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); } #endif static int open_pipe_on_cpu(struct trace_array *tr, int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { if (cpumask_empty(tr->pipe_cpumask)) { cpumask_setall(tr->pipe_cpumask); return 0; } } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { cpumask_set_cpu(cpu, tr->pipe_cpumask); return 0; } return -EBUSY; } static void close_pipe_on_cpu(struct trace_array *tr, int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { WARN_ON(!cpumask_full(tr->pipe_cpumask)); cpumask_clear(tr->pipe_cpumask); } else { WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); cpumask_clear_cpu(cpu, tr->pipe_cpumask); } } static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int cpu; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; guard(mutex)(&trace_types_lock); cpu = tracing_get_cpu(inode); ret = open_pipe_on_cpu(tr, cpu); if (ret) goto fail_pipe_on_cpu; /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { ret = -ENOMEM; goto fail_alloc_iter; } trace_seq_init(&iter->seq); iter->trace = tr->current_trace; if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { ret = -ENOMEM; goto fail; } /* trace pipe does not show start of buffer */ cpumask_setall(iter->started); if (tr->trace_flags & TRACE_ITER(LATENCY_FMT)) iter->iter_flags |= TRACE_FILE_LAT_FMT; /* Output in nanoseconds only if we are using a clock in nanoseconds. 
*/ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; iter->tr = tr; iter->array_buffer = &tr->array_buffer; iter->cpu_file = cpu; mutex_init(&iter->mutex); filp->private_data = iter; if (iter->trace->pipe_open) iter->trace->pipe_open(iter); nonseekable_open(inode, filp); tr->trace_ref++; return ret; fail: kfree(iter); fail_alloc_iter: close_pipe_on_cpu(tr, cpu); fail_pipe_on_cpu: __trace_array_put(tr); return ret; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; struct trace_array *tr = inode->i_private; scoped_guard(mutex, &trace_types_lock) { tr->trace_ref--; if (iter->trace->pipe_close) iter->trace->pipe_close(iter); close_pipe_on_cpu(tr, iter->cpu_file); } free_trace_iter_content(iter); kfree(iter); trace_array_put(tr); return 0; } static __poll_t trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) { struct trace_array *tr = iter->tr; /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return EPOLLIN | EPOLLRDNORM; if (tr->trace_flags & TRACE_ITER(BLOCK)) /* * Always select as readable when in blocking mode */ return EPOLLIN | EPOLLRDNORM; else return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, filp, poll_table, iter->tr->buffer_percent); } static __poll_t tracing_poll_pipe(struct file *filp, poll_table *poll_table) { struct trace_iterator *iter = filp->private_data; return trace_poll(iter, filp, poll_table); } /* Must be called with iter->mutex held. */ static int tracing_wait_pipe(struct file *filp) { struct trace_iterator *iter = filp->private_data; int ret; while (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) { return -EAGAIN; } /* * We block until we read something and tracing is disabled. * We still block if tracing is disabled, but we have never * read anything. This allows a user to cat this file, and * then enable tracing. But after we have read something, * we give an EOF when tracing is again disabled. * * iter->pos will be 0 if we haven't read anything. */ if (!tracer_tracing_is_on(iter->tr) && iter->pos) break; mutex_unlock(&iter->mutex); ret = wait_on_pipe(iter, 0); mutex_lock(&iter->mutex); if (ret) return ret; } return 1; } static bool update_last_data_if_empty(struct trace_array *tr) { if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) return false; if (!ring_buffer_empty(tr->array_buffer.buffer)) return false; /* * If the buffer contains the last boot data and all per-cpu * buffers are empty, reset it from the kernel side. */ update_last_data(tr); return true; } /* * Consumer reader. */ static ssize_t tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; ssize_t sret; /* * Avoid more than one consumer on a single file descriptor * This is just a matter of traces coherency, the ring buffer itself * is protected. 
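 *
 * Concurrent readers of one open file simply serialize on iter->mutex
 * below; a second open of the same trace_pipe file is already refused
 * with -EBUSY by open_pipe_on_cpu() at open time.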
*/ guard(mutex)(&iter->mutex); /* return any leftover data */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -EBUSY) return sret; trace_seq_init(&iter->seq); if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) return sret; } waitagain: if (update_last_data_if_empty(iter->tr)) return 0; sret = tracing_wait_pipe(filp); if (sret <= 0) return sret; /* stop when tracing is finished */ if (trace_empty(iter)) return 0; if (cnt >= TRACE_SEQ_BUFFER_SIZE) cnt = TRACE_SEQ_BUFFER_SIZE - 1; /* reset all but tr, trace, and overruns */ trace_iterator_reset(iter); cpumask_clear(iter->started); trace_seq_init(&iter->seq); trace_event_read_lock(); trace_access_lock(iter->cpu_file); while (trace_find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { /* * If one print_trace_line() fills entire trace_seq in one shot, * trace_seq_to_user() will returns -EBUSY because save_len == 0, * In this case, we need to consume it, otherwise, loop will peek * this event next time, resulting in an infinite loop. */ if (save_len == 0) { iter->seq.full = 0; trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); trace_consume(iter); break; } /* In other cases, don't print partial lines */ iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); if (trace_seq_used(&iter->seq) >= cnt) break; /* * Setting the full flag means we reached the trace_seq buffer * size and we should leave by partial output condition above. * One of the trace_seq_* functions is not used properly. */ WARN_ONCE(iter->seq.full, "full flag set for trace type %d", iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.readpos >= trace_seq_used(&iter->seq)) trace_seq_init(&iter->seq); /* * If there was nothing to send to user, in spite of consuming trace * entries, go back to wait for more entries. */ if (sret == -EBUSY) goto waitagain; return sret; } static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, unsigned int idx) { __free_page(spd->pages[idx]); } static size_t tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) { size_t count; int save_len; int ret; /* Seq buffer is page-sized, exactly what we need. */ for (;;) { save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (trace_seq_has_overflowed(&iter->seq)) { iter->seq.seq.len = save_len; break; } /* * This should not be hit, because it should only * be set if the iter->seq overflowed. But check it * anyway to be safe. */ if (ret == TRACE_TYPE_PARTIAL_LINE) { iter->seq.seq.len = save_len; break; } count = trace_seq_used(&iter->seq) - save_len; if (rem < count) { rem = 0; iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); rem -= count; if (!trace_find_next_entry_inc(iter)) { rem = 0; iter->ent = NULL; break; } } return rem; } static ssize_t tracing_splice_read_pipe(struct file *filp, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct page *pages_def[PIPE_DEF_BUFFERS]; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct trace_iterator *iter = filp->private_data; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages = 0, /* This gets updated below. 
*/ .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &default_pipe_buf_ops, .spd_release = tracing_spd_release_pipe, }; ssize_t ret; size_t rem; unsigned int i; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; mutex_lock(&iter->mutex); if (iter->trace->splice_read) { ret = iter->trace->splice_read(iter, filp, ppos, pipe, len, flags); if (ret) goto out_err; } ret = tracing_wait_pipe(filp); if (ret <= 0) goto out_err; if (!iter->ent && !trace_find_next_entry_inc(iter)) { ret = -EFAULT; goto out_err; } trace_event_read_lock(); trace_access_lock(iter->cpu_file); /* Fill as many pages as possible. */ for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { spd.pages[i] = alloc_page(GFP_KERNEL); if (!spd.pages[i]) break; rem = tracing_fill_pipe_page(rem, iter); /* Copy the data into the page, so we can start over. */ ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), min((size_t)trace_seq_used(&iter->seq), (size_t)PAGE_SIZE)); if (ret < 0) { __free_page(spd.pages[i]); break; } spd.partial[i].offset = 0; spd.partial[i].len = ret; trace_seq_init(&iter->seq); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); mutex_unlock(&iter->mutex); spd.nr_pages = i; if (i) ret = splice_to_pipe(pipe, &spd); else ret = 0; out: splice_shrink_spd(&spd); return ret; out_err: mutex_unlock(&iter->mutex); goto out; } static ssize_t tracing_syscall_buf_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; char buf[64]; int r; r = snprintf(buf, 64, "%d\n", tr->syscall_buf_sz); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_syscall_buf_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val > SYSCALL_FAULT_USER_MAX) val = SYSCALL_FAULT_USER_MAX; tr->syscall_buf_sz = val; *ppos += cnt; return cnt; } static ssize_t tracing_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; int cpu = tracing_get_cpu(inode); char buf[64]; int r = 0; ssize_t ret; mutex_lock(&trace_types_lock); if (cpu == RING_BUFFER_ALL_CPUS) { int cpu, buf_size_same; unsigned long size; size = 0; buf_size_same = 1; /* check if all cpu sizes are same */ for_each_tracing_cpu(cpu) { /* fill in the size from first enabled cpu */ if (size == 0) size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { buf_size_same = 0; break; } } if (buf_size_same) { if (!tr->ring_buffer_expanded) r = sprintf(buf, "%lu (expanded: %lu)\n", size >> 10, trace_buf_size >> 10); else r = sprintf(buf, "%lu\n", size >> 10); } else r = sprintf(buf, "X\n"); } else r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); mutex_unlock(&trace_types_lock); ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); return ret; } static ssize_t tracing_entries_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry */ if (!val) return -EINVAL; /* value is in KB */ val <<= 10; ret = tracing_resize_ring_buffer(tr, 
val, tracing_get_cpu(inode)); if (ret < 0) return ret; *ppos += cnt; return cnt; } static ssize_t tracing_total_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r, cpu; unsigned long size = 0, expanded_size = 0; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; if (!tr->ring_buffer_expanded) expanded_size += trace_buf_size >> 10; } if (tr->ring_buffer_expanded) r = sprintf(buf, "%lu\n", size); else r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } #define LAST_BOOT_HEADER ((void *)1) static void *l_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_array *tr = m->private; struct trace_scratch *tscratch = tr->scratch; unsigned int index = *pos; (*pos)++; if (*pos == 1) return LAST_BOOT_HEADER; /* Only show offsets of the last boot data */ if (!tscratch || !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) return NULL; /* *pos 0 is for the header, 1 is for the first module */ index--; if (index >= tscratch->nr_entries) return NULL; return &tscratch->entries[index]; } static void *l_start(struct seq_file *m, loff_t *pos) { mutex_lock(&scratch_mutex); return l_next(m, NULL, pos); } static void l_stop(struct seq_file *m, void *p) { mutex_unlock(&scratch_mutex); } static void show_last_boot_header(struct seq_file *m, struct trace_array *tr) { struct trace_scratch *tscratch = tr->scratch; /* * Do not leak KASLR address. This only shows the KASLR address of * the last boot. When the ring buffer is started, the LAST_BOOT * flag gets cleared, and this should only report "current". * Otherwise it shows the KASLR address from the previous boot which * should not be the same as the current boot. */ if (tscratch && (tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) seq_printf(m, "%lx\t[kernel]\n", tscratch->text_addr); else seq_puts(m, "# Current\n"); } static int l_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; struct trace_mod_entry *entry = v; if (v == LAST_BOOT_HEADER) { show_last_boot_header(m, tr); return 0; } seq_printf(m, "%lx\t%s\n", entry->mod_addr, entry->mod_name); return 0; } static const struct seq_operations last_boot_seq_ops = { .start = l_start, .next = l_next, .stop = l_stop, .show = l_show, }; static int tracing_last_boot_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = seq_open(file, &last_boot_seq_ops); if (ret) { trace_array_put(tr); return ret; } m = file->private_data; m->private = tr; return 0; } static int tracing_buffer_meta_open(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; int cpu = tracing_get_cpu(inode); int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu); if (ret < 0) __trace_array_put(tr); return ret; } static ssize_t tracing_free_buffer_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { /* * There is no need to read what the user has written, this function * is just to make sure that there is no error when "echo" is used */ *ppos += cnt; return cnt; } static int tracing_free_buffer_release(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; /* disable tracing ? 
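 * (Illustrative usage; the tracefs file name "free_buffer" is assumed
 * here, it is not spelled out in this function.)
 *
 *   echo 1 > /sys/kernel/tracing/free_buffer
 *
 * On the final close below the ring buffer is resized to zero, and
 * tracing is stopped first when the stop-on-free option is set.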
*/ if (tr->trace_flags & TRACE_ITER(STOP_ON_FREE)) tracer_tracing_off(tr); /* resize the ring buffer to 0 */ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); trace_array_put(tr); return 0; } #define TRACE_MARKER_MAX_SIZE 4096 static ssize_t write_marker_to_buffer(struct trace_array *tr, const char *buf, size_t cnt, unsigned long ip) { struct ring_buffer_event *event; enum event_trigger_type tt = ETT_NONE; struct trace_buffer *buffer; struct print_entry *entry; int meta_size; ssize_t written; size_t size; meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */ again: size = cnt + meta_size; buffer = tr->array_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, tracing_gen_ctx()); if (unlikely(!event)) { /* * If the size was greater than what was allowed, then * make it smaller and try again. */ if (size > ring_buffer_max_event_size(buffer)) { cnt = ring_buffer_max_event_size(buffer) - meta_size; /* The above should only happen once */ if (WARN_ON_ONCE(cnt + meta_size == size)) return -EBADF; goto again; } /* Ring buffer disabled, return as if not open for write */ return -EBADF; } entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, buf, cnt); written = cnt; if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { /* do not add \n before testing triggers, but add \0 */ entry->buf[cnt] = '\0'; tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event); } if (entry->buf[cnt - 1] != '\n') { entry->buf[cnt] = '\n'; entry->buf[cnt + 1] = '\0'; } else entry->buf[cnt] = '\0'; if (static_branch_unlikely(&trace_marker_exports_enabled)) ftrace_exports(event, TRACE_EXPORT_MARKER); __buffer_unlock_commit(buffer, event); if (tt) event_triggers_post_call(tr->trace_marker_file, tt); return written; } struct trace_user_buf { char *buf; }; static DEFINE_MUTEX(trace_user_buffer_mutex); static struct trace_user_buf_info *trace_user_buffer; /** * trace_user_fault_destroy - free up allocated memory of a trace user buffer * @tinfo: The descriptor to free up * * Frees any data allocated in the trace info dsecriptor. */ void trace_user_fault_destroy(struct trace_user_buf_info *tinfo) { char *buf; int cpu; if (!tinfo || !tinfo->tbuf) return; for_each_possible_cpu(cpu) { buf = per_cpu_ptr(tinfo->tbuf, cpu)->buf; kfree(buf); } free_percpu(tinfo->tbuf); } static int user_fault_buffer_enable(struct trace_user_buf_info *tinfo, size_t size) { char *buf; int cpu; lockdep_assert_held(&trace_user_buffer_mutex); tinfo->tbuf = alloc_percpu(struct trace_user_buf); if (!tinfo->tbuf) return -ENOMEM; tinfo->ref = 1; tinfo->size = size; /* Clear each buffer in case of error */ for_each_possible_cpu(cpu) { per_cpu_ptr(tinfo->tbuf, cpu)->buf = NULL; } for_each_possible_cpu(cpu) { buf = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); if (!buf) return -ENOMEM; per_cpu_ptr(tinfo->tbuf, cpu)->buf = buf; } return 0; } /* For internal use. Free and reinitialize */ static void user_buffer_free(struct trace_user_buf_info **tinfo) { lockdep_assert_held(&trace_user_buffer_mutex); trace_user_fault_destroy(*tinfo); kfree(*tinfo); *tinfo = NULL; } /* For internal use. 
Initialize and allocate */ static int user_buffer_init(struct trace_user_buf_info **tinfo, size_t size) { bool alloc = false; int ret; lockdep_assert_held(&trace_user_buffer_mutex); if (!*tinfo) { alloc = true; *tinfo = kzalloc(sizeof(**tinfo), GFP_KERNEL); if (!*tinfo) return -ENOMEM; } ret = user_fault_buffer_enable(*tinfo, size); if (ret < 0 && alloc) user_buffer_free(tinfo); return ret; } /* For internal use, derefrence and free if necessary */ static void user_buffer_put(struct trace_user_buf_info **tinfo) { guard(mutex)(&trace_user_buffer_mutex); if (WARN_ON_ONCE(!*tinfo || !(*tinfo)->ref)) return; if (--(*tinfo)->ref) return; user_buffer_free(tinfo); } /** * trace_user_fault_init - Allocated or reference a per CPU buffer * @tinfo: A pointer to the trace buffer descriptor * @size: The size to allocate each per CPU buffer * * Create a per CPU buffer that can be used to copy from user space * in a task context. When calling trace_user_fault_read(), preemption * must be disabled, and it will enable preemption and copy user * space data to the buffer. If any schedule switches occur, it will * retry until it succeeds without a schedule switch knowing the buffer * is still valid. * * Returns 0 on success, negative on failure. */ int trace_user_fault_init(struct trace_user_buf_info *tinfo, size_t size) { int ret; if (!tinfo) return -EINVAL; guard(mutex)(&trace_user_buffer_mutex); ret = user_buffer_init(&tinfo, size); if (ret < 0) trace_user_fault_destroy(tinfo); return ret; } /** * trace_user_fault_get - up the ref count for the user buffer * @tinfo: A pointer to a pointer to the trace buffer descriptor * * Ups the ref count of the trace buffer. * * Returns the new ref count. */ int trace_user_fault_get(struct trace_user_buf_info *tinfo) { if (!tinfo) return -1; guard(mutex)(&trace_user_buffer_mutex); tinfo->ref++; return tinfo->ref; } /** * trace_user_fault_put - dereference a per cpu trace buffer * @tinfo: The @tinfo that was passed to trace_user_fault_get() * * Decrement the ref count of @tinfo. * * Returns the new refcount (negative on error). */ int trace_user_fault_put(struct trace_user_buf_info *tinfo) { guard(mutex)(&trace_user_buffer_mutex); if (WARN_ON_ONCE(!tinfo || !tinfo->ref)) return -1; --tinfo->ref; return tinfo->ref; } /** * trace_user_fault_read - Read user space into a per CPU buffer * @tinfo: The @tinfo allocated by trace_user_fault_get() * @ptr: The user space pointer to read * @size: The size of user space to read. * @copy_func: Optional function to use to copy from user space * @data: Data to pass to copy_func if it was supplied * * Preemption must be disabled when this is called, and must not * be enabled while using the returned buffer. * This does the copying from user space into a per CPU buffer. * * The @size must not be greater than the size passed in to * trace_user_fault_init(). * * If @copy_func is NULL, trace_user_fault_read() will use copy_from_user(), * otherwise it will call @copy_func. It will call @copy_func with: * * buffer: the per CPU buffer of the @tinfo. * ptr: The pointer @ptr to user space to read * size: The @size of the ptr to read * data: The @data parameter * * It is expected that @copy_func will return 0 on success and non zero * if there was a fault. * * Returns a pointer to the buffer with the content read from @ptr. * Preemption must remain disabled while the caller accesses the * buffer returned by this function. * Returns NULL if there was a fault, or the size passed in is * greater than the size passed to trace_user_fault_init(). 
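 *
 * A minimal caller sketch (illustrative; it mirrors how the
 * trace_marker write path below uses this helper):
 *
 *   guard(preempt_notrace)();
 *   buf = trace_user_fault_read(tinfo, ubuf, cnt, NULL, NULL);
 *   if (!buf)
 *           return -EFAULT;
 *   ... use buf while preemption stays disabled ...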
*/ char *trace_user_fault_read(struct trace_user_buf_info *tinfo, const char __user *ptr, size_t size, trace_user_buf_copy copy_func, void *data) { int cpu = smp_processor_id(); char *buffer = per_cpu_ptr(tinfo->tbuf, cpu)->buf; unsigned int cnt; int trys = 0; int ret; lockdep_assert_preemption_disabled(); /* * It's up to the caller to not try to copy more than it said * it would. */ if (size > tinfo->size) return NULL; /* * This acts similar to a seqcount. The per CPU context switches are * recorded, migration is disabled and preemption is enabled. The * read of the user space memory is copied into the per CPU buffer. * Preemption is disabled again, and if the per CPU context switches count * is still the same, it means the buffer has not been corrupted. * If the count is different, it is assumed the buffer is corrupted * and reading must be tried again. */ do { /* * If for some reason, copy_from_user() always causes a context * switch, this would then cause an infinite loop. * If this task is preempted by another user space task, it * will cause this task to try again. But just in case something * changes where the copying from user space causes another task * to run, prevent this from going into an infinite loop. * 100 tries should be plenty. */ if (WARN_ONCE(trys++ > 100, "Error: Too many tries to read user space")) return NULL; /* Read the current CPU context switch counter */ cnt = nr_context_switches_cpu(cpu); /* * Preemption is going to be enabled, but this task must * remain on this CPU. */ migrate_disable(); /* * Now preemption is being enabled and another task can come in * and use the same buffer and corrupt our data. */ preempt_enable_notrace(); /* Make sure preemption is enabled here */ lockdep_assert_preemption_enabled(); if (copy_func) { ret = copy_func(buffer, ptr, size, data); } else { ret = __copy_from_user(buffer, ptr, size); } preempt_disable_notrace(); migrate_enable(); /* if it faulted, no need to test if the buffer was corrupted */ if (ret) return NULL; /* * Preemption is disabled again, now check the per CPU context * switch counter. If it doesn't match, then another user space * process may have schedule in and corrupted our buffer. In that * case the copying must be retried. 
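 *
 * In short, each pass is: sample this CPU's context-switch count,
 * open a preemption window (with migration disabled so the task keeps
 * using this CPU's buffer), perform the possibly-faulting copy, close
 * the window, and only accept the buffer if the count is unchanged.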
*/ } while (nr_context_switches_cpu(cpu) != cnt); return buffer; } static ssize_t tracing_mark_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; ssize_t written = -ENODEV; unsigned long ip; char *buf; if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER(MARKERS))) return -EINVAL; if ((ssize_t)cnt < 0) return -EINVAL; if (cnt > TRACE_MARKER_MAX_SIZE) cnt = TRACE_MARKER_MAX_SIZE; /* Must have preemption disabled while having access to the buffer */ guard(preempt_notrace)(); buf = trace_user_fault_read(trace_user_buffer, ubuf, cnt, NULL, NULL); if (!buf) return -EFAULT; /* The selftests expect this function to be the IP address */ ip = _THIS_IP_; /* The global trace_marker can go to multiple instances */ if (tr == &global_trace) { guard(rcu)(); list_for_each_entry_rcu(tr, &marker_copies, marker_list) { written = write_marker_to_buffer(tr, buf, cnt, ip); if (written < 0) break; } } else { written = write_marker_to_buffer(tr, buf, cnt, ip); } return written; } static ssize_t write_raw_marker_to_buffer(struct trace_array *tr, const char *buf, size_t cnt) { struct ring_buffer_event *event; struct trace_buffer *buffer; struct raw_data_entry *entry; ssize_t written; size_t size; /* cnt includes both the entry->id and the data behind it. */ size = struct_offset(entry, id) + cnt; buffer = tr->array_buffer.buffer; if (size > ring_buffer_max_event_size(buffer)) return -EINVAL; event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, tracing_gen_ctx()); if (!event) /* Ring buffer disabled, return as if not open for write */ return -EBADF; entry = ring_buffer_event_data(event); unsafe_memcpy(&entry->id, buf, cnt, "id and content already reserved on ring buffer" "'buf' includes the 'id' and the data." 
"'entry' was allocated with cnt from 'id'."); written = cnt; __buffer_unlock_commit(buffer, event); return written; } static ssize_t tracing_mark_raw_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; ssize_t written = -ENODEV; char *buf; if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER(MARKERS))) return -EINVAL; /* The marker must at least have a tag id */ if (cnt < sizeof(unsigned int)) return -EINVAL; /* raw write is all or nothing */ if (cnt > TRACE_MARKER_MAX_SIZE) return -EINVAL; /* Must have preemption disabled while having access to the buffer */ guard(preempt_notrace)(); buf = trace_user_fault_read(trace_user_buffer, ubuf, cnt, NULL, NULL); if (!buf) return -EFAULT; /* The global trace_marker_raw can go to multiple instances */ if (tr == &global_trace) { guard(rcu)(); list_for_each_entry_rcu(tr, &marker_copies, marker_list) { written = write_raw_marker_to_buffer(tr, buf, cnt); if (written < 0) break; } } else { written = write_raw_marker_to_buffer(tr, buf, cnt); } return written; } static int tracing_mark_open(struct inode *inode, struct file *filp) { int ret; scoped_guard(mutex, &trace_user_buffer_mutex) { if (!trace_user_buffer) { ret = user_buffer_init(&trace_user_buffer, TRACE_MARKER_MAX_SIZE); if (ret < 0) return ret; } else { trace_user_buffer->ref++; } } stream_open(inode, filp); ret = tracing_open_generic_tr(inode, filp); if (ret < 0) user_buffer_put(&trace_user_buffer); return ret; } static int tracing_mark_release(struct inode *inode, struct file *file) { user_buffer_put(&trace_user_buffer); return tracing_release_generic_tr(inode, file); } static int tracing_clock_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) seq_printf(m, "%s%s%s%s", i ? " " : "", i == tr->clock_id ? "[" : "", trace_clocks[i].name, i == tr->clock_id ? "]" : ""); seq_putc(m, '\n'); return 0; } int tracing_set_clock(struct trace_array *tr, const char *clockstr) { int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { if (strcmp(trace_clocks[i].name, clockstr) == 0) break; } if (i == ARRAY_SIZE(trace_clocks)) return -EINVAL; guard(mutex)(&trace_types_lock); tr->clock_id = i; ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); /* * New clock may not be consistent with the previous clock. * Reset the buffer so that it doesn't have incomparable timestamps. 
*/ tracing_reset_online_cpus(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); tracing_reset_online_cpus(&tr->max_buffer); #endif if (tr->scratch && !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) { struct trace_scratch *tscratch = tr->scratch; tscratch->clock_id = i; } return 0; } static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; const char *clockstr; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; clockstr = strstrip(buf); ret = tracing_set_clock(tr, clockstr); if (ret) return ret; *fpos += cnt; return cnt; } static int tracing_clock_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = single_open(file, tracing_clock_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; guard(mutex)(&trace_types_lock); if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) seq_puts(m, "delta [absolute]\n"); else seq_puts(m, "[delta] absolute\n"); return 0; } static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe) { if (rbe == this_cpu_read(trace_buffered_event)) return ring_buffer_time_stamp(buffer); return ring_buffer_event_time_stamp(buffer, rbe); } /* * Set or disable using the per CPU trace_buffer_event when possible. 
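 *
 * Calls must be balanced: every call with set == true must eventually be
 * matched by a call with set == false, as a reference count
 * (no_filter_buffering_ref) is kept underneath. Rough usage sketch:
 *
 *	tracing_set_filter_buffering(tr, true);
 *	... code that relies on this setting ...
 *	tracing_set_filter_buffering(tr, false);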
*/ int tracing_set_filter_buffering(struct trace_array *tr, bool set) { guard(mutex)(&trace_types_lock); if (set && tr->no_filter_buffering_ref++) return 0; if (!set) { if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) return -EINVAL; --tr->no_filter_buffering_ref; } return 0; } struct ftrace_buffer_info { struct trace_iterator iter; void *spare; unsigned int spare_cpu; unsigned int spare_size; unsigned int read; }; #ifdef CONFIG_TRACER_SNAPSHOT static int tracing_snapshot_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; struct seq_file *m; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, true); if (IS_ERR(iter)) ret = PTR_ERR(iter); } else { /* Writes still need the seq_file to hold the private data */ ret = -ENOMEM; m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) goto out; iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { kfree(m); goto out; } ret = 0; iter->tr = tr; iter->array_buffer = &tr->max_buffer; iter->cpu_file = tracing_get_cpu(inode); m->private = iter; file->private_data = m; } out: if (ret < 0) trace_array_put(tr); return ret; } static void tracing_swap_cpu_buffer(void *tr) { update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); } static ssize_t tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long val; int ret; ret = tracing_update_buffers(tr); if (ret < 0) return ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; guard(mutex)(&trace_types_lock); if (tr->current_trace->use_max_tr) return -EBUSY; local_irq_disable(); arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) ret = -EBUSY; arch_spin_unlock(&tr->max_lock); local_irq_enable(); if (ret) return ret; switch (val) { case 0: if (iter->cpu_file != RING_BUFFER_ALL_CPUS) return -EINVAL; if (tr->allocated_snapshot) free_snapshot(tr); break; case 1: /* Only allow per-cpu swap if the ring buffer supports it */ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP if (iter->cpu_file != RING_BUFFER_ALL_CPUS) return -EINVAL; #endif if (tr->allocated_snapshot) ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->array_buffer, iter->cpu_file); ret = tracing_arm_snapshot_locked(tr); if (ret) return ret; /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { local_irq_disable(); update_max_tr(tr, current, smp_processor_id(), NULL); local_irq_enable(); } else { smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, (void *)tr, 1); } tracing_disarm_snapshot(tr); break; default: if (tr->allocated_snapshot) { if (iter->cpu_file == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(&tr->max_buffer); else tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); } break; } if (ret >= 0) { *ppos += cnt; ret = cnt; } return ret; } static int tracing_snapshot_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; int ret; ret = tracing_release(inode, file); if (file->f_mode & FMODE_READ) return ret; /* If write only, the seq_file is just a stub */ if (m) kfree(m->private); kfree(m); return 0; } static int tracing_buffers_open(struct inode *inode, struct file *filp); static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos); static int tracing_buffers_release(struct inode *inode, struct 
file *file); static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); static int snapshot_raw_open(struct inode *inode, struct file *filp) { struct ftrace_buffer_info *info; int ret; /* The following checks for tracefs lockdown */ ret = tracing_buffers_open(inode, filp); if (ret < 0) return ret; info = filp->private_data; if (info->iter.trace->use_max_tr) { tracing_buffers_release(inode, filp); return -EBUSY; } info->iter.snapshot = true; info->iter.array_buffer = &info->iter.tr->max_buffer; return ret; } #endif /* CONFIG_TRACER_SNAPSHOT */ static const struct file_operations tracing_thresh_fops = { .open = tracing_open_generic, .read = tracing_thresh_read, .write = tracing_thresh_write, .llseek = generic_file_llseek, }; #ifdef CONFIG_TRACER_MAX_TRACE static const struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic_tr, .read = tracing_max_lat_read, .write = tracing_max_lat_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #endif static const struct file_operations set_tracer_fops = { .open = tracing_open_generic_tr, .read = tracing_set_trace_read, .write = tracing_set_trace_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_pipe_fops = { .open = tracing_open_pipe, .poll = tracing_poll_pipe, .read = tracing_read_pipe, .splice_read = tracing_splice_read_pipe, .release = tracing_release_pipe, }; static const struct file_operations tracing_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_entries_read, .write = tracing_entries_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_syscall_buf_fops = { .open = tracing_open_generic_tr, .read = tracing_syscall_buf_read, .write = tracing_syscall_buf_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_buffer_meta_fops = { .open = tracing_buffer_meta_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_seq_release, }; static const struct file_operations tracing_total_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_total_entries_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_free_buffer_fops = { .open = tracing_open_generic_tr, .write = tracing_free_buffer_write, .release = tracing_free_buffer_release, }; static const struct file_operations tracing_mark_fops = { .open = tracing_mark_open, .write = tracing_mark_write, .release = tracing_mark_release, }; static const struct file_operations tracing_mark_raw_fops = { .open = tracing_mark_open, .write = tracing_mark_raw_write, .release = tracing_mark_release, }; static const struct file_operations trace_clock_fops = { .open = tracing_clock_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_clock_write, }; static const struct file_operations trace_time_stamp_mode_fops = { .open = tracing_time_stamp_mode_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, }; static const struct file_operations last_boot_fops = { .open = tracing_last_boot_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_seq_release, }; #ifdef CONFIG_TRACER_SNAPSHOT static const struct file_operations snapshot_fops = { .open = tracing_snapshot_open, .read = seq_read, .write = 
tracing_snapshot_write, .llseek = tracing_lseek, .release = tracing_snapshot_release, }; static const struct file_operations snapshot_raw_fops = { .open = snapshot_raw_open, .read = tracing_buffers_read, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, }; #endif /* CONFIG_TRACER_SNAPSHOT */ /* * trace_min_max_write - Write a u64 value to a trace_min_max_param struct * @filp: The active open file structure * @ubuf: The userspace provided buffer to read value into * @cnt: The maximum number of bytes to read * @ppos: The current "file" position * * This function implements the write interface for a struct trace_min_max_param. * The filp->private_data must point to a trace_min_max_param structure that * defines where to write the value, the min and the max acceptable values, * and a lock to protect the write. */ static ssize_t trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_min_max_param *param = filp->private_data; u64 val; int err; if (!param) return -EFAULT; err = kstrtoull_from_user(ubuf, cnt, 10, &val); if (err) return err; if (param->lock) mutex_lock(param->lock); if (param->min && val < *param->min) err = -EINVAL; if (param->max && val > *param->max) err = -EINVAL; if (!err) *param->val = val; if (param->lock) mutex_unlock(param->lock); if (err) return err; return cnt; } /* * trace_min_max_read - Read a u64 value from a trace_min_max_param struct * @filp: The active open file structure * @ubuf: The userspace provided buffer to read value into * @cnt: The maximum number of bytes to read * @ppos: The current "file" position * * This function implements the read interface for a struct trace_min_max_param. * The filp->private_data must point to a trace_min_max_param struct with valid * data. 
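 *
 * A minimal, hypothetical setup for such a file (all "my_*" names and
 * "parent" are illustrative only) could look like:
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static struct trace_min_max_param my_param = {
 *		.lock	= NULL,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_value", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);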
*/ static ssize_t trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_min_max_param *param = filp->private_data; char buf[U64_STR_SIZE]; int len; u64 val; if (!param) return -EFAULT; val = *param->val; if (cnt > sizeof(buf)) cnt = sizeof(buf); len = snprintf(buf, sizeof(buf), "%llu\n", val); return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); } const struct file_operations trace_min_max_fops = { .open = tracing_open_generic, .read = trace_min_max_read, .write = trace_min_max_write, }; #define TRACING_LOG_ERRS_MAX 8 #define TRACING_LOG_LOC_MAX 128 #define CMD_PREFIX " Command: " struct err_info { const char **errs; /* ptr to loc-specific array of err strings */ u8 type; /* index into errs -> specific err string */ u16 pos; /* caret position */ u64 ts; }; struct tracing_log_err { struct list_head list; struct err_info info; char loc[TRACING_LOG_LOC_MAX]; /* err location */ char *cmd; /* what caused err */ }; static DEFINE_MUTEX(tracing_err_log_lock); static struct tracing_log_err *alloc_tracing_log_err(int len) { struct tracing_log_err *err; err = kzalloc(sizeof(*err), GFP_KERNEL); if (!err) return ERR_PTR(-ENOMEM); err->cmd = kzalloc(len, GFP_KERNEL); if (!err->cmd) { kfree(err); return ERR_PTR(-ENOMEM); } return err; } static void free_tracing_log_err(struct tracing_log_err *err) { kfree(err->cmd); kfree(err); } static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr, int len) { struct tracing_log_err *err; char *cmd; if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { err = alloc_tracing_log_err(len); if (PTR_ERR(err) != -ENOMEM) tr->n_err_log_entries++; return err; } cmd = kzalloc(len, GFP_KERNEL); if (!cmd) return ERR_PTR(-ENOMEM); err = list_first_entry(&tr->err_log, struct tracing_log_err, list); kfree(err->cmd); err->cmd = cmd; list_del(&err->list); return err; } /** * err_pos - find the position of a string within a command for error careting * @cmd: The tracing command that caused the error * @str: The string to position the caret at within @cmd * * Finds the position of the first occurrence of @str within @cmd. The * return value can be passed to tracing_log_err() for caret placement * within @cmd. * * Returns the index within @cmd of the first occurrence of @str or 0 * if @str was not found. */ unsigned int err_pos(char *cmd, const char *str) { char *found; if (WARN_ON(!strlen(cmd))) return 0; found = strstr(cmd, str); if (found) return found - cmd; return 0; } /** * tracing_log_err - write an error to the tracing error log * @tr: The associated trace array for the error (NULL for top level array) * @loc: A string describing where the error occurred * @cmd: The tracing command that caused the error * @errs: The array of loc-specific static error strings * @type: The index into errs[], which produces the specific static err string * @pos: The position the caret should be placed in the cmd * * Writes an error into tracing/error_log of the form: * * <loc>: error: <text> * Command: <cmd> * ^ * * tracing/error_log is a small log file containing the last * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated * unless there has been a tracing error, and the error log can be * cleared and have its memory freed by writing the empty string in * truncation mode to it i.e. echo > tracing/error_log. * * NOTE: the @errs array along with the @type param are used to * produce a static error string - this string is not copied and saved * when the error is logged - only a pointer to it is saved. 
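 * A hypothetical caller (names are illustrative) would typically do:
 *
 *	static const char *my_errs[] = { "Invalid argument", "Duplicate name" };
 *
 *	tracing_log_err(tr, "my_subsys", cmd, my_errs, 0,
 *			err_pos(cmd, offending_token));
 *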
See * existing callers for examples of how static strings are typically * defined for use with tracing_log_err(). */ void tracing_log_err(struct trace_array *tr, const char *loc, const char *cmd, const char **errs, u8 type, u16 pos) { struct tracing_log_err *err; int len = 0; if (!tr) tr = &global_trace; len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1; guard(mutex)(&tracing_err_log_lock); err = get_tracing_log_err(tr, len); if (PTR_ERR(err) == -ENOMEM) return; snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); err->info.errs = errs; err->info.type = type; err->info.pos = pos; err->info.ts = local_clock(); list_add_tail(&err->list, &tr->err_log); } static void clear_tracing_err_log(struct trace_array *tr) { struct tracing_log_err *err, *next; guard(mutex)(&tracing_err_log_lock); list_for_each_entry_safe(err, next, &tr->err_log, list) { list_del(&err->list); free_tracing_log_err(err); } tr->n_err_log_entries = 0; } static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) { struct trace_array *tr = m->private; mutex_lock(&tracing_err_log_lock); return seq_list_start(&tr->err_log, *pos); } static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_array *tr = m->private; return seq_list_next(v, &tr->err_log, pos); } static void tracing_err_log_seq_stop(struct seq_file *m, void *v) { mutex_unlock(&tracing_err_log_lock); } static void tracing_err_log_show_pos(struct seq_file *m, u16 pos) { u16 i; for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) seq_putc(m, ' '); for (i = 0; i < pos; i++) seq_putc(m, ' '); seq_puts(m, "^\n"); } static int tracing_err_log_seq_show(struct seq_file *m, void *v) { struct tracing_log_err *err = v; if (err) { const char *err_text = err->info.errs[err->info.type]; u64 sec = err->info.ts; u32 nsec; nsec = do_div(sec, NSEC_PER_SEC); seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000, err->loc, err_text); seq_printf(m, "%s", err->cmd); tracing_err_log_show_pos(m, err->info.pos); } return 0; } static const struct seq_operations tracing_err_log_seq_ops = { .start = tracing_err_log_seq_start, .next = tracing_err_log_seq_next, .stop = tracing_err_log_seq_stop, .show = tracing_err_log_seq_show }; static int tracing_err_log_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret = 0; ret = tracing_check_open_get_tr(tr); if (ret) return ret; /* If this file was opened for write, then erase contents */ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) clear_tracing_err_log(tr); if (file->f_mode & FMODE_READ) { ret = seq_open(file, &tracing_err_log_seq_ops); if (!ret) { struct seq_file *m = file->private_data; m->private = tr; } else { trace_array_put(tr); } } return ret; } static ssize_t tracing_err_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { return count; } static int tracing_err_log_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); if (file->f_mode & FMODE_READ) seq_release(inode, file); return 0; } static const struct file_operations tracing_err_log_fops = { .open = tracing_err_log_open, .write = tracing_err_log_write, .read = seq_read, .llseek = tracing_lseek, .release = tracing_err_log_release, }; static int tracing_buffers_open(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct ftrace_buffer_info *info; int ret; ret = tracing_check_open_get_tr(tr); if 
(ret) return ret; info = kvzalloc(sizeof(*info), GFP_KERNEL); if (!info) { trace_array_put(tr); return -ENOMEM; } mutex_lock(&trace_types_lock); info->iter.tr = tr; info->iter.cpu_file = tracing_get_cpu(inode); info->iter.trace = tr->current_trace; info->iter.array_buffer = &tr->array_buffer; info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; filp->private_data = info; tr->trace_ref++; mutex_unlock(&trace_types_lock); ret = nonseekable_open(inode, filp); if (ret < 0) trace_array_put(tr); return ret; } static __poll_t tracing_buffers_poll(struct file *filp, poll_table *poll_table) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; return trace_poll(iter, filp, poll_table); } static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; void *trace_data; int page_size; ssize_t ret = 0; ssize_t size; if (!count) return 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); /* Make sure the spare matches the current sub buffer size */ if (info->spare) { if (page_size != info->spare_size) { ring_buffer_free_read_page(iter->array_buffer->buffer, info->spare_cpu, info->spare); info->spare = NULL; } } if (!info->spare) { info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, iter->cpu_file); if (IS_ERR(info->spare)) { ret = PTR_ERR(info->spare); info->spare = NULL; } else { info->spare_cpu = iter->cpu_file; info->spare_size = page_size; } } if (!info->spare) return ret; /* Do we have previous read data to read? 
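 *
 * info->read is the offset into the spare sub-buffer page that has already
 * been copied to user space; only once it reaches the sub-buffer size is a
 * new page pulled from the ring buffer. For example, with a 4K sub-buffer
 * and 1K reads from user space, four consecutive reads are served from the
 * same spare page before ring_buffer_read_page() is called again.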
*/ if (info->read < page_size) goto read; again: trace_access_lock(iter->cpu_file); ret = ring_buffer_read_page(iter->array_buffer->buffer, info->spare, count, iter->cpu_file, 0); trace_access_unlock(iter->cpu_file); if (ret < 0) { if (trace_empty(iter) && !iter->closed) { if (update_last_data_if_empty(iter->tr)) return 0; if ((filp->f_flags & O_NONBLOCK)) return -EAGAIN; ret = wait_on_pipe(iter, 0); if (ret) return ret; goto again; } return 0; } info->read = 0; read: size = page_size - info->read; if (size > count) size = count; trace_data = ring_buffer_read_page_data(info->spare); ret = copy_to_user(ubuf, trace_data + info->read, size); if (ret == size) return -EFAULT; size -= ret; *ppos += size; info->read += size; return size; } static int tracing_buffers_flush(struct file *file, fl_owner_t id) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; iter->closed = true; /* Make sure the waiters see the new wait_index */ (void)atomic_fetch_inc_release(&iter->wait_index); ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); return 0; } static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; guard(mutex)(&trace_types_lock); iter->tr->trace_ref--; __trace_array_put(iter->tr); if (info->spare) ring_buffer_free_read_page(iter->array_buffer->buffer, info->spare_cpu, info->spare); kvfree(info); return 0; } struct buffer_ref { struct trace_buffer *buffer; void *page; int cpu; refcount_t refcount; }; static void buffer_ref_release(struct buffer_ref *ref) { if (!refcount_dec_and_test(&ref->refcount)) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); } static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; buffer_ref_release(ref); buf->private = 0; } static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; if (refcount_read(&ref->refcount) > INT_MAX/2) return false; refcount_inc(&ref->refcount); return true; } /* Pipe buffer operations for a buffer. */ static const struct pipe_buf_operations buffer_pipe_buf_ops = { .release = buffer_pipe_buf_release, .get = buffer_pipe_buf_get, }; /* * Callback from splice_to_pipe(), if we need to release some pages * at the end of the spd in case we error'ed out in filling the pipe. 
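 *
 * Each page handed to the pipe is wrapped in a struct buffer_ref whose
 * refcount is shared with any pipe buffers that duplicated it, so
 * buffer_ref_release() only hands the read page back to the ring buffer
 * once the last reference is dropped.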
*/ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) { struct buffer_ref *ref = (struct buffer_ref *)spd->partial[i].private; buffer_ref_release(ref); spd->partial[i].private = 0; } static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct page *pages_def[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &buffer_pipe_buf_ops, .spd_release = buffer_spd_release, }; struct buffer_ref *ref; bool woken = false; int page_size; int entries, i; ssize_t ret = 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); if (*ppos & (page_size - 1)) return -EINVAL; if (len & (page_size - 1)) { if (len < page_size) return -EINVAL; len &= (~(page_size - 1)); } if (splice_grow_spd(pipe, &spd)) return -ENOMEM; again: trace_access_lock(iter->cpu_file); entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) { struct page *page; int r; ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (!ref) { ret = -ENOMEM; break; } refcount_set(&ref->refcount, 1); ref->buffer = iter->array_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); if (IS_ERR(ref->page)) { ret = PTR_ERR(ref->page); ref->page = NULL; kfree(ref); break; } ref->cpu = iter->cpu_file; r = ring_buffer_read_page(ref->buffer, ref->page, len, iter->cpu_file, 1); if (r < 0) { ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); break; } page = virt_to_page(ring_buffer_read_page_data(ref->page)); spd.pages[i] = page; spd.partial[i].len = page_size; spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; *ppos += page_size; entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); } trace_access_unlock(iter->cpu_file); spd.nr_pages = i; /* did we read anything? */ if (!spd.nr_pages) { if (ret) goto out; if (woken) goto out; ret = -EAGAIN; if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) goto out; ret = wait_on_pipe(iter, iter->snapshot ? 
0 : iter->tr->buffer_percent); if (ret) goto out; /* No need to wait after waking up when tracing is off */ if (!tracer_tracing_is_on(iter->tr)) goto out; /* Iterate one more time to collect any new data then exit */ woken = true; goto again; } ret = splice_to_pipe(pipe, &spd); out: splice_shrink_spd(&spd); return ret; } static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; int err; if (cmd == TRACE_MMAP_IOCTL_GET_READER) { if (!(file->f_flags & O_NONBLOCK)) { err = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, iter->tr->buffer_percent, NULL, NULL); if (err) return err; } return ring_buffer_map_get_reader(iter->array_buffer->buffer, iter->cpu_file); } else if (cmd) { return -ENOTTY; } /* * An ioctl call with cmd 0 to the ring buffer file will wake up all * waiters */ guard(mutex)(&trace_types_lock); /* Make sure the waiters see the new wait_index */ (void)atomic_fetch_inc_release(&iter->wait_index); ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); return 0; } #ifdef CONFIG_TRACER_MAX_TRACE static int get_snapshot_map(struct trace_array *tr) { int err = 0; /* * Called with mmap_lock held. lockdep would be unhappy if we would now * take trace_types_lock. Instead use the specific * snapshot_trigger_lock. */ spin_lock(&tr->snapshot_trigger_lock); if (tr->snapshot || tr->mapped == UINT_MAX) err = -EBUSY; else tr->mapped++; spin_unlock(&tr->snapshot_trigger_lock); /* Wait for update_max_tr() to observe iter->tr->mapped */ if (tr->mapped == 1) synchronize_rcu(); return err; } static void put_snapshot_map(struct trace_array *tr) { spin_lock(&tr->snapshot_trigger_lock); if (!WARN_ON(!tr->mapped)) tr->mapped--; spin_unlock(&tr->snapshot_trigger_lock); } #else static inline int get_snapshot_map(struct trace_array *tr) { return 0; } static inline void put_snapshot_map(struct trace_array *tr) { } #endif static void tracing_buffers_mmap_close(struct vm_area_struct *vma) { struct ftrace_buffer_info *info = vma->vm_file->private_data; struct trace_iterator *iter = &info->iter; WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file)); put_snapshot_map(iter->tr); } static int tracing_buffers_may_split(struct vm_area_struct *vma, unsigned long addr) { /* * Trace buffer mappings require the complete buffer including * the meta page. Partial mappings are not supported. 
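 *
 * Returning an error from ->may_split keeps the VMA from being split, so a
 * partial munmap() (which would require such a split) fails instead of
 * silently detaching part of the mapped ring buffer.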
*/ return -EINVAL; } static const struct vm_operations_struct tracing_buffers_vmops = { .close = tracing_buffers_mmap_close, .may_split = tracing_buffers_may_split, }; static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; int ret = 0; /* A memmap'ed and backup buffers are not supported for user space mmap */ if (iter->tr->flags & (TRACE_ARRAY_FL_MEMMAP | TRACE_ARRAY_FL_VMALLOC)) return -ENODEV; ret = get_snapshot_map(iter->tr); if (ret) return ret; ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma); if (ret) put_snapshot_map(iter->tr); vma->vm_ops = &tracing_buffers_vmops; return ret; } static const struct file_operations tracing_buffers_fops = { .open = tracing_buffers_open, .read = tracing_buffers_read, .poll = tracing_buffers_poll, .release = tracing_buffers_release, .flush = tracing_buffers_flush, .splice_read = tracing_buffers_splice_read, .unlocked_ioctl = tracing_buffers_ioctl, .mmap = tracing_buffers_mmap, }; static ssize_t tracing_stats_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; struct array_buffer *trace_buf = &tr->array_buffer; int cpu = tracing_get_cpu(inode); struct trace_seq *s; unsigned long cnt; unsigned long long t; unsigned long usec_rem; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "entries: %ld\n", cnt); cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "overrun: %ld\n", cnt); cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "commit overrun: %ld\n", cnt); cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "bytes: %ld\n", cnt); if (trace_clocks[tr->clock_id].in_ns) { /* local or global for trace_clock */ t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); } else { /* counter or tsc mode for trace_clock */ trace_seq_printf(s, "oldest event ts: %llu\n", ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); trace_seq_printf(s, "now ts: %llu\n", ring_buffer_time_stamp(trace_buf->buffer)); } cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "dropped events: %ld\n", cnt); cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "read events: %ld\n", cnt); count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, trace_seq_used(s)); kfree(s); return count; } static const struct file_operations tracing_stats_fops = { .open = tracing_open_generic_tr, .read = tracing_stats_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #ifdef CONFIG_DYNAMIC_FTRACE static ssize_t tracing_read_dyn_info(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { ssize_t ret; char *buf; int r; /* 512 should be plenty to hold the amount needed */ #define DYN_INFO_BUF_SIZE 512 buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; r = scnprintf(buf, DYN_INFO_BUF_SIZE, "%ld pages:%ld groups: %ld\n" "ftrace boot update time = %llu (ns)\n" "ftrace module total update time = %llu (ns)\n", 
ftrace_update_tot_cnt, ftrace_number_of_pages, ftrace_number_of_groups, ftrace_update_time, ftrace_total_mod_time); ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); kfree(buf); return ret; } static const struct file_operations tracing_dyn_info_fops = { .open = tracing_open_generic, .read = tracing_read_dyn_info, .llseek = generic_file_llseek, }; #endif /* CONFIG_DYNAMIC_FTRACE */ #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) static void ftrace_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { tracing_snapshot_instance(tr); } static void ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) { if (*count <= 0) return; (*count)--; } tracing_snapshot_instance(tr); } static int ftrace_snapshot_print(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; seq_printf(m, "%ps:", (void *)ip); seq_puts(m, "snapshot"); if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) seq_printf(m, ":count=%ld\n", *count); else seq_puts(m, ":unlimited\n"); return 0; } static int ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *init_data, void **data) { struct ftrace_func_mapper *mapper = *data; if (!mapper) { mapper = allocate_ftrace_func_mapper(); if (!mapper) return -ENOMEM; *data = mapper; } return ftrace_func_mapper_add_ip(mapper, ip, init_data); } static void ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *data) { struct ftrace_func_mapper *mapper = data; if (!ip) { if (!mapper) return; free_ftrace_func_mapper(mapper, NULL); return; } ftrace_func_mapper_remove_ip(mapper, ip); } static struct ftrace_probe_ops snapshot_probe_ops = { .func = ftrace_snapshot, .print = ftrace_snapshot_print, }; static struct ftrace_probe_ops snapshot_count_probe_ops = { .func = ftrace_count_snapshot, .print = ftrace_snapshot_print, .init = ftrace_snapshot_init, .free = ftrace_snapshot_free, }; static int ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, char *glob, char *cmd, char *param, int enable) { struct ftrace_probe_ops *ops; void *count = (void *)-1; char *number; int ret; if (!tr) return -ENODEV; /* hash funcs only work with set_ftrace_filter */ if (!enable) return -EINVAL; ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; if (glob[0] == '!') { ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); if (!ret) tracing_disarm_snapshot(tr); return ret; } if (!param) goto out_reg; number = strsep(&param, ":"); if (!strlen(number)) goto out_reg; /* * We use the callback data field (which is a pointer) * as our counter. */ ret = kstrtoul(number, 0, (unsigned long *)&count); if (ret) return ret; out_reg: ret = tracing_arm_snapshot(tr); if (ret < 0) return ret; ret = register_ftrace_function_probe(glob, tr, ops, count); if (ret < 0) tracing_disarm_snapshot(tr); return ret < 0 ? 
ret : 0; } static struct ftrace_func_command ftrace_snapshot_cmd = { .name = "snapshot", .func = ftrace_trace_snapshot_callback, }; static __init int register_snapshot_cmd(void) { return register_ftrace_command(&ftrace_snapshot_cmd); } #else static inline __init int register_snapshot_cmd(void) { return 0; } #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ static struct dentry *tracing_get_dentry(struct trace_array *tr) { /* Top directory uses NULL as the parent */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return NULL; if (WARN_ON(!tr->dir)) return ERR_PTR(-ENODEV); /* All sub buffers have a descriptor */ return tr->dir; } static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) { struct dentry *d_tracer; if (tr->percpu_dir) return tr->percpu_dir; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); MEM_FAIL(!tr->percpu_dir, "Could not create tracefs directory 'per_cpu/%d'\n", cpu); return tr->percpu_dir; } static struct dentry * trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, void *data, long cpu, const struct file_operations *fops) { struct dentry *ret = trace_create_file(name, mode, parent, data, fops); if (ret) /* See tracing_get_cpu() */ d_inode(ret)->i_cdev = (void *)(cpu + 1); return ret; } static void tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) { struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); struct dentry *d_cpu; char cpu_dir[30]; /* 30 characters should be more than enough */ if (!d_percpu) return; snprintf(cpu_dir, 30, "cpu%ld", cpu); d_cpu = tracefs_create_dir(cpu_dir, d_percpu); if (!d_cpu) { pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); return; } /* per cpu trace_pipe */ trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_pipe_fops); /* per cpu trace */ trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu, tr, cpu, &tracing_fops); trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_buffers_fops); trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_stats_fops); trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_entries_fops); if (tr->range_addr_start) trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_buffer_meta_fops); #ifdef CONFIG_TRACER_SNAPSHOT if (!tr->range_addr_start) { trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu, tr, cpu, &snapshot_fops); trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu, tr, cpu, &snapshot_raw_fops); } #endif } #ifdef CONFIG_FTRACE_SELFTEST /* Let selftest have access to static functions in this file */ #include "trace_selftest.c" #endif static ssize_t trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; char *buf; if (topt->flags->val & topt->opt->bit) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; if (!!(topt->flags->val & topt->opt->bit) != val) { guard(mutex)(&trace_types_lock); ret = __set_tracer_option(topt->tr, topt->flags, topt->opt, !val); if (ret) return ret; } *ppos += 
cnt; return cnt; } static int tracing_open_options(struct inode *inode, struct file *filp) { struct trace_option_dentry *topt = inode->i_private; int ret; ret = tracing_check_open_get_tr(topt->tr); if (ret) return ret; filp->private_data = inode->i_private; return 0; } static int tracing_release_options(struct inode *inode, struct file *file) { struct trace_option_dentry *topt = file->private_data; trace_array_put(topt->tr); return 0; } static const struct file_operations trace_options_fops = { .open = tracing_open_options, .read = trace_options_read, .write = trace_options_write, .llseek = generic_file_llseek, .release = tracing_release_options, }; /* * In order to pass in both the trace_array descriptor as well as the index * to the flag that the trace option file represents, the trace_array * has a character array of trace_flags_index[], which holds the index * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. * The address of this character array is passed to the flag option file * read/write callbacks. * * In order to extract both the index and the trace_array descriptor, * get_tr_index() uses the following algorithm. * * idx = *ptr; * * As the pointer itself contains the address of the index (remember * index[1] == 1). * * Then to get the trace_array descriptor, by subtracting that index * from the ptr, we get to the start of the index itself. * * ptr - idx == &index[0] * * Then a simple container_of() from that pointer gets us to the * trace_array descriptor. */ static void get_tr_index(void *data, struct trace_array **ptr, unsigned int *pindex) { *pindex = *(unsigned char *)data; *ptr = container_of(data - *pindex, struct trace_array, trace_flags_index); } static ssize_t trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; char *buf; get_tr_index(tr_index, &tr, &index); if (tr->trace_flags & (1ULL << index)) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; unsigned long val; int ret; get_tr_index(tr_index, &tr, &index); ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = set_tracer_flag(tr, 1ULL << index, val); mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations trace_options_core_fops = { .open = tracing_open_generic, .read = trace_options_core_read, .write = trace_options_core_write, .llseek = generic_file_llseek, }; struct dentry *trace_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *ret; ret = tracefs_create_file(name, mode, parent, data, fops); if (!ret) pr_warn("Could not create tracefs '%s' entry\n", name); return ret; } static struct dentry *trace_options_init_dentry(struct trace_array *tr) { struct dentry *d_tracer; if (tr->options) return tr->options; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->options = tracefs_create_dir("options", d_tracer); if (!tr->options) { pr_warn("Could not create tracefs directory 'options'\n"); return NULL; } return tr->options; } static void 
create_trace_option_file(struct trace_array *tr, struct trace_option_dentry *topt, struct tracer_flags *flags, struct tracer_opt *opt) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return; topt->flags = flags; topt->opt = opt; topt->tr = tr; topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE, t_options, topt, &trace_options_fops); } static int create_trace_option_files(struct trace_array *tr, struct tracer *tracer, struct tracer_flags *flags) { struct trace_option_dentry *topts; struct trace_options *tr_topts; struct tracer_opt *opts; int cnt; if (!flags || !flags->opts) return 0; opts = flags->opts; for (cnt = 0; opts[cnt].name; cnt++) ; topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); if (!topts) return 0; tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), GFP_KERNEL); if (!tr_topts) { kfree(topts); return -ENOMEM; } tr->topts = tr_topts; tr->topts[tr->nr_topts].tracer = tracer; tr->topts[tr->nr_topts].topts = topts; tr->nr_topts++; for (cnt = 0; opts[cnt].name; cnt++) { create_trace_option_file(tr, &topts[cnt], flags, &opts[cnt]); MEM_FAIL(topts[cnt].entry == NULL, "Failed to create trace option: %s", opts[cnt].name); } return 0; } static int get_global_flags_val(struct tracer *tracer) { struct tracers *t; list_for_each_entry(t, &global_trace.tracers, list) { if (t->tracer != tracer) continue; if (!t->flags) return -1; return t->flags->val; } return -1; } static int add_tracer_options(struct trace_array *tr, struct tracers *t) { struct tracer *tracer = t->tracer; struct tracer_flags *flags = t->flags ?: tracer->flags; if (!flags) return 0; /* Only add tracer options after update_tracer_options finish */ if (!tracer_options_updated) return 0; return create_trace_option_files(tr, tracer, flags); } static int add_tracer(struct trace_array *tr, struct tracer *tracer) { struct tracer_flags *flags; struct tracers *t; int ret; /* Only enable if the directory has been created already. */ if (!tr->dir && !(tr->flags & TRACE_ARRAY_FL_GLOBAL)) return 0; /* * If this is an instance, only create flags for tracers * the instance may have. */ if (!trace_ok_for_array(tracer, tr)) return 0; t = kmalloc(sizeof(*t), GFP_KERNEL); if (!t) return -ENOMEM; t->tracer = tracer; t->flags = NULL; list_add(&t->list, &tr->tracers); flags = tracer->flags; if (!flags) { if (!tracer->default_flags) return 0; /* * If the tracer defines default flags, it means the flags are * per trace instance. 
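 *
 * A per-instance copy is allocated below and, for non-global instances,
 * seeded with the current value from global_trace. A tracer opting into
 * per-instance flags would, roughly (hypothetical example), declare:
 *
 *	static struct tracer_flags my_default_flags = {
 *		.val	= 0,
 *		.opts	= my_opts,
 *	};
 *
 *	static struct tracer my_tracer = {
 *		.name		= "my_tracer",
 *		.default_flags	= &my_default_flags,
 *	};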
*/ flags = kmalloc(sizeof(*flags), GFP_KERNEL); if (!flags) return -ENOMEM; *flags = *tracer->default_flags; flags->trace = tracer; t->flags = flags; /* If this is an instance, inherit the global_trace flags */ if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) { int val = get_global_flags_val(tracer); if (!WARN_ON_ONCE(val < 0)) flags->val = val; } } ret = add_tracer_options(tr, t); if (ret < 0) { list_del(&t->list); kfree(t->flags); kfree(t); } return ret; } static struct dentry * create_trace_option_core_file(struct trace_array *tr, const char *option, long index) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return NULL; return trace_create_file(option, TRACE_MODE_WRITE, t_options, (void *)&tr->trace_flags_index[index], &trace_options_core_fops); } static void create_trace_options_dir(struct trace_array *tr) { struct dentry *t_options; bool top_level = tr == &global_trace; int i; t_options = trace_options_init_dentry(tr); if (!t_options) return; for (i = 0; trace_options[i]; i++) { if (top_level || !((1ULL << i) & TOP_LEVEL_TRACE_FLAGS)) { create_trace_option_core_file(tr, trace_options[i], i); } } } static ssize_t rb_simple_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tracer_tracing_is_on(tr); r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; struct trace_buffer *buffer = tr->array_buffer.buffer; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (buffer) { guard(mutex)(&trace_types_lock); if (!!val == tracer_tracing_is_on(tr)) { val = 0; /* do nothing */ } else if (val) { tracer_tracing_on(tr); if (tr->current_trace->start) tr->current_trace->start(tr); } else { tracer_tracing_off(tr); if (tr->current_trace->stop) tr->current_trace->stop(tr); /* Wake up any waiters */ ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS); } } (*ppos)++; return cnt; } static const struct file_operations rb_simple_fops = { .open = tracing_open_generic_tr, .read = rb_simple_read, .write = rb_simple_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static ssize_t buffer_percent_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tr->buffer_percent; r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t buffer_percent_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val > 100) return -EINVAL; tr->buffer_percent = val; (*ppos)++; return cnt; } static const struct file_operations buffer_percent_fops = { .open = tracing_open_generic_tr, .read = buffer_percent_read, .write = buffer_percent_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static ssize_t buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; size_t size; char buf[64]; int order; int r; order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); size = (PAGE_SIZE << order) / 1024; r = sprintf(buf, "%zd\n", size); return simple_read_from_buffer(ubuf, 
cnt, ppos, buf, r); } static ssize_t buffer_subbuf_size_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; unsigned long val; int old_order; int order; int pages; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; val *= 1024; /* value passed in is in KB */ pages = DIV_ROUND_UP(val, PAGE_SIZE); order = fls(pages - 1); /* limit between 1 and 128 system pages */ if (order < 0 || order > 7) return -EINVAL; /* Do not allow tracing while changing the order of the ring buffer */ tracing_stop_tr(tr); old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); if (old_order == order) goto out; ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order); if (ret) goto out; #ifdef CONFIG_TRACER_MAX_TRACE if (!tr->allocated_snapshot) goto out_max; ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order); if (ret) { /* Put back the old order */ cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order); if (WARN_ON_ONCE(cnt)) { /* * AARGH! We are left with different orders! * The max buffer is our "snapshot" buffer. * When a tracer needs a snapshot (one of the * latency tracers), it swaps the max buffer * with the saved snap shot. We succeeded to * update the order of the main buffer, but failed to * update the order of the max buffer. But when we tried * to reset the main buffer to the original size, we * failed there too. This is very unlikely to * happen, but if it does, warn and kill all * tracing. */ tracing_disabled = 1; } goto out; } out_max: #endif (*ppos)++; out: if (ret) cnt = ret; tracing_start_tr(tr); return cnt; } static const struct file_operations buffer_subbuf_size_fops = { .open = tracing_open_generic_tr, .read = buffer_subbuf_size_read, .write = buffer_subbuf_size_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static struct dentry *trace_instance_dir; static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); #ifdef CONFIG_MODULES static int make_mod_delta(struct module *mod, void *data) { struct trace_module_delta *module_delta; struct trace_scratch *tscratch; struct trace_mod_entry *entry; struct trace_array *tr = data; int i; tscratch = tr->scratch; module_delta = READ_ONCE(tr->module_delta); for (i = 0; i < tscratch->nr_entries; i++) { entry = &tscratch->entries[i]; if (strcmp(mod->name, entry->mod_name)) continue; if (mod->state == MODULE_STATE_GOING) module_delta->delta[i] = 0; else module_delta->delta[i] = (unsigned long)mod->mem[MOD_TEXT].base - entry->mod_addr; break; } return 0; } #else static int make_mod_delta(struct module *mod, void *data) { return 0; } #endif static int mod_addr_comp(const void *a, const void *b, const void *data) { const struct trace_mod_entry *e1 = a; const struct trace_mod_entry *e2 = b; return e1->mod_addr > e2->mod_addr ? 
1 : -1; } static void setup_trace_scratch(struct trace_array *tr, struct trace_scratch *tscratch, unsigned int size) { struct trace_module_delta *module_delta; struct trace_mod_entry *entry; int i, nr_entries; if (!tscratch) return; tr->scratch = tscratch; tr->scratch_size = size; if (tscratch->text_addr) tr->text_delta = (unsigned long)_text - tscratch->text_addr; if (struct_size(tscratch, entries, tscratch->nr_entries) > size) goto reset; /* Check if each module name is a valid string */ for (i = 0; i < tscratch->nr_entries; i++) { int n; entry = &tscratch->entries[i]; for (n = 0; n < MODULE_NAME_LEN; n++) { if (entry->mod_name[n] == '\0') break; if (!isprint(entry->mod_name[n])) goto reset; } if (n == MODULE_NAME_LEN) goto reset; } /* Sort the entries so that we can find appropriate module from address. */ nr_entries = tscratch->nr_entries; sort_r(tscratch->entries, nr_entries, sizeof(struct trace_mod_entry), mod_addr_comp, NULL, NULL); if (IS_ENABLED(CONFIG_MODULES)) { module_delta = kzalloc(struct_size(module_delta, delta, nr_entries), GFP_KERNEL); if (!module_delta) { pr_info("module_delta allocation failed. Not able to decode module address."); goto reset; } init_rcu_head(&module_delta->rcu); } else module_delta = NULL; WRITE_ONCE(tr->module_delta, module_delta); /* Scan modules to make text delta for modules. */ module_for_each_mod(make_mod_delta, tr); /* Set trace_clock as the same of the previous boot. */ if (tscratch->clock_id != tr->clock_id) { if (tscratch->clock_id >= ARRAY_SIZE(trace_clocks) || tracing_set_clock(tr, trace_clocks[tscratch->clock_id].name) < 0) { pr_info("the previous trace_clock info is not valid."); goto reset; } } return; reset: /* Invalid trace modules */ memset(tscratch, 0, size); } static int allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) { enum ring_buffer_flags rb_flags; struct trace_scratch *tscratch; unsigned int scratch_size = 0; rb_flags = tr->trace_flags & TRACE_ITER(OVERWRITE) ? RB_FL_OVERWRITE : 0; buf->tr = tr; if (tr->range_addr_start && tr->range_addr_size) { /* Add scratch buffer to handle 128 modules */ buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0, tr->range_addr_start, tr->range_addr_size, struct_size(tscratch, entries, 128)); tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size); setup_trace_scratch(tr, tscratch, scratch_size); /* * This is basically the same as a mapped buffer, * with the same restrictions. */ tr->mapped++; } else { buf->buffer = ring_buffer_alloc(size, rb_flags); } if (!buf->buffer) return -ENOMEM; buf->data = alloc_percpu(struct trace_array_cpu); if (!buf->data) { ring_buffer_free(buf->buffer); buf->buffer = NULL; return -ENOMEM; } /* Allocate the first page for all buffers */ set_buffer_entries(&tr->array_buffer, ring_buffer_size(tr->array_buffer.buffer, 0)); return 0; } static void free_trace_buffer(struct array_buffer *buf) { if (buf->buffer) { ring_buffer_free(buf->buffer); buf->buffer = NULL; free_percpu(buf->data); buf->data = NULL; } } static int allocate_trace_buffers(struct trace_array *tr, int size) { int ret; ret = allocate_trace_buffer(tr, &tr->array_buffer, size); if (ret) return ret; #ifdef CONFIG_TRACER_MAX_TRACE /* Fix mapped buffer trace arrays do not have snapshot buffers */ if (tr->range_addr_start) return 0; ret = allocate_trace_buffer(tr, &tr->max_buffer, allocate_snapshot ? 
size : 1); if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { free_trace_buffer(&tr->array_buffer); return -ENOMEM; } tr->allocated_snapshot = allocate_snapshot; allocate_snapshot = false; #endif return 0; } static void free_trace_buffers(struct trace_array *tr) { if (!tr) return; free_trace_buffer(&tr->array_buffer); kfree(tr->module_delta); #ifdef CONFIG_TRACER_MAX_TRACE free_trace_buffer(&tr->max_buffer); #endif } static void init_trace_flags_index(struct trace_array *tr) { int i; /* Used by the trace options files */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) tr->trace_flags_index[i] = i; } static int __update_tracer(struct trace_array *tr) { struct tracer *t; int ret = 0; for (t = trace_types; t && !ret; t = t->next) ret = add_tracer(tr, t); return ret; } static __init int __update_tracer_options(struct trace_array *tr) { struct tracers *t; int ret = 0; list_for_each_entry(t, &tr->tracers, list) { ret = add_tracer_options(tr, t); if (ret < 0) break; } return ret; } static __init void update_tracer_options(void) { struct trace_array *tr; guard(mutex)(&trace_types_lock); tracer_options_updated = true; list_for_each_entry(tr, &ftrace_trace_arrays, list) __update_tracer_options(tr); } /* Must have trace_types_lock held */ struct trace_array *trace_array_find(const char *instance) { struct trace_array *tr, *found = NULL; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, instance) == 0) { found = tr; break; } } return found; } struct trace_array *trace_array_find_get(const char *instance) { struct trace_array *tr; guard(mutex)(&trace_types_lock); tr = trace_array_find(instance); if (tr) tr->ref++; return tr; } static int trace_array_create_dir(struct trace_array *tr) { int ret; tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); if (!tr->dir) return -EINVAL; ret = event_trace_add_tracer(tr->dir, tr); if (ret) { tracefs_remove(tr->dir); return ret; } init_tracer_tracefs(tr, tr->dir); ret = __update_tracer(tr); if (ret) { event_trace_del_tracer(tr); tracefs_remove(tr->dir); return ret; } return 0; } static struct trace_array * trace_array_create_systems(const char *name, const char *systems, unsigned long range_addr_start, unsigned long range_addr_size) { struct trace_array *tr; int ret; ret = -ENOMEM; tr = kzalloc(sizeof(*tr), GFP_KERNEL); if (!tr) return ERR_PTR(ret); tr->name = kstrdup(name, GFP_KERNEL); if (!tr->name) goto out_free_tr; if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) goto out_free_tr; if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) goto out_free_tr; if (systems) { tr->system_names = kstrdup_const(systems, GFP_KERNEL); if (!tr->system_names) goto out_free_tr; } /* Only for boot up memory mapped ring buffers */ tr->range_addr_start = range_addr_start; tr->range_addr_size = range_addr_size; tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; cpumask_copy(tr->tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&tr->start_lock); tr->syscall_buf_sz = global_trace.syscall_buf_sz; tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; #ifdef CONFIG_TRACER_MAX_TRACE spin_lock_init(&tr->snapshot_trigger_lock); #endif tr->current_trace = &nop_trace; tr->current_trace_flags = nop_trace.flags; INIT_LIST_HEAD(&tr->systems); INIT_LIST_HEAD(&tr->events); INIT_LIST_HEAD(&tr->hist_vars); INIT_LIST_HEAD(&tr->err_log); INIT_LIST_HEAD(&tr->tracers); INIT_LIST_HEAD(&tr->marker_list); #ifdef CONFIG_MODULES INIT_LIST_HEAD(&tr->mod_events); #endif if (allocate_trace_buffers(tr, trace_buf_size) < 0) goto 
out_free_tr; /* The ring buffer is defaultly expanded */ trace_set_ring_buffer_expanded(tr); if (ftrace_allocate_ftrace_ops(tr) < 0) goto out_free_tr; ftrace_init_trace_array(tr); init_trace_flags_index(tr); if (trace_instance_dir) { ret = trace_array_create_dir(tr); if (ret) goto out_free_tr; } else __trace_early_add_events(tr); list_add(&tr->list, &ftrace_trace_arrays); tr->ref++; return tr; out_free_tr: ftrace_free_ftrace_ops(tr); free_trace_buffers(tr); free_cpumask_var(tr->pipe_cpumask); free_cpumask_var(tr->tracing_cpumask); kfree_const(tr->system_names); kfree(tr->range_name); kfree(tr->name); kfree(tr); return ERR_PTR(ret); } static struct trace_array *trace_array_create(const char *name) { return trace_array_create_systems(name, NULL, 0, 0); } static int instance_mkdir(const char *name) { struct trace_array *tr; int ret; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); ret = -EEXIST; if (trace_array_find(name)) return -EEXIST; tr = trace_array_create(name); ret = PTR_ERR_OR_ZERO(tr); return ret; } #ifdef CONFIG_MMU static u64 map_pages(unsigned long start, unsigned long size) { unsigned long vmap_start, vmap_end; struct vm_struct *area; int ret; area = get_vm_area(size, VM_IOREMAP); if (!area) return 0; vmap_start = (unsigned long) area->addr; vmap_end = vmap_start + size; ret = vmap_page_range(vmap_start, vmap_end, start, pgprot_nx(PAGE_KERNEL)); if (ret < 0) { free_vm_area(area); return 0; } return (u64)vmap_start; } #else static inline u64 map_pages(unsigned long start, unsigned long size) { return 0; } #endif /** * trace_array_get_by_name - Create/Lookup a trace array, given its name. * @name: The name of the trace array to be looked up/created. * @systems: A list of systems to create event directories for (NULL for all) * * Returns pointer to trace array with given name. * NULL, if it cannot be created. * * NOTE: This function increments the reference counter associated with the * trace array returned. This makes sure it cannot be freed while in use. * Use trace_array_put() once the trace array is no longer needed. * If the trace_array is to be freed, trace_array_destroy() needs to * be called after the trace_array_put(), or simply let user space delete * it from the tracefs instances directory. But until the * trace_array_put() is called, user space can not delete it. * */ struct trace_array *trace_array_get_by_name(const char *name, const char *systems) { struct trace_array *tr; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) { tr->ref++; return tr; } } tr = trace_array_create_systems(name, systems, 0, 0); if (IS_ERR(tr)) tr = NULL; else tr->ref++; return tr; } EXPORT_SYMBOL_GPL(trace_array_get_by_name); static int __remove_instance(struct trace_array *tr) { int i; /* Reference counter for a newly created trace array = 1. 
*/ if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) return -EBUSY; list_del(&tr->list); /* Disable all the flags that were enabled coming in */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { if ((1ULL << i) & ZEROED_TRACE_FLAGS) set_tracer_flag(tr, 1ULL << i, 0); } if (printk_trace == tr) update_printk_trace(&global_trace); if (update_marker_trace(tr, 0)) synchronize_rcu(); tracing_set_nop(tr); clear_ftrace_function_probes(tr); event_trace_del_tracer(tr); ftrace_clear_pids(tr); ftrace_destroy_function_files(tr); tracefs_remove(tr->dir); free_percpu(tr->last_func_repeats); free_trace_buffers(tr); clear_tracing_err_log(tr); free_tracers(tr); if (tr->range_name) { reserve_mem_release_by_name(tr->range_name); kfree(tr->range_name); } if (tr->flags & TRACE_ARRAY_FL_VMALLOC) vfree((void *)tr->range_addr_start); for (i = 0; i < tr->nr_topts; i++) { kfree(tr->topts[i].topts); } kfree(tr->topts); free_cpumask_var(tr->pipe_cpumask); free_cpumask_var(tr->tracing_cpumask); kfree_const(tr->system_names); kfree(tr->name); kfree(tr); return 0; } int trace_array_destroy(struct trace_array *this_tr) { struct trace_array *tr; if (!this_tr) return -EINVAL; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); /* Making sure trace array exists before destroying it. */ list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr == this_tr) return __remove_instance(tr); } return -ENODEV; } EXPORT_SYMBOL_GPL(trace_array_destroy); static int instance_rmdir(const char *name) { struct trace_array *tr; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); tr = trace_array_find(name); if (!tr) return -ENODEV; return __remove_instance(tr); } static __init void create_trace_instances(struct dentry *d_tracer) { struct trace_array *tr; trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, instance_mkdir, instance_rmdir); if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) return; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->name) continue; if (MEM_FAIL(trace_array_create_dir(tr) < 0, "Failed to create instance directory\n")) return; } } static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) { int cpu; trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer, tr, &show_traces_fops); trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer, tr, &set_tracer_fops); trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer, tr, &tracing_cpumask_fops); trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer, tr, &tracing_iter_fops); trace_create_file("trace", TRACE_MODE_WRITE, d_tracer, tr, &tracing_fops); trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer, tr, &tracing_pipe_fops); trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer, tr, &tracing_entries_fops); trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer, tr, &tracing_total_entries_fops); trace_create_file("free_buffer", 0200, d_tracer, tr, &tracing_free_buffer_fops); trace_create_file("trace_marker", 0220, d_tracer, tr, &tracing_mark_fops); tr->trace_marker_file = __find_event_file(tr, "ftrace", "print"); trace_create_file("trace_marker_raw", 0220, d_tracer, tr, &tracing_mark_raw_fops); trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr, &trace_clock_fops); trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer, tr, &rb_simple_fops); trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr, 
&trace_time_stamp_mode_fops); tr->buffer_percent = 50; trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer, tr, &buffer_percent_fops); trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer, tr, &buffer_subbuf_size_fops); trace_create_file("syscall_user_buf_size", TRACE_MODE_WRITE, d_tracer, tr, &tracing_syscall_buf_fops); create_trace_options_dir(tr); #ifdef CONFIG_TRACER_MAX_TRACE trace_create_maxlat_file(tr, d_tracer); #endif if (ftrace_create_function_files(tr, d_tracer)) MEM_FAIL(1, "Could not allocate function filter files"); if (tr->range_addr_start) { trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer, tr, &last_boot_fops); #ifdef CONFIG_TRACER_SNAPSHOT } else { trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer, tr, &snapshot_fops); #endif } trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer, tr, &tracing_err_log_fops); for_each_tracing_cpu(cpu) tracing_init_tracefs_percpu(tr, cpu); ftrace_init_tracefs(tr, d_tracer); } #ifdef CONFIG_TRACEFS_AUTOMOUNT_DEPRECATED static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore) { struct vfsmount *mnt; struct file_system_type *type; struct fs_context *fc; int ret; /* * To maintain backward compatibility for tools that mount * debugfs to get to the tracing facility, tracefs is automatically * mounted to the debugfs/tracing directory. */ type = get_fs_type("tracefs"); if (!type) return NULL; fc = fs_context_for_submount(type, mntpt); put_filesystem(type); if (IS_ERR(fc)) return ERR_CAST(fc); pr_warn("NOTICE: Automounting of tracing to debugfs is deprecated and will be removed in 2030\n"); ret = vfs_parse_fs_string(fc, "source", "tracefs"); if (!ret) mnt = fc_mount(fc); else mnt = ERR_PTR(ret); put_fs_context(fc); return mnt; } #endif /** * tracing_init_dentry - initialize top level trace array * * This is called when creating files or directories in the tracing * directory. It is called via fs_initcall() by any of the boot up code * and expects to return the dentry of the top level tracing directory. */ int tracing_init_dentry(void) { struct trace_array *tr = &global_trace; if (security_locked_down(LOCKDOWN_TRACEFS)) { pr_warn("Tracing disabled due to lockdown\n"); return -EPERM; } /* The top level trace array uses NULL as parent */ if (tr->dir) return 0; if (WARN_ON(!tracefs_initialized())) return -ENODEV; #ifdef CONFIG_TRACEFS_AUTOMOUNT_DEPRECATED /* * As there may still be users that expect the tracing * files to exist in debugfs/tracing, we must automount * the tracefs file system there, so older tools still * work with the newer kernel. 
*/ tr->dir = debugfs_create_automount("tracing", NULL, trace_automount, NULL); #endif return 0; } extern struct trace_eval_map *__start_ftrace_eval_maps[]; extern struct trace_eval_map *__stop_ftrace_eval_maps[]; static struct workqueue_struct *eval_map_wq __initdata; static struct work_struct eval_map_work __initdata; static struct work_struct tracerfs_init_work __initdata; static void __init eval_map_work_func(struct work_struct *work) { int len; len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; trace_event_update_with_eval_map(NULL, __start_ftrace_eval_maps, len); } static int __init trace_eval_init(void) { INIT_WORK(&eval_map_work, eval_map_work_func); eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0); if (!eval_map_wq) { pr_err("Unable to allocate eval_map_wq\n"); /* Do work here */ eval_map_work_func(&eval_map_work); return -ENOMEM; } queue_work(eval_map_wq, &eval_map_work); return 0; } subsys_initcall(trace_eval_init); static int __init trace_eval_sync(void) { /* Make sure the eval map updates are finished */ if (eval_map_wq) destroy_workqueue(eval_map_wq); return 0; } late_initcall_sync(trace_eval_sync); #ifdef CONFIG_MODULES bool module_exists(const char *module) { /* All modules have the symbol __this_module */ static const char this_mod[] = "__this_module"; char modname[MODULE_NAME_LEN + sizeof(this_mod) + 2]; unsigned long val; int n; n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); if (n > sizeof(modname) - 1) return false; val = module_kallsyms_lookup_name(modname); return val != 0; } static void trace_module_add_evals(struct module *mod) { /* * Modules with bad taint do not have events created, do * not bother with enums either. */ if (trace_module_has_bad_taint(mod)) return; /* Even if no trace_evals, this need to sanitize field types. 
*/ trace_event_update_with_eval_map(mod, mod->trace_evals, mod->num_trace_evals); } #ifdef CONFIG_TRACE_EVAL_MAP_FILE static void trace_module_remove_evals(struct module *mod) { union trace_eval_map_item *map; union trace_eval_map_item **last = &trace_eval_maps; if (!mod->num_trace_evals) return; guard(mutex)(&trace_eval_mutex); map = trace_eval_maps; while (map) { if (map->head.mod == mod) break; map = trace_eval_jmp_to_tail(map); last = &map->tail.next; map = map->tail.next; } if (!map) return; *last = trace_eval_jmp_to_tail(map)->tail.next; kfree(map); } #else static inline void trace_module_remove_evals(struct module *mod) { } #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static void trace_module_record(struct module *mod, bool add) { struct trace_array *tr; unsigned long flags; list_for_each_entry(tr, &ftrace_trace_arrays, list) { flags = tr->flags & (TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT); /* Update any persistent trace array that has already been started */ if (flags == TRACE_ARRAY_FL_BOOT && add) { guard(mutex)(&scratch_mutex); save_mod(mod, tr); } else if (flags & TRACE_ARRAY_FL_LAST_BOOT) { /* Update delta if the module loaded in previous boot */ make_mod_delta(mod, tr); } } } static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; switch (val) { case MODULE_STATE_COMING: trace_module_add_evals(mod); trace_module_record(mod, true); break; case MODULE_STATE_GOING: trace_module_remove_evals(mod); trace_module_record(mod, false); break; } return NOTIFY_OK; } static struct notifier_block trace_module_nb = { .notifier_call = trace_module_notify, .priority = 0, }; #endif /* CONFIG_MODULES */ static __init void tracer_init_tracefs_work_func(struct work_struct *work) { event_trace_init(); init_tracer_tracefs(&global_trace, NULL); ftrace_init_tracefs_toplevel(&global_trace, NULL); trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL, &global_trace, &tracing_thresh_fops); trace_create_file("README", TRACE_MODE_READ, NULL, NULL, &tracing_readme_fops); trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL, NULL, &tracing_saved_cmdlines_fops); trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL, NULL, &tracing_saved_cmdlines_size_fops); trace_create_file("saved_tgids", TRACE_MODE_READ, NULL, NULL, &tracing_saved_tgids_fops); trace_create_eval_file(NULL); #ifdef CONFIG_MODULES register_module_notifier(&trace_module_nb); #endif #ifdef CONFIG_DYNAMIC_FTRACE trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL, NULL, &tracing_dyn_info_fops); #endif create_trace_instances(NULL); update_tracer_options(); } static __init int tracer_init_tracefs(void) { int ret; trace_access_lock_init(); ret = tracing_init_dentry(); if (ret) return 0; if (eval_map_wq) { INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func); queue_work(eval_map_wq, &tracerfs_init_work); } else { tracer_init_tracefs_work_func(NULL); } if (rv_init_interface()) pr_err("RV: Error while creating the RV interface\n"); return 0; } fs_initcall(tracer_init_tracefs); static int trace_die_panic_handler(struct notifier_block *self, unsigned long ev, void *unused); static struct notifier_block trace_panic_notifier = { .notifier_call = trace_die_panic_handler, .priority = INT_MAX - 1, }; static struct notifier_block trace_die_notifier = { .notifier_call = trace_die_panic_handler, .priority = INT_MAX - 1, }; /* * The idea is to execute the following die/panic callback early, in order * to avoid showing irrelevant information in the trace 
(like other panic * notifier functions); we are the 2nd to run, after hung_task/rcu_stall * warnings get disabled (to prevent potential log flooding). */ static int trace_die_panic_handler(struct notifier_block *self, unsigned long ev, void *unused) { if (!ftrace_dump_on_oops_enabled()) return NOTIFY_DONE; /* The die notifier requires DIE_OOPS to trigger */ if (self == &trace_die_notifier && ev != DIE_OOPS) return NOTIFY_DONE; ftrace_dump(DUMP_PARAM); return NOTIFY_DONE; } /* * printk is set to max of 1024, we really don't need it that big. * Nothing should be printing 1000 characters anyway. */ #define TRACE_MAX_PRINT 1000 /* * Define here KERN_TRACE so that we have one place to modify * it if we decide to change what log level the ftrace dump * should be at. */ #define KERN_TRACE KERN_EMERG void trace_printk_seq(struct trace_seq *s) { /* Probably should print a warning here. */ if (s->seq.len >= TRACE_MAX_PRINT) s->seq.len = TRACE_MAX_PRINT; /* * More paranoid code. Although the buffer size is set to * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just * an extra layer of protection. */ if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) s->seq.len = s->seq.size - 1; /* should be zero ended, but we are paranoid. */ s->buffer[s->seq.len] = 0; printk(KERN_TRACE "%s", s->buffer); trace_seq_init(s); } static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr) { iter->tr = tr; iter->trace = iter->tr->current_trace; iter->cpu_file = RING_BUFFER_ALL_CPUS; iter->array_buffer = &tr->array_buffer; if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->array_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[iter->tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; /* Can not use kmalloc for iter.temp and iter.fmt */ iter->temp = static_temp_buf; iter->temp_size = STATIC_TEMP_BUF_SIZE; iter->fmt = static_fmt_buf; iter->fmt_size = STATIC_FMT_BUF_SIZE; } void trace_init_global_iter(struct trace_iterator *iter) { trace_init_iter(iter, &global_trace); } static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode) { /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; unsigned int old_userobj; unsigned long flags; int cnt = 0; /* * Always turn off tracing when we dump. * We don't need to show trace output of what happens * between multiple crashes. * * If the user does a sysrq-z, then they can re-enable * tracing with echo 1 > tracing_on. */ tracer_tracing_off(tr); local_irq_save(flags); /* Simulate the iterator */ trace_init_iter(&iter, tr); /* While dumping, do not allow the buffer to be enable */ tracer_tracing_disable(tr); old_userobj = tr->trace_flags & TRACE_ITER(SYM_USEROBJ); /* don't look at user memory in panic mode */ tr->trace_flags &= ~TRACE_ITER(SYM_USEROBJ); if (dump_mode == DUMP_ORIG) iter.cpu_file = raw_smp_processor_id(); else iter.cpu_file = RING_BUFFER_ALL_CPUS; if (tr == &global_trace) printk(KERN_TRACE "Dumping ftrace buffer:\n"); else printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name); /* Did function tracer already get disabled? */ if (ftrace_is_dead()) { printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); printk("# MAY BE MISSING FUNCTION EVENTS\n"); } /* * We need to stop all tracing on all CPUS to read * the next buffer. This is a bit expensive, but is * not done often. 
We fill all what we can read, * and then release the locks again. */ while (!trace_empty(&iter)) { if (!cnt) printk(KERN_TRACE "---------------------------------\n"); cnt++; trace_iterator_reset(&iter); iter.iter_flags |= TRACE_FILE_LAT_FMT; if (trace_find_next_entry_inc(&iter) != NULL) { int ret; ret = print_trace_line(&iter); if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(&iter); trace_printk_seq(&iter.seq); } touch_nmi_watchdog(); } if (!cnt) printk(KERN_TRACE " (ftrace buffer empty)\n"); else printk(KERN_TRACE "---------------------------------\n"); tr->trace_flags |= old_userobj; tracer_tracing_enable(tr); local_irq_restore(flags); } static void ftrace_dump_by_param(void) { bool first_param = true; char dump_param[MAX_TRACER_SIZE]; char *buf, *token, *inst_name; struct trace_array *tr; strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE); buf = dump_param; while ((token = strsep(&buf, ",")) != NULL) { if (first_param) { first_param = false; if (!strcmp("0", token)) continue; else if (!strcmp("1", token)) { ftrace_dump_one(&global_trace, DUMP_ALL); continue; } else if (!strcmp("2", token) || !strcmp("orig_cpu", token)) { ftrace_dump_one(&global_trace, DUMP_ORIG); continue; } } inst_name = strsep(&token, "="); tr = trace_array_find(inst_name); if (!tr) { printk(KERN_TRACE "Instance %s not found\n", inst_name); continue; } if (token && (!strcmp("2", token) || !strcmp("orig_cpu", token))) ftrace_dump_one(tr, DUMP_ORIG); else ftrace_dump_one(tr, DUMP_ALL); } } void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { static atomic_t dump_running; /* Only allow one dump user at a time. */ if (atomic_inc_return(&dump_running) != 1) { atomic_dec(&dump_running); return; } switch (oops_dump_mode) { case DUMP_ALL: ftrace_dump_one(&global_trace, DUMP_ALL); break; case DUMP_ORIG: ftrace_dump_one(&global_trace, DUMP_ORIG); break; case DUMP_PARAM: ftrace_dump_by_param(); break; case DUMP_NONE: break; default: printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); ftrace_dump_one(&global_trace, DUMP_ALL); } atomic_dec(&dump_running); } EXPORT_SYMBOL_GPL(ftrace_dump); #define WRITE_BUFSIZE 4096 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int (*createfn)(const char *)) { char *kbuf __free(kfree) = NULL; char *buf, *tmp; int ret = 0; size_t done = 0; size_t size; kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); if (!kbuf) return -ENOMEM; while (done < count) { size = count - done; if (size >= WRITE_BUFSIZE) size = WRITE_BUFSIZE - 1; if (copy_from_user(kbuf, buffer + done, size)) return -EFAULT; kbuf[size] = '\0'; buf = kbuf; do { tmp = strchr(buf, '\n'); if (tmp) { *tmp = '\0'; size = tmp - buf + 1; } else { size = strlen(buf); if (done + size < count) { if (buf != kbuf) break; /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ pr_warn("Line length is too long: Should be less than %d\n", WRITE_BUFSIZE - 2); return -EINVAL; } } done += size; /* Remove comments */ tmp = strchr(buf, '#'); if (tmp) *tmp = '\0'; ret = createfn(buf); if (ret) return ret; buf += size; } while (done < count); } return done; } #ifdef CONFIG_TRACER_MAX_TRACE __init static bool tr_needs_alloc_snapshot(const char *name) { char *test; int len = strlen(name); bool ret; if (!boot_snapshot_index) return false; if (strncmp(name, boot_snapshot_info, len) == 0 && boot_snapshot_info[len] == '\t') return true; test = kmalloc(strlen(name) + 3, GFP_KERNEL); if (!test) return false; sprintf(test, "\t%s\t", name); ret = strstr(boot_snapshot_info, test) == NULL; 
kfree(test); return ret; } __init static void do_allocate_snapshot(const char *name) { if (!tr_needs_alloc_snapshot(name)) return; /* * When allocate_snapshot is set, the next call to * allocate_trace_buffers() (called by trace_array_get_by_name()) * will allocate the snapshot buffer. That will also clear * this flag. */ allocate_snapshot = true; } #else static inline void do_allocate_snapshot(const char *name) { } #endif __init static int backup_instance_area(const char *backup, unsigned long *addr, phys_addr_t *size) { struct trace_array *backup_tr; void *allocated_vaddr = NULL; backup_tr = trace_array_get_by_name(backup, NULL); if (!backup_tr) { pr_warn("Tracing: Instance %s is not found.\n", backup); return -ENOENT; } if (!(backup_tr->flags & TRACE_ARRAY_FL_BOOT)) { pr_warn("Tracing: Instance %s is not boot mapped.\n", backup); trace_array_put(backup_tr); return -EINVAL; } *size = backup_tr->range_addr_size; allocated_vaddr = vzalloc(*size); if (!allocated_vaddr) { pr_warn("Tracing: Failed to allocate memory for copying instance %s (size 0x%lx)\n", backup, (unsigned long)*size); trace_array_put(backup_tr); return -ENOMEM; } memcpy(allocated_vaddr, (void *)backup_tr->range_addr_start, (size_t)*size); *addr = (unsigned long)allocated_vaddr; trace_array_put(backup_tr); return 0; } __init static void enable_instances(void) { struct trace_array *tr; bool memmap_area = false; char *curr_str; char *name; char *str; char *tok; /* A tab is always appended */ boot_instance_info[boot_instance_index - 1] = '\0'; str = boot_instance_info; while ((curr_str = strsep(&str, "\t"))) { phys_addr_t start = 0; phys_addr_t size = 0; unsigned long addr = 0; bool traceprintk = false; bool traceoff = false; char *flag_delim; char *addr_delim; char *rname __free(kfree) = NULL; char *backup; tok = strsep(&curr_str, ","); name = strsep(&tok, "="); backup = tok; flag_delim = strchr(name, '^'); addr_delim = strchr(name, '@'); if (addr_delim) *addr_delim++ = '\0'; if (flag_delim) *flag_delim++ = '\0'; if (backup) { if (backup_instance_area(backup, &addr, &size) < 0) continue; } if (flag_delim) { char *flag; while ((flag = strsep(&flag_delim, "^"))) { if (strcmp(flag, "traceoff") == 0) { traceoff = true; } else if ((strcmp(flag, "printk") == 0) || (strcmp(flag, "traceprintk") == 0) || (strcmp(flag, "trace_printk") == 0)) { traceprintk = true; } else { pr_info("Tracing: Invalid instance flag '%s' for %s\n", flag, name); } } } tok = addr_delim; if (tok && isdigit(*tok)) { start = memparse(tok, &tok); if (!start) { pr_warn("Tracing: Invalid boot instance address for %s\n", name); continue; } if (*tok != ':') { pr_warn("Tracing: No size specified for instance %s\n", name); continue; } tok++; size = memparse(tok, &tok); if (!size) { pr_warn("Tracing: Invalid boot instance size for %s\n", name); continue; } memmap_area = true; } else if (tok) { if (!reserve_mem_find_by_name(tok, &start, &size)) { start = 0; pr_warn("Failed to map boot instance %s to %s\n", name, tok); continue; } rname = kstrdup(tok, GFP_KERNEL); } if (start) { /* Start and size must be page aligned */ if (start & ~PAGE_MASK) { pr_warn("Tracing: mapping start addr %pa is not page aligned\n", &start); continue; } if (size & ~PAGE_MASK) { pr_warn("Tracing: mapping size %pa is not page aligned\n", &size); continue; } if (memmap_area) addr = map_pages(start, size); else addr = (unsigned long)phys_to_virt(start); if (addr) { pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n", name, &start, (unsigned long)size); } else { 
pr_warn("Tracing: Failed to map boot instance %s\n", name); continue; } } else { /* Only non mapped buffers have snapshot buffers */ if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE)) do_allocate_snapshot(name); } tr = trace_array_create_systems(name, NULL, addr, size); if (IS_ERR(tr)) { pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str); continue; } if (traceoff) tracer_tracing_off(tr); if (traceprintk) update_printk_trace(tr); /* * memmap'd buffers can not be freed. */ if (memmap_area) { tr->flags |= TRACE_ARRAY_FL_MEMMAP; tr->ref++; } /* * Backup buffers can be freed but need vfree(). */ if (backup) tr->flags |= TRACE_ARRAY_FL_VMALLOC; if (start || backup) { tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT; tr->range_name = no_free_ptr(rname); } while ((tok = strsep(&curr_str, ","))) { early_enable_events(tr, tok, true); } } } __init static int tracer_alloc_buffers(void) { int ring_buf_size; int ret = -ENOMEM; if (security_locked_down(LOCKDOWN_TRACEFS)) { pr_warn("Tracing disabled due to lockdown\n"); return -EPERM; } /* * Make sure we don't accidentally add more trace options * than we have bits for. */ BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) return -ENOMEM; if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) goto out_free_buffer_mask; /* Only allocate trace_printk buffers if a trace_printk exists */ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers(); /* To save memory, keep the ring buffer size to its minimum */ if (global_trace.ring_buffer_expanded) ring_buf_size = trace_buf_size; else ring_buf_size = 1; cpumask_copy(tracing_buffer_mask, cpu_possible_mask); cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&global_trace.start_lock); /* * The prepare callbacks allocates some memory for the ring buffer. We * don't free the buffer if the CPU goes down. If we were to free * the buffer, then the user would lose any trace that was in the * buffer. The memory will be removed once the "instance" is removed. */ ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:prepare", trace_rb_cpu_prepare, NULL); if (ret < 0) goto out_free_cpumask; /* Used for event triggers */ ret = -ENOMEM; temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); if (!temp_buffer) goto out_rm_hp_state; if (trace_create_savedcmd() < 0) goto out_free_temp_buffer; if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL)) goto out_free_savedcmd; /* TODO: make the number of buffers hot pluggable with CPUS */ if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n"); goto out_free_pipe_cpumask; } if (global_trace.buffer_disabled) tracing_off(); if (trace_boot_clock) { ret = tracing_set_clock(&global_trace, trace_boot_clock); if (ret < 0) pr_warn("Trace clock %s not defined, going back to default\n", trace_boot_clock); } /* * register_tracer() might reference current_trace, so it * needs to be set before we register anything. This is * just a bootstrap of current_trace anyway. 
*/ global_trace.current_trace = &nop_trace; global_trace.current_trace_flags = nop_trace.flags; global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; #ifdef CONFIG_TRACER_MAX_TRACE spin_lock_init(&global_trace.snapshot_trigger_lock); #endif ftrace_init_global_array_ops(&global_trace); #ifdef CONFIG_MODULES INIT_LIST_HEAD(&global_trace.mod_events); #endif init_trace_flags_index(&global_trace); INIT_LIST_HEAD(&global_trace.tracers); /* All seems OK, enable tracing */ tracing_disabled = 0; atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier); register_die_notifier(&trace_die_notifier); global_trace.flags = TRACE_ARRAY_FL_GLOBAL; global_trace.syscall_buf_sz = syscall_buf_size; INIT_LIST_HEAD(&global_trace.systems); INIT_LIST_HEAD(&global_trace.events); INIT_LIST_HEAD(&global_trace.hist_vars); INIT_LIST_HEAD(&global_trace.err_log); list_add(&global_trace.marker_list, &marker_copies); list_add(&global_trace.list, &ftrace_trace_arrays); register_tracer(&nop_trace); /* Function tracing may start here (via kernel command line) */ init_function_trace(); apply_trace_boot_options(); register_snapshot_cmd(); return 0; out_free_pipe_cpumask: free_cpumask_var(global_trace.pipe_cpumask); out_free_savedcmd: trace_free_saved_cmdlines_buffer(); out_free_temp_buffer: ring_buffer_free(temp_buffer); out_rm_hp_state: cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE); out_free_cpumask: free_cpumask_var(global_trace.tracing_cpumask); out_free_buffer_mask: free_cpumask_var(tracing_buffer_mask); return ret; } #ifdef CONFIG_FUNCTION_TRACER /* Used to set module cached ftrace filtering at boot up */ struct trace_array *trace_get_global_array(void) { return &global_trace; } #endif void __init ftrace_boot_snapshot(void) { #ifdef CONFIG_TRACER_MAX_TRACE struct trace_array *tr; if (!snapshot_at_boot) return; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->allocated_snapshot) continue; tracing_snapshot_instance(tr); trace_array_puts(tr, "** Boot snapshot taken **\n"); } #endif } void __init early_trace_init(void) { if (tracepoint_printk) { tracepoint_print_iter = kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); if (MEM_FAIL(!tracepoint_print_iter, "Failed to allocate trace iterator\n")) tracepoint_printk = 0; else static_key_enable(&tracepoint_printk_key.key); } tracer_alloc_buffers(); init_events(); } void __init trace_init(void) { trace_event_init(); if (boot_instance_index) enable_instances(); } __init static void clear_boot_tracer(void) { /* * The default tracer at boot buffer is an init section. * This function is called in lateinit. If we did not * find the boot tracer, then clear it out, to prevent * later registration from accessing the buffer that is * about to be freed. 
*/ if (!default_bootup_tracer) return; printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", default_bootup_tracer); default_bootup_tracer = NULL; } #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK __init static void tracing_set_default_clock(void) { /* sched_clock_stable() is determined in late_initcall */ if (!trace_boot_clock && !sched_clock_stable()) { if (security_locked_down(LOCKDOWN_TRACEFS)) { pr_warn("Can not set tracing clock due to lockdown\n"); return; } printk(KERN_WARNING "Unstable clock detected, switching default tracing clock to \"global\"\n" "If you want to keep using the local clock, then add:\n" " \"trace_clock=local\"\n" "on the kernel command line\n"); tracing_set_clock(&global_trace, "global"); } } #else static inline void tracing_set_default_clock(void) { } #endif __init static int late_trace_init(void) { if (tracepoint_printk && tracepoint_printk_stop_on_boot) { static_key_disable(&tracepoint_printk_key.key); tracepoint_printk = 0; } if (traceoff_after_boot) tracing_off(); tracing_set_default_clock(); clear_boot_tracer(); return 0; } late_initcall_sync(late_trace_init);
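The kernel-doc for trace_array_get_by_name() earlier in this listing spells out the reference-counting contract for trace instances. The following is a minimal, hedged sketch of a module honouring that contract; the instance name "sample_inst" and the module boilerplate are illustrative only, and it assumes the module-facing declarations (trace_array_get_by_name(), trace_array_puts(), trace_array_put(), trace_array_destroy()) are available via <linux/trace.h>.

#include <linux/module.h>
#include <linux/trace.h>

static struct trace_array *sample_tr;

static int __init sample_trace_init(void)
{
        /* Creates the instance if it does not exist and takes a reference. */
        sample_tr = trace_array_get_by_name("sample_inst", NULL);
        if (!sample_tr)
                return -ENOMEM;

        trace_array_puts(sample_tr, "sample instance is up\n");
        return 0;
}

static void __exit sample_trace_exit(void)
{
        /* Per the kernel-doc above: drop the reference first, then destroy. */
        trace_array_put(sample_tr);
        trace_array_destroy(sample_tr);
}

module_init(sample_trace_init);
module_exit(sample_trace_exit);
MODULE_LICENSE("GPL");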
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
 *
 * Copyright (C) 2020 Paul E. McKenney.
 */

#ifndef __LINUX_RCUPDATE_TRACE_H
#define __LINUX_RCUPDATE_TRACE_H

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/cleanup.h>

extern struct lockdep_map rcu_trace_lock_map;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline int rcu_read_lock_trace_held(void)
{
        return lock_is_held(&rcu_trace_lock_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int rcu_read_lock_trace_held(void)
{
        return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

void rcu_read_unlock_trace_special(struct task_struct *t);

/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked on one
 * task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
        struct task_struct *t = current;

        WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
        barrier();
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
            t->trc_reader_special.b.need_mb)
                smp_mb(); // Pairs with update-side barriers
        rcu_lock_acquire(&rcu_trace_lock_map);
}

/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking a rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
        int nesting;
        struct task_struct *t = current;

        rcu_lock_release(&rcu_trace_lock_map);
        nesting = READ_ONCE(t->trc_reader_nesting) - 1;
        barrier(); // Critical section before disabling.
        // Disable IPI-based setting of .need_qs.
        WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
        if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
                WRITE_ONCE(t->trc_reader_nesting, nesting);
                return;  // We assume shallow reader nesting.
        }
        WARN_ON_ONCE(nesting != 0);
        rcu_read_unlock_trace_special(t);
}

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
struct task_struct *get_rcu_tasks_trace_gp_kthread(void);

#else
/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp,
                                        rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

DEFINE_LOCK_GUARD_0(rcu_tasks_trace,
                    rcu_read_lock_trace(),
                    rcu_read_unlock_trace())

#endif /* __LINUX_RCUPDATE_TRACE_H */
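The header above only declares the Tasks Trace RCU read-side API; as a rough usage sketch (not part of the header), a reader section might look like the following. The example_cfg structure, the shared_cfg pointer and both functions are hypothetical; only rcu_read_lock_trace(), rcu_read_unlock_trace(), rcu_read_lock_trace_held() and the rcu_tasks_trace guard come from the declarations above.

#include <linux/rcupdate_trace.h>
#include <linux/printk.h>

struct example_cfg {                            /* hypothetical traced object */
        int flags;
};

static struct example_cfg __rcu *shared_cfg;    /* hypothetical shared pointer */

static void example_read_cfg(void)
{
        struct example_cfg *cfg;

        rcu_read_lock_trace();          /* begin trace-RCU read-side section */
        cfg = rcu_dereference_check(shared_cfg, rcu_read_lock_trace_held());
        if (cfg)
                pr_info("cfg flags=%#x\n", cfg->flags);
        rcu_read_unlock_trace();        /* end of read-side section */
}

static void example_read_cfg_guarded(void)
{
        /* Same section expressed with the scope-based guard defined above. */
        guard(rcu_tasks_trace)();

        if (rcu_dereference_check(shared_cfg, rcu_read_lock_trace_held()))
                pr_info("cfg is published\n");
}

A writer retracting shared_cfg would wait for such readers with synchronize_rcu_tasks_trace(), or defer the free with call_rcu_tasks_trace(), both declared above.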
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NSPROXY_H
#define _LINUX_NSPROXY_H

#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

struct mnt_namespace;
struct uts_namespace;
struct ipc_namespace;
struct pid_namespace;
struct cgroup_namespace;
struct fs_struct;

/*
 * A structure to contain pointers to all per-process
 * namespaces - fs (mount), uts, network, sysvipc, etc.
 *
 * The pid namespace is an exception -- it's accessed using
 * task_active_pid_ns.  The pid namespace here is the
 * namespace that children will use.
 *
 * 'count' is the number of tasks holding a reference.
 * The count for each namespace, then, will be the number
 * of nsproxies pointing to it, not the number of tasks.
 *
 * The nsproxy is shared by tasks which share all namespaces.
 * As soon as a single namespace is cloned or unshared, the
 * nsproxy is copied.
 */
struct nsproxy {
        refcount_t count;
        struct uts_namespace *uts_ns;
        struct ipc_namespace *ipc_ns;
        struct mnt_namespace *mnt_ns;
        struct pid_namespace *pid_ns_for_children;
        struct net *net_ns;
        struct time_namespace *time_ns;
        struct time_namespace *time_ns_for_children;
        struct cgroup_namespace *cgroup_ns;
};
extern struct nsproxy init_nsproxy;

/*
 * A structure to encompass all bits needed to install
 * a partial or complete new set of namespaces.
 *
 * If a new user namespace is requested cred will
 * point to a modifiable set of credentials. If a pointer
 * to a modifiable set is needed nsset_cred() must be
 * used and tested.
 */
struct nsset {
        unsigned flags;
        struct nsproxy *nsproxy;
        struct fs_struct *fs;
        const struct cred *cred;
};

static inline struct cred *nsset_cred(struct nsset *set)
{
        if (set->flags & CLONE_NEWUSER)
                return (struct cred *)set->cred;

        return NULL;
}

/*
 * the namespaces access rules are:
 *
 * 1. only current task is allowed to change tsk->nsproxy pointer or
 *    any pointer on the nsproxy itself.  Current must hold the task_lock
 *    when changing tsk->nsproxy.
 *
 * 2. when accessing (i.e. reading) current task's namespaces - no
 *    precautions should be taken - just dereference the pointers
 *
 * 3. the access to other task namespaces is performed like this
 *    task_lock(task);
 *    nsproxy = task->nsproxy;
 *    if (nsproxy != NULL) {
 *            / *
 *              * work with the namespaces here
 *              * e.g. get the reference on one of them
 *              * /
 *    }
 *    / *
 *      * NULL task->nsproxy means that this task is
 *      * almost dead (zombie)
 *      * /
 *    task_unlock(task);
 *
 */

int copy_namespaces(u64 flags, struct task_struct *tsk);
void switch_cred_namespaces(const struct cred *old, const struct cred *new);
void exit_nsproxy_namespaces(struct task_struct *tsk);
void get_cred_namespaces(struct task_struct *tsk);
void exit_cred_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
int exec_task_namespaces(void);
void deactivate_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, struct cred *,
        struct fs_struct *);
int __init nsproxy_cache_init(void);

static inline void put_nsproxy(struct nsproxy *ns)
{
        if (refcount_dec_and_test(&ns->count))
                deactivate_nsproxy(ns);
}

static inline void get_nsproxy(struct nsproxy *ns)
{
        refcount_inc(&ns->count);
}

DEFINE_FREE(put_nsproxy, struct nsproxy *, if (_T) put_nsproxy(_T))

#endif
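Access rule 3 in the comment above can be turned into code roughly as follows. This is a sketch only, and the helper name pin_task_nsproxy() is made up here; task_lock()/task_unlock() and get_nsproxy()/put_nsproxy() are the primitives the header refers to.

#include <linux/nsproxy.h>
#include <linux/sched/task.h>

/* Hypothetical helper: pin another task's nsproxy for later inspection. */
static struct nsproxy *pin_task_nsproxy(struct task_struct *task)
{
        struct nsproxy *ns;

        task_lock(task);
        ns = task->nsproxy;
        if (ns)
                get_nsproxy(ns);        /* take a reference while the lock is held */
        /* A NULL task->nsproxy means the task is almost dead (zombie). */
        task_unlock(task);

        return ns;                      /* caller releases it with put_nsproxy() */
}

The DEFINE_FREE(put_nsproxy, ...) declaration above additionally lets a caller mark such a pointer __free(put_nsproxy) so the reference is dropped automatically on scope exit.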
// SPDX-License-Identifier: GPL-2.0
/*
 * Documentation/ABI/stable/sysfs-fs-orangefs:
 *
 * What:		/sys/fs/orangefs/perf_counter_reset
 * Date:		June 2015
 * Contact:		Mike Marshall <hubcap@omnibond.com>
 * Description:
 *			echo a 0 or a 1 into perf_counter_reset to
 *			reset all the counters in
 *			/sys/fs/orangefs/perf_counters
 *			except ones with PINT_PERF_PRESERVE set.
 *
 *
 * What:		/sys/fs/orangefs/perf_counters/...
 * Date:		Jun 2015
 * Contact:		Mike Marshall <hubcap@omnibond.com>
 * Description:
 *			Counters and settings for various caches.
 *			Read only.
 *
 *
 * What:		/sys/fs/orangefs/perf_time_interval_secs
 * Date:		Jun 2015
 * Contact:		Mike Marshall <hubcap@omnibond.com>
 * Description:
 *			Length of perf counter intervals in
 *			seconds.
 *
 *
 * What:		/sys/fs/orangefs/perf_history_size
 * Date:		Jun 2015
 * Contact:		Mike Marshall <hubcap@omnibond.com>
 * Description:
 *			The perf_counters cache statistics have N, or
 *			perf_history_size, samples. The default is
 *			one.
 *
 *			Every perf_time_interval_secs the (first)
 *			samples are reset.
 *
 *			If N is greater than one, the "current" set
 *			of samples is reset, and the samples from the
 *			other N-1 intervals remain available.
 *
 *
 * What:		/sys/fs/orangefs/op_timeout_secs
 * Date:		Jun 2015
 * Contact:		Mike Marshall <hubcap@omnibond.com>
 * Description:
 *			Service operation timeout in seconds.
 *
 *
 * What:		/sys/fs/orangefs/slot_timeout_secs
 * Date:		Jun 2015
 * Contact:		Mike Marshall <hubcap@omnibond.com>
 * Description:
 *			"Slot" timeout in seconds.
A "slot" * is an indexed buffer in the shared * memory segment used for communication * between the kernel module and userspace. * Slots are requested and waited for, * the wait times out after slot_timeout_secs. * * What: /sys/fs/orangefs/cache_timeout_msecs * Date: Mar 2018 * Contact: Martin Brandenburg <martin@omnibond.com> * Description: * Time in milliseconds between which * orangefs_revalidate_mapping will invalidate the page * cache. * * What: /sys/fs/orangefs/dcache_timeout_msecs * Date: Jul 2016 * Contact: Martin Brandenburg <martin@omnibond.com> * Description: * Time lookup is valid in milliseconds. * * What: /sys/fs/orangefs/getattr_timeout_msecs * Date: Jul 2016 * Contact: Martin Brandenburg <martin@omnibond.com> * Description: * Time getattr is valid in milliseconds. * * What: /sys/fs/orangefs/readahead_count * Date: Aug 2016 * Contact: Martin Brandenburg <martin@omnibond.com> * Description: * Readahead cache buffer count. * * What: /sys/fs/orangefs/readahead_size * Date: Aug 2016 * Contact: Martin Brandenburg <martin@omnibond.com> * Description: * Readahead cache buffer size. * * What: /sys/fs/orangefs/readahead_count_size * Date: Aug 2016 * Contact: Martin Brandenburg <martin@omnibond.com> * Description: * Readahead cache buffer count and size. * * What: /sys/fs/orangefs/readahead_readcnt * Date: Jan 2017 * Contact: Martin Brandenburg <martin@omnibond.com> * Description: * Number of buffers (in multiples of readahead_size) * which can be read ahead for a single file at once. * * What: /sys/fs/orangefs/acache/... * Date: Jun 2015 * Contact: Martin Brandenburg <martin@omnibond.com> * Description: * Attribute cache configurable settings. * * * What: /sys/fs/orangefs/ncache/... * Date: Jun 2015 * Contact: Mike Marshall <hubcap@omnibond.com> * Description: * Name cache configurable settings. * * * What: /sys/fs/orangefs/capcache/... * Date: Jun 2015 * Contact: Mike Marshall <hubcap@omnibond.com> * Description: * Capability cache configurable settings. * * * What: /sys/fs/orangefs/ccache/... * Date: Jun 2015 * Contact: Mike Marshall <hubcap@omnibond.com> * Description: * Credential cache configurable settings. * */ #include <linux/fs.h> #include <linux/kobject.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/init.h> #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-sysfs.h" #define ORANGEFS_KOBJ_ID "orangefs" #define ACACHE_KOBJ_ID "acache" #define CAPCACHE_KOBJ_ID "capcache" #define CCACHE_KOBJ_ID "ccache" #define NCACHE_KOBJ_ID "ncache" #define PC_KOBJ_ID "pc" #define STATS_KOBJ_ID "stats" /* * Every item calls orangefs_attr_show and orangefs_attr_store through * orangefs_sysfs_ops. They look at the orangefs_attributes further below to * call one of sysfs_int_show, sysfs_int_store, sysfs_service_op_show, or * sysfs_service_op_store. 
*/ struct orangefs_attribute { struct attribute attr; ssize_t (*show)(struct kobject *kobj, struct orangefs_attribute *attr, char *buf); ssize_t (*store)(struct kobject *kobj, struct orangefs_attribute *attr, const char *buf, size_t count); }; static ssize_t orangefs_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct orangefs_attribute *attribute; attribute = container_of(attr, struct orangefs_attribute, attr); if (!attribute->show) return -EIO; return attribute->show(kobj, attribute, buf); } static ssize_t orangefs_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct orangefs_attribute *attribute; if (!strcmp(kobj->name, PC_KOBJ_ID) || !strcmp(kobj->name, STATS_KOBJ_ID)) return -EPERM; attribute = container_of(attr, struct orangefs_attribute, attr); if (!attribute->store) return -EIO; return attribute->store(kobj, attribute, buf, len); } static const struct sysfs_ops orangefs_sysfs_ops = { .show = orangefs_attr_show, .store = orangefs_attr_store, }; static ssize_t sysfs_int_show(struct kobject *kobj, struct orangefs_attribute *attr, char *buf) { int rc = -EIO; gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_show: id:%s:\n", kobj->name); if (!strcmp(kobj->name, ORANGEFS_KOBJ_ID)) { if (!strcmp(attr->attr.name, "op_timeout_secs")) { rc = sysfs_emit(buf, "%d\n", op_timeout_secs); goto out; } else if (!strcmp(attr->attr.name, "slot_timeout_secs")) { rc = sysfs_emit(buf, "%d\n", slot_timeout_secs); goto out; } else if (!strcmp(attr->attr.name, "cache_timeout_msecs")) { rc = sysfs_emit(buf, "%d\n", orangefs_cache_timeout_msecs); goto out; } else if (!strcmp(attr->attr.name, "dcache_timeout_msecs")) { rc = sysfs_emit(buf, "%d\n", orangefs_dcache_timeout_msecs); goto out; } else if (!strcmp(attr->attr.name, "getattr_timeout_msecs")) { rc = sysfs_emit(buf, "%d\n", orangefs_getattr_timeout_msecs); goto out; } else { goto out; } } else if (!strcmp(kobj->name, STATS_KOBJ_ID)) { if (!strcmp(attr->attr.name, "reads")) { rc = sysfs_emit(buf, "%lu\n", orangefs_stats.reads); goto out; } else if (!strcmp(attr->attr.name, "writes")) { rc = sysfs_emit(buf, "%lu\n", orangefs_stats.writes); goto out; } else { goto out; } } out: return rc; } static ssize_t sysfs_int_store(struct kobject *kobj, struct orangefs_attribute *attr, const char *buf, size_t count) { int rc = 0; gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_store: start attr->attr.name:%s: buf:%s:\n", attr->attr.name, buf); if (!strcmp(attr->attr.name, "op_timeout_secs")) { rc = kstrtoint(buf, 0, &op_timeout_secs); goto out; } else if (!strcmp(attr->attr.name, "slot_timeout_secs")) { rc = kstrtoint(buf, 0, &slot_timeout_secs); goto out; } else if (!strcmp(attr->attr.name, "cache_timeout_msecs")) { rc = kstrtoint(buf, 0, &orangefs_cache_timeout_msecs); goto out; } else if (!strcmp(attr->attr.name, "dcache_timeout_msecs")) { rc = kstrtoint(buf, 0, &orangefs_dcache_timeout_msecs); goto out; } else if (!strcmp(attr->attr.name, "getattr_timeout_msecs")) { rc = kstrtoint(buf, 0, &orangefs_getattr_timeout_msecs); goto out; } else { goto out; } out: if (rc) rc = -EINVAL; else rc = count; return rc; } /* * obtain attribute values from userspace with a service operation. 
*/ static ssize_t sysfs_service_op_show(struct kobject *kobj, struct orangefs_attribute *attr, char *buf) { struct orangefs_kernel_op_s *new_op = NULL; int rc = 0; char *ser_op_type = NULL; __u32 op_alloc_type; gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_service_op_show: id:%s:\n", kobj->name); if (strcmp(kobj->name, PC_KOBJ_ID)) op_alloc_type = ORANGEFS_VFS_OP_PARAM; else op_alloc_type = ORANGEFS_VFS_OP_PERF_COUNT; new_op = op_alloc(op_alloc_type); if (!new_op) return -ENOMEM; /* Can't do a service_operation if the client is not running... */ rc = is_daemon_in_service(); if (rc) { pr_info_ratelimited("%s: Client not running :%d:\n", __func__, is_daemon_in_service()); goto out; } if (strcmp(kobj->name, PC_KOBJ_ID)) new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_GET; if (!strcmp(kobj->name, ORANGEFS_KOBJ_ID)) { /* Drop unsupported requests first. */ if (!(orangefs_features & ORANGEFS_FEATURE_READAHEAD) && (!strcmp(attr->attr.name, "readahead_count") || !strcmp(attr->attr.name, "readahead_size") || !strcmp(attr->attr.name, "readahead_count_size") || !strcmp(attr->attr.name, "readahead_readcnt"))) { rc = -EINVAL; goto out; } if (!strcmp(attr->attr.name, "perf_history_size")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE; else if (!strcmp(attr->attr.name, "perf_time_interval_secs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS; else if (!strcmp(attr->attr.name, "perf_counter_reset")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_RESET; else if (!strcmp(attr->attr.name, "readahead_count")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT; else if (!strcmp(attr->attr.name, "readahead_size")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_SIZE; else if (!strcmp(attr->attr.name, "readahead_count_size")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE; else if (!strcmp(attr->attr.name, "readahead_readcnt")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_READCNT; } else if (!strcmp(kobj->name, ACACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "timeout_msecs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS; if (!strcmp(attr->attr.name, "hard_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT; if (!strcmp(attr->attr.name, "soft_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT; if (!strcmp(attr->attr.name, "reclaim_percentage")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE; } else if (!strcmp(kobj->name, CAPCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "timeout_secs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS; if (!strcmp(attr->attr.name, "hard_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT; if (!strcmp(attr->attr.name, "soft_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT; if (!strcmp(attr->attr.name, "reclaim_percentage")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE; } else if (!strcmp(kobj->name, CCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "timeout_secs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS; if (!strcmp(attr->attr.name, "hard_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT; if (!strcmp(attr->attr.name, "soft_limit")) new_op->upcall.req.param.op = 
ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT; if (!strcmp(attr->attr.name, "reclaim_percentage")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE; } else if (!strcmp(kobj->name, NCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "timeout_msecs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS; if (!strcmp(attr->attr.name, "hard_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT; if (!strcmp(attr->attr.name, "soft_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT; if (!strcmp(attr->attr.name, "reclaim_percentage")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE; } else if (!strcmp(kobj->name, PC_KOBJ_ID)) { if (!strcmp(attr->attr.name, ACACHE_KOBJ_ID)) new_op->upcall.req.perf_count.type = ORANGEFS_PERF_COUNT_REQUEST_ACACHE; if (!strcmp(attr->attr.name, CAPCACHE_KOBJ_ID)) new_op->upcall.req.perf_count.type = ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE; if (!strcmp(attr->attr.name, NCACHE_KOBJ_ID)) new_op->upcall.req.perf_count.type = ORANGEFS_PERF_COUNT_REQUEST_NCACHE; } else { gossip_err("sysfs_service_op_show: unknown kobj_id:%s:\n", kobj->name); rc = -EINVAL; goto out; } if (strcmp(kobj->name, PC_KOBJ_ID)) ser_op_type = "orangefs_param"; else ser_op_type = "orangefs_perf_count"; /* * The service_operation will return an errno return code on * error, and zero on success. */ rc = service_operation(new_op, ser_op_type, ORANGEFS_OP_INTERRUPTIBLE); out: if (!rc) { if (strcmp(kobj->name, PC_KOBJ_ID)) { if (new_op->upcall.req.param.op == ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE) { rc = sysfs_emit(buf, "%d %d\n", (int)new_op->downcall.resp.param.u. value32[0], (int)new_op->downcall.resp.param.u. value32[1]); } else { rc = sysfs_emit(buf, "%d\n", (int)new_op->downcall.resp.param.u.value64); } } else { rc = sysfs_emit( buf, "%s", new_op->downcall.resp.perf_count.buffer); } } op_release(new_op); return rc; } /* * pass attribute values back to userspace with a service operation. * * We have to do a memory allocation, an sscanf and a service operation. * And we have to evaluate what the user entered, to make sure the * value is within the range supported by the attribute. So, there's * a lot of return code checking and mapping going on here. * * We want to return 1 if we think everything went OK, and * EINVAL if not. */ static ssize_t sysfs_service_op_store(struct kobject *kobj, struct orangefs_attribute *attr, const char *buf, size_t count) { struct orangefs_kernel_op_s *new_op = NULL; int val = 0; int rc = 0; gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_service_op_store: id:%s:\n", kobj->name); new_op = op_alloc(ORANGEFS_VFS_OP_PARAM); if (!new_op) return -EINVAL; /* sic */ /* Can't do a service_operation if the client is not running... */ rc = is_daemon_in_service(); if (rc) { pr_info("%s: Client not running :%d:\n", __func__, is_daemon_in_service()); goto out; } /* * The value we want to send back to userspace is in buf, unless this * there are two parameters, which is specially handled below. */ if (strcmp(kobj->name, ORANGEFS_KOBJ_ID) || strcmp(attr->attr.name, "readahead_count_size")) { rc = kstrtoint(buf, 0, &val); if (rc) goto out; } new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET; if (!strcmp(kobj->name, ORANGEFS_KOBJ_ID)) { /* Drop unsupported requests first. 
*/ if (!(orangefs_features & ORANGEFS_FEATURE_READAHEAD) && (!strcmp(attr->attr.name, "readahead_count") || !strcmp(attr->attr.name, "readahead_size") || !strcmp(attr->attr.name, "readahead_count_size") || !strcmp(attr->attr.name, "readahead_readcnt"))) { rc = -EINVAL; goto out; } if (!strcmp(attr->attr.name, "perf_history_size")) { if (val > 0) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "perf_time_interval_secs")) { if (val > 0) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "perf_counter_reset")) { if ((val == 0) || (val == 1)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_RESET; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "readahead_count")) { if ((val >= 0)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "readahead_size")) { if ((val >= 0)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_SIZE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "readahead_count_size")) { int val1, val2; rc = sscanf(buf, "%d %d", &val1, &val2); if (rc < 2) { rc = 0; goto out; } if ((val1 >= 0) && (val2 >= 0)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE; } else { rc = 0; goto out; } new_op->upcall.req.param.u.value32[0] = val1; new_op->upcall.req.param.u.value32[1] = val2; goto value_set; } else if (!strcmp(attr->attr.name, "readahead_readcnt")) { if ((val >= 0)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_READCNT; } else { rc = 0; goto out; } } } else if (!strcmp(kobj->name, ACACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "hard_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "soft_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "reclaim_percentage")) { if ((val > -1) && (val < 101)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "timeout_msecs")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS; } else { rc = 0; goto out; } } } else if (!strcmp(kobj->name, CAPCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "hard_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "soft_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "reclaim_percentage")) { if ((val > -1) && (val < 101)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "timeout_secs")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS; } else { rc = 0; goto out; } } } else if (!strcmp(kobj->name, CCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "hard_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT; } else { rc = 0; goto out; } } 
else if (!strcmp(attr->attr.name, "soft_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "reclaim_percentage")) { if ((val > -1) && (val < 101)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "timeout_secs")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS; } else { rc = 0; goto out; } } } else if (!strcmp(kobj->name, NCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "hard_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "soft_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "reclaim_percentage")) { if ((val > -1) && (val < 101)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "timeout_msecs")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS; } else { rc = 0; goto out; } } } else { gossip_err("sysfs_service_op_store: unknown kobj_id:%s:\n", kobj->name); rc = -EINVAL; goto out; } new_op->upcall.req.param.u.value64 = val; value_set: /* * The service_operation will return a errno return code on * error, and zero on success. */ rc = service_operation(new_op, "orangefs_param", ORANGEFS_OP_INTERRUPTIBLE); if (rc < 0) { gossip_err("sysfs_service_op_store: service op returned:%d:\n", rc); rc = 0; } else { rc = count; } out: op_release(new_op); if (rc == -ENOMEM || rc == 0) rc = -EINVAL; return rc; } static struct orangefs_attribute op_timeout_secs_attribute = __ATTR(op_timeout_secs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute slot_timeout_secs_attribute = __ATTR(slot_timeout_secs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute cache_timeout_msecs_attribute = __ATTR(cache_timeout_msecs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute dcache_timeout_msecs_attribute = __ATTR(dcache_timeout_msecs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute getattr_timeout_msecs_attribute = __ATTR(getattr_timeout_msecs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute readahead_count_attribute = __ATTR(readahead_count, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute readahead_size_attribute = __ATTR(readahead_size, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute readahead_count_size_attribute = __ATTR(readahead_count_size, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute readahead_readcnt_attribute = __ATTR(readahead_readcnt, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute perf_counter_reset_attribute = __ATTR(perf_counter_reset, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute perf_history_size_attribute = __ATTR(perf_history_size, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute perf_time_interval_secs_attribute = __ATTR(perf_time_interval_secs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute 
*orangefs_default_attrs[] = { &op_timeout_secs_attribute.attr, &slot_timeout_secs_attribute.attr, &cache_timeout_msecs_attribute.attr, &dcache_timeout_msecs_attribute.attr, &getattr_timeout_msecs_attribute.attr, &readahead_count_attribute.attr, &readahead_size_attribute.attr, &readahead_count_size_attribute.attr, &readahead_readcnt_attribute.attr, &perf_counter_reset_attribute.attr, &perf_history_size_attribute.attr, &perf_time_interval_secs_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(orangefs_default); static struct kobject *orangefs_obj; static void orangefs_obj_release(struct kobject *kobj) { kfree(orangefs_obj); orangefs_obj = NULL; } static const struct kobj_type orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = orangefs_default_groups, .release = orangefs_obj_release, }; static struct orangefs_attribute acache_hard_limit_attribute = __ATTR(hard_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute acache_reclaim_percent_attribute = __ATTR(reclaim_percentage, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute acache_soft_limit_attribute = __ATTR(soft_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute acache_timeout_msecs_attribute = __ATTR(timeout_msecs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute *acache_orangefs_default_attrs[] = { &acache_hard_limit_attribute.attr, &acache_reclaim_percent_attribute.attr, &acache_soft_limit_attribute.attr, &acache_timeout_msecs_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(acache_orangefs_default); static struct kobject *acache_orangefs_obj; static void acache_orangefs_obj_release(struct kobject *kobj) { kfree(acache_orangefs_obj); acache_orangefs_obj = NULL; } static const struct kobj_type acache_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = acache_orangefs_default_groups, .release = acache_orangefs_obj_release, }; static struct orangefs_attribute capcache_hard_limit_attribute = __ATTR(hard_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute capcache_reclaim_percent_attribute = __ATTR(reclaim_percentage, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute capcache_soft_limit_attribute = __ATTR(soft_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute capcache_timeout_secs_attribute = __ATTR(timeout_secs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute *capcache_orangefs_default_attrs[] = { &capcache_hard_limit_attribute.attr, &capcache_reclaim_percent_attribute.attr, &capcache_soft_limit_attribute.attr, &capcache_timeout_secs_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(capcache_orangefs_default); static struct kobject *capcache_orangefs_obj; static void capcache_orangefs_obj_release(struct kobject *kobj) { kfree(capcache_orangefs_obj); capcache_orangefs_obj = NULL; } static const struct kobj_type capcache_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = capcache_orangefs_default_groups, .release = capcache_orangefs_obj_release, }; static struct orangefs_attribute ccache_hard_limit_attribute = __ATTR(hard_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ccache_reclaim_percent_attribute = __ATTR(reclaim_percentage, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ccache_soft_limit_attribute = __ATTR(soft_limit, 
0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ccache_timeout_secs_attribute = __ATTR(timeout_secs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute *ccache_orangefs_default_attrs[] = { &ccache_hard_limit_attribute.attr, &ccache_reclaim_percent_attribute.attr, &ccache_soft_limit_attribute.attr, &ccache_timeout_secs_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(ccache_orangefs_default); static struct kobject *ccache_orangefs_obj; static void ccache_orangefs_obj_release(struct kobject *kobj) { kfree(ccache_orangefs_obj); ccache_orangefs_obj = NULL; } static const struct kobj_type ccache_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = ccache_orangefs_default_groups, .release = ccache_orangefs_obj_release, }; static struct orangefs_attribute ncache_hard_limit_attribute = __ATTR(hard_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ncache_reclaim_percent_attribute = __ATTR(reclaim_percentage, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ncache_soft_limit_attribute = __ATTR(soft_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ncache_timeout_msecs_attribute = __ATTR(timeout_msecs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute *ncache_orangefs_default_attrs[] = { &ncache_hard_limit_attribute.attr, &ncache_reclaim_percent_attribute.attr, &ncache_soft_limit_attribute.attr, &ncache_timeout_msecs_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(ncache_orangefs_default); static struct kobject *ncache_orangefs_obj; static void ncache_orangefs_obj_release(struct kobject *kobj) { kfree(ncache_orangefs_obj); ncache_orangefs_obj = NULL; } static const struct kobj_type ncache_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = ncache_orangefs_default_groups, .release = ncache_orangefs_obj_release, }; static struct orangefs_attribute pc_acache_attribute = __ATTR(acache, 0664, sysfs_service_op_show, NULL); static struct orangefs_attribute pc_capcache_attribute = __ATTR(capcache, 0664, sysfs_service_op_show, NULL); static struct orangefs_attribute pc_ncache_attribute = __ATTR(ncache, 0664, sysfs_service_op_show, NULL); static struct attribute *pc_orangefs_default_attrs[] = { &pc_acache_attribute.attr, &pc_capcache_attribute.attr, &pc_ncache_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(pc_orangefs_default); static struct kobject *pc_orangefs_obj; static void pc_orangefs_obj_release(struct kobject *kobj) { kfree(pc_orangefs_obj); pc_orangefs_obj = NULL; } static const struct kobj_type pc_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = pc_orangefs_default_groups, .release = pc_orangefs_obj_release, }; static struct orangefs_attribute stats_reads_attribute = __ATTR(reads, 0664, sysfs_int_show, NULL); static struct orangefs_attribute stats_writes_attribute = __ATTR(writes, 0664, sysfs_int_show, NULL); static struct attribute *stats_orangefs_default_attrs[] = { &stats_reads_attribute.attr, &stats_writes_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(stats_orangefs_default); static struct kobject *stats_orangefs_obj; static void stats_orangefs_obj_release(struct kobject *kobj) { kfree(stats_orangefs_obj); stats_orangefs_obj = NULL; } static const struct kobj_type stats_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = stats_orangefs_default_groups, .release = stats_orangefs_obj_release, }; int orangefs_sysfs_init(void) { 
int rc = -EINVAL; gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_init: start\n"); /* create /sys/fs/orangefs. */ orangefs_obj = kzalloc(sizeof(*orangefs_obj), GFP_KERNEL); if (!orangefs_obj) goto out; rc = kobject_init_and_add(orangefs_obj, &orangefs_ktype, fs_kobj, ORANGEFS_KOBJ_ID); if (rc) goto ofs_obj_bail; kobject_uevent(orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/acache. */ acache_orangefs_obj = kzalloc(sizeof(*acache_orangefs_obj), GFP_KERNEL); if (!acache_orangefs_obj) { rc = -EINVAL; goto ofs_obj_bail; } rc = kobject_init_and_add(acache_orangefs_obj, &acache_orangefs_ktype, orangefs_obj, ACACHE_KOBJ_ID); if (rc) goto acache_obj_bail; kobject_uevent(acache_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/capcache. */ capcache_orangefs_obj = kzalloc(sizeof(*capcache_orangefs_obj), GFP_KERNEL); if (!capcache_orangefs_obj) { rc = -EINVAL; goto acache_obj_bail; } rc = kobject_init_and_add(capcache_orangefs_obj, &capcache_orangefs_ktype, orangefs_obj, CAPCACHE_KOBJ_ID); if (rc) goto capcache_obj_bail; kobject_uevent(capcache_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/ccache. */ ccache_orangefs_obj = kzalloc(sizeof(*ccache_orangefs_obj), GFP_KERNEL); if (!ccache_orangefs_obj) { rc = -EINVAL; goto capcache_obj_bail; } rc = kobject_init_and_add(ccache_orangefs_obj, &ccache_orangefs_ktype, orangefs_obj, CCACHE_KOBJ_ID); if (rc) goto ccache_obj_bail; kobject_uevent(ccache_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/ncache. */ ncache_orangefs_obj = kzalloc(sizeof(*ncache_orangefs_obj), GFP_KERNEL); if (!ncache_orangefs_obj) { rc = -EINVAL; goto ccache_obj_bail; } rc = kobject_init_and_add(ncache_orangefs_obj, &ncache_orangefs_ktype, orangefs_obj, NCACHE_KOBJ_ID); if (rc) goto ncache_obj_bail; kobject_uevent(ncache_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/perf_counters. */ pc_orangefs_obj = kzalloc(sizeof(*pc_orangefs_obj), GFP_KERNEL); if (!pc_orangefs_obj) { rc = -EINVAL; goto ncache_obj_bail; } rc = kobject_init_and_add(pc_orangefs_obj, &pc_orangefs_ktype, orangefs_obj, "perf_counters"); if (rc) goto pc_obj_bail; kobject_uevent(pc_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/stats. */ stats_orangefs_obj = kzalloc(sizeof(*stats_orangefs_obj), GFP_KERNEL); if (!stats_orangefs_obj) { rc = -EINVAL; goto pc_obj_bail; } rc = kobject_init_and_add(stats_orangefs_obj, &stats_orangefs_ktype, orangefs_obj, STATS_KOBJ_ID); if (rc) goto stats_obj_bail; kobject_uevent(stats_orangefs_obj, KOBJ_ADD); goto out; stats_obj_bail: kobject_put(stats_orangefs_obj); pc_obj_bail: kobject_put(pc_orangefs_obj); ncache_obj_bail: kobject_put(ncache_orangefs_obj); ccache_obj_bail: kobject_put(ccache_orangefs_obj); capcache_obj_bail: kobject_put(capcache_orangefs_obj); acache_obj_bail: kobject_put(acache_orangefs_obj); ofs_obj_bail: kobject_put(orangefs_obj); out: return rc; } void orangefs_sysfs_exit(void) { gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_exit: start\n"); kobject_put(acache_orangefs_obj); kobject_put(capcache_orangefs_obj); kobject_put(ccache_orangefs_obj); kobject_put(ncache_orangefs_obj); kobject_put(pc_orangefs_obj); kobject_put(stats_orangefs_obj); kobject_put(orangefs_obj); }
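A hedged userspace illustration of the attributes registered above (not part of the kernel source): the path follows from the kobject names created in orangefs_sysfs_init(), the values 4 and 262144 are arbitrary, and the write is expected to fail with EINVAL when the client-core daemon is not running or the server lacks ORANGEFS_FEATURE_READAHEAD. readahead_count_size is the one attribute that takes two integers, matching the "%d %d" sscanf in sysfs_service_op_store(); the other tunables take a single integer parsed with kstrtoint().

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/orangefs/readahead_count_size";
	char line[64];
	int count, size;
	FILE *f;

	/* show side: sysfs_emit(buf, "%d %d\n", ...) gives two integers */
	f = fopen(path, "r");
	if (!f || !fgets(line, sizeof(line), f) ||
	    sscanf(line, "%d %d", &count, &size) != 2) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("current: count=%d size=%d\n", count, size);

	/* store side: two non-negative integers; 4 and 262144 are arbitrary */
	f = fopen(path, "w");
	if (!f || fprintf(f, "%d %d", 4, 262144) < 0) {
		perror(path);
		return 1;
	}
	return fclose(f) ? 1 : 0;
}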
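The io_uring core that follows opens with a comment spelling out the memory-barrier contract between the application and the kernel for the shared rings: acquire-load of the CQ tail, release-store of the CQ head, and the mirror-image rules for the SQ. As a minimal sketch of the application side, assuming a hypothetical struct cq_view that stands in for the pointers a program would derive from mmap()ing the CQ ring, the loop below reaps completions with exactly that ordering (GCC/Clang __atomic builtins playing the role of smp_load_acquire()/smp_store_release()):

#include <stdint.h>
#include <linux/io_uring.h>	/* uapi header: struct io_uring_cqe */

struct cq_view {		/* hypothetical stand-in for the mmap'd CQ ring pointers */
	uint32_t *khead;	/* consumer index, advanced by the application */
	uint32_t *ktail;	/* producer index, advanced by the kernel */
	uint32_t ring_mask;
	struct io_uring_cqe *cqes;
};

static unsigned int reap_cqes(struct cq_view *cq,
			      void (*handle)(struct io_uring_cqe *))
{
	/* acquire-load of the tail pairs with the kernel's release store */
	uint32_t tail = __atomic_load_n(cq->ktail, __ATOMIC_ACQUIRE);
	uint32_t head = *cq->khead;	/* only this thread advances the head */
	unsigned int seen = 0;

	while (head != tail) {
		handle(&cq->cqes[head++ & cq->ring_mask]);
		seen++;
	}
	/* release-store of the head orders the CQE reads above before publishing it */
	__atomic_store_n(cq->khead, head, __ATOMIC_RELEASE);
	return seen;
}

liburing wraps the same pattern behind helpers such as io_uring_for_each_cqe() and io_uring_cq_advance().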
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.org/pub/scm/linux/kernel/git/axboe/liburing.git
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
* * Copyright (C) 2018-2019 Jens Axboe * Copyright (c) 2018-2019 Christoph Hellwig */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/syscalls.h> #include <net/compat.h> #include <linux/refcount.h> #include <linux/uio.h> #include <linux/bits.h> #include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/percpu.h> #include <linux/slab.h> #include <linux/bvec.h> #include <linux/net.h> #include <net/sock.h> #include <linux/anon_inodes.h> #include <linux/sched/mm.h> #include <linux/uaccess.h> #include <linux/nospec.h> #include <linux/fsnotify.h> #include <linux/fadvise.h> #include <linux/task_work.h> #include <linux/io_uring.h> #include <linux/io_uring/cmd.h> #include <linux/audit.h> #include <linux/security.h> #include <linux/jump_label.h> #include <asm/shmparam.h> #define CREATE_TRACE_POINTS #include <trace/events/io_uring.h> #include <uapi/linux/io_uring.h> #include "io-wq.h" #include "filetable.h" #include "io_uring.h" #include "opdef.h" #include "refs.h" #include "tctx.h" #include "register.h" #include "sqpoll.h" #include "fdinfo.h" #include "kbuf.h" #include "rsrc.h" #include "cancel.h" #include "net.h" #include "notif.h" #include "waitid.h" #include "futex.h" #include "napi.h" #include "uring_cmd.h" #include "msg_ring.h" #include "memmap.h" #include "zcrx.h" #include "timeout.h" #include "poll.h" #include "rw.h" #include "alloc_cache.h" #include "eventfd.h" #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \ IOSQE_IO_HARDLINK | IOSQE_ASYNC) #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK) #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \ REQ_F_INFLIGHT | REQ_F_CREDS | REQ_F_ASYNC_DATA) #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | IO_REQ_LINK_FLAGS | \ REQ_F_REISSUE | REQ_F_POLLED | \ IO_REQ_CLEAN_FLAGS) #define IO_TCTX_REFS_CACHE_NR (1U << 10) #define IO_COMPL_BATCH 32 #define IO_REQ_ALLOC_BATCH 8 #define IO_LOCAL_TW_DEFAULT_MAX 20 /* requests with any of those set should undergo io_disarm_next() */ #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL) /* * No waiters. It's larger than any valid value of the tw counter * so that tests against ->cq_wait_nr would fail and skip wake_up(). 
*/ #define IO_CQ_WAKE_INIT (-1U) /* Forced wake up if there is a waiter regardless of ->cq_wait_nr */ #define IO_CQ_WAKE_FORCE (IO_CQ_WAKE_INIT >> 1) static void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags); static void __io_req_caches_free(struct io_ring_ctx *ctx); static __read_mostly DEFINE_STATIC_KEY_FALSE(io_key_has_sqarray); struct kmem_cache *req_cachep; static struct workqueue_struct *iou_wq __ro_after_init; static int __read_mostly sysctl_io_uring_disabled; static int __read_mostly sysctl_io_uring_group = -1; #ifdef CONFIG_SYSCTL static const struct ctl_table kernel_io_uring_disabled_table[] = { { .procname = "io_uring_disabled", .data = &sysctl_io_uring_disabled, .maxlen = sizeof(sysctl_io_uring_disabled), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, { .procname = "io_uring_group", .data = &sysctl_io_uring_group, .maxlen = sizeof(gid_t), .mode = 0644, .proc_handler = proc_dointvec, }, }; #endif static void io_poison_cached_req(struct io_kiocb *req) { req->ctx = IO_URING_PTR_POISON; req->tctx = IO_URING_PTR_POISON; req->file = IO_URING_PTR_POISON; req->creds = IO_URING_PTR_POISON; req->io_task_work.func = IO_URING_PTR_POISON; req->apoll = IO_URING_PTR_POISON; } static void io_poison_req(struct io_kiocb *req) { io_poison_cached_req(req); req->async_data = IO_URING_PTR_POISON; req->kbuf = IO_URING_PTR_POISON; req->comp_list.next = IO_URING_PTR_POISON; req->file_node = IO_URING_PTR_POISON; req->link = IO_URING_PTR_POISON; } static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx) { return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); } static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx) { return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head); } static inline void req_fail_link_node(struct io_kiocb *req, int res) { req_set_fail(req); io_req_set_res(req, res, 0); } static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx) { if (IS_ENABLED(CONFIG_KASAN)) io_poison_cached_req(req); wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); } static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref) { struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); complete(&ctx->ref_comp); } /* * Terminate the request if either of these conditions are true: * * 1) It's being executed by the original task, but that task is marked * with PF_EXITING as it's exiting. * 2) PF_KTHREAD is set, in which case the invoker of the task_work is * our fallback task_work. * 3) The ring has been closed and is going away. 
*/ static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx) { return (current->flags & (PF_EXITING | PF_KTHREAD)) || percpu_ref_is_dying(&ctx->refs); } static __cold void io_fallback_req_func(struct work_struct *work) { struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, fallback_work.work); struct llist_node *node = llist_del_all(&ctx->fallback_llist); struct io_kiocb *req, *tmp; struct io_tw_state ts = {}; percpu_ref_get(&ctx->refs); mutex_lock(&ctx->uring_lock); ts.cancel = io_should_terminate_tw(ctx); llist_for_each_entry_safe(req, tmp, node, io_task_work.node) req->io_task_work.func((struct io_tw_req){req}, ts); io_submit_flush_completions(ctx); mutex_unlock(&ctx->uring_lock); percpu_ref_put(&ctx->refs); } static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits) { unsigned int hash_buckets; int i; do { hash_buckets = 1U << bits; table->hbs = kvmalloc_array(hash_buckets, sizeof(table->hbs[0]), GFP_KERNEL_ACCOUNT); if (table->hbs) break; if (bits == 1) return -ENOMEM; bits--; } while (1); table->hash_bits = bits; for (i = 0; i < hash_buckets; i++) INIT_HLIST_HEAD(&table->hbs[i].list); return 0; } static void io_free_alloc_caches(struct io_ring_ctx *ctx) { io_alloc_cache_free(&ctx->apoll_cache, kfree); io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free); io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free); io_futex_cache_free(ctx); io_rsrc_cache_free(ctx); } static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) { struct io_ring_ctx *ctx; int hash_bits; bool ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; xa_init(&ctx->io_bl_xa); /* * Use 5 bits less than the max cq entries, that should give us around * 32 entries per hash list if totally full and uniformly spread, but * don't keep too many buckets to not overconsume memory. 
*/ hash_bits = ilog2(p->cq_entries) - 5; hash_bits = clamp(hash_bits, 1, 8); if (io_alloc_hash_table(&ctx->cancel_table, hash_bits)) goto err; if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, 0, GFP_KERNEL)) goto err; ctx->flags = p->flags; ctx->hybrid_poll_time = LLONG_MAX; atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT); init_waitqueue_head(&ctx->sqo_sq_wait); INIT_LIST_HEAD(&ctx->sqd_list); INIT_LIST_HEAD(&ctx->cq_overflow_list); ret = io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX, sizeof(struct async_poll), 0); ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX, sizeof(struct io_async_msghdr), offsetof(struct io_async_msghdr, clear)); ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX, sizeof(struct io_async_rw), offsetof(struct io_async_rw, clear)); ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX, sizeof(struct io_async_cmd), sizeof(struct io_async_cmd)); ret |= io_futex_cache_init(ctx); ret |= io_rsrc_cache_init(ctx); if (ret) goto free_ref; init_completion(&ctx->ref_comp); xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); mutex_init(&ctx->uring_lock); init_waitqueue_head(&ctx->cq_wait); init_waitqueue_head(&ctx->poll_wq); spin_lock_init(&ctx->completion_lock); raw_spin_lock_init(&ctx->timeout_lock); INIT_WQ_LIST(&ctx->iopoll_list); INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); INIT_LIST_HEAD(&ctx->ltimeout_list); init_llist_head(&ctx->work_llist); INIT_LIST_HEAD(&ctx->tctx_list); ctx->submit_state.free_list.next = NULL; INIT_HLIST_HEAD(&ctx->waitid_list); xa_init_flags(&ctx->zcrx_ctxs, XA_FLAGS_ALLOC); #ifdef CONFIG_FUTEX INIT_HLIST_HEAD(&ctx->futex_list); #endif INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func); INIT_WQ_LIST(&ctx->submit_state.compl_reqs); INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd); io_napi_init(ctx); mutex_init(&ctx->mmap_lock); return ctx; free_ref: percpu_ref_exit(&ctx->refs); err: io_free_alloc_caches(ctx); kvfree(ctx->cancel_table.hbs); xa_destroy(&ctx->io_bl_xa); kfree(ctx); return NULL; } static void io_clean_op(struct io_kiocb *req) { if (unlikely(req->flags & REQ_F_BUFFER_SELECTED)) io_kbuf_drop_legacy(req); if (req->flags & REQ_F_NEED_CLEANUP) { const struct io_cold_def *def = &io_cold_defs[req->opcode]; if (def->cleanup) def->cleanup(req); } if (req->flags & REQ_F_INFLIGHT) atomic_dec(&req->tctx->inflight_tracked); if (req->flags & REQ_F_CREDS) put_cred(req->creds); if (req->flags & REQ_F_ASYNC_DATA) { kfree(req->async_data); req->async_data = NULL; } req->flags &= ~IO_REQ_CLEAN_FLAGS; } /* * Mark the request as inflight, so that file cancelation will find it. * Can be used if the file is an io_uring instance, or if the request itself * relies on ->mm being alive for the duration of the request. 
*/ inline void io_req_track_inflight(struct io_kiocb *req) { if (!(req->flags & REQ_F_INFLIGHT)) { req->flags |= REQ_F_INFLIGHT; atomic_inc(&req->tctx->inflight_tracked); } } static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) { if (WARN_ON_ONCE(!req->link)) return NULL; req->flags &= ~REQ_F_ARM_LTIMEOUT; req->flags |= REQ_F_LINK_TIMEOUT; /* linked timeouts should have two refs once prep'ed */ io_req_set_refcount(req); __io_req_set_refcount(req->link, 2); return req->link; } static void io_prep_async_work(struct io_kiocb *req) { const struct io_issue_def *def = &io_issue_defs[req->opcode]; struct io_ring_ctx *ctx = req->ctx; if (!(req->flags & REQ_F_CREDS)) { req->flags |= REQ_F_CREDS; req->creds = get_current_cred(); } req->work.list.next = NULL; atomic_set(&req->work.flags, 0); if (req->flags & REQ_F_FORCE_ASYNC) atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags); if (req->file && !(req->flags & REQ_F_FIXED_FILE)) req->flags |= io_file_get_flags(req->file); if (req->file && (req->flags & REQ_F_ISREG)) { bool should_hash = def->hash_reg_file; /* don't serialize this request if the fs doesn't need it */ if (should_hash && (req->file->f_flags & O_DIRECT) && (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE)) should_hash = false; if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL)) io_wq_hash_work(&req->work, file_inode(req->file)); } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) { if (def->unbound_nonreg_file) atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags); } } static void io_prep_async_link(struct io_kiocb *req) { struct io_kiocb *cur; if (req->flags & REQ_F_LINK_TIMEOUT) { struct io_ring_ctx *ctx = req->ctx; raw_spin_lock_irq(&ctx->timeout_lock); io_for_each_link(cur, req) io_prep_async_work(cur); raw_spin_unlock_irq(&ctx->timeout_lock); } else { io_for_each_link(cur, req) io_prep_async_work(cur); } } static void io_queue_iowq(struct io_kiocb *req) { struct io_uring_task *tctx = req->tctx; BUG_ON(!tctx); if ((current->flags & PF_KTHREAD) || !tctx->io_wq) { io_req_task_queue_fail(req, -ECANCELED); return; } /* init ->work of the whole link before punting */ io_prep_async_link(req); /* * Not expected to happen, but if we do have a bug where this _can_ * happen, catch it here and ensure the request is marked as * canceled. That will make io-wq go through the usual work cancel * procedure rather than attempt to run this request (or create a new * worker for it). 
*/ if (WARN_ON_ONCE(!same_thread_group(tctx->task, current))) atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags); trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work)); io_wq_enqueue(tctx->io_wq, &req->work); } static void io_req_queue_iowq_tw(struct io_tw_req tw_req, io_tw_token_t tw) { io_queue_iowq(tw_req.req); } void io_req_queue_iowq(struct io_kiocb *req) { req->io_task_work.func = io_req_queue_iowq_tw; io_req_task_work_add(req); } unsigned io_linked_nr(struct io_kiocb *req) { struct io_kiocb *tmp; unsigned nr = 0; io_for_each_link(tmp, req) nr++; return nr; } static __cold noinline void io_queue_deferred(struct io_ring_ctx *ctx) { bool drain_seen = false, first = true; lockdep_assert_held(&ctx->uring_lock); __io_req_caches_free(ctx); while (!list_empty(&ctx->defer_list)) { struct io_defer_entry *de = list_first_entry(&ctx->defer_list, struct io_defer_entry, list); drain_seen |= de->req->flags & REQ_F_IO_DRAIN; if ((drain_seen || first) && ctx->nr_req_allocated != ctx->nr_drained) return; list_del_init(&de->list); ctx->nr_drained -= io_linked_nr(de->req); io_req_task_queue(de->req); kfree(de); first = false; } } void __io_commit_cqring_flush(struct io_ring_ctx *ctx) { if (ctx->poll_activated) io_poll_wq_wake(ctx); if (ctx->off_timeout_used) io_flush_timeouts(ctx); if (ctx->has_evfd) io_eventfd_signal(ctx, true); } static inline void __io_cq_lock(struct io_ring_ctx *ctx) { if (!ctx->lockless_cq) spin_lock(&ctx->completion_lock); } static inline void io_cq_lock(struct io_ring_ctx *ctx) __acquires(ctx->completion_lock) { spin_lock(&ctx->completion_lock); } static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx) { io_commit_cqring(ctx); if (!ctx->task_complete) { if (!ctx->lockless_cq) spin_unlock(&ctx->completion_lock); /* IOPOLL rings only need to wake up if it's also SQPOLL */ if (!ctx->syscall_iopoll) io_cqring_wake(ctx); } io_commit_cqring_flush(ctx); } static void io_cq_unlock_post(struct io_ring_ctx *ctx) __releases(ctx->completion_lock) { io_commit_cqring(ctx); spin_unlock(&ctx->completion_lock); io_cqring_wake(ctx); io_commit_cqring_flush(ctx); } static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying) { lockdep_assert_held(&ctx->uring_lock); /* don't abort if we're dying, entries must get freed */ if (!dying && __io_cqring_events(ctx) == ctx->cq_entries) return; io_cq_lock(ctx); while (!list_empty(&ctx->cq_overflow_list)) { size_t cqe_size = sizeof(struct io_uring_cqe); struct io_uring_cqe *cqe; struct io_overflow_cqe *ocqe; bool is_cqe32 = false; ocqe = list_first_entry(&ctx->cq_overflow_list, struct io_overflow_cqe, list); if (ocqe->cqe.flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32) { is_cqe32 = true; cqe_size <<= 1; } if (ctx->flags & IORING_SETUP_CQE32) is_cqe32 = false; if (!dying) { if (!io_get_cqe_overflow(ctx, &cqe, true, is_cqe32)) break; memcpy(cqe, &ocqe->cqe, cqe_size); } list_del(&ocqe->list); kfree(ocqe); /* * For silly syzbot cases that deliberately overflow by huge * amounts, check if we need to resched and drop and * reacquire the locks if so. Nothing real would ever hit this. * Ideally we'd have a non-posting unlock for this, but hard * to care for a non-real case. 
*/ if (need_resched()) { ctx->cqe_sentinel = ctx->cqe_cached; io_cq_unlock_post(ctx); mutex_unlock(&ctx->uring_lock); cond_resched(); mutex_lock(&ctx->uring_lock); io_cq_lock(ctx); } } if (list_empty(&ctx->cq_overflow_list)) { clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq); atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); } io_cq_unlock_post(ctx); } static void io_cqring_overflow_kill(struct io_ring_ctx *ctx) { if (ctx->rings) __io_cqring_overflow_flush(ctx, true); } static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx) { mutex_lock(&ctx->uring_lock); __io_cqring_overflow_flush(ctx, false); mutex_unlock(&ctx->uring_lock); } /* must to be called somewhat shortly after putting a request */ static inline void io_put_task(struct io_kiocb *req) { struct io_uring_task *tctx = req->tctx; if (likely(tctx->task == current)) { tctx->cached_refs++; } else { percpu_counter_sub(&tctx->inflight, 1); if (unlikely(atomic_read(&tctx->in_cancel))) wake_up(&tctx->wait); put_task_struct(tctx->task); } } void io_task_refs_refill(struct io_uring_task *tctx) { unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR; percpu_counter_add(&tctx->inflight, refill); refcount_add(refill, &current->usage); tctx->cached_refs += refill; } __cold void io_uring_drop_tctx_refs(struct task_struct *task) { struct io_uring_task *tctx = task->io_uring; unsigned int refs = tctx->cached_refs; if (refs) { tctx->cached_refs = 0; percpu_counter_sub(&tctx->inflight, refs); put_task_struct_many(task, refs); } } static __cold bool io_cqring_add_overflow(struct io_ring_ctx *ctx, struct io_overflow_cqe *ocqe) { lockdep_assert_held(&ctx->completion_lock); if (!ocqe) { struct io_rings *r = ctx->rings; /* * If we're in ring overflow flush mode, or in task cancel mode, * or cannot allocate an overflow entry, then we need to drop it * on the floor. */ WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1); set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq); return false; } if (list_empty(&ctx->cq_overflow_list)) { set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq); atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); } list_add_tail(&ocqe->list, &ctx->cq_overflow_list); return true; } static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx, struct io_cqe *cqe, struct io_big_cqe *big_cqe, gfp_t gfp) { struct io_overflow_cqe *ocqe; size_t ocq_size = sizeof(struct io_overflow_cqe); bool is_cqe32 = false; if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32) { is_cqe32 = true; ocq_size += sizeof(struct io_uring_cqe); } ocqe = kzalloc(ocq_size, gfp | __GFP_ACCOUNT); trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe); if (ocqe) { ocqe->cqe.user_data = cqe->user_data; ocqe->cqe.res = cqe->res; ocqe->cqe.flags = cqe->flags; if (is_cqe32 && big_cqe) { ocqe->cqe.big_cqe[0] = big_cqe->extra1; ocqe->cqe.big_cqe[1] = big_cqe->extra2; } } if (big_cqe) big_cqe->extra1 = big_cqe->extra2 = 0; return ocqe; } /* * Fill an empty dummy CQE, in case alignment is off for posting a 32b CQE * because the ring is a single 16b entry away from wrapping. 
*/ static bool io_fill_nop_cqe(struct io_ring_ctx *ctx, unsigned int off) { if (__io_cqring_events(ctx) < ctx->cq_entries) { struct io_uring_cqe *cqe = &ctx->rings->cqes[off]; cqe->user_data = 0; cqe->res = 0; cqe->flags = IORING_CQE_F_SKIP; ctx->cached_cq_tail++; return true; } return false; } /* * writes to the cq entry need to come after reading head; the * control dependency is enough as we're using WRITE_ONCE to * fill the cq entry */ bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32) { struct io_rings *rings = ctx->rings; unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1); unsigned int free, queued, len; /* * Posting into the CQ when there are pending overflowed CQEs may break * ordering guarantees, which will affect links, F_MORE users and more. * Force overflow the completion. */ if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))) return false; /* * Post dummy CQE if a 32b CQE is needed and there's only room for a * 16b CQE before the ring wraps. */ if (cqe32 && off + 1 == ctx->cq_entries) { if (!io_fill_nop_cqe(ctx, off)) return false; off = 0; } /* userspace may cheat modifying the tail, be safe and do min */ queued = min(__io_cqring_events(ctx), ctx->cq_entries); free = ctx->cq_entries - queued; /* we need a contiguous range, limit based on the current array offset */ len = min(free, ctx->cq_entries - off); if (len < (cqe32 + 1)) return false; if (ctx->flags & IORING_SETUP_CQE32) { off <<= 1; len <<= 1; } ctx->cqe_cached = &rings->cqes[off]; ctx->cqe_sentinel = ctx->cqe_cached + len; return true; } static bool io_fill_cqe_aux32(struct io_ring_ctx *ctx, struct io_uring_cqe src_cqe[2]) { struct io_uring_cqe *cqe; if (WARN_ON_ONCE(!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))) return false; if (unlikely(!io_get_cqe(ctx, &cqe, true))) return false; memcpy(cqe, src_cqe, 2 * sizeof(*cqe)); trace_io_uring_complete(ctx, NULL, cqe); return true; } static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { bool cqe32 = cflags & IORING_CQE_F_32; struct io_uring_cqe *cqe; if (likely(io_get_cqe(ctx, &cqe, cqe32))) { WRITE_ONCE(cqe->user_data, user_data); WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->flags, cflags); if (cqe32) { WRITE_ONCE(cqe->big_cqe[0], 0); WRITE_ONCE(cqe->big_cqe[1], 0); } trace_io_uring_complete(ctx, NULL, cqe); return true; } return false; } static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags) { return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags }; } static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe, struct io_big_cqe *big_cqe) { struct io_overflow_cqe *ocqe; ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_KERNEL); spin_lock(&ctx->completion_lock); io_cqring_add_overflow(ctx, ocqe); spin_unlock(&ctx->completion_lock); } static __cold bool io_cqe_overflow_locked(struct io_ring_ctx *ctx, struct io_cqe *cqe, struct io_big_cqe *big_cqe) { struct io_overflow_cqe *ocqe; ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_ATOMIC); return io_cqring_add_overflow(ctx, ocqe); } bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { bool filled; io_cq_lock(ctx); filled = io_fill_cqe_aux(ctx, user_data, res, cflags); if (unlikely(!filled)) { struct io_cqe cqe = io_init_cqe(user_data, res, cflags); filled = io_cqe_overflow_locked(ctx, &cqe, NULL); } io_cq_unlock_post(ctx); return filled; } /* * Must be called from inline task_work so we know a flush will happen later, * and obviously with 
ctx->uring_lock held (tw always has that). */ void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { lockdep_assert_held(&ctx->uring_lock); lockdep_assert(ctx->lockless_cq); if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) { struct io_cqe cqe = io_init_cqe(user_data, res, cflags); io_cqe_overflow(ctx, &cqe, NULL); } ctx->submit_state.cq_flush = true; } /* * A helper for multishot requests posting additional CQEs. * Should only be used from a task_work including IO_URING_F_MULTISHOT. */ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags) { struct io_ring_ctx *ctx = req->ctx; bool posted; /* * If multishot has already posted deferred completions, ensure that * those are flushed first before posting this one. If not, CQEs * could get reordered. */ if (!wq_list_empty(&ctx->submit_state.compl_reqs)) __io_submit_flush_completions(ctx); lockdep_assert(!io_wq_current_is_worker()); lockdep_assert_held(&ctx->uring_lock); if (!ctx->lockless_cq) { spin_lock(&ctx->completion_lock); posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags); spin_unlock(&ctx->completion_lock); } else { posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags); } ctx->submit_state.cq_flush = true; return posted; } /* * A helper for multishot requests posting additional CQEs. * Should only be used from a task_work including IO_URING_F_MULTISHOT. */ bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe cqe[2]) { struct io_ring_ctx *ctx = req->ctx; bool posted; lockdep_assert(!io_wq_current_is_worker()); lockdep_assert_held(&ctx->uring_lock); cqe[0].user_data = req->cqe.user_data; if (!ctx->lockless_cq) { spin_lock(&ctx->completion_lock); posted = io_fill_cqe_aux32(ctx, cqe); spin_unlock(&ctx->completion_lock); } else { posted = io_fill_cqe_aux32(ctx, cqe); } ctx->submit_state.cq_flush = true; return posted; } static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags) { struct io_ring_ctx *ctx = req->ctx; bool completed = true; /* * All execution paths but io-wq use the deferred completions by * passing IO_URING_F_COMPLETE_DEFER and thus should not end up here. */ if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ))) return; /* * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires * the submitter task context, IOPOLL protects with uring_lock. */ if (ctx->lockless_cq || (req->flags & REQ_F_REISSUE)) { defer_complete: req->io_task_work.func = io_req_task_complete; io_req_task_work_add(req); return; } io_cq_lock(ctx); if (!(req->flags & REQ_F_CQE_SKIP)) completed = io_fill_cqe_req(ctx, req); io_cq_unlock_post(ctx); if (!completed) goto defer_complete; /* * We don't free the request here because we know it's called from * io-wq only, which holds a reference, so it cannot be the last put. */ req_ref_put(req); } void io_req_defer_failed(struct io_kiocb *req, s32 res) __must_hold(&ctx->uring_lock) { const struct io_cold_def *def = &io_cold_defs[req->opcode]; lockdep_assert_held(&req->ctx->uring_lock); req_set_fail(req); io_req_set_res(req, res, io_put_kbuf(req, res, NULL)); if (def->fail) def->fail(req); io_req_complete_defer(req); } /* * A request might get retired back into the request caches even before opcode * handlers and io_issue_sqe() are done with it, e.g. inline completion path. * Because of that, io_alloc_req() should be called only under ->uring_lock * and with extra caution to not get a request that is still worked on. 
*/ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { gfp_t gfp = GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO; void *reqs[IO_REQ_ALLOC_BATCH]; int ret; ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); /* * Bulk alloc is all-or-nothing. If we fail to get a batch, * retry single alloc to be on the safe side. */ if (unlikely(ret <= 0)) { reqs[0] = kmem_cache_alloc(req_cachep, gfp); if (!reqs[0]) return false; ret = 1; } percpu_ref_get_many(&ctx->refs, ret); ctx->nr_req_allocated += ret; while (ret--) { struct io_kiocb *req = reqs[ret]; io_req_add_to_cache(req, ctx); } return true; } __cold void io_free_req(struct io_kiocb *req) { /* refs were already put, restore them for io_req_task_complete() */ req->flags &= ~REQ_F_REFCOUNT; /* we only want to free it, don't post CQEs */ req->flags |= REQ_F_CQE_SKIP; req->io_task_work.func = io_req_task_complete; io_req_task_work_add(req); } static void __io_req_find_next_prep(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; spin_lock(&ctx->completion_lock); io_disarm_next(req); spin_unlock(&ctx->completion_lock); } static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) { struct io_kiocb *nxt; /* * If LINK is set, we have dependent requests in this chain. If we * didn't fail this request, queue the first one up, moving any other * dependencies to the next request. In case of failure, fail the rest * of the chain. */ if (unlikely(req->flags & IO_DISARM_MASK)) __io_req_find_next_prep(req); nxt = req->link; req->link = NULL; return nxt; } static void ctx_flush_and_put(struct io_ring_ctx *ctx, io_tw_token_t tw) { if (!ctx) return; if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); io_submit_flush_completions(ctx); mutex_unlock(&ctx->uring_lock); percpu_ref_put(&ctx->refs); } /* * Run queued task_work, returning the number of entries processed in *count. * If more entries than max_entries are available, stop processing once this * is reached and return the rest of the list. 
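 *
 * Caller contract, roughly (sketch, hypothetical follow-up name): the list
 * handed in is already in run order, and any remainder handed back is
 * still pending and must be re-queued or run on a later invocation:
 *
 *	node = io_handle_tw_list(node, &count, max_entries);
 *	if (node)
 *		requeue_or_run_later(node);	// cap was hit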
*/ struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries) { struct io_ring_ctx *ctx = NULL; struct io_tw_state ts = { }; do { struct llist_node *next = node->next; struct io_kiocb *req = container_of(node, struct io_kiocb, io_task_work.node); if (req->ctx != ctx) { ctx_flush_and_put(ctx, ts); ctx = req->ctx; mutex_lock(&ctx->uring_lock); percpu_ref_get(&ctx->refs); ts.cancel = io_should_terminate_tw(ctx); } INDIRECT_CALL_2(req->io_task_work.func, io_poll_task_func, io_req_rw_complete, (struct io_tw_req){req}, ts); node = next; (*count)++; if (unlikely(need_resched())) { ctx_flush_and_put(ctx, ts); ctx = NULL; cond_resched(); } } while (node && *count < max_entries); ctx_flush_and_put(ctx, ts); return node; } static __cold void __io_fallback_tw(struct llist_node *node, bool sync) { struct io_ring_ctx *last_ctx = NULL; struct io_kiocb *req; while (node) { req = container_of(node, struct io_kiocb, io_task_work.node); node = node->next; if (last_ctx != req->ctx) { if (last_ctx) { if (sync) flush_delayed_work(&last_ctx->fallback_work); percpu_ref_put(&last_ctx->refs); } last_ctx = req->ctx; percpu_ref_get(&last_ctx->refs); } if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist)) schedule_delayed_work(&last_ctx->fallback_work, 1); } if (last_ctx) { if (sync) flush_delayed_work(&last_ctx->fallback_work); percpu_ref_put(&last_ctx->refs); } } static void io_fallback_tw(struct io_uring_task *tctx, bool sync) { struct llist_node *node = llist_del_all(&tctx->task_list); __io_fallback_tw(node, sync); } struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count) { struct llist_node *node; node = llist_del_all(&tctx->task_list); if (node) { node = llist_reverse_order(node); node = io_handle_tw_list(node, count, max_entries); } /* relaxed read is enough as only the task itself sets ->in_cancel */ if (unlikely(atomic_read(&tctx->in_cancel))) io_uring_drop_tctx_refs(current); trace_io_uring_task_work_run(tctx, *count); return node; } void tctx_task_work(struct callback_head *cb) { struct io_uring_task *tctx; struct llist_node *ret; unsigned int count = 0; tctx = container_of(cb, struct io_uring_task, task_work); ret = tctx_task_work_run(tctx, UINT_MAX, &count); /* can't happen */ WARN_ON_ONCE(ret); } static void io_req_local_work_add(struct io_kiocb *req, unsigned flags) { struct io_ring_ctx *ctx = req->ctx; unsigned nr_wait, nr_tw, nr_tw_prev; struct llist_node *head; /* See comment above IO_CQ_WAKE_INIT */ BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES); /* * We don't know how many requests there are in the link and whether * they can even be queued lazily, fall back to non-lazy. */ if (req->flags & IO_REQ_LINK_FLAGS) flags &= ~IOU_F_TWQ_LAZY_WAKE; guard(rcu)(); head = READ_ONCE(ctx->work_llist.first); do { nr_tw_prev = 0; if (head) { struct io_kiocb *first_req = container_of(head, struct io_kiocb, io_task_work.node); /* * Might be executed at any moment, rely on * SLAB_TYPESAFE_BY_RCU to keep it alive. */ nr_tw_prev = READ_ONCE(first_req->nr_tw); } /* * Theoretically, it can overflow, but that's fine as one of * previous adds should've tried to wake the task. 
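 *
 * The wake decision pairs with the waiter roughly like this (sketch): the
 * waiter publishes how many completions it still needs in ctx->cq_wait_nr
 * before sleeping, and the adder only wakes once the queued-work count
 * reaches that:
 *
 *	waiter:	atomic_set(&ctx->cq_wait_nr, n);	// then sleeps
 *	adder:	if (nr_tw >= atomic_read(&ctx->cq_wait_nr))
 *			wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);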
*/ nr_tw = nr_tw_prev + 1; if (!(flags & IOU_F_TWQ_LAZY_WAKE)) nr_tw = IO_CQ_WAKE_FORCE; req->nr_tw = nr_tw; req->io_task_work.node.next = head; } while (!try_cmpxchg(&ctx->work_llist.first, &head, &req->io_task_work.node)); /* * cmpxchg implies a full barrier, which pairs with the barrier * in set_current_state() on the io_cqring_wait() side. It's used * to ensure that either we see updated ->cq_wait_nr, or waiters * going to sleep will observe the work added to the list, which * is similar to the wait/wawke task state sync. */ if (!head) { if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); if (ctx->has_evfd) io_eventfd_signal(ctx, false); } nr_wait = atomic_read(&ctx->cq_wait_nr); /* not enough or no one is waiting */ if (nr_tw < nr_wait) return; /* the previous add has already woken it up */ if (nr_tw_prev >= nr_wait) return; wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE); } static void io_req_normal_work_add(struct io_kiocb *req) { struct io_uring_task *tctx = req->tctx; struct io_ring_ctx *ctx = req->ctx; /* task_work already pending, we're done */ if (!llist_add(&req->io_task_work.node, &tctx->task_list)) return; if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); /* SQPOLL doesn't need the task_work added, it'll run it itself */ if (ctx->flags & IORING_SETUP_SQPOLL) { __set_notify_signal(tctx->task); return; } if (likely(!task_work_add(tctx->task, &tctx->task_work, ctx->notify_method))) return; io_fallback_tw(tctx, false); } void __io_req_task_work_add(struct io_kiocb *req, unsigned flags) { if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) io_req_local_work_add(req, flags); else io_req_normal_work_add(req); } void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags) { if (WARN_ON_ONCE(!(req->ctx->flags & IORING_SETUP_DEFER_TASKRUN))) return; __io_req_task_work_add(req, flags); } static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx) { struct llist_node *node = llist_del_all(&ctx->work_llist); __io_fallback_tw(node, false); node = llist_del_all(&ctx->retry_llist); __io_fallback_tw(node, false); } static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events, int min_events) { if (!io_local_work_pending(ctx)) return false; if (events < min_events) return true; if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); return false; } static int __io_run_local_work_loop(struct llist_node **node, io_tw_token_t tw, int events) { int ret = 0; while (*node) { struct llist_node *next = (*node)->next; struct io_kiocb *req = container_of(*node, struct io_kiocb, io_task_work.node); INDIRECT_CALL_2(req->io_task_work.func, io_poll_task_func, io_req_rw_complete, (struct io_tw_req){req}, tw); *node = next; if (++ret >= events) break; } return ret; } static int __io_run_local_work(struct io_ring_ctx *ctx, io_tw_token_t tw, int min_events, int max_events) { struct llist_node *node; unsigned int loops = 0; int ret = 0; if (WARN_ON_ONCE(ctx->submitter_task != current)) return -EEXIST; if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); again: tw.cancel = io_should_terminate_tw(ctx); min_events -= ret; ret = __io_run_local_work_loop(&ctx->retry_llist.first, tw, max_events); if (ctx->retry_llist.first) goto retry_done; /* * llists are in reverse order, flip it back the right way before * running the pending items. 
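 *
 * Example: work items added in the order A, B, C end up on the llist as
 * C -> B -> A, because llist_add() always pushes to the head. Without the
 * reversal below they would run newest-first:
 *
 *	llist_reverse_order(C -> B -> A)  ==>  A -> B -> C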
*/ node = llist_reverse_order(llist_del_all(&ctx->work_llist)); ret += __io_run_local_work_loop(&node, tw, max_events - ret); ctx->retry_llist.first = node; loops++; if (io_run_local_work_continue(ctx, ret, min_events)) goto again; retry_done: io_submit_flush_completions(ctx); if (io_run_local_work_continue(ctx, ret, min_events)) goto again; trace_io_uring_local_work_run(ctx, ret, loops); return ret; } static inline int io_run_local_work_locked(struct io_ring_ctx *ctx, int min_events) { struct io_tw_state ts = {}; if (!io_local_work_pending(ctx)) return 0; return __io_run_local_work(ctx, ts, min_events, max(IO_LOCAL_TW_DEFAULT_MAX, min_events)); } int io_run_local_work(struct io_ring_ctx *ctx, int min_events, int max_events) { struct io_tw_state ts = {}; int ret; mutex_lock(&ctx->uring_lock); ret = __io_run_local_work(ctx, ts, min_events, max_events); mutex_unlock(&ctx->uring_lock); return ret; } static void io_req_task_cancel(struct io_tw_req tw_req, io_tw_token_t tw) { struct io_kiocb *req = tw_req.req; io_tw_lock(req->ctx, tw); io_req_defer_failed(req, req->cqe.res); } void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw) { struct io_kiocb *req = tw_req.req; struct io_ring_ctx *ctx = req->ctx; io_tw_lock(ctx, tw); if (unlikely(tw.cancel)) io_req_defer_failed(req, -EFAULT); else if (req->flags & REQ_F_FORCE_ASYNC) io_queue_iowq(req); else io_queue_sqe(req, 0); } void io_req_task_queue_fail(struct io_kiocb *req, int ret) { io_req_set_res(req, ret, 0); req->io_task_work.func = io_req_task_cancel; io_req_task_work_add(req); } void io_req_task_queue(struct io_kiocb *req) { req->io_task_work.func = io_req_task_submit; io_req_task_work_add(req); } void io_queue_next(struct io_kiocb *req) { struct io_kiocb *nxt = io_req_find_next(req); if (nxt) io_req_task_queue(nxt); } static inline void io_req_put_rsrc_nodes(struct io_kiocb *req) { if (req->file_node) { io_put_rsrc_node(req->ctx, req->file_node); req->file_node = NULL; } if (req->flags & REQ_F_BUF_NODE) io_put_rsrc_node(req->ctx, req->buf_node); } static void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node) __must_hold(&ctx->uring_lock) { do { struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) { if (req->flags & REQ_F_REISSUE) { node = req->comp_list.next; req->flags &= ~REQ_F_REISSUE; io_queue_iowq(req); continue; } if (req->flags & REQ_F_REFCOUNT) { node = req->comp_list.next; if (!req_ref_put_and_test(req)) continue; } if ((req->flags & REQ_F_POLLED) && req->apoll) { struct async_poll *apoll = req->apoll; if (apoll->double_poll) kfree(apoll->double_poll); io_cache_free(&ctx->apoll_cache, apoll); req->flags &= ~REQ_F_POLLED; } if (req->flags & IO_REQ_LINK_FLAGS) io_queue_next(req); if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS)) io_clean_op(req); } io_put_file(req); io_req_put_rsrc_nodes(req); io_put_task(req); node = req->comp_list.next; io_req_add_to_cache(req, ctx); } while (node); } void __io_submit_flush_completions(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { struct io_submit_state *state = &ctx->submit_state; struct io_wq_work_node *node; __io_cq_lock(ctx); __wq_list_for_each(node, &state->compl_reqs) { struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); /* * Requests marked with REQUEUE should not post a CQE, they * will go through the io-wq retry machinery and post one * later. 
*/ if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REISSUE)) && unlikely(!io_fill_cqe_req(ctx, req))) { if (ctx->lockless_cq) io_cqe_overflow(ctx, &req->cqe, &req->big_cqe); else io_cqe_overflow_locked(ctx, &req->cqe, &req->big_cqe); } } __io_cq_unlock_post(ctx); if (!wq_list_empty(&state->compl_reqs)) { io_free_batch_list(ctx, state->compl_reqs.first); INIT_WQ_LIST(&state->compl_reqs); } if (unlikely(ctx->drain_active)) io_queue_deferred(ctx); ctx->submit_state.cq_flush = false; } static unsigned io_cqring_events(struct io_ring_ctx *ctx) { /* See comment at the top of this file */ smp_rmb(); return __io_cqring_events(ctx); } /* * We can't just wait for polled events to come to us, we have to actively * find and complete them. */ __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) { if (!(ctx->flags & IORING_SETUP_IOPOLL)) return; mutex_lock(&ctx->uring_lock); while (!wq_list_empty(&ctx->iopoll_list)) { /* let it sleep and repeat later if can't complete a request */ if (io_do_iopoll(ctx, true) == 0) break; /* * Ensure we allow local-to-the-cpu processing to take place, * in this case we need to ensure that we reap all events. * Also let task_work, etc. to progress by releasing the mutex */ if (need_resched()) { mutex_unlock(&ctx->uring_lock); cond_resched(); mutex_lock(&ctx->uring_lock); } } mutex_unlock(&ctx->uring_lock); if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) io_move_task_work_from_local(ctx); } static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned int min_events) { unsigned int nr_events = 0; unsigned long check_cq; min_events = min(min_events, ctx->cq_entries); lockdep_assert_held(&ctx->uring_lock); if (!io_allowed_run_tw(ctx)) return -EEXIST; check_cq = READ_ONCE(ctx->check_cq); if (unlikely(check_cq)) { if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) __io_cqring_overflow_flush(ctx, false); /* * Similarly do not spin if we have not informed the user of any * dropped CQE. */ if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) return -EBADR; } /* * Don't enter poll loop if we already have events pending. * If we do, we can potentially be spinning for commands that * already triggered a CQE (eg in error). */ if (io_cqring_events(ctx)) return 0; do { int ret = 0; /* * If a submit got punted to a workqueue, we can have the * application entering polling for a command before it gets * issued. That app will hold the uring_lock for the duration * of the poll right here, so we need to take a breather every * now and then to ensure that the issue has a chance to add * the poll to the issued list. Otherwise we can spin here * forever, while the workqueue is stuck trying to acquire the * very same mutex. */ if (wq_list_empty(&ctx->iopoll_list) || io_task_work_pending(ctx)) { u32 tail = ctx->cached_cq_tail; (void) io_run_local_work_locked(ctx, min_events); if (task_work_pending(current) || wq_list_empty(&ctx->iopoll_list)) { mutex_unlock(&ctx->uring_lock); io_run_task_work(); mutex_lock(&ctx->uring_lock); } /* some requests don't go through iopoll_list */ if (tail != ctx->cached_cq_tail || wq_list_empty(&ctx->iopoll_list)) break; } ret = io_do_iopoll(ctx, !min_events); if (unlikely(ret < 0)) return ret; if (task_sigpending(current)) return -EINTR; if (need_resched()) break; nr_events += ret; } while (nr_events < min_events); return 0; } void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw) { io_req_complete_defer(tw_req.req); } /* * After the iocb has been issued, it's safe to be found on the poll list. 
* Adding the kiocb to the list AFTER submission ensures that we don't * find it from a io_do_iopoll() thread before the issuer is done * accessing the kiocb cookie. */ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; /* workqueue context doesn't hold uring_lock, grab it now */ if (unlikely(needs_lock)) mutex_lock(&ctx->uring_lock); /* * Track whether we have multiple files in our lists. This will impact * how we do polling eventually, not spinning if we're on potentially * different devices. */ if (wq_list_empty(&ctx->iopoll_list)) { ctx->poll_multi_queue = false; } else if (!ctx->poll_multi_queue) { struct io_kiocb *list_req; list_req = container_of(ctx->iopoll_list.first, struct io_kiocb, comp_list); if (list_req->file != req->file) ctx->poll_multi_queue = true; } /* * For fast devices, IO may have already completed. If it has, add * it to the front so we find it first. */ if (READ_ONCE(req->iopoll_completed)) wq_list_add_head(&req->comp_list, &ctx->iopoll_list); else wq_list_add_tail(&req->comp_list, &ctx->iopoll_list); if (unlikely(needs_lock)) { /* * If IORING_SETUP_SQPOLL is enabled, sqes are either handle * in sq thread task context or in io worker task context. If * current task context is sq thread, we don't need to check * whether should wake up sq thread. */ if ((ctx->flags & IORING_SETUP_SQPOLL) && wq_has_sleeper(&ctx->sq_data->wait)) wake_up(&ctx->sq_data->wait); mutex_unlock(&ctx->uring_lock); } } io_req_flags_t io_file_get_flags(struct file *file) { io_req_flags_t res = 0; BUILD_BUG_ON(REQ_F_ISREG_BIT != REQ_F_SUPPORT_NOWAIT_BIT + 1); if (S_ISREG(file_inode(file)->i_mode)) res |= REQ_F_ISREG; if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT)) res |= REQ_F_SUPPORT_NOWAIT; return res; } static __cold void io_drain_req(struct io_kiocb *req) __must_hold(&ctx->uring_lock) { struct io_ring_ctx *ctx = req->ctx; bool drain = req->flags & IOSQE_IO_DRAIN; struct io_defer_entry *de; de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT); if (!de) { io_req_defer_failed(req, -ENOMEM); return; } io_prep_async_link(req); trace_io_uring_defer(req); de->req = req; ctx->nr_drained += io_linked_nr(req); list_add_tail(&de->list, &ctx->defer_list); io_queue_deferred(ctx); if (!drain && list_empty(&ctx->defer_list)) ctx->drain_active = false; } static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def, unsigned int issue_flags) { if (req->file || !def->needs_file) return true; if (req->flags & REQ_F_FIXED_FILE) req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags); else req->file = io_file_get_normal(req, req->cqe.fd); return !!req->file; } #define REQ_ISSUE_SLOW_FLAGS (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT) static inline int __io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags, const struct io_issue_def *def) { const struct cred *creds = NULL; struct io_kiocb *link = NULL; int ret; if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) { if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) creds = override_creds(req->creds); if (req->flags & REQ_F_ARM_LTIMEOUT) link = __io_prep_linked_timeout(req); } if (!def->audit_skip) audit_uring_entry(req->opcode); ret = def->issue(req, issue_flags); if (!def->audit_skip) audit_uring_exit(!ret, ret); if (unlikely(creds || link)) { if (creds) revert_creds(creds); if (link) io_queue_linked_timeout(link); } return ret; } static int io_issue_sqe(struct io_kiocb *req, unsigned int 
issue_flags) { const struct io_issue_def *def = &io_issue_defs[req->opcode]; int ret; if (unlikely(!io_assign_file(req, def, issue_flags))) return -EBADF; ret = __io_issue_sqe(req, issue_flags, def); if (ret == IOU_COMPLETE) { if (issue_flags & IO_URING_F_COMPLETE_DEFER) io_req_complete_defer(req); else io_req_complete_post(req, issue_flags); return 0; } if (ret == IOU_ISSUE_SKIP_COMPLETE) { ret = 0; /* If the op doesn't have a file, we're not polling for it */ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue) io_iopoll_req_issued(req, issue_flags); } return ret; } int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw) { const unsigned int issue_flags = IO_URING_F_NONBLOCK | IO_URING_F_MULTISHOT | IO_URING_F_COMPLETE_DEFER; int ret; io_tw_lock(req->ctx, tw); WARN_ON_ONCE(!req->file); if (WARN_ON_ONCE(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EFAULT; ret = __io_issue_sqe(req, issue_flags, &io_issue_defs[req->opcode]); WARN_ON_ONCE(ret == IOU_ISSUE_SKIP_COMPLETE); return ret; } struct io_wq_work *io_wq_free_work(struct io_wq_work *work) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_kiocb *nxt = NULL; if (req_ref_put_and_test_atomic(req)) { if (req->flags & IO_REQ_LINK_FLAGS) nxt = io_req_find_next(req); io_free_req(req); } return nxt ? &nxt->work : NULL; } void io_wq_submit_work(struct io_wq_work *work) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); const struct io_issue_def *def = &io_issue_defs[req->opcode]; unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ; bool needs_poll = false; int ret = 0, err = -ECANCELED; /* one will be dropped by io_wq_free_work() after returning to io-wq */ if (!(req->flags & REQ_F_REFCOUNT)) __io_req_set_refcount(req, 2); else req_ref_get(req); /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) { fail: io_req_task_queue_fail(req, err); return; } if (!io_assign_file(req, def, issue_flags)) { err = -EBADF; atomic_or(IO_WQ_WORK_CANCEL, &work->flags); goto fail; } /* * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the * submitter task context. Final request completions are handed to the * right context, however this is not the case of auxiliary CQEs, * which is the main mean of operation for multishot requests. * Don't allow any multishot execution from io-wq. It's more restrictive * than necessary and also cleaner. */ if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) { err = -EBADFD; if (!io_file_can_poll(req)) goto fail; if (req->file->f_flags & O_NONBLOCK || req->file->f_mode & FMODE_NOWAIT) { err = -ECANCELED; if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK) goto fail; return; } else { req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT); } } if (req->flags & REQ_F_FORCE_ASYNC) { bool opcode_poll = def->pollin || def->pollout; if (opcode_poll && io_file_can_poll(req)) { needs_poll = true; issue_flags |= IO_URING_F_NONBLOCK; } } do { ret = io_issue_sqe(req, issue_flags); if (ret != -EAGAIN) break; /* * If REQ_F_NOWAIT is set, then don't wait or retry with * poll. -EAGAIN is final for that case. */ if (req->flags & REQ_F_NOWAIT) break; /* * We can get EAGAIN for iopolled IO even though we're * forcing a sync submission from here, since we can't * wait for request slots on the block side. 
*/ if (!needs_poll) { if (!(req->ctx->flags & IORING_SETUP_IOPOLL)) break; if (io_wq_worker_stopped()) break; cond_resched(); continue; } if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK) return; /* aborted or ready, in either case retry blocking */ needs_poll = false; issue_flags &= ~IO_URING_F_NONBLOCK; } while (1); /* avoid locking problems by failing it from a clean context */ if (ret) io_req_task_queue_fail(req, ret); } inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; struct io_rsrc_node *node; struct file *file = NULL; io_ring_submit_lock(ctx, issue_flags); node = io_rsrc_node_lookup(&ctx->file_table.data, fd); if (node) { node->refs++; req->file_node = node; req->flags |= io_slot_flags(node); file = io_slot_file(node); } io_ring_submit_unlock(ctx, issue_flags); return file; } struct file *io_file_get_normal(struct io_kiocb *req, int fd) { struct file *file = fget(fd); trace_io_uring_file_get(req, fd); /* we don't allow fixed io_uring files */ if (file && io_is_uring_fops(file)) io_req_track_inflight(req); return file; } static int io_req_sqe_copy(struct io_kiocb *req, unsigned int issue_flags) { const struct io_cold_def *def = &io_cold_defs[req->opcode]; if (req->flags & REQ_F_SQE_COPIED) return 0; req->flags |= REQ_F_SQE_COPIED; if (!def->sqe_copy) return 0; if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_INLINE))) return -EFAULT; def->sqe_copy(req); return 0; } static void io_queue_async(struct io_kiocb *req, unsigned int issue_flags, int ret) __must_hold(&req->ctx->uring_lock) { if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) { fail: io_req_defer_failed(req, ret); return; } ret = io_req_sqe_copy(req, issue_flags); if (unlikely(ret)) goto fail; switch (io_arm_poll_handler(req, 0)) { case IO_APOLL_READY: io_req_task_queue(req); break; case IO_APOLL_ABORTED: io_queue_iowq(req); break; case IO_APOLL_OK: break; } } static inline void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags) __must_hold(&req->ctx->uring_lock) { unsigned int issue_flags = IO_URING_F_NONBLOCK | IO_URING_F_COMPLETE_DEFER | extra_flags; int ret; ret = io_issue_sqe(req, issue_flags); /* * We async punt it if the file wasn't marked NOWAIT, or if the file * doesn't support non-blocking read/write attempts */ if (unlikely(ret)) io_queue_async(req, issue_flags, ret); } static void io_queue_sqe_fallback(struct io_kiocb *req) __must_hold(&req->ctx->uring_lock) { if (unlikely(req->flags & REQ_F_FAIL)) { /* * We don't submit, fail them all, for that replace hardlinks * with normal links. Extra REQ_F_LINK is tolerated. */ req->flags &= ~REQ_F_HARDLINK; req->flags |= REQ_F_LINK; io_req_defer_failed(req, req->cqe.res); } else { /* can't fail with IO_URING_F_INLINE */ io_req_sqe_copy(req, IO_URING_F_INLINE); if (unlikely(req->ctx->drain_active)) io_drain_req(req); else io_queue_iowq(req); } } /* * Check SQE restrictions (opcode and flags). * * Returns 'true' if SQE is allowed, 'false' otherwise. 
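 *
 * These bitmaps are populated via IORING_REGISTER_RESTRICTIONS while the
 * ring is still IORING_SETUP_R_DISABLED. A rough userspace sketch using
 * the raw io_uring_register() syscall:
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode	= IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op	= IORING_OP_READ },
 *		{ .opcode	= IORING_RESTRICTION_SQE_FLAGS_ALLOWED,
 *		  .sqe_flags	= IOSQE_FIXED_FILE },
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_RESTRICTIONS, res, 2);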
*/ static inline bool io_check_restriction(struct io_ring_ctx *ctx, struct io_kiocb *req, unsigned int sqe_flags) { if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) return false; if ((sqe_flags & ctx->restrictions.sqe_flags_required) != ctx->restrictions.sqe_flags_required) return false; if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed | ctx->restrictions.sqe_flags_required)) return false; return true; } static void io_init_drain(struct io_ring_ctx *ctx) { struct io_kiocb *head = ctx->submit_state.link.head; ctx->drain_active = true; if (head) { /* * If we need to drain a request in the middle of a link, drain * the head request and the next request/link after the current * link. Considering sequential execution of links, * REQ_F_IO_DRAIN will be maintained for every request of our * link. */ head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; ctx->drain_next = true; } } static __cold int io_init_fail_req(struct io_kiocb *req, int err) { /* ensure per-opcode data is cleared if we fail before prep */ memset(&req->cmd.data, 0, sizeof(req->cmd.data)); return err; } static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe, unsigned int *left) __must_hold(&ctx->uring_lock) { const struct io_issue_def *def; unsigned int sqe_flags; int personality; u8 opcode; req->ctx = ctx; req->opcode = opcode = READ_ONCE(sqe->opcode); /* same numerical values with corresponding REQ_F_*, safe to copy */ sqe_flags = READ_ONCE(sqe->flags); req->flags = (__force io_req_flags_t) sqe_flags; req->cqe.user_data = READ_ONCE(sqe->user_data); req->file = NULL; req->tctx = current->io_uring; req->cancel_seq_set = false; req->async_data = NULL; if (unlikely(opcode >= IORING_OP_LAST)) { req->opcode = 0; return io_init_fail_req(req, -EINVAL); } opcode = array_index_nospec(opcode, IORING_OP_LAST); def = &io_issue_defs[opcode]; if (def->is_128 && !(ctx->flags & IORING_SETUP_SQE128)) { /* * A 128b op on a non-128b SQ requires mixed SQE support as * well as 2 contiguous entries. */ if (!(ctx->flags & IORING_SETUP_SQE_MIXED) || *left < 2 || !(ctx->cached_sq_head & (ctx->sq_entries - 1))) return io_init_fail_req(req, -EINVAL); /* * A 128b operation on a mixed SQ uses two entries, so we have * to increment the head and cached refs, and decrement what's * left. 
*/ current->io_uring->cached_refs++; ctx->cached_sq_head++; (*left)--; } if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { /* enforce forwards compatibility on users */ if (sqe_flags & ~SQE_VALID_FLAGS) return io_init_fail_req(req, -EINVAL); if (sqe_flags & IOSQE_BUFFER_SELECT) { if (!def->buffer_select) return io_init_fail_req(req, -EOPNOTSUPP); req->buf_index = READ_ONCE(sqe->buf_group); } if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS) ctx->drain_disabled = true; if (sqe_flags & IOSQE_IO_DRAIN) { if (ctx->drain_disabled) return io_init_fail_req(req, -EOPNOTSUPP); io_init_drain(ctx); } } if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) return io_init_fail_req(req, -EACCES); /* knock it to the slow queue path, will be drained there */ if (ctx->drain_active) req->flags |= REQ_F_FORCE_ASYNC; /* if there is no link, we're at "next" request and need to drain */ if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { ctx->drain_next = false; ctx->drain_active = true; req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; } } if (!def->ioprio && sqe->ioprio) return io_init_fail_req(req, -EINVAL); if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL)) return io_init_fail_req(req, -EINVAL); if (def->needs_file) { struct io_submit_state *state = &ctx->submit_state; req->cqe.fd = READ_ONCE(sqe->fd); /* * Plug now if we have more than 2 IO left after this, and the * target is potentially a read/write to block based storage. */ if (state->need_plug && def->plug) { state->plug_started = true; state->need_plug = false; blk_start_plug_nr_ios(&state->plug, state->submit_nr); } } personality = READ_ONCE(sqe->personality); if (personality) { int ret; req->creds = xa_load(&ctx->personalities, personality); if (!req->creds) return io_init_fail_req(req, -EINVAL); get_cred(req->creds); ret = security_uring_override_creds(req->creds); if (ret) { put_cred(req->creds); return io_init_fail_req(req, ret); } req->flags |= REQ_F_CREDS; } return def->prep(req, sqe); } static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe, struct io_kiocb *req, int ret) { struct io_ring_ctx *ctx = req->ctx; struct io_submit_link *link = &ctx->submit_state.link; struct io_kiocb *head = link->head; trace_io_uring_req_failed(sqe, req, ret); /* * Avoid breaking links in the middle as it renders links with SQPOLL * unusable. Instead of failing eagerly, continue assembling the link if * applicable and mark the head with REQ_F_FAIL. The link flushing code * should find the flag and handle the rest. */ req_fail_link_node(req, ret); if (head && !(head->flags & REQ_F_FAIL)) req_fail_link_node(head, -ECANCELED); if (!(req->flags & IO_REQ_LINK_FLAGS)) { if (head) { link->last->link = req; link->head = NULL; req = head; } io_queue_sqe_fallback(req); return ret; } if (head) link->last->link = req; else link->head = req; link->last = req; return 0; } static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe, unsigned int *left) __must_hold(&ctx->uring_lock) { struct io_submit_link *link = &ctx->submit_state.link; int ret; ret = io_init_req(ctx, req, sqe, left); if (unlikely(ret)) return io_submit_fail_init(sqe, req, ret); trace_io_uring_submit_req(req); /* * If we already have a head request, queue this one for async * submittal once the head completes. If we don't have a head but * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be * submitted sync once the chain is complete. 
If none of those * conditions are true (normal request), then just queue it. */ if (unlikely(link->head)) { trace_io_uring_link(req, link->last); io_req_sqe_copy(req, IO_URING_F_INLINE); link->last->link = req; link->last = req; if (req->flags & IO_REQ_LINK_FLAGS) return 0; /* last request of the link, flush it */ req = link->head; link->head = NULL; if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)) goto fallback; } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS | REQ_F_FORCE_ASYNC | REQ_F_FAIL))) { if (req->flags & IO_REQ_LINK_FLAGS) { link->head = req; link->last = req; } else { fallback: io_queue_sqe_fallback(req); } return 0; } io_queue_sqe(req, IO_URING_F_INLINE); return 0; } /* * Batched submission is done, ensure local IO is flushed out. */ static void io_submit_state_end(struct io_ring_ctx *ctx) { struct io_submit_state *state = &ctx->submit_state; if (unlikely(state->link.head)) io_queue_sqe_fallback(state->link.head); /* flush only after queuing links as they can generate completions */ io_submit_flush_completions(ctx); if (state->plug_started) blk_finish_plug(&state->plug); } /* * Start submission side cache. */ static void io_submit_state_start(struct io_submit_state *state, unsigned int max_ios) { state->plug_started = false; state->need_plug = max_ios > 2; state->submit_nr = max_ios; /* set only head, no need to init link_last in advance */ state->link.head = NULL; } static void io_commit_sqring(struct io_ring_ctx *ctx) { struct io_rings *rings = ctx->rings; /* * Ensure any loads from the SQEs are done at this point, * since once we write the new head, the application could * write new data to them. */ smp_store_release(&rings->sq.head, ctx->cached_sq_head); } /* * Fetch an sqe, if one is available. Note this returns a pointer to memory * that is mapped by userspace. This means that care needs to be taken to * ensure that reads are stable, as we cannot rely on userspace always * being a good citizen. If members of the sqe are validated and then later * used, it's important that those reads are done through READ_ONCE() to * prevent a re-load down the line. */ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) { unsigned mask = ctx->sq_entries - 1; unsigned head = ctx->cached_sq_head++ & mask; if (static_branch_unlikely(&io_key_has_sqarray) && (!(ctx->flags & IORING_SETUP_NO_SQARRAY))) { head = READ_ONCE(ctx->sq_array[head]); if (unlikely(head >= ctx->sq_entries)) { WRITE_ONCE(ctx->rings->sq_dropped, READ_ONCE(ctx->rings->sq_dropped) + 1); return false; } head = array_index_nospec(head, ctx->sq_entries); } /* * The cached sq head (or cq tail) serves two purposes: * * 1) allows us to batch the cost of updating the user visible * head updates. * 2) allows the kernel side to track the head on its own, even * though the application is the one updating it. 
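 *
 * The shared SQ protocol this relies on, seen from the producer side
 * (sketch, userspace pseudo-code with hypothetical mapping names):
 *
 *	tail = *ring->sq_tail;			// only userspace writes the tail
 *	fill_sqe(&sqes[tail & *ring->sq_mask]);
 *	smp_store_release(ring->sq_tail, tail + 1);	// publish to the kernel
 *
 * while the head written back by io_commit_sqring() above tells userspace
 * which SQE slots may be reused.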
*/ /* double index for 128-byte SQEs, twice as long */ if (ctx->flags & IORING_SETUP_SQE128) head <<= 1; *sqe = &ctx->sq_sqes[head]; return true; } int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) __must_hold(&ctx->uring_lock) { unsigned int entries = io_sqring_entries(ctx); unsigned int left; int ret; entries = min(nr, entries); if (unlikely(!entries)) return 0; ret = left = entries; io_get_task_refs(left); io_submit_state_start(&ctx->submit_state, left); do { const struct io_uring_sqe *sqe; struct io_kiocb *req; if (unlikely(!io_alloc_req(ctx, &req))) break; if (unlikely(!io_get_sqe(ctx, &sqe))) { io_req_add_to_cache(req, ctx); break; } /* * Continue submitting even for sqe failure if the * ring was setup with IORING_SETUP_SUBMIT_ALL */ if (unlikely(io_submit_sqe(ctx, req, sqe, &left)) && !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) { left--; break; } } while (--left); if (unlikely(left)) { ret -= left; /* try again if it submitted nothing and can't allocate a req */ if (!ret && io_req_cache_empty(ctx)) ret = -EAGAIN; current->io_uring->cached_refs += left; } io_submit_state_end(ctx); /* Commit SQ ring head once we've consumed and submitted all SQEs */ io_commit_sqring(ctx); return ret; } static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, int wake_flags, void *key) { struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq); /* * Cannot safely flush overflowed CQEs from here, ensure we wake up * the task, and the next invocation will do it. */ if (io_should_wake(iowq) || io_has_work(iowq->ctx)) return autoremove_wake_function(curr, mode, wake_flags, key); return -1; } int io_run_task_work_sig(struct io_ring_ctx *ctx) { if (io_local_work_pending(ctx)) { __set_current_state(TASK_RUNNING); if (io_run_local_work(ctx, INT_MAX, IO_LOCAL_TW_DEFAULT_MAX) > 0) return 0; } if (io_run_task_work() > 0) return 0; if (task_sigpending(current)) return -EINTR; return 0; } static bool current_pending_io(void) { struct io_uring_task *tctx = current->io_uring; if (!tctx) return false; return percpu_counter_read_positive(&tctx->inflight); } static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer) { struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t); WRITE_ONCE(iowq->hit_timeout, 1); iowq->min_timeout = 0; wake_up_process(iowq->wq.private); return HRTIMER_NORESTART; } /* * Doing min_timeout portion. If we saw any timeouts, events, or have work, * wake up. If not, and we have a normal timeout, switch to that and keep * sleeping. */ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer) { struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t); struct io_ring_ctx *ctx = iowq->ctx; /* no general timeout, or shorter (or equal), we are done */ if (iowq->timeout == KTIME_MAX || ktime_compare(iowq->min_timeout, iowq->timeout) >= 0) goto out_wake; /* work we may need to run, wake function will see if we need to wake */ if (io_has_work(ctx)) goto out_wake; /* got events since we started waiting, min timeout is done */ if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail)) goto out_wake; /* if we have any events and min timeout expired, we're done */ if (io_cqring_events(ctx)) goto out_wake; /* * If using deferred task_work running and application is waiting on * more than one request, ensure we reset it now where we are switching * to normal sleeps. Any request completion post min_wait should wake * the task and return. 
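 *
 * Userspace opts into this two-stage wait through the extended argument
 * (sketch, raw 6-argument io_uring_enter() with IORING_ENTER_EXT_ARG):
 *
 *	struct io_uring_getevents_arg arg = {
 *		.min_wait_usec	= 100,			// min_timeout phase
 *		.ts		= (__u64)(uintptr_t)&ts,	// overall timeout
 *	};
 *	io_uring_enter(ring_fd, 0, want_events,
 *		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		       &arg, sizeof(arg));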
*/ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { atomic_set(&ctx->cq_wait_nr, 1); smp_mb(); if (!llist_empty(&ctx->work_llist)) goto out_wake; } /* any generated CQE posted past this time should wake us up */ iowq->cq_tail = iowq->cq_min_tail; hrtimer_update_function(&iowq->t, io_cqring_timer_wakeup); hrtimer_set_expires(timer, iowq->timeout); return HRTIMER_RESTART; out_wake: return io_cqring_timer_wakeup(timer); } static int io_cqring_schedule_timeout(struct io_wait_queue *iowq, clockid_t clock_id, ktime_t start_time) { ktime_t timeout; if (iowq->min_timeout) { timeout = ktime_add_ns(iowq->min_timeout, start_time); hrtimer_setup_on_stack(&iowq->t, io_cqring_min_timer_wakeup, clock_id, HRTIMER_MODE_ABS); } else { timeout = iowq->timeout; hrtimer_setup_on_stack(&iowq->t, io_cqring_timer_wakeup, clock_id, HRTIMER_MODE_ABS); } hrtimer_set_expires_range_ns(&iowq->t, timeout, 0); hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS); if (!READ_ONCE(iowq->hit_timeout)) schedule(); hrtimer_cancel(&iowq->t); destroy_hrtimer_on_stack(&iowq->t); __set_current_state(TASK_RUNNING); return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0; } struct ext_arg { size_t argsz; struct timespec64 ts; const sigset_t __user *sig; ktime_t min_time; bool ts_set; bool iowait; }; static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx, struct io_wait_queue *iowq, struct ext_arg *ext_arg, ktime_t start_time) { int ret = 0; /* * Mark us as being in io_wait if we have pending requests, so cpufreq * can take into account that the task is waiting for IO - turns out * to be important for low QD IO. */ if (ext_arg->iowait && current_pending_io()) current->in_iowait = 1; if (iowq->timeout != KTIME_MAX || iowq->min_timeout) ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time); else schedule(); current->in_iowait = 0; return ret; } /* If this returns > 0, the caller should retry */ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, struct io_wait_queue *iowq, struct ext_arg *ext_arg, ktime_t start_time) { if (unlikely(READ_ONCE(ctx->check_cq))) return 1; if (unlikely(io_local_work_pending(ctx))) return 1; if (unlikely(task_work_pending(current))) return 1; if (unlikely(task_sigpending(current))) return -EINTR; if (unlikely(io_should_wake(iowq))) return 0; return __io_cqring_wait_schedule(ctx, iowq, ext_arg, start_time); } /* * Wait until events become available, if we don't already have some. The * application must reap them itself, as they reside on the shared cq ring. 
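 *
 * Reaping on the application side follows the usual ring protocol
 * (sketch, userspace pseudo-code with hypothetical mapping names):
 *
 *	head = *ring->cq_head;			// only userspace writes the head
 *	while (head != smp_load_acquire(ring->cq_tail)) {
 *		cqe = &ring->cqes[head & *ring->cq_mask];
 *		handle(cqe->user_data, cqe->res, cqe->flags);
 *		head++;
 *	}
 *	smp_store_release(ring->cq_head, head);	// hand slots back to the kernel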
*/ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags, struct ext_arg *ext_arg) { struct io_wait_queue iowq; struct io_rings *rings = ctx->rings; ktime_t start_time; int ret; min_events = min_t(int, min_events, ctx->cq_entries); if (!io_allowed_run_tw(ctx)) return -EEXIST; if (io_local_work_pending(ctx)) io_run_local_work(ctx, min_events, max(IO_LOCAL_TW_DEFAULT_MAX, min_events)); io_run_task_work(); if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))) io_cqring_do_overflow_flush(ctx); if (__io_cqring_events_user(ctx) >= min_events) return 0; init_waitqueue_func_entry(&iowq.wq, io_wake_function); iowq.wq.private = current; INIT_LIST_HEAD(&iowq.wq.entry); iowq.ctx = ctx; iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail); iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); iowq.hit_timeout = 0; iowq.min_timeout = ext_arg->min_time; iowq.timeout = KTIME_MAX; start_time = io_get_time(ctx); if (ext_arg->ts_set) { iowq.timeout = timespec64_to_ktime(ext_arg->ts); if (!(flags & IORING_ENTER_ABS_TIMER)) iowq.timeout = ktime_add(iowq.timeout, start_time); } if (ext_arg->sig) { #ifdef CONFIG_COMPAT if (in_compat_syscall()) ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig, ext_arg->argsz); else #endif ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz); if (ret) return ret; } io_napi_busy_loop(ctx, &iowq); trace_io_uring_cqring_wait(ctx, min_events); do { unsigned long check_cq; int nr_wait; /* if min timeout has been hit, don't reset wait count */ if (!iowq.hit_timeout) nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail); else nr_wait = 1; if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { atomic_set(&ctx->cq_wait_nr, nr_wait); set_current_state(TASK_INTERRUPTIBLE); } else { prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, TASK_INTERRUPTIBLE); } ret = io_cqring_wait_schedule(ctx, &iowq, ext_arg, start_time); __set_current_state(TASK_RUNNING); atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT); /* * Run task_work after scheduling and before io_should_wake(). * If we got woken because of task_work being processed, run it * now rather than let the caller do another wait loop. */ if (io_local_work_pending(ctx)) io_run_local_work(ctx, nr_wait, nr_wait); io_run_task_work(); /* * Non-local task_work will be run on exit to userspace, but * if we're using DEFER_TASKRUN, then we could have waited * with a timeout for a number of requests. If the timeout * hits, we could have some requests ready to process. Ensure * this break is _after_ we have run task_work, to avoid * deferring running potentially pending requests until the * next time we wait for events. */ if (ret < 0) break; check_cq = READ_ONCE(ctx->check_cq); if (unlikely(check_cq)) { /* let the caller flush overflows, retry */ if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) io_cqring_do_overflow_flush(ctx); if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) { ret = -EBADR; break; } } if (io_should_wake(&iowq)) { ret = 0; break; } cond_resched(); } while (1); if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) finish_wait(&ctx->cq_wait, &iowq.wq); restore_saved_sigmask_unless(ret == -EINTR); return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? 
ret : 0; } static void io_rings_free(struct io_ring_ctx *ctx) { io_free_region(ctx->user, &ctx->sq_region); io_free_region(ctx->user, &ctx->ring_region); ctx->rings = NULL; ctx->sq_sqes = NULL; } static int rings_size(unsigned int flags, unsigned int sq_entries, unsigned int cq_entries, struct io_rings_layout *rl) { struct io_rings *rings; size_t sqe_size; size_t off; if (flags & IORING_SETUP_CQE_MIXED) { if (cq_entries < 2) return -EOVERFLOW; } if (flags & IORING_SETUP_SQE_MIXED) { if (sq_entries < 2) return -EOVERFLOW; } rl->sq_array_offset = SIZE_MAX; sqe_size = sizeof(struct io_uring_sqe); if (flags & IORING_SETUP_SQE128) sqe_size *= 2; rl->sq_size = array_size(sqe_size, sq_entries); if (rl->sq_size == SIZE_MAX) return -EOVERFLOW; off = struct_size(rings, cqes, cq_entries); if (flags & IORING_SETUP_CQE32) off = size_mul(off, 2); if (off == SIZE_MAX) return -EOVERFLOW; #ifdef CONFIG_SMP off = ALIGN(off, SMP_CACHE_BYTES); if (off == 0) return -EOVERFLOW; #endif if (!(flags & IORING_SETUP_NO_SQARRAY)) { size_t sq_array_size; rl->sq_array_offset = off; sq_array_size = array_size(sizeof(u32), sq_entries); off = size_add(off, sq_array_size); if (off == SIZE_MAX) return -EOVERFLOW; } rl->rings_size = off; return 0; } static __cold void __io_req_caches_free(struct io_ring_ctx *ctx) { struct io_kiocb *req; int nr = 0; while (!io_req_cache_empty(ctx)) { req = io_extract_req(ctx); io_poison_req(req); kmem_cache_free(req_cachep, req); nr++; } if (nr) { ctx->nr_req_allocated -= nr; percpu_ref_put_many(&ctx->refs, nr); } } static __cold void io_req_caches_free(struct io_ring_ctx *ctx) { guard(mutex)(&ctx->uring_lock); __io_req_caches_free(ctx); } static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) { io_sq_thread_finish(ctx); mutex_lock(&ctx->uring_lock); io_sqe_buffers_unregister(ctx); io_sqe_files_unregister(ctx); io_unregister_zcrx_ifqs(ctx); io_cqring_overflow_kill(ctx); io_eventfd_unregister(ctx); io_free_alloc_caches(ctx); io_destroy_buffers(ctx); io_free_region(ctx->user, &ctx->param_region); mutex_unlock(&ctx->uring_lock); if (ctx->sq_creds) put_cred(ctx->sq_creds); if (ctx->submitter_task) put_task_struct(ctx->submitter_task); WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list)); if (ctx->mm_account) { mmdrop(ctx->mm_account); ctx->mm_account = NULL; } io_rings_free(ctx); if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) static_branch_dec(&io_key_has_sqarray); percpu_ref_exit(&ctx->refs); free_uid(ctx->user); io_req_caches_free(ctx); WARN_ON_ONCE(ctx->nr_req_allocated); if (ctx->hash_map) io_wq_put_hash(ctx->hash_map); io_napi_free(ctx); kvfree(ctx->cancel_table.hbs); xa_destroy(&ctx->io_bl_xa); kfree(ctx); } static __cold void io_activate_pollwq_cb(struct callback_head *cb) { struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx, poll_wq_task_work); mutex_lock(&ctx->uring_lock); ctx->poll_activated = true; mutex_unlock(&ctx->uring_lock); /* * Wake ups for some events between start of polling and activation * might've been lost due to loose synchronisation. 
*/ wake_up_all(&ctx->poll_wq); percpu_ref_put(&ctx->refs); } __cold void io_activate_pollwq(struct io_ring_ctx *ctx) { spin_lock(&ctx->completion_lock); /* already activated or in progress */ if (ctx->poll_activated || ctx->poll_wq_task_work.func) goto out; if (WARN_ON_ONCE(!ctx->task_complete)) goto out; if (!ctx->submitter_task) goto out; /* * with ->submitter_task only the submitter task completes requests, we * only need to sync with it, which is done by injecting a tw */ init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb); percpu_ref_get(&ctx->refs); if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL)) percpu_ref_put(&ctx->refs); out: spin_unlock(&ctx->completion_lock); } static __poll_t io_uring_poll(struct file *file, poll_table *wait) { struct io_ring_ctx *ctx = file->private_data; __poll_t mask = 0; if (unlikely(!ctx->poll_activated)) io_activate_pollwq(ctx); /* * provides mb() which pairs with barrier from wq_has_sleeper * call in io_commit_cqring */ poll_wait(file, &ctx->poll_wq, wait); if (!io_sqring_full(ctx)) mask |= EPOLLOUT | EPOLLWRNORM; /* * Don't flush cqring overflow list here, just do a simple check. * Otherwise there could possible be ABBA deadlock: * CPU0 CPU1 * ---- ---- * lock(&ctx->uring_lock); * lock(&ep->mtx); * lock(&ctx->uring_lock); * lock(&ep->mtx); * * Users may get EPOLLIN meanwhile seeing nothing in cqring, this * pushes them to do the flush. */ if (__io_cqring_events_user(ctx) || io_has_work(ctx)) mask |= EPOLLIN | EPOLLRDNORM; return mask; } struct io_tctx_exit { struct callback_head task_work; struct completion completion; struct io_ring_ctx *ctx; }; static __cold void io_tctx_exit_cb(struct callback_head *cb) { struct io_uring_task *tctx = current->io_uring; struct io_tctx_exit *work; work = container_of(cb, struct io_tctx_exit, task_work); /* * When @in_cancel, we're in cancellation and it's racy to remove the * node. It'll be removed by the end of cancellation, just ignore it. * tctx can be NULL if the queueing of this task_work raced with * work cancelation off the exec path. */ if (tctx && !atomic_read(&tctx->in_cancel)) io_uring_del_tctx_node((unsigned long)work->ctx); complete(&work->completion); } static __cold void io_ring_exit_work(struct work_struct *work) { struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); unsigned long timeout = jiffies + HZ * 60 * 5; unsigned long interval = HZ / 20; struct io_tctx_exit exit; struct io_tctx_node *node; int ret; /* * If we're doing polled IO and end up having requests being * submitted async (out-of-line), then completions can come in while * we're waiting for refs to drop. We need to reap these manually, * as nobody else will be looking for them. 
*/ do { if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) { mutex_lock(&ctx->uring_lock); io_cqring_overflow_kill(ctx); mutex_unlock(&ctx->uring_lock); } if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) io_move_task_work_from_local(ctx); /* The SQPOLL thread never reaches this path */ while (io_uring_try_cancel_requests(ctx, NULL, true, false)) cond_resched(); if (ctx->sq_data) { struct io_sq_data *sqd = ctx->sq_data; struct task_struct *tsk; io_sq_thread_park(sqd); tsk = sqpoll_task_locked(sqd); if (tsk && tsk->io_uring && tsk->io_uring->io_wq) io_wq_cancel_cb(tsk->io_uring->io_wq, io_cancel_ctx_cb, ctx, true); io_sq_thread_unpark(sqd); } io_req_caches_free(ctx); if (WARN_ON_ONCE(time_after(jiffies, timeout))) { /* there is little hope left, don't run it too often */ interval = HZ * 60; } /* * This is really an uninterruptible wait, as it has to be * complete. But it's also run from a kworker, which doesn't * take signals, so it's fine to make it interruptible. This * avoids scenarios where we knowingly can wait much longer * on completions, for example if someone does a SIGSTOP on * a task that needs to finish task_work to make this loop * complete. That's a synthetic situation that should not * cause a stuck task backtrace, and hence a potential panic * on stuck tasks if that is enabled. */ } while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval)); init_completion(&exit.completion); init_task_work(&exit.task_work, io_tctx_exit_cb); exit.ctx = ctx; mutex_lock(&ctx->uring_lock); while (!list_empty(&ctx->tctx_list)) { WARN_ON_ONCE(time_after(jiffies, timeout)); node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, ctx_node); /* don't spin on a single task if cancellation failed */ list_rotate_left(&ctx->tctx_list); ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); if (WARN_ON_ONCE(ret)) continue; mutex_unlock(&ctx->uring_lock); /* * See comment above for * wait_for_completion_interruptible_timeout() on why this * wait is marked as interruptible. */ wait_for_completion_interruptible(&exit.completion); mutex_lock(&ctx->uring_lock); } mutex_unlock(&ctx->uring_lock); spin_lock(&ctx->completion_lock); spin_unlock(&ctx->completion_lock); /* pairs with RCU read section in io_req_local_work_add() */ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) synchronize_rcu(); io_ring_ctx_free(ctx); } static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) { unsigned long index; struct creds *creds; mutex_lock(&ctx->uring_lock); percpu_ref_kill(&ctx->refs); xa_for_each(&ctx->personalities, index, creds) io_unregister_personality(ctx, index); mutex_unlock(&ctx->uring_lock); flush_delayed_work(&ctx->fallback_work); INIT_WORK(&ctx->exit_work, io_ring_exit_work); /* * Use system_dfl_wq to avoid spawning tons of event kworkers * if we're exiting a ton of rings at the same time. It just adds * noise and overhead, there's no discernable change in runtime * over using system_percpu_wq. 
*/ queue_work(iou_wq, &ctx->exit_work); } static int io_uring_release(struct inode *inode, struct file *file) { struct io_ring_ctx *ctx = file->private_data; file->private_data = NULL; io_ring_ctx_wait_and_kill(ctx); return 0; } static struct io_uring_reg_wait *io_get_ext_arg_reg(struct io_ring_ctx *ctx, const struct io_uring_getevents_arg __user *uarg) { unsigned long size = sizeof(struct io_uring_reg_wait); unsigned long offset = (uintptr_t)uarg; unsigned long end; if (unlikely(offset % sizeof(long))) return ERR_PTR(-EFAULT); /* also protects from NULL ->cq_wait_arg as the size would be 0 */ if (unlikely(check_add_overflow(offset, size, &end) || end > ctx->cq_wait_size)) return ERR_PTR(-EFAULT); offset = array_index_nospec(offset, ctx->cq_wait_size - size); return ctx->cq_wait_arg + offset; } static int io_validate_ext_arg(struct io_ring_ctx *ctx, unsigned flags, const void __user *argp, size_t argsz) { struct io_uring_getevents_arg arg; if (!(flags & IORING_ENTER_EXT_ARG)) return 0; if (flags & IORING_ENTER_EXT_ARG_REG) return -EINVAL; if (argsz != sizeof(arg)) return -EINVAL; if (copy_from_user(&arg, argp, sizeof(arg))) return -EFAULT; return 0; } static int io_get_ext_arg(struct io_ring_ctx *ctx, unsigned flags, const void __user *argp, struct ext_arg *ext_arg) { const struct io_uring_getevents_arg __user *uarg = argp; struct io_uring_getevents_arg arg; ext_arg->iowait = !(flags & IORING_ENTER_NO_IOWAIT); /* * If EXT_ARG isn't set, then we have no timespec and the argp pointer * is just a pointer to the sigset_t. */ if (!(flags & IORING_ENTER_EXT_ARG)) { ext_arg->sig = (const sigset_t __user *) argp; return 0; } if (flags & IORING_ENTER_EXT_ARG_REG) { struct io_uring_reg_wait *w; if (ext_arg->argsz != sizeof(struct io_uring_reg_wait)) return -EINVAL; w = io_get_ext_arg_reg(ctx, argp); if (IS_ERR(w)) return PTR_ERR(w); if (w->flags & ~IORING_REG_WAIT_TS) return -EINVAL; ext_arg->min_time = READ_ONCE(w->min_wait_usec) * NSEC_PER_USEC; ext_arg->sig = u64_to_user_ptr(READ_ONCE(w->sigmask)); ext_arg->argsz = READ_ONCE(w->sigmask_sz); if (w->flags & IORING_REG_WAIT_TS) { ext_arg->ts.tv_sec = READ_ONCE(w->ts.tv_sec); ext_arg->ts.tv_nsec = READ_ONCE(w->ts.tv_nsec); ext_arg->ts_set = true; } return 0; } /* * EXT_ARG is set - ensure we agree on the size of it and copy in our * timespec and sigset_t pointers if good. */ if (ext_arg->argsz != sizeof(arg)) return -EINVAL; #ifdef CONFIG_64BIT if (!user_access_begin(uarg, sizeof(*uarg))) return -EFAULT; unsafe_get_user(arg.sigmask, &uarg->sigmask, uaccess_end); unsafe_get_user(arg.sigmask_sz, &uarg->sigmask_sz, uaccess_end); unsafe_get_user(arg.min_wait_usec, &uarg->min_wait_usec, uaccess_end); unsafe_get_user(arg.ts, &uarg->ts, uaccess_end); user_access_end(); #else if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; #endif ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC; ext_arg->sig = u64_to_user_ptr(arg.sigmask); ext_arg->argsz = arg.sigmask_sz; if (arg.ts) { if (get_timespec64(&ext_arg->ts, u64_to_user_ptr(arg.ts))) return -EFAULT; ext_arg->ts_set = true; } return 0; #ifdef CONFIG_64BIT uaccess_end: user_access_end(); return -EFAULT; #endif } SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, u32, min_complete, u32, flags, const void __user *, argp, size_t, argsz) { struct io_ring_ctx *ctx; struct file *file; long ret; if (unlikely(flags & ~IORING_ENTER_FLAGS)) return -EINVAL; /* * Ring fd has been registered via IORING_REGISTER_RING_FDS, we * need only dereference our task private array to find it. 
*/ if (flags & IORING_ENTER_REGISTERED_RING) { struct io_uring_task *tctx = current->io_uring; if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) return -EINVAL; fd = array_index_nospec(fd, IO_RINGFD_REG_MAX); file = tctx->registered_rings[fd]; if (unlikely(!file)) return -EBADF; } else { file = fget(fd); if (unlikely(!file)) return -EBADF; ret = -EOPNOTSUPP; if (unlikely(!io_is_uring_fops(file))) goto out; } ctx = file->private_data; ret = -EBADFD; if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) goto out; /* * For SQ polling, the thread will do all submissions and completions. * Just return the requested submit count, and wake the thread if * we were asked to. */ ret = 0; if (ctx->flags & IORING_SETUP_SQPOLL) { if (unlikely(ctx->sq_data->thread == NULL)) { ret = -EOWNERDEAD; goto out; } if (flags & IORING_ENTER_SQ_WAKEUP) wake_up(&ctx->sq_data->wait); if (flags & IORING_ENTER_SQ_WAIT) io_sqpoll_wait_sq(ctx); ret = to_submit; } else if (to_submit) { ret = io_uring_add_tctx_node(ctx); if (unlikely(ret)) goto out; mutex_lock(&ctx->uring_lock); ret = io_submit_sqes(ctx, to_submit); if (ret != to_submit) { mutex_unlock(&ctx->uring_lock); goto out; } if (flags & IORING_ENTER_GETEVENTS) { if (ctx->syscall_iopoll) goto iopoll_locked; /* * Ignore errors, we'll soon call io_cqring_wait() and * it should handle ownership problems if any. */ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) (void)io_run_local_work_locked(ctx, min_complete); } mutex_unlock(&ctx->uring_lock); } if (flags & IORING_ENTER_GETEVENTS) { int ret2; if (ctx->syscall_iopoll) { /* * We disallow the app entering submit/complete with * polling, but we still need to lock the ring to * prevent racing with polled issue that got punted to * a workqueue. */ mutex_lock(&ctx->uring_lock); iopoll_locked: ret2 = io_validate_ext_arg(ctx, flags, argp, argsz); if (likely(!ret2)) ret2 = io_iopoll_check(ctx, min_complete); mutex_unlock(&ctx->uring_lock); } else { struct ext_arg ext_arg = { .argsz = argsz }; ret2 = io_get_ext_arg(ctx, flags, argp, &ext_arg); if (likely(!ret2)) ret2 = io_cqring_wait(ctx, min_complete, flags, &ext_arg); } if (!ret) { ret = ret2; /* * EBADR indicates that one or more CQE were dropped. * Once the user has been informed we can clear the bit * as they are obviously ok with those drops. 
*/ if (unlikely(ret2 == -EBADR)) clear_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq); } } out: if (!(flags & IORING_ENTER_REGISTERED_RING)) fput(file); return ret; } static const struct file_operations io_uring_fops = { .release = io_uring_release, .mmap = io_uring_mmap, .get_unmapped_area = io_uring_get_unmapped_area, #ifndef CONFIG_MMU .mmap_capabilities = io_uring_nommu_mmap_capabilities, #endif .poll = io_uring_poll, #ifdef CONFIG_PROC_FS .show_fdinfo = io_uring_show_fdinfo, #endif }; bool io_is_uring_fops(struct file *file) { return file->f_op == &io_uring_fops; } static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, struct io_ctx_config *config) { struct io_uring_params *p = &config->p; struct io_rings_layout *rl = &config->layout; struct io_uring_region_desc rd; struct io_rings *rings; int ret; /* make sure these are sane, as we already accounted them */ ctx->sq_entries = p->sq_entries; ctx->cq_entries = p->cq_entries; memset(&rd, 0, sizeof(rd)); rd.size = PAGE_ALIGN(rl->rings_size); if (ctx->flags & IORING_SETUP_NO_MMAP) { rd.user_addr = p->cq_off.user_addr; rd.flags |= IORING_MEM_REGION_TYPE_USER; } ret = io_create_region(ctx, &ctx->ring_region, &rd, IORING_OFF_CQ_RING); if (ret) return ret; ctx->rings = rings = io_region_get_ptr(&ctx->ring_region); if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) ctx->sq_array = (u32 *)((char *)rings + rl->sq_array_offset); memset(&rd, 0, sizeof(rd)); rd.size = PAGE_ALIGN(rl->sq_size); if (ctx->flags & IORING_SETUP_NO_MMAP) { rd.user_addr = p->sq_off.user_addr; rd.flags |= IORING_MEM_REGION_TYPE_USER; } ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES); if (ret) { io_rings_free(ctx); return ret; } ctx->sq_sqes = io_region_get_ptr(&ctx->sq_region); memset(rings, 0, sizeof(*rings)); WRITE_ONCE(rings->sq_ring_mask, ctx->sq_entries - 1); WRITE_ONCE(rings->cq_ring_mask, ctx->cq_entries - 1); WRITE_ONCE(rings->sq_ring_entries, ctx->sq_entries); WRITE_ONCE(rings->cq_ring_entries, ctx->cq_entries); return 0; } static int io_uring_install_fd(struct file *file) { int fd; fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); if (fd < 0) return fd; fd_install(fd, file); return fd; } /* * Allocate an anonymous fd, this is what constitutes the application * visible backing of an io_uring instance. The application mmaps this * fd to gain access to the SQ/CQ ring details. */ static struct file *io_uring_get_file(struct io_ring_ctx *ctx) { /* Create a new inode so that the LSM can block the creation. */ return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx, O_RDWR | O_CLOEXEC, NULL); } static int io_uring_sanitise_params(struct io_uring_params *p) { unsigned flags = p->flags; if (flags & ~IORING_SETUP_FLAGS) return -EINVAL; /* There is no way to mmap rings without a real fd */ if ((flags & IORING_SETUP_REGISTERED_FD_ONLY) && !(flags & IORING_SETUP_NO_MMAP)) return -EINVAL; if (flags & IORING_SETUP_SQPOLL) { /* IPI related flags don't make sense with SQPOLL */ if (flags & (IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG | IORING_SETUP_DEFER_TASKRUN)) return -EINVAL; } if (flags & IORING_SETUP_TASKRUN_FLAG) { if (!(flags & (IORING_SETUP_COOP_TASKRUN | IORING_SETUP_DEFER_TASKRUN))) return -EINVAL; } /* HYBRID_IOPOLL only valid with IOPOLL */ if ((flags & IORING_SETUP_HYBRID_IOPOLL) && !(flags & IORING_SETUP_IOPOLL)) return -EINVAL; /* * For DEFER_TASKRUN we require the completion task to be the same as * the submission task. This implies that there is only one submitter. 
*/ if ((flags & IORING_SETUP_DEFER_TASKRUN) && !(flags & IORING_SETUP_SINGLE_ISSUER)) return -EINVAL; /* * Nonsensical to ask for CQE32 and mixed CQE support, it's not * supported to post 16b CQEs on a ring setup with CQE32. */ if ((flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)) == (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)) return -EINVAL; /* * Nonsensical to ask for SQE128 and mixed SQE support, it's not * supported to post 64b SQEs on a ring setup with SQE128. */ if ((flags & (IORING_SETUP_SQE128|IORING_SETUP_SQE_MIXED)) == (IORING_SETUP_SQE128|IORING_SETUP_SQE_MIXED)) return -EINVAL; return 0; } static int io_uring_fill_params(struct io_uring_params *p) { unsigned entries = p->sq_entries; if (!entries) return -EINVAL; if (entries > IORING_MAX_ENTRIES) { if (!(p->flags & IORING_SETUP_CLAMP)) return -EINVAL; entries = IORING_MAX_ENTRIES; } /* * Use twice as many entries for the CQ ring. It's possible for the * application to drive a higher depth than the size of the SQ ring, * since the sqes are only used at submission time. This allows for * some flexibility in overcommitting a bit. If the application has * set IORING_SETUP_CQSIZE, it will have passed in the desired number * of CQ ring entries manually. */ p->sq_entries = roundup_pow_of_two(entries); if (p->flags & IORING_SETUP_CQSIZE) { /* * If IORING_SETUP_CQSIZE is set, we do the same roundup * to a power-of-two, if it isn't already. We do NOT impose * any cq vs sq ring sizing. */ if (!p->cq_entries) return -EINVAL; if (p->cq_entries > IORING_MAX_CQ_ENTRIES) { if (!(p->flags & IORING_SETUP_CLAMP)) return -EINVAL; p->cq_entries = IORING_MAX_CQ_ENTRIES; } p->cq_entries = roundup_pow_of_two(p->cq_entries); if (p->cq_entries < p->sq_entries) return -EINVAL; } else { p->cq_entries = 2 * p->sq_entries; } return 0; } int io_prepare_config(struct io_ctx_config *config) { struct io_uring_params *p = &config->p; int ret; ret = io_uring_sanitise_params(p); if (ret) return ret; ret = io_uring_fill_params(p); if (ret) return ret; ret = rings_size(p->flags, p->sq_entries, p->cq_entries, &config->layout); if (ret) return ret; p->sq_off.head = offsetof(struct io_rings, sq.head); p->sq_off.tail = offsetof(struct io_rings, sq.tail); p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask); p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries); p->sq_off.flags = offsetof(struct io_rings, sq_flags); p->sq_off.dropped = offsetof(struct io_rings, sq_dropped); p->sq_off.resv1 = 0; if (!(p->flags & IORING_SETUP_NO_MMAP)) p->sq_off.user_addr = 0; p->cq_off.head = offsetof(struct io_rings, cq.head); p->cq_off.tail = offsetof(struct io_rings, cq.tail); p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask); p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); p->cq_off.cqes = offsetof(struct io_rings, cqes); p->cq_off.flags = offsetof(struct io_rings, cq_flags); p->cq_off.resv1 = 0; if (!(p->flags & IORING_SETUP_NO_MMAP)) p->cq_off.user_addr = 0; if (!(p->flags & IORING_SETUP_NO_SQARRAY)) p->sq_off.array = config->layout.sq_array_offset; return 0; } static __cold int io_uring_create(struct io_ctx_config *config) { struct io_uring_params *p = &config->p; struct io_ring_ctx *ctx; struct io_uring_task *tctx; struct file *file; int ret; ret = io_prepare_config(config); if (ret) return ret; ctx = io_ring_ctx_alloc(p); if (!ctx) return -ENOMEM; ctx->clockid = CLOCK_MONOTONIC; ctx->clock_offset = 0; if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) 
static_branch_inc(&io_key_has_sqarray); if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && !(ctx->flags & IORING_SETUP_IOPOLL) && !(ctx->flags & IORING_SETUP_SQPOLL)) ctx->task_complete = true; if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) ctx->lockless_cq = true; /* * lazy poll_wq activation relies on ->task_complete for synchronisation * purposes, see io_activate_pollwq() */ if (!ctx->task_complete) ctx->poll_activated = true; /* * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user * space applications don't need to do io completion events * polling again, they can rely on io_sq_thread to do polling * work, which can reduce cpu usage and uring_lock contention. */ if (ctx->flags & IORING_SETUP_IOPOLL && !(ctx->flags & IORING_SETUP_SQPOLL)) ctx->syscall_iopoll = 1; ctx->compat = in_compat_syscall(); if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK)) ctx->user = get_uid(current_user()); /* * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if * COOP_TASKRUN is set, then IPIs are never needed by the app. */ if (ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_COOP_TASKRUN)) ctx->notify_method = TWA_SIGNAL_NO_IPI; else ctx->notify_method = TWA_SIGNAL; /* * This is just grabbed for accounting purposes. When a process exits, * the mm is exited and dropped before the files, hence we need to hang * on to this mm purely for the purposes of being able to unaccount * memory (locked/pinned vm). It's not used for anything else. */ mmgrab(current->mm); ctx->mm_account = current->mm; ret = io_allocate_scq_urings(ctx, config); if (ret) goto err; ret = io_sq_offload_create(ctx, p); if (ret) goto err; p->features = IORING_FEAT_FLAGS; if (copy_to_user(config->uptr, p, sizeof(*p))) { ret = -EFAULT; goto err; } if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !(ctx->flags & IORING_SETUP_R_DISABLED)) { /* * Unlike io_register_enable_rings(), don't need WRITE_ONCE() * since ctx isn't yet accessible from other tasks */ ctx->submitter_task = get_task_struct(current); } file = io_uring_get_file(ctx); if (IS_ERR(file)) { ret = PTR_ERR(file); goto err; } ret = __io_uring_add_tctx_node(ctx); if (ret) goto err_fput; tctx = current->io_uring; /* * Install ring fd as the very last thing, so we don't risk someone * having closed it before we finish setup */ if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY) ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX); else ret = io_uring_install_fd(file); if (ret < 0) goto err_fput; trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); return ret; err: io_ring_ctx_wait_and_kill(ctx); return ret; err_fput: fput(file); return ret; } /* * Sets up an aio uring context, and returns the fd. Applications asks for a * ring size, we return the actual sq/cq ring sizes (among other things) in the * params structure passed in. 
*/ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) { struct io_ctx_config config; memset(&config, 0, sizeof(config)); if (copy_from_user(&config.p, params, sizeof(config.p))) return -EFAULT; if (!mem_is_zero(&config.p.resv, sizeof(config.p.resv))) return -EINVAL; config.p.sq_entries = entries; config.uptr = params; return io_uring_create(&config); } static inline int io_uring_allowed(void) { int disabled = READ_ONCE(sysctl_io_uring_disabled); kgid_t io_uring_group; if (disabled == 2) return -EPERM; if (disabled == 0 || capable(CAP_SYS_ADMIN)) goto allowed_lsm; io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group); if (!gid_valid(io_uring_group)) return -EPERM; if (!in_group_p(io_uring_group)) return -EPERM; allowed_lsm: return security_uring_allowed(); } SYSCALL_DEFINE2(io_uring_setup, u32, entries, struct io_uring_params __user *, params) { int ret; ret = io_uring_allowed(); if (ret) return ret; return io_uring_setup(entries, params); } static int __init io_uring_init(void) { struct kmem_cache_args kmem_args = { .useroffset = offsetof(struct io_kiocb, cmd.data), .usersize = sizeof_field(struct io_kiocb, cmd.data), .freeptr_offset = offsetof(struct io_kiocb, work), .use_freeptr_offset = true, }; #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \ BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \ BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \ } while (0) #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \ __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename) #define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \ __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename) BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64); BUILD_BUG_SQE_ELEM(0, __u8, opcode); BUILD_BUG_SQE_ELEM(1, __u8, flags); BUILD_BUG_SQE_ELEM(2, __u16, ioprio); BUILD_BUG_SQE_ELEM(4, __s32, fd); BUILD_BUG_SQE_ELEM(8, __u64, off); BUILD_BUG_SQE_ELEM(8, __u64, addr2); BUILD_BUG_SQE_ELEM(8, __u32, cmd_op); BUILD_BUG_SQE_ELEM(12, __u32, __pad1); BUILD_BUG_SQE_ELEM(16, __u64, addr); BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in); BUILD_BUG_SQE_ELEM(24, __u32, len); BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags); BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags); BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags); BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags); BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events); BUILD_BUG_SQE_ELEM(28, __u32, poll32_events); BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags); BUILD_BUG_SQE_ELEM(28, __u32, msg_flags); BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags); BUILD_BUG_SQE_ELEM(28, __u32, accept_flags); BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags); BUILD_BUG_SQE_ELEM(28, __u32, open_flags); BUILD_BUG_SQE_ELEM(28, __u32, statx_flags); BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice); BUILD_BUG_SQE_ELEM(28, __u32, splice_flags); BUILD_BUG_SQE_ELEM(28, __u32, rename_flags); BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags); BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags); BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags); BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags); BUILD_BUG_SQE_ELEM(32, __u64, user_data); BUILD_BUG_SQE_ELEM(40, __u16, buf_index); BUILD_BUG_SQE_ELEM(40, __u16, buf_group); BUILD_BUG_SQE_ELEM(42, __u16, personality); BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in); BUILD_BUG_SQE_ELEM(44, __u32, file_index); BUILD_BUG_SQE_ELEM(44, __u16, addr_len); BUILD_BUG_SQE_ELEM(44, __u8, write_stream); BUILD_BUG_SQE_ELEM(45, __u8, __pad4[0]); BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]); 
BUILD_BUG_SQE_ELEM(48, __u64, addr3); BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd); BUILD_BUG_SQE_ELEM(48, __u64, attr_ptr); BUILD_BUG_SQE_ELEM(56, __u64, attr_type_mask); BUILD_BUG_SQE_ELEM(56, __u64, __pad2); BUILD_BUG_ON(sizeof(struct io_uring_files_update) != sizeof(struct io_uring_rsrc_update)); BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) > sizeof(struct io_uring_rsrc_update2)); /* ->buf_index is u16 */ BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0); BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) != offsetof(struct io_uring_buf_ring, tail)); /* should fit into one byte */ BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8)); BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS); BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags)); BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32)); /* top 8bits are for internal use */ BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0); io_uring_optable_init(); /* imu->dir is u8 */ BUILD_BUG_ON((IO_IMU_DEST | IO_IMU_SOURCE) > U8_MAX); /* * Allow user copy in the per-command field, which starts after the * file in io_kiocb and until the opcode field. The openat2 handling * requires copying in user memory into the io_kiocb object in that * range, and HARDENED_USERCOPY will complain if we haven't * correctly annotated this range. */ req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args, SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU); iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64); BUG_ON(!iou_wq); #ifdef CONFIG_SYSCTL register_sysctl_init("kernel", kernel_io_uring_disabled_table); #endif return 0; }; __initcall(io_uring_init);
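/*
 * A minimal userspace sketch (not part of the kernel source above) of driving
 * the two syscalls implemented in io_uring.c: io_uring_setup() to create a
 * ring, then io_uring_enter() with IORING_ENTER_GETEVENTS to submit one NOP
 * and wait for its completion. Ring offsets come from the io_uring_params the
 * kernel fills in. Assumes <linux/io_uring.h>; error handling, unmapping and
 * the CQ head update are omitted, and a real program needs acquire/release
 * ordering on the ring indices rather than the plain accesses used here.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct io_uring_params p = {0};
	int fd = syscall(__NR_io_uring_setup, 4, &p);

	if (fd < 0)
		return 1;

	/* Map the SQ/CQ ring headers and the SQE array at the fixed offsets. */
	unsigned char *sq = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
				 PROT_READ | PROT_WRITE, MAP_SHARED, fd, IORING_OFF_SQ_RING);
	unsigned char *cq = mmap(NULL, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
				 PROT_READ | PROT_WRITE, MAP_SHARED, fd, IORING_OFF_CQ_RING);
	struct io_uring_sqe *sqes = mmap(NULL, p.sq_entries * sizeof(*sqes),
					 PROT_READ | PROT_WRITE, MAP_SHARED, fd, IORING_OFF_SQES);

	/* Queue one NOP: fill SQE 0, publish its index, advance the SQ tail. */
	__u32 tail = *(__u32 *)(sq + p.sq_off.tail);
	__u32 mask = *(__u32 *)(sq + p.sq_off.ring_mask);
	memset(&sqes[0], 0, sizeof(sqes[0]));
	sqes[0].opcode = IORING_OP_NOP;
	sqes[0].user_data = 0x42;
	((__u32 *)(sq + p.sq_off.array))[tail & mask] = 0;
	*(volatile __u32 *)(sq + p.sq_off.tail) = tail + 1;

	/* Submit one SQE and wait for one CQE, then read it off the CQ ring. */
	syscall(__NR_io_uring_enter, fd, 1, 1, IORING_ENTER_GETEVENTS, NULL, 0);
	__u32 head = *(__u32 *)(cq + p.cq_off.head);
	__u32 cq_mask = *(__u32 *)(cq + p.cq_off.ring_mask);
	struct io_uring_cqe *cqe = (struct io_uring_cqe *)(cq + p.cq_off.cqes);
	printf("user_data=%llu res=%d\n",
	       (unsigned long long)cqe[head & cq_mask].user_data,
	       cqe[head & cq_mask].res);
	return 0;
}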
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CGROUP_INTERNAL_H
#define __CGROUP_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/fs_parser.h>

#define TRACE_CGROUP_PATH_LEN 1024
extern spinlock_t trace_cgroup_path_lock;
extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
extern void __init enable_debug_cgroup(void);

/*
 * cgroup_path() takes a spin lock. It is good practice not to take
 * spin locks within trace point handlers, as they are mostly hidden
 * from normal view. As cgroup_path() can take the kernfs_rename_lock
 * spin lock, it is best to not call that function from the trace event
 * handler.
 *
 * Note: trace_cgroup_##type##_enabled() is a static branch that will only
 * be set when the trace event is enabled.
 */
#define TRACE_CGROUP_PATH(type, cgrp, ...)				\
	do {								\
		if (trace_cgroup_##type##_enabled()) {			\
			unsigned long flags;				\
			spin_lock_irqsave(&trace_cgroup_path_lock,	\
					  flags);			\
			cgroup_path(cgrp, trace_cgroup_path,		\
				    TRACE_CGROUP_PATH_LEN);		\
			trace_cgroup_##type(cgrp, trace_cgroup_path,	\
					    ##__VA_ARGS__);		\
			spin_unlock_irqrestore(&trace_cgroup_path_lock, \
					       flags);			\
		}							\
	} while (0)

/*
 * The cgroup filesystem superblock creation/mount context.
 */
struct cgroup_fs_context {
	struct kernfs_fs_context kfc;
	struct cgroup_root	*root;
	struct cgroup_namespace	*ns;
	unsigned int	flags;			/* CGRP_ROOT_* flags */

	/* cgroup1 bits */
	bool		cpuset_clone_children;
	bool		none;			/* User explicitly requested empty subsystem */
	bool		all_ss;			/* Seen 'all' option */
	u16		subsys_mask;		/* Selected subsystems */
	char		*name;			/* Hierarchy name */
	char		*release_agent;		/* Path for release notifications */
};

static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	return container_of(kfc, struct cgroup_fs_context, kfc);
}

struct cgroup_pidlist;

struct cgroup_file_ctx {
	struct cgroup_namespace	*ns;

	struct {
		void			*trigger;
	} psi;

	struct {
		bool			started;
		struct css_task_iter	iter;
	} procs;

	struct {
		struct cgroup_pidlist	*pidlist;
	} procs1;

	struct cgroup_of_peak peak;
};

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies. In the other
 * direction, a css_set is naturally associated with multiple cgroups.
* This M:N relationship is represented by the following link structure * which exists for each association and allows traversing the associations * from both sides. */ struct cgrp_cset_link { /* the cgroup and css_set this link associates */ struct cgroup *cgrp; struct css_set *cset; /* list of cgrp_cset_links anchored at cgrp->cset_links */ struct list_head cset_link; /* list of cgrp_cset_links anchored at css_set->cgrp_links */ struct list_head cgrp_link; }; /* used to track tasks and csets during migration */ struct cgroup_taskset { /* the src and dst cset list running through cset->mg_node */ struct list_head src_csets; struct list_head dst_csets; /* the number of tasks in the set */ int nr_tasks; /* the subsys currently being processed */ int ssid; /* * Fields for cgroup_taskset_*() iteration. * * Before migration is committed, the target migration tasks are on * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of * the csets on ->dst_csets. ->csets point to either ->src_csets * or ->dst_csets depending on whether migration is committed. * * ->cur_csets and ->cur_task point to the current task position * during iteration. */ struct list_head *csets; struct css_set *cur_cset; struct task_struct *cur_task; }; /* migration context also tracks preloading */ struct cgroup_mgctx { /* * Preloaded source and destination csets. Used to guarantee * atomic success or failure on actual migration. */ struct list_head preloaded_src_csets; struct list_head preloaded_dst_csets; /* tasks and csets to migrate */ struct cgroup_taskset tset; /* subsystems affected by migration */ u16 ss_mask; }; #define CGROUP_TASKSET_INIT(tset) \ { \ .src_csets = LIST_HEAD_INIT(tset.src_csets), \ .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \ .csets = &tset.src_csets, \ } #define CGROUP_MGCTX_INIT(name) \ { \ LIST_HEAD_INIT(name.preloaded_src_csets), \ LIST_HEAD_INIT(name.preloaded_dst_csets), \ CGROUP_TASKSET_INIT(name.tset), \ } #define DEFINE_CGROUP_MGCTX(name) \ struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name) extern struct cgroup_subsys *cgroup_subsys[]; extern struct list_head cgroup_roots; extern bool cgrp_dfl_visible; /* iterate across the hierarchies */ #define for_each_root(root) \ list_for_each_entry_rcu((root), &cgroup_roots, root_list, \ lockdep_is_held(&cgroup_mutex)) /** * for_each_subsys - iterate all enabled cgroup subsystems * @ss: the iteration cursor * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end */ #define for_each_subsys(ss, ssid) \ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) static inline bool cgroup_is_dead(const struct cgroup *cgrp) { return !(cgrp->self.flags & CSS_ONLINE); } static inline bool notify_on_release(const struct cgroup *cgrp) { return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); } void put_css_set_locked(struct css_set *cset); static inline void put_css_set(struct css_set *cset) { unsigned long flags; /* * Ensure that the refcount doesn't hit zero while any readers * can see it. 
Similar to atomic_dec_and_lock(), but for an * rwlock */ if (refcount_dec_not_one(&cset->refcount)) return; spin_lock_irqsave(&css_set_lock, flags); put_css_set_locked(cset); spin_unlock_irqrestore(&css_set_lock, flags); } /* * refcounted get/put for css_set objects */ static inline void get_css_set(struct css_set *cset) { refcount_inc(&cset->refcount); } bool cgroup_ssid_enabled(int ssid); bool cgroup_on_dfl(const struct cgroup *cgrp); struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root); struct cgroup *task_cgroup_from_root(struct task_struct *task, struct cgroup_root *root); struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline); void cgroup_kn_unlock(struct kernfs_node *kn); int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, struct cgroup_namespace *ns); void cgroup_favor_dynmods(struct cgroup_root *root, bool favor); void cgroup_free_root(struct cgroup_root *root); void init_cgroup_root(struct cgroup_fs_context *ctx); int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask); int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask); int cgroup_do_get_tree(struct fs_context *fc); int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp); void cgroup_migrate_finish(struct cgroup_mgctx *mgctx); void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp, struct cgroup_mgctx *mgctx); int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx); int cgroup_migrate(struct task_struct *leader, bool threadgroup, struct cgroup_mgctx *mgctx); int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, bool threadgroup); void cgroup_attach_lock(enum cgroup_attach_lock_mode lock_mode, struct task_struct *tsk); void cgroup_attach_unlock(enum cgroup_attach_lock_mode lock_mode, struct task_struct *tsk); struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, enum cgroup_attach_lock_mode *lock_mode) __acquires(&cgroup_threadgroup_rwsem); void cgroup_procs_write_finish(struct task_struct *task, enum cgroup_attach_lock_mode lock_mode) __releases(&cgroup_threadgroup_rwsem); void cgroup_lock_and_drain_offline(struct cgroup *cgrp); int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode); int cgroup_rmdir(struct kernfs_node *kn); int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, struct kernfs_root *kf_root); int __cgroup_task_count(const struct cgroup *cgrp); int cgroup_task_count(const struct cgroup *cgrp); /* * rstat.c */ int css_rstat_init(struct cgroup_subsys_state *css); void css_rstat_exit(struct cgroup_subsys_state *css); int ss_rstat_init(struct cgroup_subsys *ss); void cgroup_base_stat_cputime_show(struct seq_file *seq); /* * namespace.c */ extern const struct proc_ns_operations cgroupns_operations; /* * cgroup-v1.c */ extern struct cftype cgroup1_base_files[]; extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops; extern const struct fs_parameter_spec cgroup1_fs_parameters[]; int proc_cgroupstats_show(struct seq_file *m, void *v); bool cgroup1_ssid_disabled(int ssid); void cgroup1_pidlist_destroy_all(struct cgroup *cgrp); void cgroup1_release_agent(struct work_struct *work); void cgroup1_check_for_release(struct cgroup *cgrp); int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param); int cgroup1_get_tree(struct fs_context *fc); int cgroup1_reconfigure(struct fs_context *ctx); #endif /* __CGROUP_INTERNAL_H */
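/*
 * Userspace analogue (not kernel code) of the pattern used by put_css_set()
 * above: decrement without the lock while the count cannot possibly reach
 * zero, and only fall back to "take the lock, then drop the final reference"
 * when it might, mirroring refcount_dec_not_one() + put_css_set_locked().
 * The struct name and obj_put() are illustrative only.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	/* payload ... */
};

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

static void obj_put(struct obj *o)
{
	int cur = atomic_load(&o->refcount);

	/* Fast path: like refcount_dec_not_one(), succeed only while > 1. */
	while (cur > 1)
		if (atomic_compare_exchange_weak(&o->refcount, &cur, cur - 1))
			return;

	/* Slow path: take the lock first so lookups cannot race with teardown. */
	pthread_mutex_lock(&obj_lock);
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);		/* last reference dropped under the lock */
	pthread_mutex_unlock(&obj_lock);
}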
// SPDX-License-Identifier: GPL-2.0
/*
 * proc_tty.c -- handles /proc/tty
 *
 * Copyright 1997, Theodore Ts'o
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include "internal.h"

/*
 * The /proc/tty directory inodes...
 */
static struct proc_dir_entry *proc_tty_driver;

/*
 * This is the handler for /proc/tty/drivers
 */
static void show_tty_range(struct seq_file *m, struct tty_driver *p,
	dev_t from, int num)
{
	seq_printf(m, "%-20s ", p->driver_name ? p->driver_name : "unknown");
	seq_printf(m, "/dev/%-8s ", p->name);
	if (p->num > 1) {
		seq_printf(m, "%3d %d-%d ", MAJOR(from), MINOR(from),
			   MINOR(from) + num - 1);
	} else {
		seq_printf(m, "%3d %7d ", MAJOR(from), MINOR(from));
	}
	switch (p->type) {
	case TTY_DRIVER_TYPE_SYSTEM:
		seq_puts(m, "system");
		if (p->subtype == SYSTEM_TYPE_TTY)
			seq_puts(m, ":/dev/tty");
		else if (p->subtype == SYSTEM_TYPE_SYSCONS)
			seq_puts(m, ":console");
		else if (p->subtype == SYSTEM_TYPE_CONSOLE)
			seq_puts(m, ":vtmaster");
		break;
	case TTY_DRIVER_TYPE_CONSOLE:
		seq_puts(m, "console");
		break;
	case TTY_DRIVER_TYPE_SERIAL:
		seq_puts(m, "serial");
		break;
	case TTY_DRIVER_TYPE_PTY:
		if (p->subtype == PTY_TYPE_MASTER)
			seq_puts(m, "pty:master");
		else if (p->subtype == PTY_TYPE_SLAVE)
			seq_puts(m, "pty:slave");
		else
			seq_puts(m, "pty");
		break;
	default:
		seq_printf(m, "type:%d.%d", p->type, p->subtype);
	}
	seq_putc(m, '\n');
}

static int show_tty_driver(struct seq_file *m, void *v)
{
	struct tty_driver *p = list_entry(v, struct tty_driver, tty_drivers);
	dev_t from = MKDEV(p->major, p->minor_start);
	dev_t to = from + p->num;

	if (&p->tty_drivers == tty_drivers.next) {
		/* pseudo-drivers first */
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty");
		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0);
		seq_puts(m, "system:/dev/tty\n");
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console");
		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1);
		seq_puts(m, "system:console\n");
#ifdef CONFIG_UNIX98_PTYS
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx");
		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2);
		seq_puts(m, "system\n");
#endif
#ifdef CONFIG_VT
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0");
		seq_printf(m, "%3d %7d ", TTY_MAJOR, 0);
		seq_puts(m, "system:vtmaster\n");
#endif
	}

	while (MAJOR(from) < MAJOR(to)) {
		dev_t next = MKDEV(MAJOR(from)+1, 0);
		show_tty_range(m, p, from, next - from);
		from = next;
	}

	if (from != to)
		show_tty_range(m, p, from, to - from);
	return 0;
}

/* iterator */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&tty_mutex);
	return seq_list_start(&tty_drivers, *pos);
}

static void *t_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &tty_drivers, pos);
}

static void t_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&tty_mutex);
}

static
const struct seq_operations tty_drivers_op = { .start = t_start, .next = t_next, .stop = t_stop, .show = show_tty_driver }; /* * This function is called by tty_register_driver() to handle * registering the driver's /proc handler into /proc/tty/driver/<foo> */ void proc_tty_register_driver(struct tty_driver *driver) { struct proc_dir_entry *ent; if (!driver->driver_name || driver->proc_entry || !driver->ops->proc_show) return; ent = proc_create_single_data(driver->driver_name, 0, proc_tty_driver, driver->ops->proc_show, driver); driver->proc_entry = ent; } /* * This function is called by tty_unregister_driver() */ void proc_tty_unregister_driver(struct tty_driver *driver) { struct proc_dir_entry *ent; ent = driver->proc_entry; if (!ent) return; remove_proc_entry(ent->name, proc_tty_driver); driver->proc_entry = NULL; } /* * Called by proc_root_init() to initialize the /proc/tty subtree */ void __init proc_tty_init(void) { if (!proc_mkdir("tty", NULL)) return; proc_mkdir("tty/ldisc", NULL); /* Preserved: it's userspace visible */ /* * /proc/tty/driver/serial reveals the exact character counts for * serial links which is just too easy to abuse for inferring * password lengths and inter-keystroke timings during password * entry. */ proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL); proc_create_seq("tty/ldiscs", 0, NULL, &tty_ldiscs_seq_ops); proc_create_seq("tty/drivers", 0, NULL, &tty_drivers_op); }
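/*
 * Small userspace reader (not part of proc_tty.c) for the file produced by
 * show_tty_driver()/show_tty_range() above. Each line carries five
 * whitespace-separated fields: driver name, device node, major, minor range
 * and type, matching the seq_printf() format strings in this file.
 */
#include <stdio.h>

int main(void)
{
	char name[64], node[64], minors[64], type[64];
	int major;
	FILE *f = fopen("/proc/tty/drivers", "r");

	if (!f)
		return 1;
	/* e.g. "serial  /dev/ttyS  4 64-111 serial" */
	while (fscanf(f, "%63s %63s %d %63s %63s",
		      name, node, &major, minors, type) == 5)
		printf("driver=%-12s node=%-12s major=%d minors=%-8s type=%s\n",
		       name, node, major, minors, type);
	fclose(f);
	return 0;
}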
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PATH_H
#define _LINUX_PATH_H

struct dentry;
struct vfsmount;

struct path {
	struct vfsmount *mnt;
	struct dentry *dentry;
} __randomize_layout;

extern void path_get(const struct path *);
extern void path_put(const struct path *);

static inline int path_equal(const struct path *path1, const struct path *path2)
{
	return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
}

/*
 * Cleanup macro for use with __free(path_put). Avoids dereference and
 * copying @path unlike DEFINE_FREE(). path_put() will handle the empty
 * path correctly just ensure @path is initialized:
 *
 *	struct path path __free(path_put) = {};
 */
#define __free_path_put path_put

#endif  /* _LINUX_PATH_H */
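/*
 * Sketch (not part of path.h) of the cleanup pattern described in the comment
 * above, assuming <linux/cleanup.h> for __free(). kern_path() is the usual
 * VFS lookup; example_get_ino() is a made-up caller used only to illustrate
 * that path_put() runs automatically on every return path, including the
 * error return where @path is still the empty initializer.
 */
#include <linux/cleanup.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/dcache.h>
#include <linux/fs.h>

static int example_get_ino(const char *name, unsigned long *ino)
{
	struct path path __free(path_put) = {};
	int err;

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	if (err)
		return err;	/* @path is still empty; path_put() copes */

	*ino = d_inode(path.dentry)->i_ino;
	return 0;		/* path_put() runs automatically on return */
}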
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Descending-priority-sorted double-linked list
 *
 * (C) 2002-2003 Intel Corp
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
 *
 * 2001-2005 (c) MontaVista Software, Inc.
 * Daniel Walker <dwalker@mvista.com>
 *
 * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
 *
 * Simplifications of the original code by
 * Oleg Nesterov <oleg@tv-sign.ru>
 *
 * Based on simple lists (include/linux/list.h).
 *
 * This is a priority-sorted list of nodes; each node has a
 * priority from INT_MIN (highest) to INT_MAX (lowest).
 *
 * Addition is O(K), removal is O(1), change of priority of a node is
 * O(K) and K is the number of RT priority levels used in the system.
 * (1 <= K <= 99)
 *
 * This list is really a list of lists:
 *
 *  - The tier 1 list is the prio_list, different priority nodes.
 *
 *  - The tier 2 list is the node_list, serialized nodes.
 *
 * Simple ASCII art explanation:
 *
 * pl:prio_list (only for plist_node)
 * nl:node_list
 *   HEAD|             NODE(S)
 *       |
 *       ||------------------------------------|
 *       ||->|pl|<->|pl|<--------------->|pl|<-|
 *       |   |10|   |21|   |21|   |21|   |40|   (prio)
 *       |   |  |   |  |   |  |   |  |   |  |
 *       |   |  |   |  |   |  |   |  |   |  |
 * |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
 * |-------------------------------------------|
 *
 * The nodes on the prio_list list are sorted by priority to simplify
 * the insertion of new nodes. There are no nodes with duplicate
 * priorities on the list.
 *
 * The nodes on the node_list are ordered by priority and can contain
 * entries which have the same priority. Those entries are ordered
 * FIFO.
 *
 * Addition means: look for the prio_list node in the prio_list
 * for the priority of the node and insert it before the node_list
 * entry of the next prio_list node. If it is the first node of
 * that priority, add it to the prio_list in the right position and
 * insert it into the serialized node_list list.
 *
 * Removal means remove it from the node_list and remove it from
 * the prio_list if the node_list list_head is non empty. In case
 * of removal from the prio_list it must be checked whether other
 * entries of the same priority are on the list or not. If there
 * is another entry of the same priority then this entry has to
 * replace the removed entry on the prio_list. If the entry which
 * is removed is the only entry of this priority then a simple
 * remove from both lists is sufficient.
 *
 * INT_MIN is the highest priority, 0 is the medium highest, INT_MAX
 * is lowest priority.
* * No locking is done, up to the caller. */ #ifndef _LINUX_PLIST_H_ #define _LINUX_PLIST_H_ #include <linux/container_of.h> #include <linux/list.h> #include <linux/plist_types.h> #include <asm/bug.h> /** * PLIST_HEAD_INIT - static struct plist_head initializer * @head: struct plist_head variable name */ #define PLIST_HEAD_INIT(head) \ { \ .node_list = LIST_HEAD_INIT((head).node_list) \ } /** * PLIST_HEAD - declare and init plist_head * @head: name for struct plist_head variable */ #define PLIST_HEAD(head) \ struct plist_head head = PLIST_HEAD_INIT(head) /** * PLIST_NODE_INIT - static struct plist_node initializer * @node: struct plist_node variable name * @__prio: initial node priority */ #define PLIST_NODE_INIT(node, __prio) \ { \ .prio = (__prio), \ .prio_list = LIST_HEAD_INIT((node).prio_list), \ .node_list = LIST_HEAD_INIT((node).node_list), \ } /** * plist_head_init - dynamic struct plist_head initializer * @head: &struct plist_head pointer */ static inline void plist_head_init(struct plist_head *head) { INIT_LIST_HEAD(&head->node_list); } /** * plist_node_init - Dynamic struct plist_node initializer * @node: &struct plist_node pointer * @prio: initial node priority */ static inline void plist_node_init(struct plist_node *node, int prio) { node->prio = prio; INIT_LIST_HEAD(&node->prio_list); INIT_LIST_HEAD(&node->node_list); } extern void plist_add(struct plist_node *node, struct plist_head *head); extern void plist_del(struct plist_node *node, struct plist_head *head); extern void plist_requeue(struct plist_node *node, struct plist_head *head); /** * plist_for_each - iterate over the plist * @pos: the type * to use as a loop counter * @head: the head for your list */ #define plist_for_each(pos, head) \ list_for_each_entry(pos, &(head)->node_list, node_list) /** * plist_for_each_continue - continue iteration over the plist * @pos: the type * to use as a loop cursor * @head: the head for your list * * Continue to iterate over plist, continuing after the current position. */ #define plist_for_each_continue(pos, head) \ list_for_each_entry_continue(pos, &(head)->node_list, node_list) /** * plist_for_each_safe - iterate safely over a plist of given type * @pos: the type * to use as a loop counter * @n: another type * to use as temporary storage * @head: the head for your list * * Iterate over a plist of given type, safe against removal of list entry. */ #define plist_for_each_safe(pos, n, head) \ list_for_each_entry_safe(pos, n, &(head)->node_list, node_list) /** * plist_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop counter * @head: the head for your list * @mem: the name of the list_head within the struct */ #define plist_for_each_entry(pos, head, mem) \ list_for_each_entry(pos, &(head)->node_list, mem.node_list) /** * plist_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor * @head: the head for your list * @m: the name of the list_head within the struct * * Continue to iterate over list of given type, continuing after * the current position. */ #define plist_for_each_entry_continue(pos, head, m) \ list_for_each_entry_continue(pos, &(head)->node_list, m.node_list) /** * plist_for_each_entry_safe - iterate safely over list of given type * @pos: the type * to use as a loop counter * @n: another type * to use as temporary storage * @head: the head for your list * @m: the name of the list_head within the struct * * Iterate over list of given type, safe against removal of list entry. 
*/ #define plist_for_each_entry_safe(pos, n, head, m) \ list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list) /** * plist_head_empty - return !0 if a plist_head is empty * @head: &struct plist_head pointer */ static inline int plist_head_empty(const struct plist_head *head) { return list_empty(&head->node_list); } /** * plist_node_empty - return !0 if plist_node is not on a list * @node: &struct plist_node pointer */ static inline int plist_node_empty(const struct plist_node *node) { return list_empty(&node->node_list); } /* All functions below assume the plist_head is not empty. */ /** * plist_first_entry - get the struct for the first entry * @head: the &struct plist_head pointer * @type: the type of the struct this is embedded in * @member: the name of the list_head within the struct */ #ifdef CONFIG_DEBUG_PLIST # define plist_first_entry(head, type, member) \ ({ \ WARN_ON(plist_head_empty(head)); \ container_of(plist_first(head), type, member); \ }) #else # define plist_first_entry(head, type, member) \ container_of(plist_first(head), type, member) #endif /** * plist_last_entry - get the struct for the last entry * @head: the &struct plist_head pointer * @type: the type of the struct this is embedded in * @member: the name of the list_head within the struct */ #ifdef CONFIG_DEBUG_PLIST # define plist_last_entry(head, type, member) \ ({ \ WARN_ON(plist_head_empty(head)); \ container_of(plist_last(head), type, member); \ }) #else # define plist_last_entry(head, type, member) \ container_of(plist_last(head), type, member) #endif /** * plist_next - get the next entry in list * @pos: the type * to cursor */ #define plist_next(pos) \ list_next_entry(pos, node_list) /** * plist_prev - get the prev entry in list * @pos: the type * to cursor */ #define plist_prev(pos) \ list_prev_entry(pos, node_list) /** * plist_first - return the first node (and thus, highest priority) * @head: the &struct plist_head pointer * * Assumes the plist is _not_ empty. */ static inline struct plist_node *plist_first(const struct plist_head *head) { return list_entry(head->node_list.next, struct plist_node, node_list); } /** * plist_last - return the last node (and thus, lowest priority) * @head: the &struct plist_head pointer * * Assumes the plist is _not_ empty. */ static inline struct plist_node *plist_last(const struct plist_head *head) { return list_entry(head->node_list.prev, struct plist_node, node_list); } #endif
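/*
 * Sketch (not part of plist.h) of a minimal user of the API declared above.
 * The struct name "waiter" and the priorities are illustrative only; lower
 * prio values sort first, so the prio-5 node comes out ahead of prio 10.
 */
#include <linux/plist.h>
#include <linux/printk.h>

struct waiter {
	struct plist_node node;
	int id;
};

static void plist_demo(void)
{
	PLIST_HEAD(queue);			/* declare + init the head */
	struct waiter a = { .id = 1 }, b = { .id = 2 };
	struct waiter *w;

	plist_node_init(&a.node, 10);
	plist_node_init(&b.node, 5);

	plist_add(&a.node, &queue);
	plist_add(&b.node, &queue);		/* sorts ahead of a: 5 < 10 */

	/* Walks node_list in priority order: id 2 (prio 5), then id 1 (prio 10). */
	plist_for_each_entry(w, &queue, node)
		pr_info("waiter %d prio %d\n", w->id, w->node.prio);

	plist_del(&b.node, &queue);
	plist_del(&a.node, &queue);
}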
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
SWAP_FLAG_DISCARD_PAGES) #define SWAP_BATCH 64 static inline int current_is_kswapd(void) { return current->flags & PF_KSWAPD; } /* * MAX_SWAPFILES defines the maximum number of swaptypes: things which can * be swapped to. The swap type and the offset into that swap type are * encoded into pte's and into pgoff_t's in the swapcache. Using five bits * for the type means that the maximum number of swapcache pages is 27 bits * on 32-bit-pgoff_t architectures. And that assumes that the architecture packs * the type/offset into the pte as 5/27 as well. */ #define MAX_SWAPFILES_SHIFT 5 /* * Use some of the swap files numbers for other purposes. This * is a convenient way to hook into the VM to trigger special * actions on faults. */ /* * PTE markers are used to persist information onto PTEs that otherwise * should be a none pte. As its name "PTE" hints, it should only be * applied to the leaves of pgtables. */ #define SWP_PTE_MARKER_NUM 1 #define SWP_PTE_MARKER (MAX_SWAPFILES + SWP_HWPOISON_NUM + \ SWP_MIGRATION_NUM + SWP_DEVICE_NUM) /* * Unaddressable device memory support. See include/linux/hmm.h and * Documentation/mm/hmm.rst. Short description is we need struct pages for * device memory that is unaddressable (inaccessible) by CPU, so that we can * migrate part of a process memory to device memory. * * When a page is migrated from CPU to device, we set the CPU page table entry * to a special SWP_DEVICE_{READ|WRITE} entry. * * When a page is mapped by the device for exclusive access we set the CPU page * table entries to a special SWP_DEVICE_EXCLUSIVE entry. */ #ifdef CONFIG_DEVICE_PRIVATE #define SWP_DEVICE_NUM 3 #define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM) #define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1) #define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2) #else #define SWP_DEVICE_NUM 0 #endif /* * Page migration support. * * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and * indicates that the referenced (part of) an anonymous page is exclusive to * a single process. For SWP_MIGRATION_WRITE, that information is implicit: * (part of) an anonymous page that are mapped writable are exclusive to a * single process. */ #ifdef CONFIG_MIGRATION #define SWP_MIGRATION_NUM 3 #define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM) #define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1) #define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2) #else #define SWP_MIGRATION_NUM 0 #endif /* * Handling of hardware poisoned pages with memory corruption. */ #ifdef CONFIG_MEMORY_FAILURE #define SWP_HWPOISON_NUM 1 #define SWP_HWPOISON MAX_SWAPFILES #else #define SWP_HWPOISON_NUM 0 #endif #define MAX_SWAPFILES \ ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \ SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \ SWP_PTE_MARKER_NUM) /* * Magic header for a swap area. The first part of the union is * what the swap magic looks like for the old (limited to 128MB) * swap area format, the second part of the union adds - in the * old reserved area - some extra information. Note that the first * kilobyte is reserved for boot loader or disk label stuff... * * Having the magic at the end of the PAGE_SIZE makes detecting swap * areas somewhat tricky on machines that support multiple page sizes. * For 2.5 we'll probably want to move the magic to just beyond the * bootbits... 
*/ union swap_header { struct { char reserved[PAGE_SIZE - 10]; char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */ } magic; struct { char bootbits[1024]; /* Space for disklabel etc. */ __u32 version; __u32 last_page; __u32 nr_badpages; unsigned char sws_uuid[16]; unsigned char sws_volume[16]; __u32 padding[117]; __u32 badpages[1]; } info; }; /* * current->reclaim_state points to one of these when a task is running * memory reclaim */ struct reclaim_state { /* pages reclaimed outside of LRU-based reclaim */ unsigned long reclaimed; #ifdef CONFIG_LRU_GEN /* per-thread mm walk data */ struct lru_gen_mm_walk *mm_walk; #endif }; /* * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based * reclaim * @pages: number of pages reclaimed * * If the current process is undergoing a reclaim operation, increment the * number of reclaimed pages by @pages. */ static inline void mm_account_reclaimed_pages(unsigned long pages) { if (current->reclaim_state) current->reclaim_state->reclaimed += pages; } #ifdef __KERNEL__ struct address_space; struct sysinfo; struct writeback_control; struct zone; /* * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of * disk blocks. A rbtree of swap extents maps the entire swapfile (Where the * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart * from setup, they're handled identically. * * We always assume that blocks are of size PAGE_SIZE. */ struct swap_extent { struct rb_node rb_node; pgoff_t start_page; pgoff_t nr_pages; sector_t start_block; }; /* * Max bad pages in the new format.. */ #define MAX_SWAP_BADPAGES \ ((offsetof(union swap_header, magic.magic) - \ offsetof(union swap_header, info.badpages)) / sizeof(int)) enum { SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ SWP_DISCARDABLE = (1 << 2), /* blkdev support discard */ SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ SWP_BLKDEV = (1 << 6), /* its a block device */ SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */ SWP_FS_OPS = (1 << 8), /* swapfile operations go through fs */ SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */ SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */ SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ /* add others here before... */ }; #define SWAP_CLUSTER_MAX 32UL #define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10) #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX /* Bit flag in swap_map */ #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ #define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */ /* Special value in first swap_map */ #define SWAP_MAP_MAX 0x3e /* Max count */ #define SWAP_MAP_BAD 0x3f /* Note page is bad */ #define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */ /* Special value in each swap_map continuation */ #define SWAP_CONT_MAX 0x7f /* Max count */ /* * The first page in the swap file is the swap header, which is always marked * bad to prevent it from being allocated as an entry. This also prevents the * cluster to which it belongs being marked free. Therefore 0 is safe to use as * a sentinel to indicate an entry is not valid. 
*/ #define SWAP_ENTRY_INVALID 0 #ifdef CONFIG_THP_SWAP #define SWAP_NR_ORDERS (PMD_ORDER + 1) #else #define SWAP_NR_ORDERS 1 #endif /* * We keep using same cluster for rotational device so IO will be sequential. * The purpose is to optimize SWAP throughput on these device. */ struct swap_sequential_cluster { unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; /* * The in-memory structure used to track swap areas. */ struct swap_info_struct { struct percpu_ref users; /* indicate and keep swap device valid. */ unsigned long flags; /* SWP_USED etc: see above */ signed short prio; /* swap priority of this type */ struct plist_node list; /* entry in swap_active_head */ signed char type; /* strange name for an index */ unsigned int max; /* extent of the swap_map */ unsigned char *swap_map; /* vmalloc'ed array of usage counts */ unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */ struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */ struct list_head free_clusters; /* free clusters list */ struct list_head full_clusters; /* full clusters list */ struct list_head nonfull_clusters[SWAP_NR_ORDERS]; /* list of cluster that contains at least one free slot */ struct list_head frag_clusters[SWAP_NR_ORDERS]; /* list of cluster that are fragmented or contented */ unsigned int pages; /* total of usable pages of swap */ atomic_long_t inuse_pages; /* number of those currently in use */ struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */ spinlock_t global_cluster_lock; /* Serialize usage of global cluster */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ struct completion comp; /* seldom referenced */ spinlock_t lock; /* * protect map scan related fields like * swap_map, inuse_pages and all cluster * lists. other fields are only changed * at swapon/swapoff, so are protected * by swap_lock. changing flags need * hold this lock and swap_lock. If * both locks need hold, hold swap_lock * first. */ spinlock_t cont_lock; /* * protect swap count continuation page * list. 
*/ struct work_struct discard_work; /* discard worker */ struct work_struct reclaim_work; /* reclaim worker */ struct list_head discard_clusters; /* discard clusters list */ struct plist_node avail_list; /* entry in swap_avail_head */ }; static inline swp_entry_t page_swap_entry(struct page *page) { struct folio *folio = page_folio(page); swp_entry_t entry = folio->swap; entry.val += folio_page_idx(folio, page); return entry; } /* linux/mm/workingset.c */ bool workingset_test_recent(void *shadow, bool file, bool *workingset, bool flush); void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg); void workingset_refault(struct folio *folio, void *shadow); void workingset_activation(struct folio *folio); /* linux/mm/page_alloc.c */ extern unsigned long totalreserve_pages; /* Definition of global_zone_page_state not available yet */ #define nr_free_pages() global_zone_page_state(NR_FREE_PAGES) /* linux/mm/swap.c */ void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file, unsigned int nr_io, unsigned int nr_rotated) __releases(lruvec->lru_lock); void lru_note_cost_refault(struct folio *); void folio_add_lru(struct folio *); void folio_add_lru_vma(struct folio *, struct vm_area_struct *); void mark_page_accessed(struct page *); void folio_mark_accessed(struct folio *); static inline bool folio_may_be_lru_cached(struct folio *folio) { /* * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting. * Holding small numbers of low-order mTHP folios in per-CPU LRU cache * will be sensible, but nobody has implemented and tested that yet. */ return !folio_test_large(folio); } extern atomic_t lru_disable_count; static inline bool lru_cache_disabled(void) { return atomic_read(&lru_disable_count); } static inline void lru_cache_enable(void) { atomic_dec(&lru_disable_count); } extern void lru_cache_disable(void); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_cpu_zone(struct zone *zone); extern void lru_add_drain_all(void); void folio_deactivate(struct folio *folio); void folio_mark_lazyfree(struct folio *folio); extern void swap_setup(void); /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); #define MEMCG_RECLAIM_MAY_SWAP (1 << 1) #define MEMCG_RECLAIM_PROACTIVE (1 << 2) #define MIN_SWAPPINESS 0 #define MAX_SWAPPINESS 200 /* Just reclaim from anon folios in proactive memory reclaim */ #define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1) extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, unsigned int reclaim_options, int *swappiness); extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, unsigned long *nr_scanned); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; long remove_mapping(struct address_space *mapping, struct folio *folio); #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) extern int reclaim_register_node(struct node *node); extern void reclaim_unregister_node(struct node *node); #else static inline int reclaim_register_node(struct node *node) { return 0; } static inline void reclaim_unregister_node(struct node *node) { } #endif /* CONFIG_SYSFS && CONFIG_NUMA */ #ifdef CONFIG_NUMA extern int sysctl_min_unmapped_ratio; 
extern int sysctl_min_slab_ratio; #endif void check_move_unevictable_folios(struct folio_batch *fbatch); extern void __meminit kswapd_run(int nid); extern void __meminit kswapd_stop(int nid); #ifdef CONFIG_SWAP int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned long nr_pages, sector_t start_block); int generic_swapfile_activate(struct swap_info_struct *, struct file *, sector_t *); static inline unsigned long total_swapcache_pages(void) { return global_node_page_state(NR_SWAPCACHE); } void free_swap_cache(struct folio *folio); void free_folio_and_swap_cache(struct folio *folio); void free_pages_and_swap_cache(struct encoded_page **, int); /* linux/mm/swapfile.c */ extern atomic_long_t nr_swap_pages; extern long total_swap_pages; extern atomic_t nr_rotate_swap; /* Swap 50% full? Release swapcache more aggressively.. */ static inline bool vm_swap_full(void) { return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages; } static inline long get_nr_swap_pages(void) { return atomic_long_read(&nr_swap_pages); } extern void si_swapinfo(struct sysinfo *); int folio_alloc_swap(struct folio *folio); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t, int); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t entry, int nr); extern void swap_free_nr(swp_entry_t entry, int nr_pages); extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); extern sector_t swapdev_block(int, pgoff_t); extern int __swap_count(swp_entry_t entry); extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); struct backing_dev_info; extern struct swap_info_struct *get_swap_device(swp_entry_t entry); sector_t swap_folio_sector(struct folio *folio); static inline void put_swap_device(struct swap_info_struct *si) { percpu_ref_put(&si->users); } #else /* CONFIG_SWAP */ static inline struct swap_info_struct *get_swap_device(swp_entry_t entry) { return NULL; } static inline void put_swap_device(struct swap_info_struct *si) { } #define get_nr_swap_pages() 0L #define total_swap_pages 0L #define total_swapcache_pages() 0UL #define vm_swap_full() 0 #define si_swapinfo(val) \ do { (val)->freeswap = (val)->totalswap = 0; } while (0) #define free_folio_and_swap_cache(folio) \ folio_put(folio) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr)); static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr) { } static inline void free_swap_cache(struct folio *folio) { } static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) { return 0; } static inline void swap_shmem_alloc(swp_entry_t swp, int nr) { } static inline int swap_duplicate(swp_entry_t swp) { return 0; } static inline int swapcache_prepare(swp_entry_t swp, int nr) { return 0; } static inline void swap_free_nr(swp_entry_t entry, int nr_pages) { } static inline void put_swap_folio(struct folio *folio, swp_entry_t swp) { } static inline int __swap_count(swp_entry_t entry) { return 0; } static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) { return false; } static inline int swp_swapcount(swp_entry_t entry) { return 0; } static inline 
int folio_alloc_swap(struct folio *folio) { return -EINVAL; } static inline bool folio_free_swap(struct folio *folio) { return false; } static inline int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned long nr_pages, sector_t start_block) { return -EINVAL; } #endif /* CONFIG_SWAP */ static inline void free_swap_and_cache(swp_entry_t entry) { free_swap_and_cache_nr(entry, 1); } static inline void swap_free(swp_entry_t entry) { swap_free_nr(entry, 1); } #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { /* Cgroup2 doesn't have per-cgroup swappiness */ if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) return READ_ONCE(vm_swappiness); /* root ? */ if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg)) return READ_ONCE(vm_swappiness); return READ_ONCE(memcg->swappiness); } #else static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) { return READ_ONCE(vm_swappiness); } #endif #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp); static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp) { if (mem_cgroup_disabled()) return; __folio_throttle_swaprate(folio, gfp); } #else static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp) { } #endif #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP) int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry); static inline int mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) { if (mem_cgroup_disabled()) return 0; return __mem_cgroup_try_charge_swap(folio, entry); } extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) { if (mem_cgroup_disabled()) return; __mem_cgroup_uncharge_swap(entry, nr_pages); } extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); extern bool mem_cgroup_swap_full(struct folio *folio); #else static inline int mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) { return 0; } static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) { } static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) { return get_nr_swap_pages(); } static inline bool mem_cgroup_swap_full(struct folio *folio) { return vm_swap_full(); } #endif #endif /* __KERNEL__*/ #endif /* _LINUX_SWAP_H */
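The vm_swap_full() helper above encodes the "swap more than 50% full" heuristic named in its comment: once the doubled free-page count no longer covers the total, swap cache is released more aggressively. A minimal stand-alone sketch of that arithmetic, with hypothetical page counts in place of the kernel's nr_swap_pages and total_swap_pages counters:

#include <stdbool.h>
#include <stdio.h>

/* Model of the vm_swap_full() condition: swap counts as "full" once more
 * than half of all swap pages are in use. */
static bool model_vm_swap_full(long nr_free_swap_pages, long total_swap_pages)
{
	return nr_free_swap_pages * 2 < total_swap_pages;
}

int main(void)
{
	long total = 262144;	/* hypothetical 1 GiB of swap in 4 KiB pages */

	printf("%d\n", model_vm_swap_full(200000, total));	/* 0: under half used */
	printf("%d\n", model_vm_swap_full(100000, total));	/* 1: over half used */
	return 0;
}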
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2009 IBM Corporation * Author: Mimi Zohar <zohar@us.ibm.com> */ #ifndef _LINUX_INTEGRITY_H #define _LINUX_INTEGRITY_H #include <linux/fs.h> #include <linux/iversion.h> enum integrity_status { INTEGRITY_PASS = 0, INTEGRITY_PASS_IMMUTABLE, INTEGRITY_FAIL, INTEGRITY_FAIL_IMMUTABLE, INTEGRITY_NOLABEL, INTEGRITY_NOXATTRS, INTEGRITY_UNKNOWN, }; #ifdef CONFIG_INTEGRITY extern void __init integrity_load_keys(void); #else static inline void integrity_load_keys(void) { } #endif /* CONFIG_INTEGRITY */ /* An inode's attributes for detection of changes */ struct integrity_inode_attributes { u64 version; /* track inode changes */ unsigned long ino; dev_t dev; }; /* * On stacked filesystems the i_version alone is not enough to detect file data * or metadata change. Additional metadata is required. */ static inline void integrity_inode_attrs_store(struct integrity_inode_attributes *attrs, u64 i_version, const struct inode *inode) { attrs->version = i_version; attrs->dev = inode->i_sb->s_dev; attrs->ino = inode->i_ino; } /* * On stacked filesystems detect whether the inode or its content has changed. */ static inline bool integrity_inode_attrs_changed(const struct integrity_inode_attributes *attrs, const struct inode *inode) { return (inode->i_sb->s_dev != attrs->dev || inode->i_ino != attrs->ino || !inode_eq_iversion(inode, attrs->version)); } #endif /* _LINUX_INTEGRITY_H */
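The two inline helpers above are intended to be used as a pair: record the inode's identity and i_version when a file's contents are measured, and on a later access decide whether the cached result still applies. A hedged sketch of that call pattern, assuming <linux/integrity.h> is included; the my_measurement structure and both functions are hypothetical and not part of the header:

/* Hypothetical per-file cache entry kept by some measurement code. */
struct my_measurement {
	struct integrity_inode_attributes attrs;
	bool valid;
};

/* After measuring the file, remember which inode state was measured;
 * @i_version is whatever i_version value the caller sampled at that time. */
static void my_measurement_record(struct my_measurement *m, u64 i_version,
				  const struct inode *inode)
{
	integrity_inode_attrs_store(&m->attrs, i_version, inode);
	m->valid = true;
}

/* On a later access, reuse the cached result only if neither the inode's
 * identity (dev/ino) nor its i_version has changed underneath us. */
static bool my_measurement_still_valid(const struct my_measurement *m,
				       const struct inode *inode)
{
	return m->valid && !integrity_inode_attrs_changed(&m->attrs, inode);
}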
/* * Copyright (c) 2017 Mellanox Technologies Inc. All rights reserved. * Copyright (c) 2010 Voltaire Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ #include <linux/export.h> #include <net/netlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/sock.h> #include <rdma/rdma_netlink.h> #include <linux/module.h> #include "core_priv.h" static struct { const struct rdma_nl_cbs *cb_table; /* Synchronizes between ongoing netlink commands and netlink client * unregistration.
*/ struct rw_semaphore sem; } rdma_nl_types[RDMA_NL_NUM_CLIENTS]; bool rdma_nl_chk_listeners(unsigned int group) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net); return netlink_has_listeners(rnet->nl_sock, group); } EXPORT_SYMBOL(rdma_nl_chk_listeners); static bool is_nl_msg_valid(unsigned int type, unsigned int op) { static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = { [RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS, [RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS, [RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS, }; /* * This BUILD_BUG_ON is intended to catch addition of new * RDMA netlink protocol without updating the array above. */ BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6); if (type >= RDMA_NL_NUM_CLIENTS) return false; return op < max_num_ops[type]; } static const struct rdma_nl_cbs * get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op) { const struct rdma_nl_cbs *cb_table; /* * Currently only NLDEV client is supporting netlink commands in * non init_net net namespace. */ if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV) return NULL; cb_table = READ_ONCE(rdma_nl_types[type].cb_table); if (!cb_table) { /* * Didn't get valid reference of the table, attempt module * load once. */ up_read(&rdma_nl_types[type].sem); request_module("rdma-netlink-subsys-%u", type); down_read(&rdma_nl_types[type].sem); cb_table = READ_ONCE(rdma_nl_types[type].cb_table); } if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit)) return NULL; return cb_table; } void rdma_nl_register(unsigned int index, const struct rdma_nl_cbs cb_table[]) { if (WARN_ON(!is_nl_msg_valid(index, 0)) || WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table))) return; /* Pairs with the READ_ONCE in is_nl_valid() */ smp_store_release(&rdma_nl_types[index].cb_table, cb_table); } EXPORT_SYMBOL(rdma_nl_register); void rdma_nl_unregister(unsigned int index) { down_write(&rdma_nl_types[index].sem); rdma_nl_types[index].cb_table = NULL; up_write(&rdma_nl_types[index].sem); } EXPORT_SYMBOL(rdma_nl_unregister); void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, int len, int client, int op, int flags) { *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags); if (!*nlh) return NULL; return nlmsg_data(*nlh); } EXPORT_SYMBOL(ibnl_put_msg); int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, int len, void *data, int type) { if (nla_put(skb, type, len, data)) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } return 0; } EXPORT_SYMBOL(ibnl_put_attr); static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { int type = nlh->nlmsg_type; unsigned int index = RDMA_NL_GET_CLIENT(type); unsigned int op = RDMA_NL_GET_OP(type); const struct rdma_nl_cbs *cb_table; int err = -EINVAL; if (!is_nl_msg_valid(index, op)) return -EINVAL; down_read(&rdma_nl_types[index].sem); cb_table = get_cb_table(skb, index, op); if (!cb_table) goto done; if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) && !netlink_capable(skb, CAP_NET_ADMIN)) { err = -EPERM; goto done; } /* * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't * mistakenly call the .dump() function. 
*/ if (index == RDMA_NL_LS) { if (cb_table[op].doit) err = cb_table[op].doit(skb, nlh, extack); goto done; } /* FIXME: Convert IWCM to properly handle doit callbacks */ if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) { struct netlink_dump_control c = { .dump = cb_table[op].dump, }; if (c.dump) err = netlink_dump_start(skb->sk, skb, nlh, &c); goto done; } if (cb_table[op].doit) err = cb_table[op].doit(skb, nlh, extack); done: up_read(&rdma_nl_types[index].sem); return err; } /* * This function is similar to netlink_rcv_skb with one exception: * It calls to the callback for the netlink messages without NLM_F_REQUEST * flag. These messages are intended for RDMA_NL_LS consumer, so it is allowed * for that consumer only. */ static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *)) { struct netlink_ext_ack extack = {}; struct nlmsghdr *nlh; int err; while (skb->len >= nlmsg_total_size(0)) { int msglen; nlh = nlmsg_hdr(skb); err = 0; if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len) return 0; /* * Generally speaking, the only requests are handled * by the kernel, but RDMA_NL_LS is different, because it * runs backward netlink scheme. Kernel initiates messages * and waits for reply with data to keep pathrecord cache * in sync. */ if (!(nlh->nlmsg_flags & NLM_F_REQUEST) && (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS)) goto ack; /* Skip control messages */ if (nlh->nlmsg_type < NLMSG_MIN_TYPE) goto ack; err = cb(skb, nlh, &extack); if (err == -EINTR) goto skip; ack: if (nlh->nlmsg_flags & NLM_F_ACK || err) netlink_ack(skb, nlh, err, &extack); skip: msglen = NLMSG_ALIGN(nlh->nlmsg_len); if (msglen > skb->len) msglen = skb->len; skb_pull(skb, msglen); } return 0; } static void rdma_nl_rcv(struct sk_buff *skb) { rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg); } int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); int err; err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT); return (err < 0) ? err : 0; } EXPORT_SYMBOL(rdma_nl_unicast); int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); int err; err = netlink_unicast(rnet->nl_sock, skb, pid, 0); return (err < 0) ? err : 0; } EXPORT_SYMBOL(rdma_nl_unicast_wait); int rdma_nl_multicast(struct net *net, struct sk_buff *skb, unsigned int group, gfp_t flags) { struct rdma_dev_net *rnet = rdma_net_to_dev_net(net); return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags); } EXPORT_SYMBOL(rdma_nl_multicast); void rdma_nl_init(void) { int idx; for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++) init_rwsem(&rdma_nl_types[idx].sem); } void rdma_nl_exit(void) { int idx; for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++) WARN(rdma_nl_types[idx].cb_table, "Netlink client %d wasn't released prior to unloading %s\n", idx, KBUILD_MODNAME); } int rdma_nl_net_init(struct rdma_dev_net *rnet) { struct net *net = read_pnet(&rnet->net); struct netlink_kernel_cfg cfg = { .input = rdma_nl_rcv, .flags = NL_CFG_F_NONROOT_RECV, }; struct sock *nls; nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg); if (!nls) return -ENOMEM; nls->sk_sndtimeo = 10 * HZ; rnet->nl_sock = nls; return 0; } void rdma_nl_net_exit(struct rdma_dev_net *rnet) { netlink_kernel_release(rnet->nl_sock); } MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
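rdma_nl_register() and rdma_nl_unregister() above expect each client to publish an array of struct rdma_nl_cbs indexed by op, which rdma_nl_rcv_msg() dispatches into after the permission check. A hedged sketch of such a registration with a single made-up op and handler; RDMA_NL_NLDEV merely stands in for whichever client index the code actually owns, and real clients size their tables against the op counts in <rdma/rdma_netlink.h>:

/* Hypothetical .doit handler for op 0 of this client. */
static int my_client_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	/* Parse attributes from nlh, do the work, return 0 or -errno. */
	return 0;
}

/* One entry per op; ops with neither .doit nor .dump are rejected by
 * rdma_nl_rcv_msg() via get_cb_table(). */
static const struct rdma_nl_cbs my_client_cb_table[] = {
	[0] = {
		.doit	= my_client_doit,
		.flags	= RDMA_NL_ADMIN_PERM,	/* require CAP_NET_ADMIN */
	},
};

static int __init my_client_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, my_client_cb_table);
	return 0;
}

static void __exit my_client_exit(void)
{
	/* Unregister before the cb_table goes away with this code. */
	rdma_nl_unregister(RDMA_NL_NLDEV);
}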
// SPDX-License-Identifier: GPL-2.0 /* * PCI Bus Services, see include/linux/pci.h for further explanation. * * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, * David Mosberger-Tang * * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz> */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/init.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/log2.h> #include <linux/logic_pio.h> #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/pci_hotplug.h> #include <linux/vmalloc.h> #include <asm/dma.h> #include <linux/aer.h> #include <linux/bitfield.h> #include "pci.h" DEFINE_MUTEX(pci_slot_mutex); const char *pci_power_names[] = { "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown", }; EXPORT_SYMBOL_GPL(pci_power_names); #ifdef CONFIG_X86_32 int isa_dma_bridge_buggy; EXPORT_SYMBOL(isa_dma_bridge_buggy); #endif int pci_pci_problems; EXPORT_SYMBOL(pci_pci_problems); unsigned int pci_pm_d3hot_delay; static void pci_pme_list_scan(struct work_struct *work); static LIST_HEAD(pci_pme_list); static DEFINE_MUTEX(pci_pme_list_mutex); static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); struct pci_pme_device { struct list_head list; struct pci_dev *dev; }; #define PME_TIMEOUT 1000 /* How long between PME checks */ /* * Following exit from Conventional Reset, devices must be ready within 1 sec * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional * Reset (PCIe r6.0 sec 5.8). */ #define PCI_RESET_WAIT 1000 /* msec */ /* * Devices may extend the 1 sec period through Request Retry Status * completions (PCIe r6.0 sec 2.3.1). The spec does not provide an upper * limit, but 60 sec ought to be enough for any device to become * responsive.
*/ #define PCIE_RESET_READY_POLL_MS 60000 /* msec */ static void pci_dev_d3_sleep(struct pci_dev *dev) { unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay); unsigned int upper; if (delay_ms) { /* Use a 20% upper bound, 1ms minimum */ upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U); usleep_range(delay_ms * USEC_PER_MSEC, (delay_ms + upper) * USEC_PER_MSEC); } } bool pci_reset_supported(struct pci_dev *dev) { return dev->reset_methods[0] != 0; } #ifdef CONFIG_PCI_DOMAINS int pci_domains_supported = 1; #endif #define DEFAULT_CARDBUS_IO_SIZE (256) #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024) /* pci=cbmemsize=nnM,cbiosize=nn can override this */ unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; #define DEFAULT_HOTPLUG_IO_SIZE (256) #define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024) #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024) /* hpiosize=nn can override this */ unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; /* * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size, * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size; * pci=hpmemsize=nnM overrides both */ unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE; unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE; #define DEFAULT_HOTPLUG_BUS_SIZE 1 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; /* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */ #ifdef CONFIG_PCIE_BUS_TUNE_OFF enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; #elif defined CONFIG_PCIE_BUS_SAFE enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; #elif defined CONFIG_PCIE_BUS_PERFORMANCE enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; #elif defined CONFIG_PCIE_BUS_PEER2PEER enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER; #else enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT; #endif /* * The default CLS is used if arch didn't set CLS explicitly and not * all pci devices agree on the same value. Arch can override either * the dfl or actual value as it sees fit. Don't forget this is * measured in 32-bit words, not bytes. */ u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2; u8 pci_cache_line_size __ro_after_init ; /* * If we set up a device for bus mastering, we need to check the latency * timer as certain BIOSes forget to set it properly. */ unsigned int pcibios_max_latency = 255; /* If set, the PCIe ARI capability will not be used. */ static bool pcie_ari_disabled; /* If set, the PCIe ATS capability will not be used. */ static bool pcie_ats_disabled; /* If set, the PCI config space of each device is printed during boot. */ bool pci_early_dump; bool pci_ats_disabled(void) { return pcie_ats_disabled; } EXPORT_SYMBOL_GPL(pci_ats_disabled); /* Disable bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_disable; /* Force bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_force; static int __init pcie_port_pm_setup(char *str) { if (!strcmp(str, "off")) pci_bridge_d3_disable = true; else if (!strcmp(str, "force")) pci_bridge_d3_force = true; return 1; } __setup("pcie_port_pm=", pcie_port_pm_setup); /** * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children * @bus: pointer to PCI bus structure to search * * Given a PCI bus, returns the highest PCI bus number present in the set * including the given PCI bus and its list of child PCI buses. 
*/ unsigned char pci_bus_max_busnr(struct pci_bus *bus) { struct pci_bus *tmp; unsigned char max, n; max = bus->busn_res.end; list_for_each_entry(tmp, &bus->children, node) { n = pci_bus_max_busnr(tmp); if (n > max) max = n; } return max; } EXPORT_SYMBOL_GPL(pci_bus_max_busnr); /** * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS * @pdev: the PCI device * * Returns error bits set in PCI_STATUS and clears them. */ int pci_status_get_and_clear_errors(struct pci_dev *pdev) { u16 status; int ret; ret = pci_read_config_word(pdev, PCI_STATUS, &status); if (ret != PCIBIOS_SUCCESSFUL) return -EIO; status &= PCI_STATUS_ERROR_BITS; if (status) pci_write_config_word(pdev, PCI_STATUS, status); return status; } EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors); #ifdef CONFIG_HAS_IOMEM static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar, bool write_combine) { struct resource *res = &pdev->resource[bar]; resource_size_t start = res->start; resource_size_t size = resource_size(res); /* * Make sure the BAR is actually a memory resource, not an IO resource */ if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) { pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res); return NULL; } if (write_combine) return ioremap_wc(start, size); return ioremap(start, size); } void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) { return __pci_ioremap_resource(pdev, bar, false); } EXPORT_SYMBOL_GPL(pci_ioremap_bar); void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar) { return __pci_ioremap_resource(pdev, bar, true); } EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar); #endif /** * pci_dev_str_match_path - test if a path string matches a device * @dev: the PCI device to test * @path: string to match the device against * @endptr: pointer to the string after the match * * Test if a string (typically from a kernel parameter) formatted as a * path of device/function addresses matches a PCI device. The string must * be of the form: * * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]* * * A path for a device can be obtained using 'lspci -t'. Using a path * is more robust against bus renumbering than using only a single bus, * device and function address. * * Returns 1 if the string matches the device, 0 if it does not and * a negative error code if it fails to parse the string. */ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path, const char **endptr) { int ret; unsigned int seg, bus, slot, func; char *wpath, *p; char end; *endptr = strchrnul(path, ';'); wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC); if (!wpath) return -ENOMEM; while (1) { p = strrchr(wpath, '/'); if (!p) break; ret = sscanf(p, "/%x.%x%c", &slot, &func, &end); if (ret != 2) { ret = -EINVAL; goto free_and_exit; } if (dev->devfn != PCI_DEVFN(slot, func)) { ret = 0; goto free_and_exit; } /* * Note: we don't need to get a reference to the upstream * bridge because we hold a reference to the top level * device which should hold a reference to the bridge, * and so on. 
*/ dev = pci_upstream_bridge(dev); if (!dev) { ret = 0; goto free_and_exit; } *p = 0; } ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot, &func, &end); if (ret != 4) { seg = 0; ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end); if (ret != 3) { ret = -EINVAL; goto free_and_exit; } } ret = (seg == pci_domain_nr(dev->bus) && bus == dev->bus->number && dev->devfn == PCI_DEVFN(slot, func)); free_and_exit: kfree(wpath); return ret; } /** * pci_dev_str_match - test if a string matches a device * @dev: the PCI device to test * @p: string to match the device against * @endptr: pointer to the string after the match * * Test if a string (typically from a kernel parameter) matches a specified * PCI device. The string may be of one of the following formats: * * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]* * pci:<vendor>:<device>[:<subvendor>:<subdevice>] * * The first format specifies a PCI bus/device/function address which * may change if new hardware is inserted, if motherboard firmware changes, * or due to changes caused in kernel parameters. If the domain is * left unspecified, it is taken to be 0. In order to be robust against * bus renumbering issues, a path of PCI device/function numbers may be used * to address the specific device. The path for a device can be determined * through the use of 'lspci -t'. * * The second format matches devices using IDs in the configuration * space which may match multiple devices in the system. A value of 0 * for any field will match all devices. (Note: this differs from * in-kernel code that uses PCI_ANY_ID which is ~0; this is for * legacy reasons and convenience so users don't have to specify * FFFFFFFFs on the command line.) * * Returns 1 if the string matches the device, 0 if it does not and * a negative error code if the string cannot be parsed. 
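 *
 * Illustrative examples (hypothetical addresses, not taken from this file):
 * "pci=disable_acs_redir=0000:03:00.0" uses the plain address form,
 * "pci=disable_acs_redir=0000:00:1c.0/00.0" addresses a device by its
 * devfn path below bridge 00:1c.0, and "pci=disable_acs_redir=pci:8086:0"
 * matches every device with vendor ID 0x8086, since a device ID of 0
 * matches all devices.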
*/ static int pci_dev_str_match(struct pci_dev *dev, const char *p, const char **endptr) { int ret; int count; unsigned short vendor, device, subsystem_vendor, subsystem_device; if (strncmp(p, "pci:", 4) == 0) { /* PCI vendor/device (subvendor/subdevice) IDs are specified */ p += 4; ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device, &subsystem_vendor, &subsystem_device, &count); if (ret != 4) { ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count); if (ret != 2) return -EINVAL; subsystem_vendor = 0; subsystem_device = 0; } p += count; if ((!vendor || vendor == dev->vendor) && (!device || device == dev->device) && (!subsystem_vendor || subsystem_vendor == dev->subsystem_vendor) && (!subsystem_device || subsystem_device == dev->subsystem_device)) goto found; } else { /* * PCI Bus, Device, Function IDs are specified * (optionally, may include a path of devfns following it) */ ret = pci_dev_str_match_path(dev, p, &p); if (ret < 0) return ret; else if (ret) goto found; } *endptr = p; return 0; found: *endptr = p; return 1; } static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap) { return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, bus, devfn); } u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) { return __pci_find_next_cap(dev->bus, dev->devfn, pos + PCI_CAP_LIST_NEXT, cap); } EXPORT_SYMBOL_GPL(pci_find_next_capability); static u8 __pci_bus_find_cap_start(struct pci_bus *bus, unsigned int devfn, u8 hdr_type) { u16 status; pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status); if (!(status & PCI_STATUS_CAP_LIST)) return 0; switch (hdr_type) { case PCI_HEADER_TYPE_NORMAL: case PCI_HEADER_TYPE_BRIDGE: return PCI_CAPABILITY_LIST; case PCI_HEADER_TYPE_CARDBUS: return PCI_CB_CAPABILITY_LIST; } return 0; } /** * pci_find_capability - query for devices' capabilities * @dev: PCI device to query * @cap: capability code * * Tell if a device supports a given PCI capability. * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not * support it. Possible values for @cap include: * * %PCI_CAP_ID_PM Power Management * %PCI_CAP_ID_AGP Accelerated Graphics Port * %PCI_CAP_ID_VPD Vital Product Data * %PCI_CAP_ID_SLOTID Slot Identification * %PCI_CAP_ID_MSI Message Signalled Interrupts * %PCI_CAP_ID_CHSWP CompactPCI HotSwap * %PCI_CAP_ID_PCIX PCI-X * %PCI_CAP_ID_EXP PCI Express */ u8 pci_find_capability(struct pci_dev *dev, int cap) { u8 pos; pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); if (pos) pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); return pos; } EXPORT_SYMBOL(pci_find_capability); /** * pci_bus_find_capability - query for devices' capabilities * @bus: the PCI bus to query * @devfn: PCI device to query * @cap: capability code * * Like pci_find_capability() but works for PCI devices that do not have a * pci_dev structure set up yet. * * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not * support it. 
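 *
 * Illustrative sketch (hypothetical early-probe code, not taken from this
 * file):
 *
 *	u16 flags;
 *	u8 pos = pci_bus_find_capability(bus, devfn, PCI_CAP_ID_EXP);
 *
 *	if (pos)
 *		pci_bus_read_config_word(bus, devfn, pos + PCI_EXP_FLAGS, &flags);
 *
 * where @bus and @devfn identify a function that has been discovered on
 * the bus but has no struct pci_dev set up yet.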
*/ u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) { u8 hdr_type, pos; pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type); pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK); if (pos) pos = __pci_find_next_cap(bus, devfn, pos, cap); return pos; } EXPORT_SYMBOL(pci_bus_find_capability); /** * pci_find_next_ext_capability - Find an extended capability * @dev: PCI device to query * @start: address at which to start looking (0 to start at beginning of list) * @cap: capability code * * Returns the address of the next matching extended capability structure * within the device's PCI configuration space or 0 if the device does * not support it. Some capabilities can occur several times, e.g., the * vendor-specific capability, and this provides a way to find them all. */ u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap) { if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) return 0; return PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, start, cap, dev->bus, dev->devfn); } EXPORT_SYMBOL_GPL(pci_find_next_ext_capability); /** * pci_find_ext_capability - Find an extended capability * @dev: PCI device to query * @cap: capability code * * Returns the address of the requested extended capability structure * within the device's PCI configuration space or 0 if the device does * not support it. Possible values for @cap include: * * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting * %PCI_EXT_CAP_ID_VC Virtual Channel * %PCI_EXT_CAP_ID_DSN Device Serial Number * %PCI_EXT_CAP_ID_PWR Power Budgeting */ u16 pci_find_ext_capability(struct pci_dev *dev, int cap) { return pci_find_next_ext_capability(dev, 0, cap); } EXPORT_SYMBOL_GPL(pci_find_ext_capability); /** * pci_get_dsn - Read and return the 8-byte Device Serial Number * @dev: PCI device to query * * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial * Number. * * Returns the DSN, or zero if the capability does not exist. */ u64 pci_get_dsn(struct pci_dev *dev) { u32 dword; u64 dsn; int pos; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); if (!pos) return 0; /* * The Device Serial Number is two dwords offset 4 bytes from the * capability position. The specification says that the first dword is * the lower half, and the second dword is the upper half. */ pos += 4; pci_read_config_dword(dev, pos, &dword); dsn = (u64)dword; pci_read_config_dword(dev, pos + 4, &dword); dsn |= ((u64)dword) << 32; return dsn; } EXPORT_SYMBOL_GPL(pci_get_dsn); static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap) { int rc; u8 cap, mask; if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST) mask = HT_3BIT_CAP_MASK; else mask = HT_5BIT_CAP_MASK; pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, PCI_CAP_ID_HT, dev->bus, dev->devfn); while (pos) { rc = pci_read_config_byte(dev, pos + 3, &cap); if (rc != PCIBIOS_SUCCESSFUL) return 0; if ((cap & mask) == ht_cap) return pos; pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos + PCI_CAP_LIST_NEXT, PCI_CAP_ID_HT, dev->bus, dev->devfn); } return 0; } /** * pci_find_next_ht_capability - query a device's HyperTransport capabilities * @dev: PCI device to query * @pos: Position from which to continue searching * @ht_cap: HyperTransport capability code * * To be used in conjunction with pci_find_ht_capability() to search for * all capabilities matching @ht_cap. @pos should always be a value returned * from pci_find_ht_capability(). * * NB. 
To be 100% safe against broken PCI devices, the caller should take * steps to avoid an infinite loop. */ u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap) { return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap); } EXPORT_SYMBOL_GPL(pci_find_next_ht_capability); /** * pci_find_ht_capability - query a device's HyperTransport capabilities * @dev: PCI device to query * @ht_cap: HyperTransport capability code * * Tell if a device supports a given HyperTransport capability. * Returns an address within the device's PCI configuration space * or 0 in case the device does not support the request capability. * The address points to the PCI capability, of type PCI_CAP_ID_HT, * which has a HyperTransport capability matching @ht_cap. */ u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap) { u8 pos; pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); if (pos) pos = __pci_find_next_ht_cap(dev, pos, ht_cap); return pos; } EXPORT_SYMBOL_GPL(pci_find_ht_capability); /** * pci_find_vsec_capability - Find a vendor-specific extended capability * @dev: PCI device to query * @vendor: Vendor ID for which capability is defined * @cap: Vendor-specific capability ID * * If @dev has Vendor ID @vendor, search for a VSEC capability with * VSEC ID @cap. If found, return the capability offset in * config space; otherwise return 0. */ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap) { u16 vsec = 0; u32 header; int ret; if (vendor != dev->vendor) return 0; while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) { ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header); if (ret != PCIBIOS_SUCCESSFUL) continue; if (PCI_VNDR_HEADER_ID(header) == cap) return vsec; } return 0; } EXPORT_SYMBOL_GPL(pci_find_vsec_capability); /** * pci_find_dvsec_capability - Find DVSEC for vendor * @dev: PCI device to query * @vendor: Vendor ID to match for the DVSEC * @dvsec: Designated Vendor-specific capability ID * * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability * offset in config space; otherwise return 0. */ u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec) { int pos; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC); if (!pos) return 0; while (pos) { u16 v, id; pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v); pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id); if (vendor == v && dvsec == id) return pos; pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC); } return 0; } EXPORT_SYMBOL_GPL(pci_find_dvsec_capability); /** * pci_find_parent_resource - return resource region of parent bus of given * region * @dev: PCI device structure contains resources to be searched * @res: child resource record for which parent is sought * * For given resource region of given device, return the resource region of * parent bus the given region is contained in. */ struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) { const struct pci_bus *bus = dev->bus; struct resource *r; pci_bus_for_each_resource(bus, r) { if (!r) continue; if (resource_contains(r, res)) { /* * If the window is prefetchable but the BAR is * not, the allocator made a mistake. */ if (r->flags & IORESOURCE_PREFETCH && !(res->flags & IORESOURCE_PREFETCH)) return NULL; /* * If we're below a transparent bridge, there may * be both a positively-decoded aperture and a * subtractively-decoded region that contain the BAR. 
* We want the positively-decoded one, so this depends * on pci_bus_for_each_resource() giving us those * first. */ return r; } } return NULL; } EXPORT_SYMBOL(pci_find_parent_resource); /** * pci_find_resource - Return matching PCI device resource * @dev: PCI device to query * @res: Resource to look for * * Goes over standard PCI resources (BARs) and checks if the given resource * is partially or fully contained in any of them. In that case the * matching resource is returned, %NULL otherwise. */ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) { struct resource *r = &dev->resource[i]; if (r->start && resource_contains(r, res)) return r; } return NULL; } EXPORT_SYMBOL(pci_find_resource); /** * pci_resource_name - Return the name of the PCI resource * @dev: PCI device to query * @i: index of the resource * * Return the standard PCI resource (BAR) name according to their index. */ const char *pci_resource_name(struct pci_dev *dev, unsigned int i) { static const char * const bar_name[] = { "BAR 0", "BAR 1", "BAR 2", "BAR 3", "BAR 4", "BAR 5", "ROM", #ifdef CONFIG_PCI_IOV "VF BAR 0", "VF BAR 1", "VF BAR 2", "VF BAR 3", "VF BAR 4", "VF BAR 5", #endif "bridge window", /* "io" included in %pR */ "bridge window", /* "mem" included in %pR */ "bridge window", /* "mem pref" included in %pR */ }; static const char * const cardbus_name[] = { "BAR 1", "unknown", "unknown", "unknown", "unknown", "unknown", #ifdef CONFIG_PCI_IOV "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", #endif "CardBus bridge window 0", /* I/O */ "CardBus bridge window 1", /* I/O */ "CardBus bridge window 0", /* mem */ "CardBus bridge window 1", /* mem */ }; if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS && i < ARRAY_SIZE(cardbus_name)) return cardbus_name[i]; if (i < ARRAY_SIZE(bar_name)) return bar_name[i]; return "unknown"; } /** * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos * @dev: the PCI device to operate on * @pos: config space offset of status word * @mask: mask of bit(s) to care about in status word * * Return 1 when mask bit(s) in status word clear, 0 otherwise. */ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask) { int i; /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { u16 status; if (i) msleep((1 << (i - 1)) * 100); pci_read_config_word(dev, pos, &status); if (!(status & mask)) return 1; } return 0; } static int pci_acs_enable; /** * pci_request_acs - ask for ACS to be enabled if supported */ void pci_request_acs(void) { pci_acs_enable = 1; } static const char *disable_acs_redir_param; static const char *config_acs_param; struct pci_acs { u16 cap; u16 ctrl; u16 fw_ctrl; }; static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps, const char *p, const u16 acs_mask, const u16 acs_flags) { u16 flags = acs_flags; u16 mask = acs_mask; char *delimit; int ret = 0; if (!p) return; while (*p) { if (!acs_mask) { /* Check for ACS flags */ delimit = strstr(p, "@"); if (delimit) { int end; u32 shift = 0; end = delimit - p - 1; mask = 0; flags = 0; while (end > -1) { if (*(p + end) == '0') { mask |= 1 << shift; shift++; end--; } else if (*(p + end) == '1') { mask |= 1 << shift; flags |= 1 << shift; shift++; end--; } else if ((*(p + end) == 'x') || (*(p + end) == 'X')) { shift++; end--; } else { pci_err(dev, "Invalid ACS flags... 
Ignoring\n"); return; } } p = delimit + 1; } else { pci_err(dev, "ACS Flags missing\n"); return; } } if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) { pci_err(dev, "Invalid ACS flags specified\n"); return; } ret = pci_dev_str_match(dev, p, &p); if (ret < 0) { pr_info_once("PCI: Can't parse ACS command line parameter\n"); break; } else if (ret == 1) { /* Found a match */ break; } if (*p != ';' && *p != ',') { /* End of param or invalid format */ break; } p++; } if (ret != 1) return; if (!pci_dev_specific_disable_acs_redir(dev)) return; pci_dbg(dev, "ACS mask = %#06x\n", mask); pci_dbg(dev, "ACS flags = %#06x\n", flags); pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl); pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl); /* * For mask bits that are 0, copy them from the firmware setting * and apply flags for all the mask bits that are 1. */ caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask); pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl); } /** * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities * @dev: the PCI device * @caps: default ACS controls */ static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps) { /* Source Validation */ caps->ctrl |= (caps->cap & PCI_ACS_SV); /* P2P Request Redirect */ caps->ctrl |= (caps->cap & PCI_ACS_RR); /* P2P Completion Redirect */ caps->ctrl |= (caps->cap & PCI_ACS_CR); /* Upstream Forwarding */ caps->ctrl |= (caps->cap & PCI_ACS_UF); /* Enable Translation Blocking for external devices and noats */ if (pci_ats_disabled() || dev->external_facing || dev->untrusted) caps->ctrl |= (caps->cap & PCI_ACS_TB); } /** * pci_enable_acs - enable ACS if hardware support it * @dev: the PCI device */ static void pci_enable_acs(struct pci_dev *dev) { struct pci_acs caps; bool enable_acs = false; int pos; /* If an iommu is present we start with kernel default caps */ if (pci_acs_enable) { if (pci_dev_specific_enable_acs(dev)) enable_acs = true; } pos = dev->acs_cap; if (!pos) return; pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap); pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl); caps.fw_ctrl = caps.ctrl; if (enable_acs) pci_std_enable_acs(dev, &caps); /* * Always apply caps from the command line, even if there is no iommu. * Trust that the admin has a reason to change the ACS settings. */ __pci_config_acs(dev, &caps, disable_acs_redir_param, PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC, ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC)); __pci_config_acs(dev, &caps, config_acs_param, 0, 0); pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl); } /** * pci_restore_bars - restore a device's BAR values (e.g. after wake-up) * @dev: PCI device to have its BARs restored * * Restore the BAR values for a given device, so as to make it * accessible by its driver. 
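 *
 * Note that the loop below stops at PCI_BRIDGE_RESOURCES, so it rewrites
 * the six standard BARs, the expansion ROM and, when CONFIG_PCI_IOV is
 * enabled, the VF BARs, but not the bridge windows.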
*/ static void pci_restore_bars(struct pci_dev *dev) { int i; for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) pci_update_resource(dev, i); } static inline bool platform_pci_power_manageable(struct pci_dev *dev) { if (pci_use_mid_pm()) return true; return acpi_pci_power_manageable(dev); } static inline int platform_pci_set_power_state(struct pci_dev *dev, pci_power_t t) { if (pci_use_mid_pm()) return mid_pci_set_power_state(dev, t); return acpi_pci_set_power_state(dev, t); } static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev) { if (pci_use_mid_pm()) return mid_pci_get_power_state(dev); return acpi_pci_get_power_state(dev); } static inline void platform_pci_refresh_power_state(struct pci_dev *dev) { if (!pci_use_mid_pm()) acpi_pci_refresh_power_state(dev); } static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) { if (pci_use_mid_pm()) return PCI_POWER_ERROR; return acpi_pci_choose_state(dev); } static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable) { if (pci_use_mid_pm()) return PCI_POWER_ERROR; return acpi_pci_wakeup(dev, enable); } static inline bool platform_pci_need_resume(struct pci_dev *dev) { if (pci_use_mid_pm()) return false; return acpi_pci_need_resume(dev); } static inline bool platform_pci_bridge_d3(struct pci_dev *dev) { if (pci_use_mid_pm()) return false; return acpi_pci_bridge_d3(dev); } /** * pci_update_current_state - Read power state of given device and cache it * @dev: PCI device to handle. * @state: State to cache in case the device doesn't have the PM capability * * The power state is read from the PMCSR register, which however is * inaccessible in D3cold. The platform firmware is therefore queried first * to detect accessibility of the register. In case the platform firmware * reports an incorrect state or the device isn't power manageable by the * platform at all, we try to detect D3cold by testing accessibility of the * vendor ID in config space. */ void pci_update_current_state(struct pci_dev *dev, pci_power_t state) { if (platform_pci_get_power_state(dev) == PCI_D3cold) { dev->current_state = PCI_D3cold; } else if (dev->pm_cap) { u16 pmcsr; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (PCI_POSSIBLE_ERROR(pmcsr)) { dev->current_state = PCI_D3cold; return; } dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; } else { dev->current_state = state; } } /** * pci_refresh_power_state - Refresh the given device's power state data * @dev: Target PCI device. * * Ask the platform to refresh the devices power state information and invoke * pci_update_current_state() to update its current PCI power state. */ void pci_refresh_power_state(struct pci_dev *dev) { platform_pci_refresh_power_state(dev); pci_update_current_state(dev, dev->current_state); } /** * pci_platform_power_transition - Use platform to change device power state * @dev: PCI device to handle. * @state: State to put the device into. */ int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) { int error; error = platform_pci_set_power_state(dev, state); if (!error) pci_update_current_state(dev, state); else if (!dev->pm_cap) /* Fall back to PCI_D0 */ dev->current_state = PCI_D0; return error; } EXPORT_SYMBOL_GPL(pci_platform_power_transition); static int pci_resume_one(struct pci_dev *pci_dev, void *ign) { pm_request_resume(&pci_dev->dev); return 0; } /** * pci_resume_bus - Walk given bus and runtime resume devices on it * @bus: Top bus of the subtree to walk. 
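 *
 * Illustrative sketch (hypothetical port-driver code, not taken from this
 * file): after powering a hierarchy back up, a caller might run
 *
 *	pci_resume_bus(bridge->subordinate);
 *
 * to request runtime resume of every device below the bridge.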
*/ void pci_resume_bus(struct pci_bus *bus) { if (bus) pci_walk_bus(bus, pci_resume_one, NULL); } static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) { int delay = 1; bool retrain = false; struct pci_dev *root, *bridge; root = pcie_find_root_port(dev); if (pci_is_pcie(dev)) { bridge = pci_upstream_bridge(dev); if (bridge) retrain = true; } /* * The caller has already waited long enough after a reset that the * device should respond to config requests, but it may respond * with Request Retry Status (RRS) if it needs more time to * initialize. * * If the device is below a Root Port with Configuration RRS * Software Visibility enabled, reading the Vendor ID returns a * special data value if the device responded with RRS. Read the * Vendor ID until we get non-RRS status. * * If there's no Root Port or Configuration RRS Software Visibility * is not enabled, the device may still respond with RRS, but * hardware may retry the config request. If no retries receive * Successful Completion, hardware generally synthesizes ~0 * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor * ID for VFs and non-existent devices also returns ~0, so read the * Command register until it returns something other than ~0. */ for (;;) { u32 id; if (pci_dev_is_disconnected(dev)) { pci_dbg(dev, "disconnected; not waiting\n"); return -ENOTTY; } if (root && root->config_rrs_sv) { pci_read_config_dword(dev, PCI_VENDOR_ID, &id); if (!pci_bus_rrs_vendor_id(id)) break; } else { pci_read_config_dword(dev, PCI_COMMAND, &id); if (!PCI_POSSIBLE_ERROR(id)) break; } if (delay > timeout) { pci_warn(dev, "not ready %dms after %s; giving up\n", delay - 1, reset_type); return -ENOTTY; } if (delay > PCI_RESET_WAIT) { if (retrain) { retrain = false; if (pcie_failed_link_retrain(bridge) == 0) { delay = 1; continue; } } pci_info(dev, "not ready %dms after %s; waiting\n", delay - 1, reset_type); } msleep(delay); delay *= 2; } if (delay > PCI_RESET_WAIT) pci_info(dev, "ready %dms after %s\n", delay - 1, reset_type); else pci_dbg(dev, "ready %dms after %s\n", delay - 1, reset_type); return 0; } /** * pci_power_up - Put the given device into D0 * @dev: PCI device to power up * * On success, return 0 or 1, depending on whether or not it is necessary to * restore the device's BARs subsequently (1 is returned in that case). * * On failure, return a negative error code. Always return failure if @dev * lacks a Power Management Capability, even if the platform was able to * put the device in D0 via non-PCI means. */ int pci_power_up(struct pci_dev *dev) { bool need_restore; pci_power_t state; u16 pmcsr; platform_pci_set_power_state(dev, PCI_D0); if (!dev->pm_cap) { state = platform_pci_get_power_state(dev); if (state == PCI_UNKNOWN) dev->current_state = PCI_D0; else dev->current_state = state; return -EIO; } if (pci_dev_is_disconnected(dev)) { dev->current_state = PCI_D3cold; return -EIO; } pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (PCI_POSSIBLE_ERROR(pmcsr)) { pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n", pci_power_name(dev->current_state)); dev->current_state = PCI_D3cold; return -EIO; } state = pmcsr & PCI_PM_CTRL_STATE_MASK; need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET); if (state == PCI_D0) goto end; /* * Force the entire word to 0. This doesn't affect PME_Status, disables * PME_En, and sets PowerState to 0. 
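 *
 * PMCSR layout reminder (PCI PM 1.2): bits 1:0 are PowerState, bit 8 is
 * PME_En and bit 15 is PME_Status, which is write-one-to-clear, so
 * writing 0 leaves any pending PME_Status bit set.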
*/ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0); /* Mandatory transition delays; see PCI PM 1.2. */ if (state == PCI_D3hot) pci_dev_d3_sleep(dev); else if (state == PCI_D2) udelay(PCI_PM_D2_DELAY); end: dev->current_state = PCI_D0; if (need_restore) return 1; return 0; } /** * pci_set_full_power_state - Put a PCI device into D0 and update its state * @dev: PCI device to power up * @locked: whether pci_bus_sem is held * * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register * to confirm the state change, restore its BARs if they might be lost and * reconfigure ASPM in accordance with the new power state. * * If pci_restore_state() is going to be called right after a power state change * to D0, it is more efficient to use pci_power_up() directly instead of this * function. */ static int pci_set_full_power_state(struct pci_dev *dev, bool locked) { u16 pmcsr; int ret; ret = pci_power_up(dev); if (ret < 0) { if (dev->current_state == PCI_D0) return 0; return ret; } pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; if (dev->current_state != PCI_D0) { pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n", pci_power_name(dev->current_state)); } else if (ret > 0) { /* * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning * from D3hot to D0 _may_ perform an internal reset, thereby * going to "D0 Uninitialized" rather than "D0 Initialized". * For example, at least some versions of the 3c905B and the * 3c556B exhibit this behaviour. * * At least some laptop BIOSen (e.g. the Thinkpad T21) leave * devices in a D3hot state at boot. Consequently, we need to * restore at least the BARs so that the device will be * accessible to its driver. */ pci_restore_bars(dev); } if (dev->bus->self) pcie_aspm_pm_state_change(dev->bus->self, locked); return 0; } /** * __pci_dev_set_current_state - Set current state of a PCI device * @dev: Device to handle * @data: pointer to state to be set */ static int __pci_dev_set_current_state(struct pci_dev *dev, void *data) { pci_power_t state = *(pci_power_t *)data; dev->current_state = state; return 0; } /** * pci_bus_set_current_state - Walk given bus and set current state of devices * @bus: Top bus of the subtree to walk. * @state: state to be set */ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) { if (bus) pci_walk_bus(bus, __pci_dev_set_current_state, &state); } static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked) { if (!bus) return; if (locked) pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state); else pci_walk_bus(bus, __pci_dev_set_current_state, &state); } /** * pci_set_low_power_state - Put a PCI device into a low-power state. * @dev: PCI device to handle. * @state: PCI power state (D1, D2, D3hot) to put the device into. * @locked: whether pci_bus_sem is held * * Use the device's PCI_PM_CTRL register to put it into a low-power state. * * RETURN VALUE: * -EINVAL if the requested state is invalid. * -EIO if device does not support PCI PM or its PM capabilities register has a * wrong version, or device doesn't support the requested state. * 0 if device already is in the requested state. * 0 if device's power state has been successfully changed. 
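 *
 * For example (illustrative only): a request to go from D1 to D3hot is
 * honoured, while a request to go from D3hot to D1 fails with -EINVAL,
 * because the device would have to pass through D0 first.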
*/ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked) { u16 pmcsr; if (!dev->pm_cap) return -EIO; /* * Validate transition: We can enter D0 from any state, but if * we're already in a low-power state, we can only go deeper. E.g., * we can go from D1 to D3, but we can't go directly from D3 to D1; * we'd have to go from D3 to D0, then to D1. */ if (dev->current_state <= PCI_D3cold && dev->current_state > state) { pci_dbg(dev, "Invalid power transition (from %s to %s)\n", pci_power_name(dev->current_state), pci_power_name(state)); return -EINVAL; } /* Check if this device supports the desired state */ if ((state == PCI_D1 && !dev->d1_support) || (state == PCI_D2 && !dev->d2_support)) return -EIO; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (PCI_POSSIBLE_ERROR(pmcsr)) { pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n", pci_power_name(dev->current_state), pci_power_name(state)); dev->current_state = PCI_D3cold; return -EIO; } pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= state; /* Enter specified state */ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); /* Mandatory power management transition delays; see PCI PM 1.2. */ if (state == PCI_D3hot) pci_dev_d3_sleep(dev); else if (state == PCI_D2) udelay(PCI_PM_D2_DELAY); pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; if (dev->current_state != state) pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n", pci_power_name(dev->current_state), pci_power_name(state)); if (dev->bus->self) pcie_aspm_pm_state_change(dev->bus->self, locked); return 0; } static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked) { int error; /* Bound the state we're entering */ if (state > PCI_D3cold) state = PCI_D3cold; else if (state < PCI_D0) state = PCI_D0; else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) /* * If the device or the parent bridge do not support PCI * PM, ignore the request if we're doing anything other * than putting it into D0 (which would only happen on * boot). */ return 0; /* Check if we're already there */ if (dev->current_state == state) return 0; if (state == PCI_D0) return pci_set_full_power_state(dev, locked); /* * This device is quirked not to be put into D3, so don't put it in * D3 */ if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) return 0; if (state == PCI_D3cold) { /* * To put the device in D3cold, put it into D3hot in the native * way, then put it into D3cold using platform ops. */ error = pci_set_low_power_state(dev, PCI_D3hot, locked); if (pci_platform_power_transition(dev, PCI_D3cold)) return error; /* Powering off a bridge may power off the whole hierarchy */ if (dev->current_state == PCI_D3cold) __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked); } else { error = pci_set_low_power_state(dev, state, locked); if (pci_platform_power_transition(dev, state)) return error; } return 0; } /** * pci_set_power_state - Set the power state of a PCI device * @dev: PCI device to handle. * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. * * Transition a device to a new power state, using the platform firmware and/or * the device's PCI PM registers. * * RETURN VALUE: * -EINVAL if the requested state is invalid. * -EIO if device does not support PCI PM or its PM capabilities register has a * wrong version, or device doesn't support the requested state. 
* 0 if the transition is to D1 or D2 but D1 and D2 are not supported. * 0 if device already is in the requested state. * 0 if the transition is to D3 but D3 is not supported. * 0 if device's power state has been successfully changed. */ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return __pci_set_power_state(dev, state, false); } EXPORT_SYMBOL(pci_set_power_state); int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state) { lockdep_assert_held(&pci_bus_sem); return __pci_set_power_state(dev, state, true); } EXPORT_SYMBOL(pci_set_power_state_locked); #define PCI_EXP_SAVE_REGS 7 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev, u16 cap, bool extended) { struct pci_cap_saved_state *tmp; hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap) return tmp; } return NULL; } struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap) { return _pci_find_saved_cap(dev, cap, false); } struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap) { return _pci_find_saved_cap(dev, cap, true); } static int pci_save_pcie_state(struct pci_dev *dev) { int i = 0; struct pci_cap_saved_state *save_state; u16 *cap; if (!pci_is_pcie(dev)) return 0; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); if (!save_state) { pci_err(dev, "buffer not found in %s\n", __func__); return -ENOMEM; } cap = (u16 *)&save_state->cap.data[0]; pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]); pci_save_aspm_l1ss_state(dev); pci_save_ltr_state(dev); return 0; } static void pci_restore_pcie_state(struct pci_dev *dev) { int i = 0; struct pci_cap_saved_state *save_state; u16 *cap; /* * Restore max latencies (in the LTR capability) before enabling * LTR itself in PCI_EXP_DEVCTL2. */ pci_restore_ltr_state(dev); pci_restore_aspm_l1ss_state(dev); save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); if (!save_state) return; /* * Downstream ports reset the LTR enable bit when link goes down. * Check and re-configure the bit here before restoring device. * PCIe r5.0, sec 7.5.3.16. 
*/ pci_bridge_reconfigure_ltr(dev); cap = (u16 *)&save_state->cap.data[0]; pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); } static int pci_save_pcix_state(struct pci_dev *dev) { int pos; struct pci_cap_saved_state *save_state; pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!pos) return 0; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); if (!save_state) { pci_err(dev, "buffer not found in %s\n", __func__); return -ENOMEM; } pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->cap.data); return 0; } static void pci_restore_pcix_state(struct pci_dev *dev) { int i = 0, pos; struct pci_cap_saved_state *save_state; u16 *cap; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!save_state || !pos) return; cap = (u16 *)&save_state->cap.data[0]; pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); } /** * pci_save_state - save the PCI configuration space of a device before * suspending * @dev: PCI device that we're dealing with */ int pci_save_state(struct pci_dev *dev) { int i; /* XXX: 100% dword access ok here? */ for (i = 0; i < 16; i++) { pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); pci_dbg(dev, "save config %#04x: %#010x\n", i * 4, dev->saved_config_space[i]); } dev->state_saved = true; i = pci_save_pcie_state(dev); if (i != 0) return i; i = pci_save_pcix_state(dev); if (i != 0) return i; pci_save_dpc_state(dev); pci_save_aer_state(dev); pci_save_ptm_state(dev); pci_save_tph_state(dev); return pci_save_vc_state(dev); } EXPORT_SYMBOL(pci_save_state); static void pci_restore_config_dword(struct pci_dev *pdev, int offset, u32 saved_val, int retry, bool force) { u32 val; pci_read_config_dword(pdev, offset, &val); if (!force && val == saved_val) return; for (;;) { pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n", offset, val, saved_val); pci_write_config_dword(pdev, offset, saved_val); if (retry-- <= 0) return; pci_read_config_dword(pdev, offset, &val); if (val == saved_val) return; mdelay(1); } } static void pci_restore_config_space_range(struct pci_dev *pdev, int start, int end, int retry, bool force) { int index; for (index = end; index >= start; index--) pci_restore_config_dword(pdev, 4 * index, pdev->saved_config_space[index], retry, force); } static void pci_restore_config_space(struct pci_dev *pdev) { if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) { pci_restore_config_space_range(pdev, 10, 15, 0, false); /* Restore BARs before the command register. */ pci_restore_config_space_range(pdev, 4, 9, 10, false); pci_restore_config_space_range(pdev, 0, 3, 0, false); } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { pci_restore_config_space_range(pdev, 12, 15, 0, false); /* * Force rewriting of prefetch registers to avoid S3 resume * issues on Intel PCI bridges that occur when these * registers are not explicitly written. 
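 *
 * (Dwords 9-11 of a Type 1 header are config offsets 0x24-0x2c: the
 * Prefetchable Memory Base/Limit register and the upper 32 bits of the
 * prefetchable base and limit.)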
*/ pci_restore_config_space_range(pdev, 9, 11, 0, true); pci_restore_config_space_range(pdev, 0, 8, 0, false); } else { pci_restore_config_space_range(pdev, 0, 15, 0, false); } } /** * pci_restore_state - Restore the saved state of a PCI device * @dev: PCI device that we're dealing with */ void pci_restore_state(struct pci_dev *dev) { pci_restore_pcie_state(dev); pci_restore_pasid_state(dev); pci_restore_pri_state(dev); pci_restore_ats_state(dev); pci_restore_vc_state(dev); pci_restore_rebar_state(dev); pci_restore_dpc_state(dev); pci_restore_ptm_state(dev); pci_restore_tph_state(dev); pci_aer_clear_status(dev); pci_restore_aer_state(dev); pci_restore_config_space(dev); pci_restore_pcix_state(dev); pci_restore_msi_state(dev); /* Restore ACS and IOV configuration state */ pci_enable_acs(dev); pci_restore_iov_state(dev); dev->state_saved = false; } EXPORT_SYMBOL(pci_restore_state); struct pci_saved_state { u32 config_space[16]; struct pci_cap_saved_data cap[]; }; /** * pci_store_saved_state - Allocate and return an opaque struct containing * the device saved state. * @dev: PCI device that we're dealing with * * Return NULL if no state or error. */ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev) { struct pci_saved_state *state; struct pci_cap_saved_state *tmp; struct pci_cap_saved_data *cap; size_t size; if (!dev->state_saved) return NULL; size = sizeof(*state) + sizeof(struct pci_cap_saved_data); hlist_for_each_entry(tmp, &dev->saved_cap_space, next) size += sizeof(struct pci_cap_saved_data) + tmp->cap.size; state = kzalloc(size, GFP_KERNEL); if (!state) return NULL; memcpy(state->config_space, dev->saved_config_space, sizeof(state->config_space)); cap = state->cap; hlist_for_each_entry(tmp, &dev->saved_cap_space, next) { size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size; memcpy(cap, &tmp->cap, len); cap = (struct pci_cap_saved_data *)((u8 *)cap + len); } /* Empty cap_save terminates list */ return state; } EXPORT_SYMBOL_GPL(pci_store_saved_state); /** * pci_load_saved_state - Reload the provided save state into struct pci_dev. * @dev: PCI device that we're dealing with * @state: Saved state returned from pci_store_saved_state() */ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state) { struct pci_cap_saved_data *cap; dev->state_saved = false; if (!state) return 0; memcpy(dev->saved_config_space, state->config_space, sizeof(state->config_space)); cap = state->cap; while (cap->size) { struct pci_cap_saved_state *tmp; tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended); if (!tmp || tmp->cap.size != cap->size) return -EINVAL; memcpy(tmp->cap.data, cap->data, tmp->cap.size); cap = (struct pci_cap_saved_data *)((u8 *)cap + sizeof(struct pci_cap_saved_data) + cap->size); } dev->state_saved = true; return 0; } EXPORT_SYMBOL_GPL(pci_load_saved_state); /** * pci_load_and_free_saved_state - Reload the save state pointed to by state, * and free the memory allocated for it. 
* @dev: PCI device that we're dealing with * @state: Pointer to saved state returned from pci_store_saved_state() */ int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state) { int ret = pci_load_saved_state(dev, *state); kfree(*state); *state = NULL; return ret; } EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state); int __weak pcibios_enable_device(struct pci_dev *dev, int bars) { return pci_enable_resources(dev, bars); } static int pci_host_bridge_enable_device(struct pci_dev *dev) { struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus); int err; if (host_bridge && host_bridge->enable_device) { err = host_bridge->enable_device(host_bridge, dev); if (err) return err; } return 0; } static void pci_host_bridge_disable_device(struct pci_dev *dev) { struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus); if (host_bridge && host_bridge->disable_device) host_bridge->disable_device(host_bridge, dev); } static int do_pci_enable_device(struct pci_dev *dev, int bars) { int err; struct pci_dev *bridge; u16 cmd; u8 pin; err = pci_set_power_state(dev, PCI_D0); if (err < 0 && err != -EIO) return err; bridge = pci_upstream_bridge(dev); if (bridge) pcie_aspm_powersave_config_link(bridge); err = pci_host_bridge_enable_device(dev); if (err) return err; err = pcibios_enable_device(dev, bars); if (err < 0) goto err_enable; pci_fixup_device(pci_fixup_enable, dev); if (dev->msi_enabled || dev->msix_enabled) return 0; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin) { pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_INTX_DISABLE) pci_write_config_word(dev, PCI_COMMAND, cmd & ~PCI_COMMAND_INTX_DISABLE); } return 0; err_enable: pci_host_bridge_disable_device(dev); return err; } /** * pci_reenable_device - Resume abandoned device * @dev: PCI device to be resumed * * NOTE: This function is a backend of pci_default_resume() and is not supposed * to be called by normal code, write proper resume handler and use it instead. */ int pci_reenable_device(struct pci_dev *dev) { if (pci_is_enabled(dev)) return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); return 0; } EXPORT_SYMBOL(pci_reenable_device); static void pci_enable_bridge(struct pci_dev *dev) { struct pci_dev *bridge; int retval; bridge = pci_upstream_bridge(dev); if (bridge) pci_enable_bridge(bridge); if (pci_is_enabled(dev)) { if (!dev->is_busmaster) pci_set_master(dev); return; } retval = pci_enable_device(dev); if (retval) pci_err(dev, "Error enabling bridge (%d), continuing\n", retval); pci_set_master(dev); } static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) { struct pci_dev *bridge; int err; int i, bars = 0; /* * Power state could be unknown at this point, either due to a fresh * boot or a device removal call. So get the current power state * so that things like MSI message writing will behave as expected * (e.g. if the device really is in D0 at enable time). 
*/ pci_update_current_state(dev, dev->current_state); if (atomic_inc_return(&dev->enable_cnt) > 1) return 0; /* already enabled */ bridge = pci_upstream_bridge(dev); if (bridge) pci_enable_bridge(bridge); /* only skip sriov related */ for (i = 0; i <= PCI_ROM_RESOURCE; i++) if (dev->resource[i].flags & flags) bars |= (1 << i); for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++) if (dev->resource[i].flags & flags) bars |= (1 << i); err = do_pci_enable_device(dev, bars); if (err < 0) atomic_dec(&dev->enable_cnt); return err; } /** * pci_enable_device_mem - Initialize a device for use with Memory space * @dev: PCI device to be initialized * * Initialize device before it's used by a driver. Ask low-level code * to enable Memory resources. Wake up the device if it was suspended. * Beware, this function can fail. */ int pci_enable_device_mem(struct pci_dev *dev) { return pci_enable_device_flags(dev, IORESOURCE_MEM); } EXPORT_SYMBOL(pci_enable_device_mem); /** * pci_enable_device - Initialize device before it's used by a driver. * @dev: PCI device to be initialized * * Initialize device before it's used by a driver. Ask low-level code * to enable I/O and memory. Wake up the device if it was suspended. * Beware, this function can fail. * * Note we don't actually enable the device many times if we call * this function repeatedly (we just increment the count). */ int pci_enable_device(struct pci_dev *dev) { return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); } EXPORT_SYMBOL(pci_enable_device); /* * pcibios_device_add - provide arch specific hooks when adding device dev * @dev: the PCI device being added * * Permits the platform to provide architecture specific functionality when * devices are added. This is the default implementation. Architecture * implementations can override this. */ int __weak pcibios_device_add(struct pci_dev *dev) { return 0; } /** * pcibios_release_device - provide arch specific hooks when releasing * device dev * @dev: the PCI device being released * * Permits the platform to provide architecture specific functionality when * devices are released. This is the default implementation. Architecture * implementations can override this. */ void __weak pcibios_release_device(struct pci_dev *dev) {} /** * pcibios_disable_device - disable arch specific PCI resources for device dev * @dev: the PCI device to disable * * Disables architecture specific PCI resources for the device. This * is the default implementation. Architecture implementations can * override this. */ void __weak pcibios_disable_device(struct pci_dev *dev) {} static void do_pci_disable_device(struct pci_dev *dev) { u16 pci_command; pci_read_config_word(dev, PCI_COMMAND, &pci_command); if (pci_command & PCI_COMMAND_MASTER) { pci_command &= ~PCI_COMMAND_MASTER; pci_write_config_word(dev, PCI_COMMAND, pci_command); } pcibios_disable_device(dev); } /** * pci_disable_enabled_device - Disable device without updating enable_cnt * @dev: PCI device to disable * * NOTE: This function is a backend of PCI power management routines and is * not supposed to be called drivers. */ void pci_disable_enabled_device(struct pci_dev *dev) { if (pci_is_enabled(dev)) do_pci_disable_device(dev); } /** * pci_disable_device - Disable PCI device after use * @dev: PCI device to be disabled * * Signal to the system that the PCI device is not in use by the system * anymore. This only involves disabling PCI bus-mastering, if active. 
* * Note we don't actually disable the device until all callers of * pci_enable_device() have called pci_disable_device(). */ void pci_disable_device(struct pci_dev *dev) { dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0, "disabling already-disabled device"); if (atomic_dec_return(&dev->enable_cnt) != 0) return; pci_host_bridge_disable_device(dev); do_pci_disable_device(dev); dev->is_busmaster = 0; } EXPORT_SYMBOL(pci_disable_device); /** * pcibios_set_pcie_reset_state - set reset state for device dev * @dev: the PCIe device reset * @state: Reset state to enter into * * Set the PCIe reset state for the device. This is the default * implementation. Architecture implementations can override this. */ int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) { return -EINVAL; } /** * pci_set_pcie_reset_state - set reset state for device dev * @dev: the PCIe device reset * @state: Reset state to enter into * * Sets the PCI reset state for the device. */ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) { return pcibios_set_pcie_reset_state(dev, state); } EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); #ifdef CONFIG_PCIEAER void pcie_clear_device_status(struct pci_dev *dev) { u16 sta; pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); } #endif /** * pcie_clear_root_pme_status - Clear root port PME interrupt status. * @dev: PCIe root port or event collector. */ void pcie_clear_root_pme_status(struct pci_dev *dev) { pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME); } /** * pci_check_pme_status - Check if given device has generated PME. * @dev: Device to check. * * Check the PME status of the device and if set, clear it and clear PME enable * (if set). Return 'true' if PME status and PME enable were both set or * 'false' otherwise. */ bool pci_check_pme_status(struct pci_dev *dev) { int pmcsr_pos; u16 pmcsr; bool ret = false; if (!dev->pm_cap) return false; pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; pci_read_config_word(dev, pmcsr_pos, &pmcsr); if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) return false; /* Clear PME status. */ pmcsr |= PCI_PM_CTRL_PME_STATUS; if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { /* Disable PME to avoid interrupt flood. */ pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; ret = true; } pci_write_config_word(dev, pmcsr_pos, pmcsr); return ret; } /** * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. * @dev: Device to handle. * @pme_poll_reset: Whether or not to reset the device's pme_poll flag. * * Check if @dev has generated PME and queue a resume request for it in that * case. */ static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset) { if (pme_poll_reset && dev->pme_poll) dev->pme_poll = false; if (pci_check_pme_status(dev)) { pci_wakeup_event(dev); pm_request_resume(&dev->dev); } return 0; } /** * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. * @bus: Top bus of the subtree to walk. */ void pci_pme_wakeup_bus(struct pci_bus *bus) { if (bus) pci_walk_bus(bus, pci_pme_wakeup, (void *)true); } /** * pci_pme_capable - check the capability of PCI device to generate PME# * @dev: PCI device to handle. * @state: PCI state from which device will issue PME#. 
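 *
 * Illustrative sketch (hypothetical driver suspend path, not taken from
 * this file):
 *
 *	if (pci_pme_capable(pdev, PCI_D3hot))
 *		pci_pme_active(pdev, true);
 *
 * i.e. only arm PME# generation after confirming the device can signal
 * it from the target low-power state.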
*/ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) { if (!dev->pm_cap) return false; return !!(dev->pme_support & (1 << state)); } EXPORT_SYMBOL(pci_pme_capable); static void pci_pme_list_scan(struct work_struct *work) { struct pci_pme_device *pme_dev, *n; mutex_lock(&pci_pme_list_mutex); list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { struct pci_dev *pdev = pme_dev->dev; if (pdev->pme_poll) { struct pci_dev *bridge = pdev->bus->self; struct device *dev = &pdev->dev; struct device *bdev = bridge ? &bridge->dev : NULL; int bref = 0; /* * If we have a bridge, it should be in an active/D0 * state or the configuration space of subordinate * devices may not be accessible or stable over the * course of the call. */ if (bdev) { bref = pm_runtime_get_if_active(bdev); if (!bref) continue; if (bridge->current_state != PCI_D0) goto put_bridge; } /* * The device itself should be suspended but config * space must be accessible, therefore it cannot be in * D3cold. */ if (pm_runtime_suspended(dev) && pdev->current_state != PCI_D3cold) pci_pme_wakeup(pdev, NULL); put_bridge: if (bref > 0) pm_runtime_put(bdev); } else { list_del(&pme_dev->list); kfree(pme_dev); } } if (!list_empty(&pci_pme_list)) queue_delayed_work(system_freezable_wq, &pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); mutex_unlock(&pci_pme_list_mutex); } static void __pci_pme_active(struct pci_dev *dev, bool enable) { u16 pmcsr; if (!dev->pme_support) return; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); /* Clear PME_Status by writing 1 to it and enable PME# */ pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; if (!enable) pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); } /** * pci_pme_restore - Restore PME configuration after config space restore. * @dev: PCI device to update. */ void pci_pme_restore(struct pci_dev *dev) { u16 pmcsr; if (!dev->pme_support) return; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (dev->wakeup_prepared) { pmcsr |= PCI_PM_CTRL_PME_ENABLE; pmcsr &= ~PCI_PM_CTRL_PME_STATUS; } else { pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; pmcsr |= PCI_PM_CTRL_PME_STATUS; } pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); } /** * pci_pme_active - enable or disable PCI device's PME# function * @dev: PCI device to handle. * @enable: 'true' to enable PME# generation; 'false' to disable it. * * The caller must verify that the device is capable of generating PME# before * calling this function with @enable equal to 'true'. */ void pci_pme_active(struct pci_dev *dev, bool enable) { __pci_pme_active(dev, enable); /* * PCI (as opposed to PCIe) PME requires that the device have * its PME# line hooked up correctly. Not all hardware vendors * do this, so the PME never gets delivered and the device * remains asleep. The easiest way around this is to * periodically walk the list of suspended devices and check * whether any have their PME flag set. The assumption is that * we'll wake up often enough anyway that this won't be a huge * hit, and the power savings from the devices will still be a * win. * * Although PCIe uses in-band PME message instead of PME# line * to report PME, PME does not work for some PCIe devices in * reality. For example, there are devices that set their PME * status bits, but don't really bother to send a PME message; * there are PCI Express Root Ports that don't bother to * trigger interrupts when they receive PME messages from the * devices below. So PME poll is used for PCIe devices too. 
*/ if (dev->pme_poll) { struct pci_pme_device *pme_dev; if (enable) { pme_dev = kmalloc(sizeof(struct pci_pme_device), GFP_KERNEL); if (!pme_dev) { pci_warn(dev, "can't enable PME#\n"); return; } pme_dev->dev = dev; mutex_lock(&pci_pme_list_mutex); list_add(&pme_dev->list, &pci_pme_list); if (list_is_singular(&pci_pme_list)) queue_delayed_work(system_freezable_wq, &pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); mutex_unlock(&pci_pme_list_mutex); } else { mutex_lock(&pci_pme_list_mutex); list_for_each_entry(pme_dev, &pci_pme_list, list) { if (pme_dev->dev == dev) { list_del(&pme_dev->list); kfree(pme_dev); break; } } mutex_unlock(&pci_pme_list_mutex); } } pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled"); } EXPORT_SYMBOL(pci_pme_active); /** * __pci_enable_wake - enable PCI device as wakeup event source * @dev: PCI device affected * @state: PCI state from which device will issue wakeup events * @enable: True to enable event generation; false to disable * * This enables the device as a wakeup event source, or disables it. * When such events involves platform-specific hooks, those hooks are * called automatically by this routine. * * Devices with legacy power management (no standard PCI PM capabilities) * always require such platform hooks. * * RETURN VALUE: * 0 is returned on success * -EINVAL is returned if device is not supposed to wake up the system * Error code depending on the platform is returned if both the platform and * the native mechanism fail to enable the generation of wake-up events */ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) { int ret = 0; /* * Bridges that are not power-manageable directly only signal * wakeup on behalf of subordinate devices which is set up * elsewhere, so skip them. However, bridges that are * power-manageable may signal wakeup for themselves (for example, * on a hotplug event) and they need to be covered here. */ if (!pci_power_manageable(dev)) return 0; /* Don't do the same thing twice in a row for one device. */ if (!!enable == !!dev->wakeup_prepared) return 0; /* * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don * Anderson we should be doing PME# wake enable followed by ACPI wake * enable. To disable wake-up we call the platform first, for symmetry. */ if (enable) { int error; /* * Enable PME signaling if the device can signal PME from * D3cold regardless of whether or not it can signal PME from * the current target state, because that will allow it to * signal PME when the hierarchy above it goes into D3cold and * the device itself ends up in D3cold as a result of that. */ if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold)) pci_pme_active(dev, true); else ret = 1; error = platform_pci_set_wakeup(dev, true); if (ret) ret = error; if (!ret) dev->wakeup_prepared = true; } else { platform_pci_set_wakeup(dev, false); pci_pme_active(dev, false); dev->wakeup_prepared = false; } return ret; } /** * pci_enable_wake - change wakeup settings for a PCI device * @pci_dev: Target device * @state: PCI state from which device will issue wakeup events * @enable: Whether or not to enable event generation * * If @enable is set, check device_may_wakeup() for the device before calling * __pci_enable_wake() for it. 
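 *
 * For illustration only (pdev is assumed to be the driver's device): in a
 * suspend path this is commonly paired with a power-state change, e.g.
 *
 *     pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
 *     pci_set_power_state(pdev, PCI_D3hot);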
*/ int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable) { if (enable && !device_may_wakeup(&pci_dev->dev)) return -EINVAL; return __pci_enable_wake(pci_dev, state, enable); } EXPORT_SYMBOL(pci_enable_wake); /** * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold * @dev: PCI device to prepare * @enable: True to enable wake-up event generation; false to disable * * Many drivers want the device to wake up the system from D3_hot or D3_cold * and this function allows them to set that up cleanly - pci_enable_wake() * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI * ordering constraints. * * This function only returns error code if the device is not allowed to wake * up the system from sleep or it is not capable of generating PME# from both * D3_hot and D3_cold and the platform is unable to enable wake-up power for it. */ int pci_wake_from_d3(struct pci_dev *dev, bool enable) { return pci_pme_capable(dev, PCI_D3cold) ? pci_enable_wake(dev, PCI_D3cold, enable) : pci_enable_wake(dev, PCI_D3hot, enable); } EXPORT_SYMBOL(pci_wake_from_d3); /** * pci_target_state - find an appropriate low power state for a given PCI dev * @dev: PCI device * @wakeup: Whether or not wakeup functionality will be enabled for the device. * * Use underlying platform code to find a supported low power state for @dev. * If the platform can't manage @dev, return the deepest state from which it * can generate wake events, based on any available PME info. */ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) { if (platform_pci_power_manageable(dev)) { /* * Call the platform to find the target state for the device. */ pci_power_t state = platform_pci_choose_state(dev); switch (state) { case PCI_POWER_ERROR: case PCI_UNKNOWN: return PCI_D3hot; case PCI_D1: case PCI_D2: if (pci_no_d1d2(dev)) return PCI_D3hot; } return state; } /* * If the device is in D3cold even though it's not power-manageable by * the platform, it may have been powered down by non-standard means. * Best to let it slumber. */ if (dev->current_state == PCI_D3cold) return PCI_D3cold; else if (!dev->pm_cap) return PCI_D0; if (wakeup && dev->pme_support) { pci_power_t state = PCI_D3hot; /* * Find the deepest state from which the device can generate * PME#. */ while (state && !(dev->pme_support & (1 << state))) state--; if (state) return state; else if (dev->pme_support & 1) return PCI_D0; } return PCI_D3hot; } /** * pci_prepare_to_sleep - prepare PCI device for system-wide transition * into a sleep state * @dev: Device to handle. * * Choose the power state appropriate for the device depending on whether * it can wake up the system and/or is power manageable by the platform * (PCI_D3hot is the default) and put the device into that state. */ int pci_prepare_to_sleep(struct pci_dev *dev) { bool wakeup = device_may_wakeup(&dev->dev); pci_power_t target_state = pci_target_state(dev, wakeup); int error; if (target_state == PCI_POWER_ERROR) return -EIO; pci_enable_wake(dev, target_state, wakeup); error = pci_set_power_state(dev, target_state); if (error) pci_enable_wake(dev, target_state, false); return error; } EXPORT_SYMBOL(pci_prepare_to_sleep); /** * pci_back_from_sleep - turn PCI device on during system-wide transition * into working state * @dev: Device to handle. * * Disable device's system wake-up capability and put it into D0. 
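 *
 * Illustrative sketch only: this is normally used as the counterpart of
 * pci_prepare_to_sleep(), e.g. (pdev being the driver's device)
 *
 *     pci_prepare_to_sleep(pdev);     (system suspend path)
 *     pci_back_from_sleep(pdev);      (system resume path)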
*/ int pci_back_from_sleep(struct pci_dev *dev) { int ret = pci_set_power_state(dev, PCI_D0); if (ret) return ret; pci_enable_wake(dev, PCI_D0, false); return 0; } EXPORT_SYMBOL(pci_back_from_sleep); /** * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. * @dev: PCI device being suspended. * * Prepare @dev to generate wake-up events at run time and put it into a low * power state. */ int pci_finish_runtime_suspend(struct pci_dev *dev) { pci_power_t target_state; int error; target_state = pci_target_state(dev, device_can_wakeup(&dev->dev)); if (target_state == PCI_POWER_ERROR) return -EIO; __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); error = pci_set_power_state(dev, target_state); if (error) pci_enable_wake(dev, target_state, false); return error; } /** * pci_dev_run_wake - Check if device can generate run-time wake-up events. * @dev: Device to check. * * Return true if the device itself is capable of generating wake-up events * (through the platform or using the native PCIe PME) or if the device supports * PME and one of its upstream bridges can generate wake-up events. */ bool pci_dev_run_wake(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; if (!dev->pme_support) return false; /* PME-capable in principle, but not from the target power state */ if (!pci_pme_capable(dev, pci_target_state(dev, true))) return false; if (device_can_wakeup(&dev->dev)) return true; while (bus->parent) { struct pci_dev *bridge = bus->self; if (device_can_wakeup(&bridge->dev)) return true; bus = bus->parent; } /* We have reached the root bus. */ if (bus->bridge) return device_can_wakeup(bus->bridge); return false; } EXPORT_SYMBOL_GPL(pci_dev_run_wake); /** * pci_dev_need_resume - Check if it is necessary to resume the device. * @pci_dev: Device to check. * * Return 'true' if the device is not runtime-suspended or it has to be * reconfigured due to wakeup settings difference between system and runtime * suspend, or the current power state of it is not suitable for the upcoming * (system-wide) transition. */ bool pci_dev_need_resume(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; pci_power_t target_state; if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev)) return true; target_state = pci_target_state(pci_dev, device_may_wakeup(dev)); /* * If the earlier platform check has not triggered, D3cold is just power * removal on top of D3hot, so no need to resume the device in that * case. */ return target_state != pci_dev->current_state && target_state != PCI_D3cold && pci_dev->current_state != PCI_D3hot; } /** * pci_dev_adjust_pme - Adjust PME setting for a suspended device. * @pci_dev: Device to check. * * If the device is suspended and it is not configured for system wakeup, * disable PME for it to prevent it from waking up the system unnecessarily. * * Note that if the device's power state is D3cold and the platform check in * pci_dev_need_resume() has not triggered, the device's configuration need not * be changed. */ void pci_dev_adjust_pme(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; spin_lock_irq(&dev->power.lock); if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) && pci_dev->current_state < PCI_D3cold) __pci_pme_active(pci_dev, false); spin_unlock_irq(&dev->power.lock); } /** * pci_dev_complete_resume - Finalize resume from system sleep for a device. * @pci_dev: Device to handle. 
* * If the device is runtime suspended and wakeup-capable, enable PME for it as * it might have been disabled during the prepare phase of system suspend if * the device was not configured for system wakeup. */ void pci_dev_complete_resume(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; if (!pci_dev_run_wake(pci_dev)) return; spin_lock_irq(&dev->power.lock); if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold) __pci_pme_active(pci_dev, true); spin_unlock_irq(&dev->power.lock); } /** * pci_choose_state - Choose the power state of a PCI device. * @dev: Target PCI device. * @state: Target state for the whole system. * * Returns PCI power state suitable for @dev and @state. */ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { if (state.event == PM_EVENT_ON) return PCI_D0; return pci_target_state(dev, false); } EXPORT_SYMBOL(pci_choose_state); void pci_config_pm_runtime_get(struct pci_dev *pdev) { struct device *dev = &pdev->dev; struct device *parent = dev->parent; if (parent) pm_runtime_get_sync(parent); pm_runtime_get_noresume(dev); /* * pdev->current_state is set to PCI_D3cold during suspending, * so wait until suspending completes */ pm_runtime_barrier(dev); /* * Only need to resume devices in D3cold, because config * registers are still accessible for devices suspended but * not in D3cold. */ if (pdev->current_state == PCI_D3cold) pm_runtime_resume(dev); } void pci_config_pm_runtime_put(struct pci_dev *pdev) { struct device *dev = &pdev->dev; struct device *parent = dev->parent; pm_runtime_put(dev); if (parent) pm_runtime_put_sync(parent); } static const struct dmi_system_id bridge_d3_blacklist[] = { #ifdef CONFIG_X86 { /* * Gigabyte X299 root port is not marked as hotplug capable * which allows Linux to power manage it. However, this * confuses the BIOS SMI handler so don't power manage root * ports on that system. */ .ident = "X299 DESIGNARE EX-CF", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), }, }, { /* * Downstream device is not accessible after putting a root port * into D3cold and back into D0 on Elo Continental Z2 board */ .ident = "Elo Continental Z2", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"), DMI_MATCH(DMI_BOARD_NAME, "Geminilake"), DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"), }, }, { /* * Changing power state of root port dGPU is connected fails * https://gitlab.freedesktop.org/drm/amd/-/issues/3229 */ .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_BOARD_NAME, "1972"), DMI_MATCH(DMI_BOARD_VERSION, "95.33"), }, }, #endif { } }; /** * pci_bridge_d3_possible - Is it possible to put the bridge into D3 * @bridge: Bridge to check * * Currently we only allow D3 for some PCIe ports and for Thunderbolt. * * Return: Whether it is possible to move the bridge to D3. * * The return value is guaranteed to be constant across the entire lifetime * of the bridge, including its hot-removal. */ bool pci_bridge_d3_possible(struct pci_dev *bridge) { if (!pci_is_pcie(bridge)) return false; switch (pci_pcie_type(bridge)) { case PCI_EXP_TYPE_ROOT_PORT: case PCI_EXP_TYPE_UPSTREAM: case PCI_EXP_TYPE_DOWNSTREAM: if (pci_bridge_d3_disable) return false; /* * Hotplug ports handled by platform firmware may not be put * into D3 by the OS, e.g. ACPI slots ... */ if (bridge->is_hotplug_bridge && !bridge->is_pciehp) return false; /* ... 
or PCIe hotplug ports not handled natively by the OS. */ if (bridge->is_pciehp && !pciehp_is_native(bridge)) return false; if (pci_bridge_d3_force) return true; /* Even the oldest 2010 Thunderbolt controller supports D3. */ if (bridge->is_thunderbolt) return true; /* Platform might know better if the bridge supports D3 */ if (platform_pci_bridge_d3(bridge)) return true; /* * Hotplug ports handled natively by the OS were not validated * by vendors for runtime D3 at least until 2018 because there * was no OS support. */ if (bridge->is_pciehp) return false; if (dmi_check_system(bridge_d3_blacklist)) return false; /* * Out of caution, we only allow PCIe ports from 2015 or newer * into D3 on x86. */ if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015) return true; break; } return false; } static int pci_dev_check_d3cold(struct pci_dev *dev, void *data) { bool *d3cold_ok = data; if (/* The device needs to be allowed to go D3cold ... */ dev->no_d3cold || !dev->d3cold_allowed || /* ... and if it is wakeup capable to do so from D3cold. */ (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) || /* If it is a bridge it must be allowed to go to D3. */ !pci_power_manageable(dev)) *d3cold_ok = false; return !*d3cold_ok; } /* * pci_bridge_d3_update - Update bridge D3 capabilities * @dev: PCI device which is changed * * Update upstream bridge PM capabilities accordingly depending on if the * device PM configuration was changed or the device is being removed. The * change is also propagated upstream. */ void pci_bridge_d3_update(struct pci_dev *dev) { bool remove = !device_is_registered(&dev->dev); struct pci_dev *bridge; bool d3cold_ok = true; bridge = pci_upstream_bridge(dev); if (!bridge || !pci_bridge_d3_possible(bridge)) return; /* * If D3 is currently allowed for the bridge, removing one of its * children won't change that. */ if (remove && bridge->bridge_d3) return; /* * If D3 is currently allowed for the bridge and a child is added or * changed, disallowance of D3 can only be caused by that child, so * we only need to check that single device, not any of its siblings. * * If D3 is currently not allowed for the bridge, checking the device * first may allow us to skip checking its siblings. */ if (!remove) pci_dev_check_d3cold(dev, &d3cold_ok); /* * If D3 is currently not allowed for the bridge, this may be caused * either by the device being changed/removed or any of its siblings, * so we need to go through all children to find out if one of them * continues to block D3. */ if (d3cold_ok && !bridge->bridge_d3) pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold, &d3cold_ok); if (bridge->bridge_d3 != d3cold_ok) { bridge->bridge_d3 = d3cold_ok; /* Propagate change to upstream bridges */ pci_bridge_d3_update(bridge); } } /** * pci_d3cold_enable - Enable D3cold for device * @dev: PCI device to handle * * This function can be used in drivers to enable D3cold from the device * they handle. It also updates upstream PCI bridge PM capabilities * accordingly. */ void pci_d3cold_enable(struct pci_dev *dev) { if (dev->no_d3cold) { dev->no_d3cold = false; pci_bridge_d3_update(dev); } } EXPORT_SYMBOL_GPL(pci_d3cold_enable); /** * pci_d3cold_disable - Disable D3cold for device * @dev: PCI device to handle * * This function can be used in drivers to disable D3cold from the device * they handle. It also updates upstream PCI bridge PM capabilities * accordingly. 
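 *
 * Illustrative sketch only: a driver whose hardware does not survive D3cold
 * might disable it at probe time and re-enable it on removal:
 *
 *     pci_d3cold_disable(pdev);       (in probe)
 *     pci_d3cold_enable(pdev);        (in remove)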
*/ void pci_d3cold_disable(struct pci_dev *dev) { if (!dev->no_d3cold) { dev->no_d3cold = true; pci_bridge_d3_update(dev); } } EXPORT_SYMBOL_GPL(pci_d3cold_disable); void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev) { pci_power_up(pci_dev); pci_update_current_state(pci_dev, PCI_D0); } /** * pci_pm_init - Initialize PM functions of given PCI device * @dev: PCI device to handle. */ void pci_pm_init(struct pci_dev *dev) { int pm; u16 pmc; device_enable_async_suspend(&dev->dev); dev->wakeup_prepared = false; dev->pm_cap = 0; dev->pme_support = 0; /* find PCI PM capability in list */ pm = pci_find_capability(dev, PCI_CAP_ID_PM); if (!pm) goto poweron; /* Check device's ability to generate PME# */ pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { pci_err(dev, "unsupported PM cap regs version (%u)\n", pmc & PCI_PM_CAP_VER_MASK); goto poweron; } dev->pm_cap = pm; dev->d3hot_delay = PCI_PM_D3HOT_WAIT; dev->d3cold_delay = PCI_PM_D3COLD_WAIT; dev->bridge_d3 = pci_bridge_d3_possible(dev); dev->d3cold_allowed = true; dev->d1_support = false; dev->d2_support = false; if (!pci_no_d1d2(dev)) { if (pmc & PCI_PM_CAP_D1) dev->d1_support = true; if (pmc & PCI_PM_CAP_D2) dev->d2_support = true; if (dev->d1_support || dev->d2_support) pci_info(dev, "supports%s%s\n", dev->d1_support ? " D1" : "", dev->d2_support ? " D2" : ""); } pmc &= PCI_PM_CAP_PME_MASK; if (pmc) { pci_info(dev, "PME# supported from%s%s%s%s%s\n", (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "", (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc); dev->pme_poll = true; /* * Make device's PM flags reflect the wake-up capability, but * let the user space enable it to wake up the system as needed. 
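		 * (User space typically does that by writing "enabled" to the
		 * device's power/wakeup attribute in sysfs.)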
*/ device_set_wakeup_capable(&dev->dev, true); /* Disable the PME# generation functionality */ pci_pme_active(dev, false); } poweron: pci_pm_power_up_and_verify_state(dev); pm_runtime_forbid(&dev->dev); pm_runtime_set_active(&dev->dev); pm_runtime_enable(&dev->dev); } static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop) { unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI; switch (prop) { case PCI_EA_P_MEM: case PCI_EA_P_VF_MEM: flags |= IORESOURCE_MEM; break; case PCI_EA_P_MEM_PREFETCH: case PCI_EA_P_VF_MEM_PREFETCH: flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; break; case PCI_EA_P_IO: flags |= IORESOURCE_IO; break; default: return 0; } return flags; } static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei, u8 prop) { if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO) return &dev->resource[bei]; #ifdef CONFIG_PCI_IOV else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 && (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH)) return &dev->resource[PCI_IOV_RESOURCES + bei - PCI_EA_BEI_VF_BAR0]; #endif else if (bei == PCI_EA_BEI_ROM) return &dev->resource[PCI_ROM_RESOURCE]; else return NULL; } /* Read an Enhanced Allocation (EA) entry */ static int pci_ea_read(struct pci_dev *dev, int offset) { struct resource *res; const char *res_name; int ent_size, ent_offset = offset; resource_size_t start, end; unsigned long flags; u32 dw0, bei, base, max_offset; u8 prop; bool support_64 = (sizeof(resource_size_t) >= 8); pci_read_config_dword(dev, ent_offset, &dw0); ent_offset += 4; /* Entry size field indicates DWORDs after 1st */ ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2; if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */ goto out; bei = FIELD_GET(PCI_EA_BEI, dw0); prop = FIELD_GET(PCI_EA_PP, dw0); /* * If the Property is in the reserved range, try the Secondary * Property instead. 
*/ if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED) prop = FIELD_GET(PCI_EA_SP, dw0); if (prop > PCI_EA_P_BRIDGE_IO) goto out; res = pci_ea_get_resource(dev, bei, prop); res_name = pci_resource_name(dev, bei); if (!res) { pci_err(dev, "Unsupported EA entry BEI: %u\n", bei); goto out; } flags = pci_ea_flags(dev, prop); if (!flags) { pci_err(dev, "Unsupported EA properties: %#x\n", prop); goto out; } /* Read Base */ pci_read_config_dword(dev, ent_offset, &base); start = (base & PCI_EA_FIELD_MASK); ent_offset += 4; /* Read MaxOffset */ pci_read_config_dword(dev, ent_offset, &max_offset); ent_offset += 4; /* Read Base MSBs (if 64-bit entry) */ if (base & PCI_EA_IS_64) { u32 base_upper; pci_read_config_dword(dev, ent_offset, &base_upper); ent_offset += 4; flags |= IORESOURCE_MEM_64; /* entry starts above 32-bit boundary, can't use */ if (!support_64 && base_upper) goto out; if (support_64) start |= ((u64)base_upper << 32); } end = start + (max_offset | 0x03); /* Read MaxOffset MSBs (if 64-bit entry) */ if (max_offset & PCI_EA_IS_64) { u32 max_offset_upper; pci_read_config_dword(dev, ent_offset, &max_offset_upper); ent_offset += 4; flags |= IORESOURCE_MEM_64; /* entry too big, can't use */ if (!support_64 && max_offset_upper) goto out; if (support_64) end += ((u64)max_offset_upper << 32); } if (end < start) { pci_err(dev, "EA Entry crosses address boundary\n"); goto out; } if (ent_size != ent_offset - offset) { pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n", ent_size, ent_offset - offset); goto out; } res->name = pci_name(dev); res->start = start; res->end = end; res->flags = flags; if (bei <= PCI_EA_BEI_BAR5) pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", res_name, res, prop); else if (bei == PCI_EA_BEI_ROM) pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", res_name, res, prop); else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", res_name, res, prop); else pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n", bei, res, prop); out: return offset + ent_size; } /* Enhanced Allocation Initialization */ void pci_ea_init(struct pci_dev *dev) { int ea; u8 num_ent; int offset; int i; /* find PCI EA capability in list */ ea = pci_find_capability(dev, PCI_CAP_ID_EA); if (!ea) return; /* determine the number of entries */ pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT, &num_ent); num_ent &= PCI_EA_NUM_ENT_MASK; offset = ea + PCI_EA_FIRST_ENT; /* Skip DWORD 2 for type 1 functions */ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) offset += 4; /* parse each EA entry */ for (i = 0; i < num_ent; ++i) offset = pci_ea_read(dev, offset); } static void pci_add_saved_cap(struct pci_dev *pci_dev, struct pci_cap_saved_state *new_cap) { hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); } /** * _pci_add_cap_save_buffer - allocate buffer for saving given * capability registers * @dev: the PCI device * @cap: the capability to allocate the buffer for * @extended: Standard or Extended capability ID * @size: requested size of the buffer */ static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap, bool extended, unsigned int size) { int pos; struct pci_cap_saved_state *save_state; if (extended) pos = pci_find_ext_capability(dev, cap); else pos = pci_find_capability(dev, cap); if (!pos) return 0; save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); if (!save_state) return -ENOMEM; save_state->cap.cap_nr = 
cap; save_state->cap.cap_extended = extended; save_state->cap.size = size; pci_add_saved_cap(dev, save_state); return 0; } int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size) { return _pci_add_cap_save_buffer(dev, cap, false, size); } int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size) { return _pci_add_cap_save_buffer(dev, cap, true, size); } /** * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities * @dev: the PCI device */ void pci_allocate_cap_save_buffers(struct pci_dev *dev) { int error; error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, PCI_EXP_SAVE_REGS * sizeof(u16)); if (error) pci_err(dev, "unable to preallocate PCI Express save buffer\n"); error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); if (error) pci_err(dev, "unable to preallocate PCI-X save buffer\n"); error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, 2 * sizeof(u16)); if (error) pci_err(dev, "unable to allocate suspend buffer for LTR\n"); pci_allocate_vc_save_buffers(dev); } void pci_free_cap_save_buffers(struct pci_dev *dev) { struct pci_cap_saved_state *tmp; struct hlist_node *n; hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next) kfree(tmp); } /** * pci_configure_ari - enable or disable ARI forwarding * @dev: the PCI device * * If @dev and its upstream bridge both support ARI, enable ARI in the * bridge. Otherwise, disable ARI in the bridge. */ void pci_configure_ari(struct pci_dev *dev) { u32 cap; struct pci_dev *bridge; if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn) return; bridge = dev->bus->self; if (!bridge) return; pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); if (!(cap & PCI_EXP_DEVCAP2_ARI)) return; if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) { pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI); bridge->ari_enabled = 1; } else { pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI); bridge->ari_enabled = 0; } } static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) { int pos; u16 cap, ctrl; pos = pdev->acs_cap; if (!pos) return false; /* * Except for egress control, capabilities are either required * or only required if controllable. Features missing from the * capability field can therefore be assumed as hard-wired enabled. */ pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap); acs_flags &= (cap | PCI_ACS_EC); pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl); return (ctrl & acs_flags) == acs_flags; } /** * pci_acs_enabled - test ACS against required flags for a given device * @pdev: device to test * @acs_flags: required PCI ACS flags * * Return true if the device supports the provided flags. Automatically * filters out flags that are not implemented on multifunction devices. * * Note that this interface checks the effective ACS capabilities of the * device rather than the actual capabilities. For instance, most single * function endpoints are not required to support ACS because they have no * opportunity for peer-to-peer access. We therefore return 'true' * regardless of whether the device exposes an ACS capability. This makes * it much easier for callers of this function to ignore the actual type * or topology of the device when testing ACS support. */ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) { int ret; ret = pci_dev_specific_acs_enabled(pdev, acs_flags); if (ret >= 0) return ret > 0; /* * Conventional PCI and PCI-X devices never support ACS, either * effectively or actually. 
The shared bus topology implies that * any device on the bus can receive or snoop DMA. */ if (!pci_is_pcie(pdev)) return false; switch (pci_pcie_type(pdev)) { /* * PCI/X-to-PCIe bridges are not specifically mentioned by the spec, * but since their primary interface is PCI/X, we conservatively * handle them as we would a non-PCIe device. */ case PCI_EXP_TYPE_PCIE_BRIDGE: /* * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never * applicable... must never implement an ACS Extended Capability...". * This seems arbitrary, but we take a conservative interpretation * of this statement. */ case PCI_EXP_TYPE_PCI_BRIDGE: case PCI_EXP_TYPE_RC_EC: return false; /* * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should * implement ACS in order to indicate their peer-to-peer capabilities, * regardless of whether they are single- or multi-function devices. */ case PCI_EXP_TYPE_DOWNSTREAM: case PCI_EXP_TYPE_ROOT_PORT: return pci_acs_flags_enabled(pdev, acs_flags); /* * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be * implemented by the remaining PCIe types to indicate peer-to-peer * capabilities, but only when they are part of a multifunction * device. The footnote for section 6.12 indicates the specific * PCIe types included here. */ case PCI_EXP_TYPE_ENDPOINT: case PCI_EXP_TYPE_UPSTREAM: case PCI_EXP_TYPE_LEG_END: case PCI_EXP_TYPE_RC_END: if (!pdev->multifunction) break; return pci_acs_flags_enabled(pdev, acs_flags); } /* * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable * to single function devices with the exception of downstream ports. */ return true; } /** * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy * @start: starting downstream device * @end: ending upstream device or NULL to search to the root bus * @acs_flags: required flags * * Walk up a device tree from start to end testing PCI ACS support. If * any step along the way does not support the required flags, return false. */ bool pci_acs_path_enabled(struct pci_dev *start, struct pci_dev *end, u16 acs_flags) { struct pci_dev *pdev, *parent = start; do { pdev = parent; if (!pci_acs_enabled(pdev, acs_flags)) return false; if (pci_is_root_bus(pdev->bus)) return (end == NULL); parent = pdev->bus->self; } while (pdev != end); return true; } /** * pci_acs_init - Initialize ACS if hardware supports it * @dev: the PCI device */ void pci_acs_init(struct pci_dev *dev) { dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); /* * Attempt to enable ACS regardless of capability because some Root * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have * the standard ACS capability but still support ACS via those * quirks. */ pci_enable_acs(dev); } /** * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port * @dev: the PCI device * @cap_mask: mask of desired AtomicOp sizes, including one or more of: * PCI_EXP_DEVCAP2_ATOMIC_COMP32 * PCI_EXP_DEVCAP2_ATOMIC_COMP64 * PCI_EXP_DEVCAP2_ATOMIC_COMP128 * * Return 0 if all upstream bridges support AtomicOp routing, egress * blocking is disabled on all upstream ports, and the root port supports * the requested completion capabilities (32-bit, 64-bit and/or 128-bit * AtomicOp completion), or negative otherwise. 
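 *
 * Illustrative sketch only (pdev is the requesting endpoint): a driver that
 * issues 64-bit AtomicOps to host memory might do
 *
 *     rc = pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 *     if (rc)
 *         pci_info(pdev, "AtomicOps to the Root Port are not supported\n");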
*/ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask) { struct pci_bus *bus = dev->bus; struct pci_dev *bridge; u32 cap, ctl2; /* * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit * in Device Control 2 is reserved in VFs and the PF value applies * to all associated VFs. */ if (dev->is_virtfn) return -EINVAL; if (!pci_is_pcie(dev)) return -EINVAL; /* * Per PCIe r4.0, sec 6.15, endpoints and root ports may be * AtomicOp requesters. For now, we only support endpoints as * requesters and root ports as completers. No endpoints as * completers, and no peer-to-peer. */ switch (pci_pcie_type(dev)) { case PCI_EXP_TYPE_ENDPOINT: case PCI_EXP_TYPE_LEG_END: case PCI_EXP_TYPE_RC_END: break; default: return -EINVAL; } while (bus->parent) { bridge = bus->self; pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); switch (pci_pcie_type(bridge)) { /* Ensure switch ports support AtomicOp routing */ case PCI_EXP_TYPE_UPSTREAM: case PCI_EXP_TYPE_DOWNSTREAM: if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) return -EINVAL; break; /* Ensure root port supports all the sizes we care about */ case PCI_EXP_TYPE_ROOT_PORT: if ((cap & cap_mask) != cap_mask) return -EINVAL; break; } /* Ensure upstream ports don't block AtomicOps on egress */ if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) { pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2); if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK) return -EINVAL; } bus = bus->parent; } pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ATOMIC_REQ); return 0; } EXPORT_SYMBOL(pci_enable_atomic_ops_to_root); /** * pci_release_region - Release a PCI bar * @pdev: PCI device whose resources were previously reserved by * pci_request_region() * @bar: BAR to release * * Releases the PCI I/O and memory resources previously reserved by a * successful call to pci_request_region(). Call this function only * after all use of the PCI regions has ceased. */ void pci_release_region(struct pci_dev *pdev, int bar) { if (!pci_bar_index_is_valid(bar)) return; if (pci_resource_len(pdev, bar) == 0) return; if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) release_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar)); else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) release_mem_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar)); } EXPORT_SYMBOL(pci_release_region); /** * __pci_request_region - Reserved PCI I/O and memory resource * @pdev: PCI device whose resources are to be reserved * @bar: BAR to be reserved * @name: name of the driver requesting the resource * @exclusive: whether the region access is exclusive or not * * Returns: 0 on success, negative error code on failure. * * Mark the PCI region associated with PCI device @pdev BAR @bar as being * reserved by owner @name. Do not access any address inside the PCI regions * unless this call returns successfully. * * If @exclusive is set, then the region is marked so that userspace * is explicitly not allowed to map the resource via /dev/mem or * sysfs MMIO access. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. 
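 *
 * Illustrative sketch only: drivers normally use the public wrappers rather
 * than this helper; a typical probe sequence ("my_driver" is a placeholder
 * name) is
 *
 *     rc = pci_request_region(pdev, 0, "my_driver");
 *     if (rc)
 *         return rc;
 *
 * with the matching pci_release_region(pdev, 0) in the teardown path.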
*/ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *name, int exclusive) { if (!pci_bar_index_is_valid(bar)) return -EINVAL; if (pci_resource_len(pdev, bar) == 0) return 0; if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { if (!request_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar), name)) goto err_out; } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { if (!__request_mem_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar), name, exclusive)) goto err_out; } return 0; err_out: pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar, &pdev->resource[bar]); return -EBUSY; } /** * pci_request_region - Reserve PCI I/O and memory resource * @pdev: PCI device whose resources are to be reserved * @bar: BAR to be reserved * @name: name of the driver requesting the resource * * Returns: 0 on success, negative error code on failure. * * Mark the PCI region associated with PCI device @pdev BAR @bar as being * reserved by owner @name. Do not access any address inside the PCI regions * unless this call returns successfully. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ int pci_request_region(struct pci_dev *pdev, int bar, const char *name) { return __pci_request_region(pdev, bar, name, 0); } EXPORT_SYMBOL(pci_request_region); /** * pci_release_selected_regions - Release selected PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved * @bars: Bitmask of BARs to be released * * Release selected PCI I/O and memory resources previously reserved. * Call this function only after all use of the PCI regions has ceased. */ void pci_release_selected_regions(struct pci_dev *pdev, int bars) { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) if (bars & (1 << i)) pci_release_region(pdev, i); } EXPORT_SYMBOL(pci_release_selected_regions); static int __pci_request_selected_regions(struct pci_dev *pdev, int bars, const char *name, int excl) { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) if (bars & (1 << i)) if (__pci_request_region(pdev, i, name, excl)) goto err_out; return 0; err_out: while (--i >= 0) if (bars & (1 << i)) pci_release_region(pdev, i); return -EBUSY; } /** * pci_request_selected_regions - Reserve selected PCI I/O and memory resources * @pdev: PCI device whose resources are to be reserved * @bars: Bitmask of BARs to be requested * @name: Name of the driver requesting the resources * * Returns: 0 on success, negative error code on failure. */ int pci_request_selected_regions(struct pci_dev *pdev, int bars, const char *name) { return __pci_request_selected_regions(pdev, bars, name, 0); } EXPORT_SYMBOL(pci_request_selected_regions); /** * pci_request_selected_regions_exclusive - Request regions exclusively * @pdev: PCI device to request regions from * @bars: bit mask of BARs to request * @name: name of the driver requesting the resources * * Returns: 0 on success, negative error code on failure. */ int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars, const char *name) { return __pci_request_selected_regions(pdev, bars, name, IORESOURCE_EXCLUSIVE); } EXPORT_SYMBOL(pci_request_selected_regions_exclusive); /** * pci_release_regions - Release reserved PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved by * pci_request_regions() * * Releases all PCI I/O and memory resources previously reserved by a * successful call to pci_request_regions(). 
Call this function only * after all use of the PCI regions has ceased. */ void pci_release_regions(struct pci_dev *pdev) { pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1); } EXPORT_SYMBOL(pci_release_regions); /** * pci_request_regions - Reserve PCI I/O and memory resources * @pdev: PCI device whose resources are to be reserved * @name: name of the driver requesting the resources * * Mark all PCI regions associated with PCI device @pdev as being reserved by * owner @name. Do not access any address inside the PCI regions unless this * call returns successfully. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ int pci_request_regions(struct pci_dev *pdev, const char *name) { return pci_request_selected_regions(pdev, ((1 << PCI_STD_NUM_BARS) - 1), name); } EXPORT_SYMBOL(pci_request_regions); /** * pci_request_regions_exclusive - Reserve PCI I/O and memory resources * @pdev: PCI device whose resources are to be reserved * @name: name of the driver requesting the resources * * Returns: 0 on success, negative error code on failure. * * Mark all PCI regions associated with PCI device @pdev as being reserved * by owner @name. Do not access any address inside the PCI regions * unless this call returns successfully. * * pci_request_regions_exclusive() will mark the region so that /dev/mem * and the sysfs MMIO access will not be allowed. * * Returns 0 on success, or %EBUSY on error. A warning message is also * printed on failure. */ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name) { return pci_request_selected_regions_exclusive(pdev, ((1 << PCI_STD_NUM_BARS) - 1), name); } EXPORT_SYMBOL(pci_request_regions_exclusive); /* * Record the PCI IO range (expressed as CPU physical address + size). * Return a negative value if an error has occurred, zero otherwise */ int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr, resource_size_t size) { int ret = 0; #ifdef PCI_IOBASE struct logic_pio_hwaddr *range; if (!size || addr + size < addr) return -EINVAL; range = kzalloc(sizeof(*range), GFP_ATOMIC); if (!range) return -ENOMEM; range->fwnode = fwnode; range->size = size; range->hw_start = addr; range->flags = LOGIC_PIO_CPU_MMIO; ret = logic_pio_register_range(range); if (ret) kfree(range); /* Ignore duplicates due to deferred probing */ if (ret == -EEXIST) ret = 0; #endif return ret; } phys_addr_t pci_pio_to_address(unsigned long pio) { #ifdef PCI_IOBASE if (pio < MMIO_UPPER_LIMIT) return logic_pio_to_hwaddr(pio); #endif return (phys_addr_t) OF_BAD_ADDR; } EXPORT_SYMBOL_GPL(pci_pio_to_address); unsigned long __weak pci_address_to_pio(phys_addr_t address) { #ifdef PCI_IOBASE return logic_pio_trans_cpuaddr(address); #else if (address > IO_SPACE_LIMIT) return (unsigned long)-1; return (unsigned long) address; #endif } /** * pci_remap_iospace - Remap the memory mapped I/O space * @res: Resource describing the I/O space * @phys_addr: physical address of range to be mapped * * Remap the memory mapped I/O space described by the @res and the CPU * physical address @phys_addr into virtual address space. Only * architectures that have memory mapped IO functions defined (and the * PCI_IOBASE value defined) should call this function. 
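 *
 * Illustrative sketch only (res and phys are assumed to describe the host
 * bridge's I/O window, e.g. taken from its firmware description):
 *
 *     err = pci_remap_iospace(&res, phys);
 *     if (err)
 *         pr_warn("failed to remap PCI I/O window: %d\n", err);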
*/ #ifndef pci_remap_iospace int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) { #if defined(PCI_IOBASE) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; if (!(res->flags & IORESOURCE_IO)) return -EINVAL; if (res->end > IO_SPACE_LIMIT) return -EINVAL; return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr, pgprot_device(PAGE_KERNEL)); #else /* * This architecture does not have memory mapped I/O space, * so this function should never be called */ WARN_ONCE(1, "This architecture does not support memory mapped I/O\n"); return -ENODEV; #endif } EXPORT_SYMBOL(pci_remap_iospace); #endif /** * pci_unmap_iospace - Unmap the memory mapped I/O space * @res: resource to be unmapped * * Unmap the CPU virtual address @res from virtual address space. Only * architectures that have memory mapped IO functions defined (and the * PCI_IOBASE value defined) should call this function. */ void pci_unmap_iospace(struct resource *res) { #if defined(PCI_IOBASE) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; vunmap_range(vaddr, vaddr + resource_size(res)); #endif } EXPORT_SYMBOL(pci_unmap_iospace); static void __pci_set_master(struct pci_dev *dev, bool enable) { u16 old_cmd, cmd; pci_read_config_word(dev, PCI_COMMAND, &old_cmd); if (enable) cmd = old_cmd | PCI_COMMAND_MASTER; else cmd = old_cmd & ~PCI_COMMAND_MASTER; if (cmd != old_cmd) { pci_dbg(dev, "%s bus mastering\n", enable ? "enabling" : "disabling"); pci_write_config_word(dev, PCI_COMMAND, cmd); } dev->is_busmaster = enable; } /** * pcibios_setup - process "pci=" kernel boot arguments * @str: string used to pass in "pci=" kernel boot arguments * * Process kernel boot arguments. This is the default implementation. * Architecture specific implementations can override this as necessary. */ char * __weak __init pcibios_setup(char *str) { return str; } /** * pcibios_set_master - enable PCI bus-mastering for device dev * @dev: the PCI device to enable * * Enables PCI bus-mastering for the device. This is the default * implementation. Architecture specific implementations can override * this if necessary. */ void __weak pcibios_set_master(struct pci_dev *dev) { u8 lat; /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */ if (pci_is_pcie(dev)) return; pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); if (lat < 16) lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; else if (lat > pcibios_max_latency) lat = pcibios_max_latency; else return; pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); } /** * pci_set_master - enables bus-mastering for device dev * @dev: the PCI device to enable * * Enables bus-mastering on the device and calls pcibios_set_master() * to do the needed arch specific settings. */ void pci_set_master(struct pci_dev *dev) { __pci_set_master(dev, true); pcibios_set_master(dev); } EXPORT_SYMBOL(pci_set_master); /** * pci_clear_master - disables bus-mastering for device dev * @dev: the PCI device to disable */ void pci_clear_master(struct pci_dev *dev) { __pci_set_master(dev, false); } EXPORT_SYMBOL(pci_clear_master); /** * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed * @dev: the PCI device for which MWI is to be enabled * * Helper function for pci_set_mwi. * Originally copied from drivers/net/acenic.c. * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
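 *
 * Note for illustration: drivers rarely call this directly; it is invoked
 * from pci_set_mwi()/pci_try_set_mwi(), and the usual driver-side call is
 * simply
 *
 *     pci_try_set_mwi(pdev);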
*/ int pci_set_cacheline_size(struct pci_dev *dev) { u8 cacheline_size; if (!pci_cache_line_size) return -EINVAL; /* Validate current setting: the PCI_CACHE_LINE_SIZE must be equal to or multiple of the right value. */ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); if (cacheline_size >= pci_cache_line_size && (cacheline_size % pci_cache_line_size) == 0) return 0; /* Write the correct value. */ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size); /* Read it back. */ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); if (cacheline_size == pci_cache_line_size) return 0; pci_dbg(dev, "cache line size of %d is not supported\n", pci_cache_line_size << 2); return -EINVAL; } EXPORT_SYMBOL_GPL(pci_set_cacheline_size); /** * pci_set_mwi - enables memory-write-invalidate PCI transaction * @dev: the PCI device for which MWI is enabled * * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. */ int pci_set_mwi(struct pci_dev *dev) { #ifdef PCI_DISABLE_MWI return 0; #else int rc; u16 cmd; rc = pci_set_cacheline_size(dev); if (rc) return rc; pci_read_config_word(dev, PCI_COMMAND, &cmd); if (!(cmd & PCI_COMMAND_INVALIDATE)) { pci_dbg(dev, "enabling Mem-Wr-Inval\n"); cmd |= PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; #endif } EXPORT_SYMBOL(pci_set_mwi); /** * pci_try_set_mwi - enables memory-write-invalidate PCI transaction * @dev: the PCI device for which MWI is enabled * * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. * Callers are not required to check the return value. * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. */ int pci_try_set_mwi(struct pci_dev *dev) { #ifdef PCI_DISABLE_MWI return 0; #else return pci_set_mwi(dev); #endif } EXPORT_SYMBOL(pci_try_set_mwi); /** * pci_clear_mwi - disables Memory-Write-Invalidate for device dev * @dev: the PCI device to disable * * Disables PCI Memory-Write-Invalidate transaction on the device */ void pci_clear_mwi(struct pci_dev *dev) { #ifndef PCI_DISABLE_MWI u16 cmd; pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_INVALIDATE) { cmd &= ~PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } #endif } EXPORT_SYMBOL(pci_clear_mwi); /** * pci_disable_parity - disable parity checking for device * @dev: the PCI device to operate on * * Disable parity checking for device @dev */ void pci_disable_parity(struct pci_dev *dev) { u16 cmd; pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_PARITY) { cmd &= ~PCI_COMMAND_PARITY; pci_write_config_word(dev, PCI_COMMAND, cmd); } } /** * pci_intx - enables/disables PCI INTx for device dev * @pdev: the PCI device to operate on * @enable: boolean: whether to enable or disable PCI INTx * * Enables/disables PCI INTx for device @pdev */ void pci_intx(struct pci_dev *pdev, int enable) { u16 pci_command, new; pci_read_config_word(pdev, PCI_COMMAND, &pci_command); if (enable) new = pci_command & ~PCI_COMMAND_INTX_DISABLE; else new = pci_command | PCI_COMMAND_INTX_DISABLE; if (new == pci_command) return; pci_write_config_word(pdev, PCI_COMMAND, new); } EXPORT_SYMBOL_GPL(pci_intx); /** * pci_wait_for_pending_transaction - wait for pending transaction * @dev: the PCI device to operate on * * Return 0 if transaction is pending 1 otherwise. 
*/ int pci_wait_for_pending_transaction(struct pci_dev *dev) { if (!pci_is_pcie(dev)) return 1; return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); } EXPORT_SYMBOL(pci_wait_for_pending_transaction); /** * pcie_flr - initiate a PCIe function level reset * @dev: device to reset * * Initiate a function level reset unconditionally on @dev without * checking any flags and DEVCAP */ int pcie_flr(struct pci_dev *dev) { if (!pci_wait_for_pending_transaction(dev)) pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); if (dev->imm_ready) return 0; /* * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within * 100ms, but may silently discard requests while the FLR is in * progress. Wait 100ms before trying to access the device. */ msleep(100); return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS); } EXPORT_SYMBOL_GPL(pcie_flr); /** * pcie_reset_flr - initiate a PCIe function level reset * @dev: device to reset * @probe: if true, return 0 if device can be reset this way * * Initiate a function level reset on @dev. */ int pcie_reset_flr(struct pci_dev *dev, bool probe) { if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) return -ENOTTY; if (!(dev->devcap & PCI_EXP_DEVCAP_FLR)) return -ENOTTY; if (probe) return 0; return pcie_flr(dev); } EXPORT_SYMBOL_GPL(pcie_reset_flr); static int pci_af_flr(struct pci_dev *dev, bool probe) { int pos; u8 cap; pos = pci_find_capability(dev, PCI_CAP_ID_AF); if (!pos) return -ENOTTY; if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) return -ENOTTY; pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap); if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) return -ENOTTY; if (probe) return 0; /* * Wait for Transaction Pending bit to clear. A word-aligned test * is used, so we use the control offset rather than status and shift * the test bit to match. */ if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL, PCI_AF_STATUS_TP << 8)) pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n"); pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); if (dev->imm_ready) return 0; /* * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006, * updated 27 July 2006; a device must complete an FLR within * 100ms, but may silently discard requests while the FLR is in * progress. Wait 100ms before trying to access the device. */ msleep(100); return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS); } /** * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0. * @dev: Device to reset. * @probe: if true, return 0 if the device can be reset this way. * * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is * unset, it will be reinitialized internally when going from PCI_D3hot to * PCI_D0. If that's the case and the device is not in a low-power state * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset. * * NOTE: This causes the caller to sleep for twice the device power transition * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms * by default (i.e. unless the @dev's d3hot_delay field has a different value). * Moreover, only devices in D0 can be reset by this function. 
*/ static int pci_pm_reset(struct pci_dev *dev, bool probe) { u16 csr; if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET) return -ENOTTY; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); if (csr & PCI_PM_CTRL_NO_SOFT_RESET) return -ENOTTY; if (probe) return 0; if (dev->current_state != PCI_D0) return -EINVAL; csr &= ~PCI_PM_CTRL_STATE_MASK; csr |= PCI_D3hot; pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); pci_dev_d3_sleep(dev); csr &= ~PCI_PM_CTRL_STATE_MASK; csr |= PCI_D0; pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); pci_dev_d3_sleep(dev); return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS); } /** * pcie_wait_for_link_status - Wait for link status change * @pdev: Device whose link to wait for. * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE. * @active: Waiting for active or inactive? * * Return 0 if successful, or -ETIMEDOUT if status has not changed within * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. */ static int pcie_wait_for_link_status(struct pci_dev *pdev, bool use_lt, bool active) { u16 lnksta_mask, lnksta_match; unsigned long end_jiffies; u16 lnksta; lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA; lnksta_match = active ? lnksta_mask : 0; end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS); do { pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); if ((lnksta & lnksta_mask) == lnksta_match) return 0; msleep(1); } while (time_before(jiffies, end_jiffies)); return -ETIMEDOUT; } /** * pcie_retrain_link - Request a link retrain and wait for it to complete * @pdev: Device whose link to retrain. * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status. * * Trigger retraining of the PCIe Link and wait for the completion of the * retraining. As link retraining is known to asserts LBMS and may change * the Link Speed, LBMS is cleared after the retraining and the Link Speed * of the subordinate bus is updated. * * Retrain completion status is retrieved from the Link Status Register * according to @use_lt. It is not verified whether the use of the DLLLA * bit is valid. * * Return 0 if successful, or -ETIMEDOUT if training has not completed * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. */ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) { int rc; /* * Ensure the updated LNKCTL parameters are used during link * training by checking that there is no ongoing link training that * may have started before link parameters were changed, so as to * avoid LTSSM race as recommended in Implementation Note at the end * of PCIe r6.1 sec 7.5.3.7. */ rc = pcie_wait_for_link_status(pdev, true, false); if (rc) return rc; pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); if (pdev->clear_retrain_link) { /* * Due to an erratum in some devices the Retrain Link bit * needs to be cleared again manually to allow the link * training to succeed. */ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); } rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt); /* * Clear LBMS after a manual retrain so that the bit can be used * to track link speed or width changes made by hardware itself * in attempt to correct unreliable link operation. */ pcie_reset_lbms(pdev); /* * Ensure the Link Speed updates after retraining in case the Link * Speed was changed because of the retraining. 
While the bwctrl's * IRQ handler normally picks up the new Link Speed, clearing LBMS * races with the IRQ handler reading the Link Status register and * can result in the handler returning early without updating the * Link Speed. */ if (pdev->subordinate) pcie_update_link_speed(pdev->subordinate); return rc; } /** * pcie_wait_for_link_delay - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? * @delay: Delay to wait after link has become active (in ms) * * Use this to wait till link becomes active or inactive. */ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay) { int rc; /* * Some controllers might not implement link active reporting. In this * case, we wait for 1000 ms + any delay requested by the caller. */ if (!pdev->link_active_reporting) { msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay); return true; } /* * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms, * after which we should expect the link to be active if the reset was * successful. If so, software must wait a minimum 100ms before sending * configuration requests to devices downstream this port. * * If the link fails to activate, either the device was physically * removed or the link is permanently failed. */ if (active) msleep(20); rc = pcie_wait_for_link_status(pdev, false, active); if (active) { if (rc) rc = pcie_failed_link_retrain(pdev); if (rc) return false; msleep(delay); return true; } if (rc) return false; return true; } /** * pcie_wait_for_link - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? * * Use this to wait till link becomes active or inactive. */ bool pcie_wait_for_link(struct pci_dev *pdev, bool active) { return pcie_wait_for_link_delay(pdev, active, 100); } /* * Find maximum D3cold delay required by all the devices on the bus. The * spec says 100 ms, but firmware can lower it and we allow drivers to * increase it as well. * * Called with @pci_bus_sem locked for reading. */ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus) { const struct pci_dev *pdev; int min_delay = 100; int max_delay = 0; list_for_each_entry(pdev, &bus->devices, bus_list) { if (pdev->d3cold_delay < min_delay) min_delay = pdev->d3cold_delay; if (pdev->d3cold_delay > max_delay) max_delay = pdev->d3cold_delay; } return max(min_delay, max_delay); } /** * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible * @dev: PCI bridge * @reset_type: reset type in human-readable form * * Handle necessary delays before access to the devices on the secondary * side of the bridge are permitted after D3cold to D0 transition * or Conventional Reset. * * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section * 4.3.2. * * Return 0 on success or -ENOTTY if the first device on the secondary bus * failed to become accessible. */ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) { struct pci_dev *child __free(pci_dev_put) = NULL; int delay; if (pci_dev_is_disconnected(dev)) return 0; if (!pci_is_bridge(dev)) return 0; down_read(&pci_bus_sem); /* * We only deal with devices that are present currently on the bus. * For any hot-added devices the access delay is handled in pciehp * board_added(). In case of ACPI hotplug the firmware is expected * to configure the devices before OS is notified. 
*/ if (!dev->subordinate || list_empty(&dev->subordinate->devices)) { up_read(&pci_bus_sem); return 0; } /* Take d3cold_delay requirements into account */ delay = pci_bus_max_d3cold_delay(dev->subordinate); if (!delay) { up_read(&pci_bus_sem); return 0; } child = pci_dev_get(list_first_entry(&dev->subordinate->devices, struct pci_dev, bus_list)); up_read(&pci_bus_sem); /* * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before * accessing the device after reset (that is 1000 ms + 100 ms). */ if (!pci_is_pcie(dev)) { pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay); msleep(1000 + delay); return 0; } /* * For PCIe downstream and root ports that do not support speeds * greater than 5 GT/s need to wait minimum 100 ms. For higher * speeds (gen3) we need to wait first for the data link layer to * become active. * * However, 100 ms is the minimum and the PCIe spec says the * software must allow at least 1s before it can determine that the * device that did not respond is a broken device. Also device can * take longer than that to respond if it indicates so through Request * Retry Status completions. * * Therefore we wait for 100 ms and check for the device presence * until the timeout expires. */ if (!pcie_downstream_port(dev)) return 0; if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { u16 status; pci_dbg(dev, "waiting %d ms for downstream link\n", delay); msleep(delay); if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay)) return 0; /* * If the port supports active link reporting we now check * whether the link is active and if not bail out early with * the assumption that the device is not present anymore. */ if (!dev->link_active_reporting) return -ENOTTY; pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status); if (!(status & PCI_EXP_LNKSTA_DLLLA)) return -ENOTTY; return pci_dev_wait(child, reset_type, PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT); } pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", delay); if (!pcie_wait_for_link_delay(dev, true, delay)) { /* Did not train, no need to wait any further */ pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay); return -ENOTTY; } return pci_dev_wait(child, reset_type, PCIE_RESET_READY_POLL_MS - delay); } void pci_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); ctrl |= PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); /* * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double * this to 2ms to ensure that we meet the minimum requirement. */ msleep(2); ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); } void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) { pci_reset_secondary_bus(dev); } /** * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge. * @dev: Bridge device * * Use the bridge control register to assert reset on the secondary bus. * Devices on the secondary bus are left in power-on state. 
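 *
 * An illustrative sketch of a caller (a hypothetical error-recovery path
 * that has already taken pci_dev_lock() on the bridge; not code from this
 * file):
 *
 *	rc = pci_bridge_secondary_bus_reset(bridge);
 *	if (rc)
 *		pci_err(bridge, "secondary bus reset failed: %d\n", rc);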
*/ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) { if (!dev->block_cfg_access) pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n", __builtin_return_address(0)); pcibios_reset_secondary_bus(dev); return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); } EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); static int pci_parent_bus_reset(struct pci_dev *dev, bool probe) { struct pci_dev *pdev; if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; list_for_each_entry(pdev, &dev->bus->devices, bus_list) if (pdev != dev) return -ENOTTY; if (probe) return 0; return pci_bridge_secondary_bus_reset(dev->bus->self); } static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe) { int rc = -ENOTTY; if (!hotplug || !try_module_get(hotplug->owner)) return rc; if (hotplug->ops->reset_slot) rc = hotplug->ops->reset_slot(hotplug, probe); module_put(hotplug->owner); return rc; } static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe) { if (dev->multifunction || dev->subordinate || !dev->slot || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; return pci_reset_hotplug_slot(dev->slot->hotplug, probe); } static u16 cxl_port_dvsec(struct pci_dev *dev) { return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL, PCI_DVSEC_CXL_PORT); } static bool cxl_sbr_masked(struct pci_dev *dev) { u16 dvsec, reg; int rc; dvsec = cxl_port_dvsec(dev); if (!dvsec) return false; rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg); if (rc || PCI_POSSIBLE_ERROR(reg)) return false; /* * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR * bit in Bridge Control has no effect. When 1, the Port generates * hot reset when the SBR bit is set to 1. */ if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) return false; return true; } static int pci_reset_bus_function(struct pci_dev *dev, bool probe) { struct pci_dev *bridge = pci_upstream_bridge(dev); int rc; /* * If "dev" is below a CXL port that has SBR control masked, SBR * won't do anything, so return error. */ if (bridge && cxl_sbr_masked(bridge)) { if (probe) return 0; return -ENOTTY; } rc = pci_dev_reset_slot_function(dev, probe); if (rc != -ENOTTY) return rc; return pci_parent_bus_reset(dev, probe); } static int cxl_reset_bus_function(struct pci_dev *dev, bool probe) { struct pci_dev *bridge; u16 dvsec, reg, val; int rc; bridge = pci_upstream_bridge(dev); if (!bridge) return -ENOTTY; dvsec = cxl_port_dvsec(bridge); if (!dvsec) return -ENOTTY; if (probe) return 0; rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg); if (rc) return -ENOTTY; if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) { val = reg; } else { val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR; pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, val); } rc = pci_reset_bus_function(dev, probe); if (reg != val) pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, reg); return rc; } void pci_dev_lock(struct pci_dev *dev) { /* block PM suspend, driver probe, etc. 
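 *
 * A minimal sketch of the expected pairing in a hypothetical caller
 * (illustration only):
 *
 *	pci_dev_lock(pdev);
 *	... touch config space, reset pdev, etc. ...
 *	pci_dev_unlock(pdev);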
*/ device_lock(&dev->dev); pci_cfg_access_lock(dev); } EXPORT_SYMBOL_GPL(pci_dev_lock); /* Return 1 on successful lock, 0 on contention */ int pci_dev_trylock(struct pci_dev *dev) { if (device_trylock(&dev->dev)) { if (pci_cfg_access_trylock(dev)) return 1; device_unlock(&dev->dev); } return 0; } EXPORT_SYMBOL_GPL(pci_dev_trylock); void pci_dev_unlock(struct pci_dev *dev) { pci_cfg_access_unlock(dev); device_unlock(&dev->dev); } EXPORT_SYMBOL_GPL(pci_dev_unlock); static void pci_dev_save_and_disable(struct pci_dev *dev) { const struct pci_error_handlers *err_handler = dev->driver ? dev->driver->err_handler : NULL; /* * dev->driver->err_handler->reset_prepare() is protected against * races with ->remove() by the device lock, which must be held by * the caller. */ if (err_handler && err_handler->reset_prepare) err_handler->reset_prepare(dev); else if (dev->driver) pci_warn(dev, "resetting"); /* * Wake-up device prior to save. PM registers default to D0 after * reset and a simple register restore doesn't reliably return * to a non-D0 state anyway. */ pci_set_power_state(dev, PCI_D0); pci_save_state(dev); /* * Disable the device by clearing the Command register, except for * INTx-disable which is set. This not only disables MMIO and I/O port * BARs, but also prevents the device from being Bus Master, preventing * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3 * compliant devices, INTx-disable prevents legacy interrupts. */ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); } static void pci_dev_restore(struct pci_dev *dev) { const struct pci_error_handlers *err_handler = dev->driver ? dev->driver->err_handler : NULL; pci_restore_state(dev); /* * dev->driver->err_handler->reset_done() is protected against * races with ->remove() by the device lock, which must be held by * the caller. */ if (err_handler && err_handler->reset_done) err_handler->reset_done(dev); else if (dev->driver) pci_warn(dev, "reset done"); } /* dev->reset_methods[] is a 0-terminated list of indices into this array */ const struct pci_reset_fn_method pci_reset_fn_methods[] = { { }, { pci_dev_specific_reset, .name = "device_specific" }, { pci_dev_acpi_reset, .name = "acpi" }, { pcie_reset_flr, .name = "flr" }, { pci_af_flr, .name = "af_flr" }, { pci_pm_reset, .name = "pm" }, { pci_reset_bus_function, .name = "bus" }, { cxl_reset_bus_function, .name = "cxl_bus" }, }; /** * __pci_reset_function_locked - reset a PCI device function while holding * the @dev mutex lock. * @dev: PCI device to reset * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be responsive * to PCI config space in order to use this function. * * The device function is presumed to be unused and the caller is holding * the device mutex lock when this function is called. * * Resetting the device will make the contents of PCI configuration space * random, so any caller of this must be prepared to reinitialise the * device including MSI, bus mastering, BARs, decoding IO and memory spaces, * etc. * * Returns 0 if the device function was successfully reset or negative if the * device doesn't support resetting a single function. */ int __pci_reset_function_locked(struct pci_dev *dev) { int i, m, rc; const struct pci_reset_fn_method *method; might_sleep(); /* * A reset method returns -ENOTTY if it doesn't support this device and * we should try the next method. * * If it returns 0 (success), we're finished. 
If it returns any other * error, we're also finished: this indicates that further reset * mechanisms might be broken on the device. */ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) { m = dev->reset_methods[i]; if (!m) return -ENOTTY; method = &pci_reset_fn_methods[m]; pci_dbg(dev, "reset via %s\n", method->name); rc = method->reset_fn(dev, PCI_RESET_DO_RESET); if (!rc) return 0; pci_dbg(dev, "%s failed with %d\n", method->name, rc); if (rc != -ENOTTY) return rc; } return -ENOTTY; } EXPORT_SYMBOL_GPL(__pci_reset_function_locked); /** * pci_init_reset_methods - check whether device can be safely reset * and store supported reset mechanisms. * @dev: PCI device to check for reset mechanisms * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be in D0-D3hot * state. * * Stores reset mechanisms supported by device in reset_methods byte array * which is a member of struct pci_dev. */ void pci_init_reset_methods(struct pci_dev *dev) { int m, i, rc; BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS); might_sleep(); i = 0; for (m = 1; m < PCI_NUM_RESET_METHODS; m++) { rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE); if (!rc) dev->reset_methods[i++] = m; else if (rc != -ENOTTY) break; } dev->reset_methods[i] = 0; } /** * pci_reset_function - quiesce and reset a PCI device function * @dev: PCI device to reset * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be responsive * to PCI config space in order to use this function. * * This function does not just reset the PCI portion of a device, but * clears all the state associated with the device. This function differs * from __pci_reset_function_locked() in that it saves and restores device state * over the reset and takes the PCI device lock. * * Returns 0 if the device function was successfully reset or negative if the * device doesn't support resetting a single function. */ int pci_reset_function(struct pci_dev *dev) { struct pci_dev *bridge; int rc; if (!pci_reset_supported(dev)) return -ENOTTY; /* * If there's no upstream bridge, no locking is needed since there is * no upstream bridge configuration to hold consistent. */ bridge = pci_upstream_bridge(dev); if (bridge) pci_dev_lock(bridge); pci_dev_lock(dev); pci_dev_save_and_disable(dev); rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); pci_dev_unlock(dev); if (bridge) pci_dev_unlock(bridge); return rc; } EXPORT_SYMBOL_GPL(pci_reset_function); /** * pci_reset_function_locked - quiesce and reset a PCI device function * @dev: PCI device to reset * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be responsive * to PCI config space in order to use this function. * * This function does not just reset the PCI portion of a device, but * clears all the state associated with the device. This function differs * from __pci_reset_function_locked() in that it saves and restores device state * over the reset. It also differs from pci_reset_function() in that it * requires the PCI device lock to be held. * * Returns 0 if the device function was successfully reset or negative if the * device doesn't support resetting a single function. 
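 *
 * An illustrative sketch of a hypothetical caller that already holds the
 * required locks (not code from this file):
 *
 *	pci_dev_lock(pdev);
 *	rc = pci_reset_function_locked(pdev);
 *	pci_dev_unlock(pdev);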
*/ int pci_reset_function_locked(struct pci_dev *dev) { int rc; if (!pci_reset_supported(dev)) return -ENOTTY; pci_dev_save_and_disable(dev); rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); return rc; } EXPORT_SYMBOL_GPL(pci_reset_function_locked); /** * pci_try_reset_function - quiesce and reset a PCI device function * @dev: PCI device to reset * * Same as above, except return -EAGAIN if unable to lock device. */ int pci_try_reset_function(struct pci_dev *dev) { int rc; if (!pci_reset_supported(dev)) return -ENOTTY; if (!pci_dev_trylock(dev)) return -EAGAIN; pci_dev_save_and_disable(dev); rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); pci_dev_unlock(dev); return rc; } EXPORT_SYMBOL_GPL(pci_try_reset_function); /* Do any devices on or below this bus prevent a bus reset? */ static bool pci_bus_resettable(struct pci_bus *bus) { struct pci_dev *dev; if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) return false; list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; } return true; } /* Lock devices from the top of the tree down */ static void pci_bus_lock(struct pci_bus *bus) { struct pci_dev *dev; pci_dev_lock(bus->self); list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_lock(dev->subordinate); else pci_dev_lock(dev); } } /* Unlock devices from the bottom of the tree up */ static void pci_bus_unlock(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); else pci_dev_unlock(dev); } pci_dev_unlock(bus->self); } /* Return 1 on successful lock, 0 on contention */ static int pci_bus_trylock(struct pci_bus *bus) { struct pci_dev *dev; if (!pci_dev_trylock(bus->self)) return 0; list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->subordinate) { if (!pci_bus_trylock(dev->subordinate)) goto unlock; } else if (!pci_dev_trylock(dev)) goto unlock; } return 1; unlock: list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); else pci_dev_unlock(dev); } pci_dev_unlock(bus->self); return 0; } /* Do any devices on or below this slot prevent a bus reset? 
*/ static bool pci_slot_resettable(struct pci_slot *slot) { struct pci_dev *dev; if (slot->bus->self && (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) return false; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; } return true; } /* Lock devices from the top of the tree down */ static void pci_slot_lock(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (dev->subordinate) pci_bus_lock(dev->subordinate); else pci_dev_lock(dev); } } /* Unlock devices from the bottom of the tree up */ static void pci_slot_unlock(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); else pci_dev_unlock(dev); } } /* Return 1 on successful lock, 0 on contention */ static int pci_slot_trylock(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (dev->subordinate) { if (!pci_bus_trylock(dev->subordinate)) { pci_dev_unlock(dev); goto unlock; } } else if (!pci_dev_trylock(dev)) goto unlock; } return 1; unlock: list_for_each_entry_continue_reverse(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); else pci_dev_unlock(dev); } return 0; } /* * Save and disable devices from the top of the tree down while holding * the @dev mutex lock for the entire tree. */ static void pci_bus_save_and_disable_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { pci_dev_save_and_disable(dev); if (dev->subordinate) pci_bus_save_and_disable_locked(dev->subordinate); } } /* * Restore devices from top of the tree down while holding @dev mutex lock * for the entire tree. Parent bridges need to be restored before we can * get to subordinate devices. */ static void pci_bus_restore_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { pci_dev_restore(dev); if (dev->subordinate) { pci_bridge_wait_for_secondary_bus(dev, "bus reset"); pci_bus_restore_locked(dev->subordinate); } } } /* * Save and disable devices from the top of the tree down while holding * the @dev mutex lock for the entire tree. */ static void pci_slot_save_and_disable_locked(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; pci_dev_save_and_disable(dev); if (dev->subordinate) pci_bus_save_and_disable_locked(dev->subordinate); } } /* * Restore devices from top of the tree down while holding @dev mutex lock * for the entire tree. Parent bridges need to be restored before we can * get to subordinate devices. 
*/ static void pci_slot_restore_locked(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; pci_dev_restore(dev); if (dev->subordinate) { pci_bridge_wait_for_secondary_bus(dev, "slot reset"); pci_bus_restore_locked(dev->subordinate); } } } static int pci_slot_reset(struct pci_slot *slot, bool probe) { int rc; if (!slot || !pci_slot_resettable(slot)) return -ENOTTY; if (!probe) pci_slot_lock(slot); might_sleep(); rc = pci_reset_hotplug_slot(slot->hotplug, probe); if (!probe) pci_slot_unlock(slot); return rc; } /** * pci_probe_reset_slot - probe whether a PCI slot can be reset * @slot: PCI slot to probe * * Return 0 if slot can be reset, negative if a slot reset is not supported. */ int pci_probe_reset_slot(struct pci_slot *slot) { return pci_slot_reset(slot, PCI_RESET_PROBE); } EXPORT_SYMBOL_GPL(pci_probe_reset_slot); /** * __pci_reset_slot - Try to reset a PCI slot * @slot: PCI slot to reset * * A PCI bus may host multiple slots, each slot may support a reset mechanism * independent of other slots. For instance, some slots may support slot power * control. In the case of a 1:1 bus to slot architecture, this function may * wrap the bus reset to avoid spurious slot related events such as hotplug. * Generally a slot reset should be attempted before a bus reset. All of the * function of the slot and any subordinate buses behind the slot are reset * through this function. PCI config space of all devices in the slot and * behind the slot is saved before and restored after reset. * * Same as above except return -EAGAIN if the slot cannot be locked */ static int __pci_reset_slot(struct pci_slot *slot) { int rc; rc = pci_slot_reset(slot, PCI_RESET_PROBE); if (rc) return rc; if (pci_slot_trylock(slot)) { pci_slot_save_and_disable_locked(slot); might_sleep(); rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET); pci_slot_restore_locked(slot); pci_slot_unlock(slot); } else rc = -EAGAIN; return rc; } static int pci_bus_reset(struct pci_bus *bus, bool probe) { int ret; if (!bus->self || !pci_bus_resettable(bus)) return -ENOTTY; if (probe) return 0; pci_bus_lock(bus); might_sleep(); ret = pci_bridge_secondary_bus_reset(bus->self); pci_bus_unlock(bus); return ret; } /** * pci_bus_error_reset - reset the bridge's subordinate bus * @bridge: The parent device that connects to the bus to reset * * This function will first try to reset the slots on this bus if the method is * available. If slot reset fails or is not available, this will fall back to a * secondary bus reset. */ int pci_bus_error_reset(struct pci_dev *bridge) { struct pci_bus *bus = bridge->subordinate; struct pci_slot *slot; if (!bus) return -ENOTTY; mutex_lock(&pci_slot_mutex); if (list_empty(&bus->slots)) goto bus_reset; list_for_each_entry(slot, &bus->slots, list) if (pci_probe_reset_slot(slot)) goto bus_reset; list_for_each_entry(slot, &bus->slots, list) if (pci_slot_reset(slot, PCI_RESET_DO_RESET)) goto bus_reset; mutex_unlock(&pci_slot_mutex); return 0; bus_reset: mutex_unlock(&pci_slot_mutex); return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET); } /** * pci_probe_reset_bus - probe whether a PCI bus can be reset * @bus: PCI bus to probe * * Return 0 if bus can be reset, negative if a bus reset is not supported. 
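 *
 * An illustrative, hypothetical probe-then-reset sequence using the
 * exported helpers:
 *
 *	if (!pci_probe_reset_bus(pdev->bus))
 *		rc = pci_reset_bus(pdev);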
*/ int pci_probe_reset_bus(struct pci_bus *bus) { return pci_bus_reset(bus, PCI_RESET_PROBE); } EXPORT_SYMBOL_GPL(pci_probe_reset_bus); /** * __pci_reset_bus - Try to reset a PCI bus * @bus: top level PCI bus to reset * * Same as above except return -EAGAIN if the bus cannot be locked */ int __pci_reset_bus(struct pci_bus *bus) { int rc; rc = pci_bus_reset(bus, PCI_RESET_PROBE); if (rc) return rc; if (pci_bus_trylock(bus)) { pci_bus_save_and_disable_locked(bus); might_sleep(); rc = pci_bridge_secondary_bus_reset(bus->self); pci_bus_restore_locked(bus); pci_bus_unlock(bus); } else rc = -EAGAIN; return rc; } /** * pci_reset_bus - Try to reset a PCI bus * @pdev: top level PCI device to reset via slot/bus * * Same as above except return -EAGAIN if the bus cannot be locked */ int pci_reset_bus(struct pci_dev *pdev) { return (!pci_probe_reset_slot(pdev->slot)) ? __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); } EXPORT_SYMBOL_GPL(pci_reset_bus); /** * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count * @dev: PCI device to query * * Returns mmrbc: maximum designed memory read count in bytes or * appropriate error value. */ int pcix_get_max_mmrbc(struct pci_dev *dev) { int cap; u32 stat; cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!cap) return -EINVAL; if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) return -EINVAL; return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat); } EXPORT_SYMBOL(pcix_get_max_mmrbc); /** * pcix_get_mmrbc - get PCI-X maximum memory read byte count * @dev: PCI device to query * * Returns mmrbc: maximum memory read count in bytes or appropriate error * value. */ int pcix_get_mmrbc(struct pci_dev *dev) { int cap; u16 cmd; cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!cap) return -EINVAL; if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) return -EINVAL; return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd); } EXPORT_SYMBOL(pcix_get_mmrbc); /** * pcix_set_mmrbc - set PCI-X maximum memory read byte count * @dev: PCI device to query * @mmrbc: maximum memory read count in bytes * valid values are 512, 1024, 2048, 4096 * * If possible sets maximum memory read byte count, some bridges have errata * that prevent this. */ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) { int cap; u32 stat, v, o; u16 cmd; if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) return -EINVAL; v = ffs(mmrbc) - 10; cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!cap) return -EINVAL; if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) return -EINVAL; if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat)) return -E2BIG; if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) return -EINVAL; o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd); if (o != v) { if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) return -EIO; cmd &= ~PCI_X_CMD_MAX_READ; cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v); if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) return -EIO; } return 0; } EXPORT_SYMBOL(pcix_set_mmrbc); /** * pcie_get_readrq - get PCI Express read request size * @dev: PCI device to query * * Returns maximum memory read request in bytes or appropriate error value. 
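 *
 * Illustrative use together with pcie_set_readrq(), e.g. a hypothetical
 * driver capping its Max_Read_Request_Size at 256 bytes:
 *
 *	if (pcie_get_readrq(pdev) > 256)
 *		rc = pcie_set_readrq(pdev, 256);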
*/ int pcie_get_readrq(struct pci_dev *dev) { u16 ctl; pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl); } EXPORT_SYMBOL(pcie_get_readrq); /** * pcie_set_readrq - set PCI Express maximum memory read request * @dev: PCI device to query * @rq: maximum memory read count in bytes * valid values are 128, 256, 512, 1024, 2048, 4096 * * If possible sets maximum memory read request in bytes */ int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; int ret; unsigned int firstbit; struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; /* * If using the "performance" PCIe config, we clamp the read rq * size to the max packet size to keep the host bridge from * generating requests larger than we can cope with. */ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { int mps = pcie_get_mps(dev); if (mps < rq) rq = mps; } firstbit = ffs(rq); if (firstbit < 8) return -EINVAL; v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8); if (bridge->no_inc_mrrs) { int max_mrrs = pcie_get_readrq(dev); if (rq > max_mrrs) { pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs); return -EINVAL; } } ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_readrq); /** * pcie_get_mps - get PCI Express maximum payload size * @dev: PCI device to query * * Returns maximum payload size in bytes */ int pcie_get_mps(struct pci_dev *dev) { u16 ctl; pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl); } EXPORT_SYMBOL(pcie_get_mps); /** * pcie_set_mps - set PCI Express maximum payload size * @dev: PCI device to query * @mps: maximum payload size in bytes * valid values are 128, 256, 512, 1024, 2048, 4096 * * If possible sets maximum payload size */ int pcie_set_mps(struct pci_dev *dev, int mps) { u16 v; int ret; if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) return -EINVAL; v = ffs(mps) - 8; if (v > dev->pcie_mpss) return -EINVAL; v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v); ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_PAYLOAD, v); return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_mps); static enum pci_bus_speed to_pcie_link_speed(u16 lnksta) { return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)]; } int pcie_link_speed_mbps(struct pci_dev *pdev) { u16 lnksta; int err; err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); if (err) return err; return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta)); } EXPORT_SYMBOL(pcie_link_speed_mbps); /** * pcie_bandwidth_available - determine minimum link settings of a PCIe * device and its bandwidth limitation * @dev: PCI device to query * @limiting_dev: storage for device causing the bandwidth limitation * @speed: storage for speed of limiting device * @width: storage for width of limiting device * * Walk up the PCI device chain and find the point where the minimum * bandwidth is available. Return the bandwidth available there and (if * limiting_dev, speed, and width pointers are supplied) information about * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of * raw bandwidth. 
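 *
 * An illustrative, hypothetical caller:
 *
 *	enum pcie_link_width width;
 *	enum pci_bus_speed speed;
 *	struct pci_dev *limit = NULL;
 *	u32 bw;
 *
 *	bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);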
*/ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, enum pci_bus_speed *speed, enum pcie_link_width *width) { u16 lnksta; enum pci_bus_speed next_speed; enum pcie_link_width next_width; u32 bw, next_bw; if (speed) *speed = PCI_SPEED_UNKNOWN; if (width) *width = PCIE_LNK_WIDTH_UNKNOWN; bw = 0; while (dev) { pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); next_speed = to_pcie_link_speed(lnksta); next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta); next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); /* Check if current device limits the total bandwidth */ if (!bw || next_bw <= bw) { bw = next_bw; if (limiting_dev) *limiting_dev = dev; if (speed) *speed = next_speed; if (width) *width = next_width; } dev = pci_upstream_bridge(dev); } return bw; } EXPORT_SYMBOL(pcie_bandwidth_available); /** * pcie_get_supported_speeds - query Supported Link Speed Vector * @dev: PCI device to query * * Query @dev supported link speeds. * * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining * supported link speeds using the Supported Link Speeds Vector in the Link * Capabilities 2 Register (when available). * * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. * * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s * speeds were defined. * * For @dev without Supported Link Speed Vector, the field is synthesized * from the Max Link Speed field in the Link Capabilities Register. * * Return: Supported Link Speeds Vector (+ reserved 0 at LSB). */ u8 pcie_get_supported_speeds(struct pci_dev *dev) { u32 lnkcap2, lnkcap; u8 speeds; /* * Speeds retain the reserved 0 at LSB before PCIe Supported Link * Speeds Vector to allow using SLS Vector bit defines directly. */ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS; /* Ignore speeds higher than Max Link Speed */ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0); /* PCIe r3.0-compliant */ if (speeds) return speeds; /* Synthesize from the Max Link Speed field */ if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB; else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB) speeds = PCI_EXP_LNKCAP2_SLS_2_5GB; return speeds; } /** * pcie_get_speed_cap - query for the PCI device's link speed capability * @dev: PCI device to query * * Query the PCI device speed capability. * * Return: the maximum link speed supported by the device. */ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) { return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds); } EXPORT_SYMBOL(pcie_get_speed_cap); /** * pcie_get_width_cap - query for the PCI device's link width capability * @dev: PCI device to query * * Query the PCI device width capability. Return the maximum link width * supported by the device. 
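 *
 * For example (illustration only), a device whose Link Capabilities
 * advertise 8.0 GT/s x4 reports PCIE_LNK_X4 here, and
 * pcie_bandwidth_capable() would then report roughly 4 * 7877 Mb/s of raw
 * bandwidth for it.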
*/ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev) { u32 lnkcap; pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); if (lnkcap) return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap); return PCIE_LNK_WIDTH_UNKNOWN; } EXPORT_SYMBOL(pcie_get_width_cap); /** * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability * @dev: PCI device * @speed: storage for link speed * @width: storage for link width * * Calculate a PCI device's link bandwidth by querying for its link speed * and width, multiplying them, and applying encoding overhead. The result * is in Mb/s, i.e., megabits/second of raw bandwidth. */ static u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width) { *speed = pcie_get_speed_cap(dev); *width = pcie_get_width_cap(dev); if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) return 0; return *width * PCIE_SPEED2MBS_ENC(*speed); } /** * __pcie_print_link_status - Report the PCI device's link speed and width * @dev: PCI device to query * @verbose: Print info even when enough bandwidth is available * * If the available bandwidth at the device is less than the device is * capable of, report the device's maximum possible bandwidth and the * upstream link that limits its performance. If @verbose, always print * the available bandwidth, even if the device isn't constrained. */ void __pcie_print_link_status(struct pci_dev *dev, bool verbose) { enum pcie_link_width width, width_cap; enum pci_bus_speed speed, speed_cap; struct pci_dev *limiting_dev = NULL; u32 bw_avail, bw_cap; char *flit_mode = ""; bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap); bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width); if (dev->bus && dev->bus->flit_mode) flit_mode = ", in Flit mode"; if (bw_avail >= bw_cap && verbose) pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n", bw_cap / 1000, bw_cap % 1000, pci_speed_string(speed_cap), width_cap, flit_mode); else if (bw_avail < bw_cap) pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n", bw_avail / 1000, bw_avail % 1000, pci_speed_string(speed), width, limiting_dev ? pci_name(limiting_dev) : "<unknown>", bw_cap / 1000, bw_cap % 1000, pci_speed_string(speed_cap), width_cap, flit_mode); } /** * pcie_print_link_status - Report the PCI device's link speed and width * @dev: PCI device to query * * Report the available bandwidth at the device. */ void pcie_print_link_status(struct pci_dev *dev) { __pcie_print_link_status(dev, true); } EXPORT_SYMBOL(pcie_print_link_status); /** * pci_select_bars - Make BAR mask from the type of resource * @dev: the PCI device for which BAR mask is made * @flags: resource type mask to be selected * * This helper routine makes bar mask from the type of resource. 
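 *
 * Typical illustrative use together with the region request helpers
 * ("mydrv" is a hypothetical driver name):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	rc = pci_request_selected_regions(pdev, bars, "mydrv");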
*/ int pci_select_bars(struct pci_dev *dev, unsigned long flags) { int i, bars = 0; for (i = 0; i < PCI_NUM_RESOURCES; i++) if (pci_resource_flags(dev, i) & flags) bars |= (1 << i); return bars; } EXPORT_SYMBOL(pci_select_bars); /* Some architectures require additional programming to enable VGA */ static arch_set_vga_state_t arch_set_vga_state; void __init pci_register_set_vga_state(arch_set_vga_state_t func) { arch_set_vga_state = func; /* NULL disables */ } static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, unsigned int command_bits, u32 flags) { if (arch_set_vga_state) return arch_set_vga_state(dev, decode, command_bits, flags); return 0; } /** * pci_set_vga_state - set VGA decode state on device and parents if requested * @dev: the PCI device * @decode: true = enable decoding, false = disable decoding * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY * @flags: traverse ancestors and change bridges * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE */ int pci_set_vga_state(struct pci_dev *dev, bool decode, unsigned int command_bits, u32 flags) { struct pci_bus *bus; struct pci_dev *bridge; u16 cmd; int rc; WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); /* ARCH specific VGA enables */ rc = pci_set_vga_state_arch(dev, decode, command_bits, flags); if (rc) return rc; if (flags & PCI_VGA_STATE_CHANGE_DECODES) { pci_read_config_word(dev, PCI_COMMAND, &cmd); if (decode) cmd |= command_bits; else cmd &= ~command_bits; pci_write_config_word(dev, PCI_COMMAND, cmd); } if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE)) return 0; bus = dev->bus; while (bus) { bridge = bus->self; if (bridge) { pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &cmd); if (decode) cmd |= PCI_BRIDGE_CTL_VGA; else cmd &= ~PCI_BRIDGE_CTL_VGA; pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, cmd); } bus = bus->parent; } return 0; } #ifdef CONFIG_ACPI bool pci_pr3_present(struct pci_dev *pdev) { struct acpi_device *adev; if (acpi_disabled) return false; adev = ACPI_COMPANION(&pdev->dev); if (!adev) return false; return adev->power.flags.power_resources && acpi_has_method(adev->handle, "_PR3"); } EXPORT_SYMBOL_GPL(pci_pr3_present); #endif /** * pci_add_dma_alias - Add a DMA devfn alias for a device * @dev: the PCI device for which alias is added * @devfn_from: alias slot and function * @nr_devfns: number of subsequent devfns to alias * * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask * which is used to program permissible bus-devfn source addresses for DMA * requests in an IOMMU. These aliases factor into IOMMU group creation * and are useful for devices generating DMA requests beyond or different * from their logical bus-devfn. Examples include device quirks where the * device simply uses the wrong devfn, as well as non-transparent bridges * where the alias may be a proxy for devices in another domain. * * IOMMU group creation is performed during device discovery or addition, * prior to any potential DMA mapping and therefore prior to driver probing * (especially for userspace assigned devices where IOMMU group definition * cannot be left as a userspace activity). DMA aliases should therefore * be configured via quirks, such as the PCI fixup header quirk. 
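 *
 * An illustrative header fixup quirk (hypothetical vendor/device IDs)
 * that aliases DMA to function 0 of the same slot:
 *
 *	static void quirk_dma_alias_fn0(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_alias_fn0);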
*/ void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned int nr_devfns) { int devfn_to; nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from); devfn_to = devfn_from + nr_devfns - 1; if (!dev->dma_alias_mask) dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL); if (!dev->dma_alias_mask) { pci_warn(dev, "Unable to allocate DMA alias mask\n"); return; } bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns); if (nr_devfns == 1) pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n", PCI_SLOT(devfn_from), PCI_FUNC(devfn_from)); else if (nr_devfns > 1) pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n", PCI_SLOT(devfn_from), PCI_FUNC(devfn_from), PCI_SLOT(devfn_to), PCI_FUNC(devfn_to)); } bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2) { return (dev1->dma_alias_mask && test_bit(dev2->devfn, dev1->dma_alias_mask)) || (dev2->dma_alias_mask && test_bit(dev1->devfn, dev2->dma_alias_mask)) || pci_real_dma_dev(dev1) == dev2 || pci_real_dma_dev(dev2) == dev1; } bool pci_device_is_present(struct pci_dev *pdev) { u32 v; /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */ pdev = pci_physfn(pdev); if (pci_dev_is_disconnected(pdev)) return false; return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); } EXPORT_SYMBOL_GPL(pci_device_is_present); void pci_ignore_hotplug(struct pci_dev *dev) { struct pci_dev *bridge = dev->bus->self; dev->ignore_hotplug = 1; /* Propagate the "ignore hotplug" setting to the parent bridge. */ if (bridge) bridge->ignore_hotplug = 1; } EXPORT_SYMBOL_GPL(pci_ignore_hotplug); /** * pci_real_dma_dev - Get PCI DMA device for PCI device * @dev: the PCI device that may have a PCI DMA alias * * Permits the platform to provide architecture-specific functionality to * devices needing to alias DMA to another PCI device on another PCI bus. If * the PCI device is on the same bus, it is recommended to use * pci_add_dma_alias(). This is the default implementation. Architecture * implementations can override this. */ struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev) { return dev; } resource_size_t __weak pcibios_default_alignment(void) { return 0; } /* * Arches that don't want to expose struct resource to userland as-is in * sysfs and /proc can implement their own pci_resource_to_user(). */ void __weak pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end) { *start = rsrc->start; *end = rsrc->end; } static char *resource_alignment_param; static DEFINE_SPINLOCK(resource_alignment_lock); /** * pci_specified_resource_alignment - get resource alignment specified by user. * @dev: the PCI device to get * @resize: whether or not to change resources' size when reassigning alignment * * RETURNS: Resource alignment if it is specified. * Zero if it is not specified. 
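 *
 * An illustrative, hypothetical command line in the format parsed below,
 * requesting 2^20-byte alignment for one device:
 *
 *	pci=resource_alignment=20@0000:00:1f.0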
*/ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev, bool *resize) { int align_order, count; resource_size_t align = pcibios_default_alignment(); const char *p; int ret; spin_lock(&resource_alignment_lock); p = resource_alignment_param; if (!p || !*p) goto out; if (pci_has_flag(PCI_PROBE_ONLY)) { align = 0; pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n"); goto out; } while (*p) { count = 0; if (sscanf(p, "%d%n", &align_order, &count) == 1 && p[count] == '@') { p += count + 1; if (align_order > 63) { pr_err("PCI: Invalid requested alignment (order %d)\n", align_order); align_order = PAGE_SHIFT; } } else { align_order = PAGE_SHIFT; } ret = pci_dev_str_match(dev, p, &p); if (ret == 1) { *resize = true; align = 1ULL << align_order; break; } else if (ret < 0) { pr_err("PCI: Can't parse resource_alignment parameter: %s\n", p); break; } if (*p != ';' && *p != ',') { /* End of param or invalid format */ break; } p++; } out: spin_unlock(&resource_alignment_lock); return align; } static void pci_request_resource_alignment(struct pci_dev *dev, int bar, resource_size_t align, bool resize) { struct resource *r = &dev->resource[bar]; const char *r_name = pci_resource_name(dev, bar); resource_size_t size; if (!(r->flags & IORESOURCE_MEM)) return; if (r->flags & IORESOURCE_PCI_FIXED) { pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n", r_name, r, (unsigned long long)align); return; } size = resource_size(r); if (size >= align) return; /* * Increase the alignment of the resource. There are two ways we * can do this: * * 1) Increase the size of the resource. BARs are aligned on their * size, so when we reallocate space for this resource, we'll * allocate it with the larger alignment. This also prevents * assignment of any other BARs inside the alignment region, so * if we're requesting page alignment, this means no other BARs * will share the page. * * The disadvantage is that this makes the resource larger than * the hardware BAR, which may break drivers that compute things * based on the resource size, e.g., to find registers at a * fixed offset before the end of the BAR. * * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and * set r->start to the desired alignment. By itself this * doesn't prevent other BARs being put inside the alignment * region, but if we realign *every* resource of every device in * the system, none of them will share an alignment region. * * When the user has requested alignment for only some devices via * the "pci=resource_alignment" argument, "resize" is true and we * use the first method. Otherwise we assume we're aligning all * devices and we use the second. */ pci_info(dev, "%s %pR: requesting alignment to %#llx\n", r_name, r, (unsigned long long)align); if (resize) { r->start = 0; r->end = align - 1; } else { r->flags &= ~IORESOURCE_SIZEALIGN; r->flags |= IORESOURCE_STARTALIGN; resource_set_range(r, align, size); } r->flags |= IORESOURCE_UNSET; } /* * This function disables memory decoding and releases memory resources * of the device specified by kernel's boot parameter 'pci=resource_alignment='. * It also rounds up size to specified alignment. * Later on, the kernel will assign page-aligned memory resource back * to the device. */ void pci_reassigndev_resource_alignment(struct pci_dev *dev) { int i; struct resource *r; resource_size_t align; u16 command; bool resize = false; /* * VF BARs are read-only zero according to SR-IOV spec r1.1, sec * 3.4.1.11. 
Their resources are allocated from the space * described by the VF BARx register in the PF's SR-IOV capability. * We can't influence their alignment here. */ if (dev->is_virtfn) return; /* check if specified PCI is target device to reassign */ align = pci_specified_resource_alignment(dev, &resize); if (!align) return; if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL && (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) { pci_warn(dev, "Can't reassign resources to host bridge\n"); return; } pci_read_config_word(dev, PCI_COMMAND, &command); command &= ~PCI_COMMAND_MEMORY; pci_write_config_word(dev, PCI_COMMAND, command); for (i = 0; i <= PCI_ROM_RESOURCE; i++) pci_request_resource_alignment(dev, i, align, resize); /* * Need to disable bridge's resource window, * to enable the kernel to reassign new resource * window later on. */ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { r = &dev->resource[i]; if (!(r->flags & IORESOURCE_MEM)) continue; r->flags |= IORESOURCE_UNSET; r->end = resource_size(r) - 1; r->start = 0; } pci_disable_bridge_window(dev); } } static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf) { size_t count = 0; spin_lock(&resource_alignment_lock); if (resource_alignment_param) count = sysfs_emit(buf, "%s\n", resource_alignment_param); spin_unlock(&resource_alignment_lock); return count; } static ssize_t resource_alignment_store(const struct bus_type *bus, const char *buf, size_t count) { char *param, *old, *end; if (count >= (PAGE_SIZE - 1)) return -EINVAL; param = kstrndup(buf, count, GFP_KERNEL); if (!param) return -ENOMEM; end = strchr(param, '\n'); if (end) *end = '\0'; spin_lock(&resource_alignment_lock); old = resource_alignment_param; if (strlen(param)) { resource_alignment_param = param; } else { kfree(param); resource_alignment_param = NULL; } spin_unlock(&resource_alignment_lock); kfree(old); return count; } static BUS_ATTR_RW(resource_alignment); static int __init pci_resource_alignment_sysfs_init(void) { return bus_create_file(&pci_bus_type, &bus_attr_resource_alignment); } late_initcall(pci_resource_alignment_sysfs_init); static void pci_no_domains(void) { #ifdef CONFIG_PCI_DOMAINS pci_domains_supported = 0; #endif } #ifdef CONFIG_PCI_DOMAINS static DEFINE_IDA(pci_domain_nr_dynamic_ida); /** * pci_bus_find_emul_domain_nr() - allocate a PCI domain number per constraints * @hint: desired domain, 0 if any ID in the range of @min to @max is acceptable * @min: minimum allowable domain * @max: maximum allowable domain, no IDs higher than INT_MAX will be returned */ int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max) { return ida_alloc_range(&pci_domain_nr_dynamic_ida, max(hint, min), max, GFP_KERNEL); } EXPORT_SYMBOL_GPL(pci_bus_find_emul_domain_nr); void pci_bus_release_emul_domain_nr(int domain_nr) { ida_free(&pci_domain_nr_dynamic_ida, domain_nr); } EXPORT_SYMBOL_GPL(pci_bus_release_emul_domain_nr); #endif #ifdef CONFIG_PCI_DOMAINS_GENERIC static DEFINE_IDA(pci_domain_nr_static_ida); static void of_pci_reserve_static_domain_nr(void) { struct device_node *np; int domain_nr; for_each_node_by_type(np, "pci") { domain_nr = of_get_pci_domain_nr(np); if (domain_nr < 0) continue; /* * Permanently allocate domain_nr in dynamic_ida * to prevent it from dynamic allocation. 
*/ ida_alloc_range(&pci_domain_nr_dynamic_ida, domain_nr, domain_nr, GFP_KERNEL); } } static int of_pci_bus_find_domain_nr(struct device *parent) { static bool static_domains_reserved = false; int domain_nr; /* On the first call scan device tree for static allocations. */ if (!static_domains_reserved) { of_pci_reserve_static_domain_nr(); static_domains_reserved = true; } if (parent) { /* * If domain is in DT, allocate it in static IDA. This * prevents duplicate static allocations in case of errors * in DT. */ domain_nr = of_get_pci_domain_nr(parent->of_node); if (domain_nr >= 0) return ida_alloc_range(&pci_domain_nr_static_ida, domain_nr, domain_nr, GFP_KERNEL); } /* * If domain was not specified in DT, choose a free ID from dynamic * allocations. All domain numbers from DT are permanently in * dynamic allocations to prevent assigning them to other DT nodes * without static domain. */ return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL); } static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr) { if (domain_nr < 0) return; /* Release domain from IDA where it was allocated. */ if (of_get_pci_domain_nr(parent->of_node) == domain_nr) ida_free(&pci_domain_nr_static_ida, domain_nr); else ida_free(&pci_domain_nr_dynamic_ida, domain_nr); } int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent) { return acpi_disabled ? of_pci_bus_find_domain_nr(parent) : acpi_pci_bus_find_domain_nr(bus); } void pci_bus_release_domain_nr(struct device *parent, int domain_nr) { if (!acpi_disabled) return; of_pci_bus_release_domain_nr(parent, domain_nr); } #endif /** * pci_ext_cfg_avail - can we access extended PCI config space? * * Returns 1 if we can access PCI extended config space (offsets * greater than 0xff). This is the default implementation. Architecture * implementations can override this. 
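 *
 * A minimal sketch of an architecture override ("ext_cfg_supported" is a
 * hypothetical flag, not a real symbol):
 *
 *	int pci_ext_cfg_avail(void)
 *	{
 *		return ext_cfg_supported ? 1 : 0;
 *	}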
*/ int __weak pci_ext_cfg_avail(void) { return 1; } static int __init pci_setup(char *str) { while (str) { char *k = strchr(str, ','); if (k) *k++ = 0; if (*str && (str = pcibios_setup(str)) && *str) { if (!strcmp(str, "nomsi")) { pci_no_msi(); } else if (!strncmp(str, "noats", 5)) { pr_info("PCIe: ATS is disabled\n"); pcie_ats_disabled = true; } else if (!strcmp(str, "noaer")) { pci_no_aer(); } else if (!strcmp(str, "earlydump")) { pci_early_dump = true; } else if (!strncmp(str, "realloc=", 8)) { pci_realloc_get_opt(str + 8); } else if (!strncmp(str, "realloc", 7)) { pci_realloc_get_opt("on"); } else if (!strcmp(str, "nodomains")) { pci_no_domains(); } else if (!strncmp(str, "noari", 5)) { pcie_ari_disabled = true; } else if (!strncmp(str, "notph", 5)) { pci_no_tph(); } else if (!strncmp(str, "cbiosize=", 9)) { pci_cardbus_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "cbmemsize=", 10)) { pci_cardbus_mem_size = memparse(str + 10, &str); } else if (!strncmp(str, "resource_alignment=", 19)) { resource_alignment_param = str + 19; } else if (!strncmp(str, "ecrc=", 5)) { pcie_ecrc_get_policy(str + 5); } else if (!strncmp(str, "hpiosize=", 9)) { pci_hotplug_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "hpmmiosize=", 11)) { pci_hotplug_mmio_size = memparse(str + 11, &str); } else if (!strncmp(str, "hpmmioprefsize=", 15)) { pci_hotplug_mmio_pref_size = memparse(str + 15, &str); } else if (!strncmp(str, "hpmemsize=", 10)) { pci_hotplug_mmio_size = memparse(str + 10, &str); pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size; } else if (!strncmp(str, "hpbussize=", 10)) { pci_hotplug_bus_size = simple_strtoul(str + 10, &str, 0); if (pci_hotplug_bus_size > 0xff) pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { pcie_bus_config = PCIE_BUS_TUNE_OFF; } else if (!strncmp(str, "pcie_bus_safe", 13)) { pcie_bus_config = PCIE_BUS_SAFE; } else if (!strncmp(str, "pcie_bus_perf", 13)) { pcie_bus_config = PCIE_BUS_PERFORMANCE; } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { pcie_bus_config = PCIE_BUS_PEER2PEER; } else if (!strncmp(str, "pcie_scan_all", 13)) { pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); } else if (!strncmp(str, "disable_acs_redir=", 18)) { disable_acs_redir_param = str + 18; } else if (!strncmp(str, "config_acs=", 11)) { config_acs_param = str + 11; } else { pr_err("PCI: Unknown option `%s'\n", str); } } str = k; } return 0; } early_param("pci", pci_setup); /* * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized * in pci_setup(), above, to point to data in the __initdata section which * will be freed after the init sequence is complete. We can't allocate memory * in pci_setup() because some architectures do not have any memory allocation * service available during an early_param() call. So we allocate memory and * copy the variable here before the init section is freed. * */ static int __init pci_realloc_setup_params(void) { resource_alignment_param = kstrdup(resource_alignment_param, GFP_KERNEL); disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL); config_acs_param = kstrdup(config_acs_param, GFP_KERNEL); return 0; } pure_initcall(pci_realloc_setup_params);
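The listing that follows is the ALSA sequencer client manager (seq_clientmgr.c). For orientation only, here is a minimal user-space sketch of the kind of client that exercises these paths; it is not part of the report and assumes alsa-lib (<alsa/asoundlib.h>) is installed. The client name, port name, and build command are illustrative. snd_seq_open() in the sketch reaches the kernel snd_seq_open() below, snd_seq_create_simple_port() issues the CREATE_PORT ioctl handled by snd_seq_ioctl_create_port(), and the drained note-on goes through snd_seq_write() and the event-delivery code.

/* Illustrative user-space sketch (not part of the kernel listing below).
 * Build with something like: gcc example.c -lasound */
#include <alsa/asoundlib.h>

int main(void)
{
	snd_seq_t *seq;
	snd_seq_event_t ev;
	int port;

	/* opens /dev/snd/seq -> kernel snd_seq_open() */
	if (snd_seq_open(&seq, "default", SND_SEQ_OPEN_DUPLEX, 0) < 0)
		return 1;
	snd_seq_set_client_name(seq, "coverage-example");

	/* SNDRV_SEQ_IOCTL_CREATE_PORT -> snd_seq_ioctl_create_port() */
	port = snd_seq_create_simple_port(seq, "out",
			SND_SEQ_PORT_CAP_READ | SND_SEQ_PORT_CAP_SUBS_READ,
			SND_SEQ_PORT_TYPE_MIDI_GENERIC |
			SND_SEQ_PORT_TYPE_APPLICATION);
	if (port < 0)
		goto out;

	/* a direct note-on to subscribers -> snd_seq_write() and delivery */
	snd_seq_ev_clear(&ev);
	snd_seq_ev_set_source(&ev, port);
	snd_seq_ev_set_subs(&ev);
	snd_seq_ev_set_direct(&ev);
	snd_seq_ev_set_noteon(&ev, 0, 60, 100);
	snd_seq_event_output(seq, &ev);
	snd_seq_drain_output(seq);
out:
	snd_seq_close(seq);
	return 0;
}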
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  ALSA sequencer Client Manager
 *  Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                             Jaroslav Kysela <perex@perex.cz>
 *                             Takashi Iwai <tiwai@suse.de>
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <linux/kmod.h>

#include <sound/seq_kernel.h>
#include <sound/ump.h>
#include "seq_clientmgr.h"
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_timer.h"
#include "seq_info.h"
#include "seq_system.h"
#include "seq_ump_convert.h"
#include <sound/seq_device.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/* Client Manager
 * this module handles the connections of userland and kernel clients
 *
 */

/*
 * There are four ranges of client numbers (last two shared):
 * 0..15: global clients
 * 16..127: statically allocated client numbers for cards 0..27
 * 128..191: dynamically allocated client numbers for cards 28..31
 * 128..191: dynamically allocated client numbers for applications
 */

/* number of kernel non-card clients */
#define SNDRV_SEQ_GLOBAL_CLIENTS	16
/* clients per cards, for static clients */
#define SNDRV_SEQ_CLIENTS_PER_CARD	4
/* dynamically allocated client numbers (both kernel drivers and user space) */
#define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN	128

#define SNDRV_SEQ_LFLG_INPUT	0x0001
#define SNDRV_SEQ_LFLG_OUTPUT	0x0002
#define SNDRV_SEQ_LFLG_OPEN	(SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)

static DEFINE_SPINLOCK(clients_lock);
static DEFINE_MUTEX(register_mutex);

/*
 * client table
 */
static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_usage client_usage;

/*
 * prototypes
 */
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop);
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int atomic, int hop);

#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
static void free_ump_info(struct snd_seq_client *client);
#endif

/*
 */
static inline unsigned short snd_seq_file_flags(struct file *file)
{
	switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_WRITE:
		return SNDRV_SEQ_LFLG_OUTPUT;
	case FMODE_READ:
		return SNDRV_SEQ_LFLG_INPUT;
	default:
		return SNDRV_SEQ_LFLG_OPEN;
	}
}

static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
{
	return snd_seq_total_cells(client->pool) > 0;
}

/* return pointer to client structure for specified id */
static struct snd_seq_client *clientptr(int clientid)
{
	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n", clientid);
		return NULL;
	}
	return clienttab[clientid];
}

static struct snd_seq_client *client_use_ptr(int clientid, bool load_module)
{
	struct snd_seq_client *client;

	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		pr_debug("ALSA: seq: oops.
Trying to get pointer to client %d\n", clientid); return NULL; } scoped_guard(spinlock_irqsave, &clients_lock) { client = clientptr(clientid); if (client) return snd_seq_client_ref(client); if (clienttablock[clientid]) return NULL; } #ifdef CONFIG_MODULES if (load_module) { static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS); static DECLARE_BITMAP(card_requested, SNDRV_CARDS); if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) { int idx; if (!test_and_set_bit(clientid, client_requested)) { for (idx = 0; idx < 15; idx++) { if (seq_client_load[idx] < 0) break; if (seq_client_load[idx] == clientid) { request_module("snd-seq-client-%i", clientid); break; } } } } else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) { int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) / SNDRV_SEQ_CLIENTS_PER_CARD; if (card < snd_ecards_limit) { if (!test_and_set_bit(card, card_requested)) snd_request_card(card); snd_seq_device_load_drivers(); } } scoped_guard(spinlock_irqsave, &clients_lock) { client = clientptr(clientid); if (client) return snd_seq_client_ref(client); } } #endif return NULL; } /* get snd_seq_client object for the given id quickly */ struct snd_seq_client *snd_seq_client_use_ptr(int clientid) { return client_use_ptr(clientid, false); } /* get snd_seq_client object for the given id; * if not found, retry after loading the modules */ static struct snd_seq_client *client_load_and_use_ptr(int clientid) { return client_use_ptr(clientid, IS_ENABLED(CONFIG_MODULES)); } static void usage_alloc(struct snd_seq_usage *res, int num) { res->cur += num; if (res->cur > res->peak) res->peak = res->cur; } static void usage_free(struct snd_seq_usage *res, int num) { res->cur -= num; } /* initialise data structures */ int __init client_init_data(void) { /* zap out the client table */ memset(&clienttablock, 0, sizeof(clienttablock)); memset(&clienttab, 0, sizeof(clienttab)); return 0; } static struct snd_seq_client *seq_create_client1(int client_index, int poolsize) { int c; struct snd_seq_client *client; /* init client data */ client = kzalloc(sizeof(*client), GFP_KERNEL); if (client == NULL) return NULL; client->pool = snd_seq_pool_new(poolsize); if (client->pool == NULL) { kfree(client); return NULL; } client->type = NO_CLIENT; snd_use_lock_init(&client->use_lock); rwlock_init(&client->ports_lock); mutex_init(&client->ports_mutex); INIT_LIST_HEAD(&client->ports_list_head); mutex_init(&client->ioctl_mutex); client->ump_endpoint_port = -1; /* find free slot in the client table */ scoped_guard(spinlock_irq, &clients_lock) { if (client_index < 0) { for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN; c < SNDRV_SEQ_MAX_CLIENTS; c++) { if (clienttab[c] || clienttablock[c]) continue; clienttab[client->number = c] = client; return client; } } else { if (clienttab[client_index] == NULL && !clienttablock[client_index]) { clienttab[client->number = client_index] = client; return client; } } } snd_seq_pool_delete(&client->pool); kfree(client); return NULL; /* no free slot found or busy, return failure code */ } static int seq_free_client1(struct snd_seq_client *client) { if (!client) return 0; scoped_guard(spinlock_irq, &clients_lock) { clienttablock[client->number] = 1; clienttab[client->number] = NULL; } snd_seq_delete_all_ports(client); snd_seq_queue_client_leave(client->number); snd_use_lock_sync(&client->use_lock); if (client->pool) snd_seq_pool_delete(&client->pool); scoped_guard(spinlock_irq, &clients_lock) { clienttablock[client->number] = 0; } return 0; } static void seq_free_client(struct snd_seq_client * client) { 
scoped_guard(mutex, &register_mutex) { switch (client->type) { case NO_CLIENT: pr_warn("ALSA: seq: Trying to free unused client %d\n", client->number); break; case USER_CLIENT: case KERNEL_CLIENT: seq_free_client1(client); usage_free(&client_usage, 1); break; default: pr_err("ALSA: seq: Trying to free client %d with undefined type = %d\n", client->number, client->type); } } snd_seq_system_client_ev_client_exit(client->number); } /* -------------------------------------------------------- */ /* create a user client */ static int snd_seq_open(struct inode *inode, struct file *file) { int c, mode; /* client id */ struct snd_seq_client *client; struct snd_seq_user_client *user; int err; err = stream_open(inode, file); if (err < 0) return err; scoped_guard(mutex, &register_mutex) { client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS); if (!client) return -ENOMEM; /* failure code */ mode = snd_seq_file_flags(file); if (mode & SNDRV_SEQ_LFLG_INPUT) client->accept_input = 1; if (mode & SNDRV_SEQ_LFLG_OUTPUT) client->accept_output = 1; user = &client->data.user; user->fifo = NULL; user->fifo_pool_size = 0; if (mode & SNDRV_SEQ_LFLG_INPUT) { user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS; user->fifo = snd_seq_fifo_new(user->fifo_pool_size); if (user->fifo == NULL) { seq_free_client1(client); kfree(client); return -ENOMEM; } } usage_alloc(&client_usage, 1); client->type = USER_CLIENT; } c = client->number; file->private_data = client; /* fill client data */ user->file = file; sprintf(client->name, "Client-%d", c); client->data.user.owner = get_pid(task_pid(current)); /* make others aware this new client */ snd_seq_system_client_ev_client_start(c); return 0; } /* delete a user client */ static int snd_seq_release(struct inode *inode, struct file *file) { struct snd_seq_client *client = file->private_data; if (client) { seq_free_client(client); if (client->data.user.fifo) snd_seq_fifo_delete(&client->data.user.fifo); #if IS_ENABLED(CONFIG_SND_SEQ_UMP) free_ump_info(client); #endif put_pid(client->data.user.owner); kfree(client); } return 0; } static bool event_is_compatible(const struct snd_seq_client *client, const struct snd_seq_event *ev) { if (snd_seq_ev_is_ump(ev) && !client->midi_version) return false; if (snd_seq_ev_is_ump(ev) && snd_seq_ev_is_variable(ev)) return false; return true; } /* handle client read() */ /* possible error values: * -ENXIO invalid client or file open mode * -ENOSPC FIFO overflow (the flag is cleared after this error report) * -EINVAL no enough user-space buffer to write the whole event * -EFAULT seg. 
fault during copy to user space */ static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct snd_seq_client *client = file->private_data; struct snd_seq_fifo *fifo; size_t aligned_size; int err; long result = 0; struct snd_seq_event_cell *cell; if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT)) return -ENXIO; if (!access_ok(buf, count)) return -EFAULT; /* check client structures are in place */ if (snd_BUG_ON(!client)) return -ENXIO; if (!client->accept_input) return -ENXIO; fifo = client->data.user.fifo; if (!fifo) return -ENXIO; if (atomic_read(&fifo->overflow) > 0) { /* buffer overflow is detected */ snd_seq_fifo_clear(fifo); /* return error code */ return -ENOSPC; } cell = NULL; err = 0; guard(snd_seq_fifo)(fifo); if (IS_ENABLED(CONFIG_SND_SEQ_UMP) && client->midi_version > 0) aligned_size = sizeof(struct snd_seq_ump_event); else aligned_size = sizeof(struct snd_seq_event); /* while data available in queue */ while (count >= aligned_size) { int nonblock; nonblock = (file->f_flags & O_NONBLOCK) || result > 0; err = snd_seq_fifo_cell_out(fifo, &cell, nonblock); if (err < 0) break; if (!event_is_compatible(client, &cell->event)) { snd_seq_cell_free(cell); cell = NULL; continue; } if (snd_seq_ev_is_variable(&cell->event)) { struct snd_seq_ump_event tmpev; memcpy(&tmpev, &cell->event, aligned_size); tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK; if (copy_to_user(buf, &tmpev, aligned_size)) { err = -EFAULT; break; } count -= aligned_size; buf += aligned_size; err = snd_seq_expand_var_event(&cell->event, count, (char __force *)buf, 0, aligned_size); if (err < 0) break; result += err; count -= err; buf += err; } else { if (copy_to_user(buf, &cell->event, aligned_size)) { err = -EFAULT; break; } count -= aligned_size; buf += aligned_size; } snd_seq_cell_free(cell); cell = NULL; /* to be sure */ result += aligned_size; } if (err < 0) { if (cell) snd_seq_fifo_cell_putback(fifo, cell); if (err == -EAGAIN && result > 0) err = 0; } return (err < 0) ? err : result; } /* * check access permission to the port */ static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags) { if ((port->capability & flags) != flags) return 0; return flags; } /* * check if the destination client is available, and return the pointer */ static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event) { struct snd_seq_client *dest __free(snd_seq_client) = NULL; dest = snd_seq_client_use_ptr(event->dest.client); if (dest == NULL) return NULL; if (! dest->accept_input) return NULL; if (snd_seq_ev_is_ump(event)) return no_free_ptr(dest); /* ok - no filter checks */ if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) && ! test_bit(event->type, dest->event_filter)) return NULL; return no_free_ptr(dest); /* ok - accessible */ } /* * Return the error event. * * If the receiver client is a user client, the original event is * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event. If * the original event is also variable length, the external data is * copied after the event record. * If the receiver client is a kernel client, the original event is * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra * kmalloc. */ static int bounce_error_event(struct snd_seq_client *client, struct snd_seq_event *event, int err, int atomic, int hop) { struct snd_seq_event bounce_ev; int result; if (client == NULL || ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) || ! 
client->accept_input) return 0; /* ignored */ /* set up quoted error */ memset(&bounce_ev, 0, sizeof(bounce_ev)); bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR; bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED; bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT; bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM; bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE; bounce_ev.dest.client = client->number; bounce_ev.dest.port = event->source.port; bounce_ev.data.quote.origin = event->dest; bounce_ev.data.quote.event = event; bounce_ev.data.quote.value = -err; /* use positive value */ result = snd_seq_deliver_single_event(NULL, &bounce_ev, atomic, hop + 1); if (result < 0) { client->event_lost++; return result; } return result; } /* * rewrite the time-stamp of the event record with the curren time * of the given queue. * return non-zero if updated. */ static int update_timestamp_of_queue(struct snd_seq_event *event, int queue, int real_time) { struct snd_seq_queue *q __free(snd_seq_queue) = NULL; q = queueptr(queue); if (! q) return 0; event->queue = queue; event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK; if (real_time) { event->time.time = snd_seq_timer_get_cur_time(q->timer, true); event->flags |= SNDRV_SEQ_TIME_STAMP_REAL; } else { event->time.tick = snd_seq_timer_get_cur_tick(q->timer); event->flags |= SNDRV_SEQ_TIME_STAMP_TICK; } return 1; } /* deliver a single event; called from below and UMP converter */ int __snd_seq_deliver_single_event(struct snd_seq_client *dest, struct snd_seq_client_port *dest_port, struct snd_seq_event *event, int atomic, int hop) { switch (dest->type) { case USER_CLIENT: if (!dest->data.user.fifo) return 0; return snd_seq_fifo_event_in(dest->data.user.fifo, event); case KERNEL_CLIENT: if (!dest_port->event_input) return 0; return dest_port->event_input(event, snd_seq_ev_is_direct(event), dest_port->private_data, atomic, hop); } return 0; } /* deliver a single event; called from snd_seq_deliver_single_event() */ static int _snd_seq_deliver_single_event(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) { struct snd_seq_client *dest __free(snd_seq_client) = NULL; struct snd_seq_client_port *dest_port __free(snd_seq_port) = NULL; dest = get_event_dest_client(event); if (dest == NULL) return -ENOENT; dest_port = snd_seq_port_use_ptr(dest, event->dest.port); if (dest_port == NULL) return -ENOENT; /* check permission */ if (!check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) return -EPERM; if (dest_port->timestamping) update_timestamp_of_queue(event, dest_port->time_queue, dest_port->time_real); #if IS_ENABLED(CONFIG_SND_SEQ_UMP) if (snd_seq_ev_is_ump(event)) { if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) return snd_seq_deliver_from_ump(client, dest, dest_port, event, atomic, hop); else if (dest->type == USER_CLIENT && !snd_seq_client_is_ump(dest)) return 0; // drop the event } else if (snd_seq_client_is_ump(dest)) { if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) return snd_seq_deliver_to_ump(client, dest, dest_port, event, atomic, hop); } #endif /* CONFIG_SND_SEQ_UMP */ return __snd_seq_deliver_single_event(dest, dest_port, event, atomic, hop); } /* * deliver an event to the specified destination. * if filter is non-zero, client filter bitmap is tested. 
* * RETURN VALUE: 0 : if succeeded * <0 : error */ static int snd_seq_deliver_single_event(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) { int result = _snd_seq_deliver_single_event(client, event, atomic, hop); if (result < 0 && !snd_seq_ev_is_direct(event)) return bounce_error_event(client, event, result, atomic, hop); return result; } /* * send the event to all subscribers: */ static int __deliver_to_subscribers(struct snd_seq_client *client, struct snd_seq_event *event, int port, int atomic, int hop) { struct snd_seq_client_port *src_port __free(snd_seq_port) = NULL; struct snd_seq_subscribers *subs; int err, result = 0, num_ev = 0; union __snd_seq_event event_saved; size_t saved_size; struct snd_seq_port_subs_info *grp; if (port < 0) return 0; src_port = snd_seq_port_use_ptr(client, port); if (!src_port) return 0; /* save original event record */ saved_size = snd_seq_event_packet_size(event); memcpy(&event_saved, event, saved_size); grp = &src_port->c_src; /* lock list */ if (atomic) read_lock(&grp->list_lock); else down_read_nested(&grp->list_mutex, hop); list_for_each_entry(subs, &grp->list_head, src_list) { /* both ports ready? */ if (atomic_read(&subs->ref_count) != 2) continue; event->dest = subs->info.dest; if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP) /* convert time according to flag with subscription */ update_timestamp_of_queue(event, subs->info.queue, subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL); err = snd_seq_deliver_single_event(client, event, atomic, hop); if (err < 0) { /* save first error that occurs and continue */ if (!result) result = err; continue; } num_ev++; /* restore original event record */ memcpy(event, &event_saved, saved_size); } if (atomic) read_unlock(&grp->list_lock); else up_read(&grp->list_mutex); memcpy(event, &event_saved, saved_size); return (result < 0) ? result : num_ev; } static int deliver_to_subscribers(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) { int ret; #if IS_ENABLED(CONFIG_SND_SEQ_UMP) int ret2; #endif ret = __deliver_to_subscribers(client, event, event->source.port, atomic, hop); #if IS_ENABLED(CONFIG_SND_SEQ_UMP) if (!snd_seq_client_is_ump(client) || client->ump_endpoint_port < 0) return ret; /* If it's an event from EP port (and with a UMP group), * deliver to subscribers of the corresponding UMP group port, too. * Or, if it's from non-EP port, deliver to subscribers of EP port, too. */ if (event->source.port == client->ump_endpoint_port) ret2 = __deliver_to_subscribers(client, event, snd_seq_ump_group_port(event), atomic, hop); else ret2 = __deliver_to_subscribers(client, event, client->ump_endpoint_port, atomic, hop); if (ret2 < 0) return ret2; #endif return ret; } /* deliver an event to the destination port(s). * if the event is to subscribers or broadcast, the event is dispatched * to multiple targets. * * RETURN VALUE: n > 0 : the number of delivered events. * n == 0 : the event was not passed to any client. * n < 0 : error - event was not processed. 
*/ static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) { int result; hop++; if (hop >= SNDRV_SEQ_MAX_HOPS) { pr_debug("ALSA: seq: too long delivery path (%d:%d->%d:%d)\n", event->source.client, event->source.port, event->dest.client, event->dest.port); return -EMLINK; } if (snd_seq_ev_is_variable(event) && snd_BUG_ON(atomic && (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR))) return -EINVAL; if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS || event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) result = deliver_to_subscribers(client, event, atomic, hop); else result = snd_seq_deliver_single_event(client, event, atomic, hop); return result; } /* * dispatch an event cell: * This function is called only from queue check routines in timer * interrupts or after enqueued. * The event cell shall be released or re-queued in this function. * * RETURN VALUE: n > 0 : the number of delivered events. * n == 0 : the event was not passed to any client. * n < 0 : error - event was not processed. */ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop) { struct snd_seq_client *client __free(snd_seq_client) = NULL; int result; if (snd_BUG_ON(!cell)) return -EINVAL; client = snd_seq_client_use_ptr(cell->event.source.client); if (client == NULL) { snd_seq_cell_free(cell); /* release this cell */ return -EINVAL; } if (!snd_seq_ev_is_ump(&cell->event) && cell->event.type == SNDRV_SEQ_EVENT_NOTE) { /* NOTE event: * the event cell is re-used as a NOTE-OFF event and * enqueued again. */ struct snd_seq_event tmpev, *ev; /* reserve this event to enqueue note-off later */ tmpev = cell->event; tmpev.type = SNDRV_SEQ_EVENT_NOTEON; result = snd_seq_deliver_event(client, &tmpev, atomic, hop); /* * This was originally a note event. We now re-use the * cell for the note-off event. */ ev = &cell->event; ev->type = SNDRV_SEQ_EVENT_NOTEOFF; ev->flags |= SNDRV_SEQ_PRIORITY_HIGH; /* add the duration time */ switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) { case SNDRV_SEQ_TIME_STAMP_TICK: cell->event.time.tick += ev->data.note.duration; break; case SNDRV_SEQ_TIME_STAMP_REAL: /* unit for duration is ms */ ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000); ev->time.time.tv_sec += ev->data.note.duration / 1000 + ev->time.time.tv_nsec / 1000000000; ev->time.time.tv_nsec %= 1000000000; break; } ev->data.note.velocity = ev->data.note.off_velocity; /* Now queue this cell as the note off event */ if (snd_seq_enqueue_event(cell, atomic, hop) < 0) snd_seq_cell_free(cell); /* release this cell */ } else { /* Normal events: * event cell is freed after processing the event */ result = snd_seq_deliver_event(client, &cell->event, atomic, hop); snd_seq_cell_free(cell); } return result; } /* Allocate a cell from client pool and enqueue it to queue: * if pool is empty and blocking is TRUE, sleep until a new cell is * available. 
*/ static int snd_seq_client_enqueue_event(struct snd_seq_client *client, struct snd_seq_event *event, struct file *file, int blocking, int atomic, int hop, struct mutex *mutexp) { struct snd_seq_event_cell *cell; int err; /* special queue values - force direct passing */ if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; event->queue = SNDRV_SEQ_QUEUE_DIRECT; } else if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { /* check presence of source port */ struct snd_seq_client_port *src_port __free(snd_seq_port) = snd_seq_port_use_ptr(client, event->source.port); if (!src_port) return -EINVAL; } /* direct event processing without enqueued */ if (snd_seq_ev_is_direct(event)) { if (!snd_seq_ev_is_ump(event) && event->type == SNDRV_SEQ_EVENT_NOTE) return -EINVAL; /* this event must be enqueued! */ return snd_seq_deliver_event(client, event, atomic, hop); } /* Not direct, normal queuing */ if (snd_seq_queue_is_used(event->queue, client->number) <= 0) return -EINVAL; /* invalid queue */ if (! snd_seq_write_pool_allocated(client)) return -ENXIO; /* queue is not allocated */ /* allocate an event cell */ err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file, mutexp); if (err < 0) return err; /* we got a cell. enqueue it. */ err = snd_seq_enqueue_event(cell, atomic, hop); if (err < 0) { snd_seq_cell_free(cell); return err; } return 0; } /* * check validity of event type and data length. * return non-zero if invalid. */ static int check_event_type_and_length(struct snd_seq_event *ev) { switch (snd_seq_ev_length_type(ev)) { case SNDRV_SEQ_EVENT_LENGTH_FIXED: if (snd_seq_ev_is_variable_type(ev)) return -EINVAL; break; case SNDRV_SEQ_EVENT_LENGTH_VARIABLE: if (! snd_seq_ev_is_variable_type(ev) || (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN) return -EINVAL; break; case SNDRV_SEQ_EVENT_LENGTH_VARUSR: if (! snd_seq_ev_is_direct(ev)) return -EINVAL; break; } return 0; } /* handle write() */ /* possible error values: * -ENXIO invalid client or file open mode * -ENOMEM malloc failed * -EFAULT seg. 
fault during copy from user space * -EINVAL invalid event * -EAGAIN no space in output pool * -EINTR interrupts while sleep * -EMLINK too many hops * others depends on return value from driver callback */ static ssize_t snd_seq_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct snd_seq_client *client = file->private_data; int written = 0, len; int err, handled; union __snd_seq_event __event; struct snd_seq_event *ev = &__event.legacy; if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT)) return -ENXIO; /* check client structures are in place */ if (snd_BUG_ON(!client)) return -ENXIO; if (!client->accept_output || client->pool == NULL) return -ENXIO; repeat: handled = 0; /* allocate the pool now if the pool is not allocated yet */ mutex_lock(&client->ioctl_mutex); if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { err = snd_seq_pool_init(client->pool); if (err < 0) goto out; } /* only process whole events */ err = -EINVAL; while (count >= sizeof(struct snd_seq_event)) { /* Read in the event header from the user */ len = sizeof(struct snd_seq_event); if (copy_from_user(ev, buf, len)) { err = -EFAULT; break; } /* read in the rest bytes for UMP events */ if (snd_seq_ev_is_ump(ev)) { if (count < sizeof(struct snd_seq_ump_event)) break; if (copy_from_user((char *)ev + len, buf + len, sizeof(struct snd_seq_ump_event) - len)) { err = -EFAULT; break; } len = sizeof(struct snd_seq_ump_event); } ev->source.client = client->number; /* fill in client number */ /* Check for extension data length */ if (check_event_type_and_length(ev)) { err = -EINVAL; break; } if (!event_is_compatible(client, ev)) { err = -EINVAL; break; } /* check for special events */ if (!snd_seq_ev_is_ump(ev)) { if (ev->type == SNDRV_SEQ_EVENT_NONE) goto __skip_event; else if (snd_seq_ev_is_reserved(ev)) { err = -EINVAL; break; } } if (snd_seq_ev_is_variable(ev)) { int extlen = ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK; if ((size_t)(extlen + len) > count) { /* back out, will get an error this time or next */ err = -EINVAL; break; } /* set user space pointer */ ev->data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR; ev->data.ext.ptr = (char __force *)buf + len; len += extlen; /* increment data length */ } else { #ifdef CONFIG_COMPAT if (client->convert32 && snd_seq_ev_is_varusr(ev)) ev->data.ext.ptr = (void __force *)compat_ptr(ev->data.raw32.d[1]); #endif } /* ok, enqueue it */ err = snd_seq_client_enqueue_event(client, ev, file, !(file->f_flags & O_NONBLOCK), 0, 0, &client->ioctl_mutex); if (err < 0) break; handled++; __skip_event: /* Update pointers and counts */ count -= len; buf += len; written += len; /* let's have a coffee break if too many events are queued */ if (++handled >= 200) { mutex_unlock(&client->ioctl_mutex); goto repeat; } } out: mutex_unlock(&client->ioctl_mutex); return written ? 
written : err; } /* * handle polling */ static __poll_t snd_seq_poll(struct file *file, poll_table * wait) { struct snd_seq_client *client = file->private_data; __poll_t mask = 0; /* check client structures are in place */ if (snd_BUG_ON(!client)) return EPOLLERR; if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) && client->data.user.fifo) { /* check if data is available in the outqueue */ if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait)) mask |= EPOLLIN | EPOLLRDNORM; } if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) { /* check if data is available in the pool */ if (snd_seq_pool_poll_wait(client->pool, file, wait)) mask |= EPOLLOUT | EPOLLWRNORM; } return mask; } /*-----------------------------------------------------*/ static int snd_seq_ioctl_pversion(struct snd_seq_client *client, void *arg) { int *pversion = arg; *pversion = SNDRV_SEQ_VERSION; return 0; } static int snd_seq_ioctl_user_pversion(struct snd_seq_client *client, void *arg) { client->user_pversion = *(unsigned int *)arg; return 0; } static int snd_seq_ioctl_client_id(struct snd_seq_client *client, void *arg) { int *client_id = arg; *client_id = client->number; return 0; } /* SYSTEM_INFO ioctl() */ static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void *arg) { struct snd_seq_system_info *info = arg; memset(info, 0, sizeof(*info)); /* fill the info fields */ info->queues = SNDRV_SEQ_MAX_QUEUES; info->clients = SNDRV_SEQ_MAX_CLIENTS; info->ports = SNDRV_SEQ_MAX_PORTS; info->channels = 256; /* fixed limit */ info->cur_clients = client_usage.cur; info->cur_queues = snd_seq_queue_get_cur_queues(); return 0; } /* RUNNING_MODE ioctl() */ static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void *arg) { struct snd_seq_running_info *info = arg; struct snd_seq_client *cptr __free(snd_seq_client) = NULL; /* requested client number */ cptr = client_load_and_use_ptr(info->client); if (cptr == NULL) return -ENOENT; /* don't change !!! */ #ifdef SNDRV_BIG_ENDIAN if (!info->big_endian) return -EINVAL; #else if (info->big_endian) return -EINVAL; #endif if (info->cpu_mode > sizeof(long)) return -EINVAL; cptr->convert32 = (info->cpu_mode < sizeof(long)); return 0; } /* CLIENT_INFO ioctl() */ static void get_client_info(struct snd_seq_client *cptr, struct snd_seq_client_info *info) { info->client = cptr->number; /* fill the info fields */ info->type = cptr->type; strscpy(info->name, cptr->name); info->filter = cptr->filter; info->event_lost = cptr->event_lost; memcpy(info->event_filter, cptr->event_filter, 32); info->group_filter = cptr->group_filter; info->num_ports = cptr->num_ports; if (cptr->type == USER_CLIENT) info->pid = pid_vnr(cptr->data.user.owner); else info->pid = -1; if (cptr->type == KERNEL_CLIENT) info->card = cptr->data.kernel.card ? cptr->data.kernel.card->number : -1; else info->card = -1; info->midi_version = cptr->midi_version; memset(info->reserved, 0, sizeof(info->reserved)); } static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client, void *arg) { struct snd_seq_client_info *client_info = arg; struct snd_seq_client *cptr __free(snd_seq_client) = NULL; /* requested client number */ cptr = client_load_and_use_ptr(client_info->client); if (cptr == NULL) return -ENOENT; /* don't change !!! 
*/ get_client_info(cptr, client_info); return 0; } /* CLIENT_INFO ioctl() */ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client, void *arg) { struct snd_seq_client_info *client_info = arg; /* it is not allowed to set the info fields for an another client */ if (client->number != client_info->client) return -EPERM; /* also client type must be set now */ if (client->type != client_info->type) return -EINVAL; if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3)) { /* check validity of midi_version field */ if (client_info->midi_version > SNDRV_SEQ_CLIENT_UMP_MIDI_2_0) return -EINVAL; /* check if UMP is supported in kernel */ if (!IS_ENABLED(CONFIG_SND_SEQ_UMP) && client_info->midi_version > 0) return -EINVAL; } /* fill the info fields */ if (client_info->name[0]) strscpy(client->name, client_info->name, sizeof(client->name)); client->filter = client_info->filter; client->event_lost = client_info->event_lost; if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3)) client->midi_version = client_info->midi_version; memcpy(client->event_filter, client_info->event_filter, 32); client->group_filter = client_info->group_filter; /* notify the change */ snd_seq_system_client_ev_client_change(client->number); return 0; } /* * CREATE PORT ioctl() */ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg) { struct snd_seq_port_info *info = arg; struct snd_seq_client_port *port; struct snd_seq_port_callback *callback; int port_idx, err; /* it is not allowed to create the port for an another client */ if (info->addr.client != client->number) return -EPERM; if (client->type == USER_CLIENT && info->kernel) return -EINVAL; if ((info->capability & SNDRV_SEQ_PORT_CAP_UMP_ENDPOINT) && client->ump_endpoint_port >= 0) return -EBUSY; if (info->flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) port_idx = info->addr.port; else port_idx = -1; if (port_idx >= SNDRV_SEQ_ADDRESS_UNKNOWN) return -EINVAL; err = snd_seq_create_port(client, port_idx, &port); if (err < 0) return err; if (client->type == KERNEL_CLIENT) { callback = info->kernel; if (callback) { if (callback->owner) port->owner = callback->owner; port->private_data = callback->private_data; port->private_free = callback->private_free; port->event_input = callback->event_input; port->c_src.open = callback->subscribe; port->c_src.close = callback->unsubscribe; port->c_dest.open = callback->use; port->c_dest.close = callback->unuse; } } info->addr = port->addr; snd_seq_set_port_info(port, info); if (info->capability & SNDRV_SEQ_PORT_CAP_UMP_ENDPOINT) client->ump_endpoint_port = port->addr.port; snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port); snd_seq_port_unlock(port); return 0; } /* * DELETE PORT ioctl() */ static int snd_seq_ioctl_delete_port(struct snd_seq_client *client, void *arg) { struct snd_seq_port_info *info = arg; int err; /* it is not allowed to remove the port for an another client */ if (info->addr.client != client->number) return -EPERM; err = snd_seq_delete_port(client, info->addr.port); if (err >= 0) { if (client->ump_endpoint_port == info->addr.port) client->ump_endpoint_port = -1; snd_seq_system_client_ev_port_exit(client->number, info->addr.port); } return err; } /* * GET_PORT_INFO ioctl() (on any client) */ static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg) { struct snd_seq_port_info *info = arg; struct snd_seq_client *cptr __free(snd_seq_client) = NULL; struct snd_seq_client_port *port __free(snd_seq_port) = NULL; cptr = 
client_load_and_use_ptr(info->addr.client); if (cptr == NULL) return -ENXIO; port = snd_seq_port_use_ptr(cptr, info->addr.port); if (port == NULL) return -ENOENT; /* don't change */ /* get port info */ snd_seq_get_port_info(port, info); return 0; } /* * SET_PORT_INFO ioctl() (only ports on this/own client) */ static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void *arg) { struct snd_seq_port_info *info = arg; struct snd_seq_client_port *port __free(snd_seq_port) = NULL; if (info->addr.client != client->number) /* only set our own ports ! */ return -EPERM; port = snd_seq_port_use_ptr(client, info->addr.port); if (port) { snd_seq_set_port_info(port, info); /* notify the change */ snd_seq_system_client_ev_port_change(info->addr.client, info->addr.port); } return 0; } /* * port subscription (connection) */ #define PERM_RD (SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ) #define PERM_WR (SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE) static int check_subscription_permission(struct snd_seq_client *client, struct snd_seq_client_port *sport, struct snd_seq_client_port *dport, struct snd_seq_port_subscribe *subs) { if (client->number != subs->sender.client && client->number != subs->dest.client) { /* connection by third client - check export permission */ if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT)) return -EPERM; if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT)) return -EPERM; } /* check read permission */ /* if sender or receiver is the subscribing client itself, * no permission check is necessary */ if (client->number != subs->sender.client) { if (! check_port_perm(sport, PERM_RD)) return -EPERM; } /* check write permission */ if (client->number != subs->dest.client) { if (! check_port_perm(dport, PERM_WR)) return -EPERM; } return 0; } /* * send an subscription notify event to user client: * client must be user client. */ int snd_seq_client_notify_subscription(int client, int port, struct snd_seq_port_subscribe *info, int evtype) { struct snd_seq_event event; memset(&event, 0, sizeof(event)); event.type = evtype; event.data.connect.dest = info->dest; event.data.connect.sender = info->sender; return snd_seq_system_notify(client, port, &event, false); /* non-atomic */ } /* * add to port's subscription list IOCTL interface */ static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client, void *arg) { struct snd_seq_port_subscribe *subs = arg; struct snd_seq_client *receiver __free(snd_seq_client) = NULL; struct snd_seq_client *sender __free(snd_seq_client) = NULL; struct snd_seq_client_port *sport __free(snd_seq_port) = NULL; struct snd_seq_client_port *dport __free(snd_seq_port) = NULL; int result; receiver = client_load_and_use_ptr(subs->dest.client); if (!receiver) return -EINVAL; sender = client_load_and_use_ptr(subs->sender.client); if (!sender) return -EINVAL; sport = snd_seq_port_use_ptr(sender, subs->sender.port); if (!sport) return -EINVAL; dport = snd_seq_port_use_ptr(receiver, subs->dest.port); if (!dport) return -EINVAL; result = check_subscription_permission(client, sport, dport, subs); if (result < 0) return result; /* connect them */ result = snd_seq_port_connect(client, sender, sport, receiver, dport, subs); if (! 
result) /* broadcast announce */ snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0, subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED); return result; } /* * remove from port's subscription list */ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client, void *arg) { struct snd_seq_port_subscribe *subs = arg; struct snd_seq_client *receiver __free(snd_seq_client) = NULL; struct snd_seq_client *sender __free(snd_seq_client) = NULL; struct snd_seq_client_port *sport __free(snd_seq_port) = NULL; struct snd_seq_client_port *dport __free(snd_seq_port) = NULL; int result; receiver = snd_seq_client_use_ptr(subs->dest.client); if (!receiver) return -ENXIO; sender = snd_seq_client_use_ptr(subs->sender.client); if (!sender) return -ENXIO; sport = snd_seq_port_use_ptr(sender, subs->sender.port); if (!sport) return -ENXIO; dport = snd_seq_port_use_ptr(receiver, subs->dest.port); if (!dport) return -ENXIO; result = check_subscription_permission(client, sport, dport, subs); if (result < 0) return result; result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, subs); if (! result) /* broadcast announce */ snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0, subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED); return result; } /* CREATE_QUEUE ioctl() */ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_info *info = arg; struct snd_seq_queue *q __free(snd_seq_queue) = NULL; q = snd_seq_queue_alloc(client->number, info->locked, info->flags); if (IS_ERR(q)) return PTR_ERR(q); info->queue = q->queue; info->locked = q->locked; info->owner = q->owner; /* set queue name */ if (!info->name[0]) snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue); strscpy(q->name, info->name, sizeof(q->name)); return 0; } /* DELETE_QUEUE ioctl() */ static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_info *info = arg; return snd_seq_queue_delete(client->number, info->queue); } /* GET_QUEUE_INFO ioctl() */ static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_info *info = arg; struct snd_seq_queue *q __free(snd_seq_queue) = NULL; q = queueptr(info->queue); if (q == NULL) return -EINVAL; memset(info, 0, sizeof(*info)); info->queue = q->queue; info->owner = q->owner; info->locked = q->locked; strscpy(info->name, q->name, sizeof(info->name)); return 0; } /* SET_QUEUE_INFO ioctl() */ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_info *info = arg; struct snd_seq_queue *q __free(snd_seq_queue) = NULL; if (info->owner != client->number) return -EINVAL; /* change owner/locked permission */ if (snd_seq_queue_check_access(info->queue, client->number)) { if (snd_seq_queue_set_owner(info->queue, client->number, info->locked) < 0) return -EPERM; if (info->locked) snd_seq_queue_use(info->queue, client->number, 1); } else { return -EPERM; } q = queueptr(info->queue); if (! 
q) return -EINVAL; if (q->owner != client->number) return -EPERM; strscpy(q->name, info->name, sizeof(q->name)); return 0; } /* GET_NAMED_QUEUE ioctl() */ static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_info *info = arg; struct snd_seq_queue *q __free(snd_seq_queue) = NULL; q = snd_seq_queue_find_name(info->name); if (q == NULL) return -EINVAL; info->queue = q->queue; info->owner = q->owner; info->locked = q->locked; return 0; } /* GET_QUEUE_STATUS ioctl() */ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_status *status = arg; struct snd_seq_queue *queue __free(snd_seq_queue) = NULL; struct snd_seq_timer *tmr; queue = queueptr(status->queue); if (queue == NULL) return -EINVAL; memset(status, 0, sizeof(*status)); status->queue = queue->queue; tmr = queue->timer; status->events = queue->tickq->cells + queue->timeq->cells; status->time = snd_seq_timer_get_cur_time(tmr, true); status->tick = snd_seq_timer_get_cur_tick(tmr); status->running = tmr->running; status->flags = queue->flags; return 0; } /* GET_QUEUE_TEMPO ioctl() */ static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_tempo *tempo = arg; struct snd_seq_queue *queue __free(snd_seq_queue) = NULL; struct snd_seq_timer *tmr; queue = queueptr(tempo->queue); if (queue == NULL) return -EINVAL; memset(tempo, 0, sizeof(*tempo)); tempo->queue = queue->queue; tmr = queue->timer; tempo->tempo = tmr->tempo; tempo->ppq = tmr->ppq; tempo->skew_value = tmr->skew; tempo->skew_base = tmr->skew_base; if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 4)) tempo->tempo_base = tmr->tempo_base; return 0; } /* SET_QUEUE_TEMPO ioctl() */ int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo) { if (!snd_seq_queue_check_access(tempo->queue, client)) return -EPERM; return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo); } EXPORT_SYMBOL(snd_seq_set_queue_tempo); static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_tempo *tempo = arg; int result; if (client->user_pversion < SNDRV_PROTOCOL_VERSION(1, 0, 4)) tempo->tempo_base = 0; result = snd_seq_set_queue_tempo(client->number, tempo); return result < 0 ? 
result : 0; } /* GET_QUEUE_TIMER ioctl() */ static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_timer *timer = arg; struct snd_seq_queue *queue __free(snd_seq_queue) = NULL; struct snd_seq_timer *tmr; queue = queueptr(timer->queue); if (queue == NULL) return -EINVAL; guard(mutex)(&queue->timer_mutex); tmr = queue->timer; memset(timer, 0, sizeof(*timer)); timer->queue = queue->queue; timer->type = tmr->type; if (tmr->type == SNDRV_SEQ_TIMER_ALSA) { timer->u.alsa.id = tmr->alsa_id; timer->u.alsa.resolution = tmr->preferred_resolution; } return 0; } /* SET_QUEUE_TIMER ioctl() */ static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_timer *timer = arg; int result = 0; if (timer->type != SNDRV_SEQ_TIMER_ALSA) return -EINVAL; if (snd_seq_queue_check_access(timer->queue, client->number)) { struct snd_seq_queue *q __free(snd_seq_queue) = NULL; struct snd_seq_timer *tmr; q = queueptr(timer->queue); if (q == NULL) return -ENXIO; guard(mutex)(&q->timer_mutex); tmr = q->timer; snd_seq_queue_timer_close(timer->queue); tmr->type = timer->type; if (tmr->type == SNDRV_SEQ_TIMER_ALSA) { tmr->alsa_id = timer->u.alsa.id; tmr->preferred_resolution = timer->u.alsa.resolution; } result = snd_seq_queue_timer_open(timer->queue); } else { return -EPERM; } return result; } /* GET_QUEUE_CLIENT ioctl() */ static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_client *info = arg; int used; used = snd_seq_queue_is_used(info->queue, client->number); if (used < 0) return -EINVAL; info->used = used; info->client = client->number; return 0; } /* SET_QUEUE_CLIENT ioctl() */ static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_client *info = arg; int err; if (info->used >= 0) { err = snd_seq_queue_use(info->queue, client->number, info->used); if (err < 0) return err; } return snd_seq_ioctl_get_queue_client(client, arg); } /* GET_CLIENT_POOL ioctl() */ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client, void *arg) { struct snd_seq_client_pool *info = arg; struct snd_seq_client *cptr __free(snd_seq_client) = NULL; cptr = client_load_and_use_ptr(info->client); if (cptr == NULL) return -ENOENT; memset(info, 0, sizeof(*info)); info->client = cptr->number; info->output_pool = cptr->pool->size; info->output_room = cptr->pool->room; info->output_free = info->output_pool; info->output_free = snd_seq_unused_cells(cptr->pool); if (cptr->type == USER_CLIENT) { info->input_pool = cptr->data.user.fifo_pool_size; info->input_free = info->input_pool; info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo); } else { info->input_pool = 0; info->input_free = 0; } return 0; } /* SET_CLIENT_POOL ioctl() */ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client, void *arg) { struct snd_seq_client_pool *info = arg; int rc; if (client->number != info->client) return -EINVAL; /* can't change other clients */ if (info->output_pool >= 1 && info->output_pool <= SNDRV_SEQ_MAX_EVENTS && (! snd_seq_write_pool_allocated(client) || info->output_pool != client->pool->size)) { if (snd_seq_write_pool_allocated(client)) { /* is the pool in use? 
*/ if (atomic_read(&client->pool->counter)) return -EBUSY; /* remove all existing cells */ snd_seq_pool_mark_closing(client->pool); snd_seq_pool_done(client->pool); } client->pool->size = info->output_pool; rc = snd_seq_pool_init(client->pool); if (rc < 0) return rc; } if (client->type == USER_CLIENT && client->data.user.fifo != NULL && info->input_pool >= 1 && info->input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS && info->input_pool != client->data.user.fifo_pool_size) { /* change pool size */ rc = snd_seq_fifo_resize(client->data.user.fifo, info->input_pool); if (rc < 0) return rc; client->data.user.fifo_pool_size = info->input_pool; } if (info->output_room >= 1 && info->output_room <= client->pool->size) { client->pool->room = info->output_room; } return snd_seq_ioctl_get_client_pool(client, arg); } /* REMOVE_EVENTS ioctl() */ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client, void *arg) { struct snd_seq_remove_events *info = arg; /* * Input mostly not implemented XXX. */ if (info->remove_mode & SNDRV_SEQ_REMOVE_INPUT) { /* * No restrictions so for a user client we can clear * the whole fifo */ if (client->type == USER_CLIENT && client->data.user.fifo) snd_seq_fifo_clear(client->data.user.fifo); } if (info->remove_mode & SNDRV_SEQ_REMOVE_OUTPUT) snd_seq_queue_remove_cells(client->number, info); return 0; } /* * get subscription info */ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client, void *arg) { struct snd_seq_port_subscribe *subs = arg; struct snd_seq_client *sender __free(snd_seq_client) = NULL; struct snd_seq_client_port *sport __free(snd_seq_port) = NULL; sender = client_load_and_use_ptr(subs->sender.client); if (!sender) return -EINVAL; sport = snd_seq_port_use_ptr(sender, subs->sender.port); if (!sport) return -EINVAL; return snd_seq_port_get_subscription(&sport->c_src, &subs->dest, subs); } /* * get subscription info - check only its presence */ static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg) { struct snd_seq_query_subs *subs = arg; struct snd_seq_client *cptr __free(snd_seq_client) = NULL; struct snd_seq_client_port *port __free(snd_seq_port) = NULL; struct snd_seq_port_subs_info *group; struct list_head *p; int i; cptr = client_load_and_use_ptr(subs->root.client); if (!cptr) return -ENXIO; port = snd_seq_port_use_ptr(cptr, subs->root.port); if (!port) return -ENXIO; switch (subs->type) { case SNDRV_SEQ_QUERY_SUBS_READ: group = &port->c_src; break; case SNDRV_SEQ_QUERY_SUBS_WRITE: group = &port->c_dest; break; default: return -ENXIO; } guard(rwsem_read)(&group->list_mutex); /* search for the subscriber */ subs->num_subs = group->count; i = 0; list_for_each(p, &group->list_head) { if (i++ == subs->index) { /* found! 
*/ struct snd_seq_subscribers *s; if (subs->type == SNDRV_SEQ_QUERY_SUBS_READ) { s = list_entry(p, struct snd_seq_subscribers, src_list); subs->addr = s->info.dest; } else { s = list_entry(p, struct snd_seq_subscribers, dest_list); subs->addr = s->info.sender; } subs->flags = s->info.flags; subs->queue = s->info.queue; return 0; } } return -ENOENT; } /* * query next client */ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client, void *arg) { struct snd_seq_client_info *info = arg; struct snd_seq_client *cptr __free(snd_seq_client) = NULL; /* search for next client */ if (info->client < INT_MAX) info->client++; if (info->client < 0) info->client = 0; for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) { cptr = client_load_and_use_ptr(info->client); if (cptr) { get_client_info(cptr, info); return 0; /* found */ } } return -ENOENT; } /* * query next port */ static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client, void *arg) { struct snd_seq_port_info *info = arg; struct snd_seq_client *cptr __free(snd_seq_client) = NULL; struct snd_seq_client_port *port __free(snd_seq_port) = NULL; cptr = client_load_and_use_ptr(info->addr.client); if (cptr == NULL) return -ENXIO; /* search for next port */ info->addr.port++; port = snd_seq_port_query_nearest(cptr, info); if (port == NULL) return -ENOENT; /* get port info */ info->addr = port->addr; snd_seq_get_port_info(port, info); return 0; } #if IS_ENABLED(CONFIG_SND_SEQ_UMP) #define NUM_UMP_INFOS (SNDRV_UMP_MAX_BLOCKS + 1) static void free_ump_info(struct snd_seq_client *client) { int i; if (!client->ump_info) return; for (i = 0; i < NUM_UMP_INFOS; i++) kfree(client->ump_info[i]); kfree(client->ump_info); client->ump_info = NULL; } static void terminate_ump_info_strings(void *p, int type) { if (type == SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT) { struct snd_ump_endpoint_info *ep = p; ep->name[sizeof(ep->name) - 1] = 0; } else { struct snd_ump_block_info *bp = p; bp->name[sizeof(bp->name) - 1] = 0; } } #ifdef CONFIG_SND_PROC_FS static void dump_ump_info(struct snd_info_buffer *buffer, struct snd_seq_client *client) { struct snd_ump_endpoint_info *ep; struct snd_ump_block_info *bp; int i; if (!client->ump_info) return; ep = client->ump_info[SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT]; if (ep && *ep->name) snd_iprintf(buffer, " UMP Endpoint: \"%s\"\n", ep->name); for (i = 0; i < SNDRV_UMP_MAX_BLOCKS; i++) { bp = client->ump_info[i + 1]; if (bp && *bp->name) { snd_iprintf(buffer, " UMP Block %d: \"%s\" [%s]\n", i, bp->name, bp->active ? 
"Active" : "Inactive"); snd_iprintf(buffer, " Groups: %d-%d\n", bp->first_group + 1, bp->first_group + bp->num_groups); } } } #endif /* UMP-specific ioctls -- called directly without data copy */ static int snd_seq_ioctl_client_ump_info(struct snd_seq_client *caller, unsigned int cmd, unsigned long arg) { struct snd_seq_client_ump_info __user *argp = (struct snd_seq_client_ump_info __user *)arg; struct snd_seq_client *cptr __free(snd_seq_client) = NULL; int client, type, err = 0; size_t size; void *p; if (get_user(client, &argp->client) || get_user(type, &argp->type)) return -EFAULT; if (cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO && caller->number != client) return -EPERM; if (type < 0 || type >= NUM_UMP_INFOS) return -EINVAL; if (type == SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT) size = sizeof(struct snd_ump_endpoint_info); else size = sizeof(struct snd_ump_block_info); cptr = client_load_and_use_ptr(client); if (!cptr) return -ENOENT; scoped_guard(mutex, &cptr->ioctl_mutex) { if (!cptr->midi_version) { err = -EBADFD; break; } if (cmd == SNDRV_SEQ_IOCTL_GET_CLIENT_UMP_INFO) { if (!cptr->ump_info) p = NULL; else p = cptr->ump_info[type]; if (!p) { err = -ENODEV; break; } if (copy_to_user(argp->info, p, size)) { err = -EFAULT; break; } } else { if (cptr->type != USER_CLIENT) { err = -EBADFD; break; } if (!cptr->ump_info) { cptr->ump_info = kcalloc(NUM_UMP_INFOS, sizeof(void *), GFP_KERNEL); if (!cptr->ump_info) { err = -ENOMEM; break; } } p = memdup_user(argp->info, size); if (IS_ERR(p)) { err = PTR_ERR(p); break; } kfree(cptr->ump_info[type]); terminate_ump_info_strings(p, type); cptr->ump_info[type] = p; } } if (!err && cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO) { if (type == SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT) snd_seq_system_ump_notify(client, 0, SNDRV_SEQ_EVENT_UMP_EP_CHANGE, false); else snd_seq_system_ump_notify(client, type - 1, SNDRV_SEQ_EVENT_UMP_BLOCK_CHANGE, false); } return err; } #endif /* -------------------------------------------------------- */ static const struct ioctl_handler { unsigned int cmd; int (*func)(struct snd_seq_client *client, void *arg); } ioctl_handlers[] = { { SNDRV_SEQ_IOCTL_PVERSION, snd_seq_ioctl_pversion }, { SNDRV_SEQ_IOCTL_USER_PVERSION, snd_seq_ioctl_user_pversion }, { SNDRV_SEQ_IOCTL_CLIENT_ID, snd_seq_ioctl_client_id }, { SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info }, { SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode }, { SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info }, { SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info }, { SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port }, { SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port }, { SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info }, { SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info }, { SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port }, { SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port }, { SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue }, { SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue }, { SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info }, { SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info }, { SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue }, { SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status }, { SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo }, { SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo }, { SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer }, { 
SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer }, { SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client }, { SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client }, { SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool }, { SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool }, { SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription }, { SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client }, { SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port }, { SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events }, { SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs }, { 0, NULL }, }; static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_seq_client *client = file->private_data; /* To use kernel stack for ioctl data. */ union { int pversion; int client_id; struct snd_seq_system_info system_info; struct snd_seq_running_info running_info; struct snd_seq_client_info client_info; struct snd_seq_port_info port_info; struct snd_seq_port_subscribe port_subscribe; struct snd_seq_queue_info queue_info; struct snd_seq_queue_status queue_status; struct snd_seq_queue_tempo tempo; struct snd_seq_queue_timer queue_timer; struct snd_seq_queue_client queue_client; struct snd_seq_client_pool client_pool; struct snd_seq_remove_events remove_events; struct snd_seq_query_subs query_subs; } buf; const struct ioctl_handler *handler; unsigned long size; int err; if (snd_BUG_ON(!client)) return -ENXIO; #if IS_ENABLED(CONFIG_SND_SEQ_UMP) /* exception - handling large data */ switch (cmd) { case SNDRV_SEQ_IOCTL_GET_CLIENT_UMP_INFO: case SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO: return snd_seq_ioctl_client_ump_info(client, cmd, arg); } #endif for (handler = ioctl_handlers; handler->cmd > 0; ++handler) { if (handler->cmd == cmd) break; } if (handler->cmd == 0) return -ENOTTY; memset(&buf, 0, sizeof(buf)); /* * All of ioctl commands for ALSA sequencer get an argument of size * within 13 bits. We can safely pick up the size from the command. */ size = _IOC_SIZE(handler->cmd); if (handler->cmd & IOC_IN) { if (copy_from_user(&buf, (const void __user *)arg, size)) return -EFAULT; } scoped_guard(mutex, &client->ioctl_mutex) { err = handler->func(client, &buf); } if (err >= 0) { /* Some commands includes a bug in 'dir' field. */ if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT || handler->cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_POOL || (handler->cmd & IOC_OUT)) if (copy_to_user((void __user *)arg, &buf, size)) return -EFAULT; } return err; } #ifdef CONFIG_COMPAT #include "seq_compat.c" #else #define snd_seq_ioctl_compat NULL #endif /* -------------------------------------------------------- */ /* exported to kernel modules */ int snd_seq_create_kernel_client(struct snd_card *card, int client_index, const char *name_fmt, ...) 
{ struct snd_seq_client *client; va_list args; if (snd_BUG_ON(in_interrupt())) return -EBUSY; if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD) return -EINVAL; if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS) return -EINVAL; scoped_guard(mutex, &register_mutex) { if (card) { client_index += SNDRV_SEQ_GLOBAL_CLIENTS + card->number * SNDRV_SEQ_CLIENTS_PER_CARD; if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) client_index = -1; } /* empty write queue as default */ client = seq_create_client1(client_index, 0); if (client == NULL) return -EBUSY; /* failure code */ usage_alloc(&client_usage, 1); client->accept_input = 1; client->accept_output = 1; client->data.kernel.card = card; client->user_pversion = SNDRV_SEQ_VERSION; va_start(args, name_fmt); vsnprintf(client->name, sizeof(client->name), name_fmt, args); va_end(args); client->type = KERNEL_CLIENT; } /* make others aware this new client */ snd_seq_system_client_ev_client_start(client->number); /* return client number to caller */ return client->number; } EXPORT_SYMBOL(snd_seq_create_kernel_client); /* exported to kernel modules */ int snd_seq_delete_kernel_client(int client) { struct snd_seq_client *ptr; if (snd_BUG_ON(in_interrupt())) return -EBUSY; ptr = clientptr(client); if (ptr == NULL) return -EINVAL; seq_free_client(ptr); kfree(ptr); return 0; } EXPORT_SYMBOL(snd_seq_delete_kernel_client); /* * exported, called by kernel clients to enqueue events (w/o blocking) * * RETURN VALUE: zero if succeed, negative if error */ int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev, struct file *file, bool blocking) { struct snd_seq_client *cptr __free(snd_seq_client) = NULL; if (snd_BUG_ON(!ev)) return -EINVAL; if (!snd_seq_ev_is_ump(ev)) { if (ev->type == SNDRV_SEQ_EVENT_NONE) return 0; /* ignore this */ if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR) return -EINVAL; /* quoted events can't be enqueued */ } /* fill in client number */ ev->source.client = client; if (check_event_type_and_length(ev)) return -EINVAL; cptr = client_load_and_use_ptr(client); if (cptr == NULL) return -EINVAL; if (!cptr->accept_output) { return -EPERM; } else { /* send it */ guard(mutex)(&cptr->ioctl_mutex); return snd_seq_client_enqueue_event(cptr, ev, file, blocking, false, 0, &cptr->ioctl_mutex); } } EXPORT_SYMBOL(snd_seq_kernel_client_enqueue); /* * exported, called by kernel clients to dispatch events directly to other * clients, bypassing the queues. Event time-stamp will be updated. 
* * RETURN VALUE: negative = delivery failed, * zero, or positive: the number of delivered events */ int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev, int atomic, int hop) { struct snd_seq_client *cptr __free(snd_seq_client) = NULL; if (snd_BUG_ON(!ev)) return -EINVAL; /* fill in client number */ ev->queue = SNDRV_SEQ_QUEUE_DIRECT; ev->source.client = client; if (check_event_type_and_length(ev)) return -EINVAL; cptr = snd_seq_client_use_ptr(client); if (cptr == NULL) return -EINVAL; if (!cptr->accept_output) return -EPERM; else return snd_seq_deliver_event(cptr, ev, atomic, hop); } EXPORT_SYMBOL(snd_seq_kernel_client_dispatch); static int call_seq_client_ctl(struct snd_seq_client *client, unsigned int cmd, void *arg) { const struct ioctl_handler *handler; for (handler = ioctl_handlers; handler->cmd > 0; ++handler) { if (handler->cmd == cmd) return handler->func(client, arg); } pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n", cmd, _IOC_TYPE(cmd), _IOC_NR(cmd)); return -ENOTTY; } /** * snd_seq_kernel_client_ctl - operate a command for a client with data in * kernel space. * @clientid: A numerical ID for a client. * @cmd: An ioctl(2) command for ALSA sequencer operation. * @arg: A pointer to data in kernel space. * * Against its name, both kernel/application client can be handled by this * kernel API. A pointer of 'arg' argument should be in kernel space. * * Return: 0 at success. Negative error code at failure. */ int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg) { struct snd_seq_client *client; client = clientptr(clientid); if (client == NULL) return -ENXIO; return call_seq_client_ctl(client, cmd, arg); } EXPORT_SYMBOL(snd_seq_kernel_client_ctl); /* a similar like above but taking locks; used only from OSS sequencer layer */ int snd_seq_kernel_client_ioctl(int clientid, unsigned int cmd, void *arg) { struct snd_seq_client *client __free(snd_seq_client) = NULL; client = client_load_and_use_ptr(clientid); if (!client) return -ENXIO; guard(mutex)(&client->ioctl_mutex); return call_seq_client_ctl(client, cmd, arg); } EXPORT_SYMBOL_GPL(snd_seq_kernel_client_ioctl); /* exported (for OSS emulator) */ int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait) { struct snd_seq_client *client; client = clientptr(clientid); if (client == NULL) return -ENXIO; if (snd_seq_pool_poll_wait(client->pool, file, wait)) return 1; return 0; } EXPORT_SYMBOL(snd_seq_kernel_client_write_poll); /* get a sequencer client object; for internal use from a kernel client */ struct snd_seq_client *snd_seq_kernel_client_get(int id) { return snd_seq_client_use_ptr(id); } EXPORT_SYMBOL_GPL(snd_seq_kernel_client_get); /* put a sequencer client object; for internal use from a kernel client */ void snd_seq_kernel_client_put(struct snd_seq_client *cptr) { if (cptr) snd_seq_client_unref(cptr); } EXPORT_SYMBOL_GPL(snd_seq_kernel_client_put); /*---------------------------------------------------------------------------*/ #ifdef CONFIG_SND_PROC_FS /* * /proc interface */ static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer, struct snd_seq_port_subs_info *group, int is_src, char *msg) { struct list_head *p; struct snd_seq_subscribers *s; int count = 0; guard(rwsem_read)(&group->list_mutex); if (list_empty(&group->list_head)) return; snd_iprintf(buffer, msg); list_for_each(p, &group->list_head) { if (is_src) s = list_entry(p, struct snd_seq_subscribers, src_list); else s = list_entry(p, struct snd_seq_subscribers, 
dest_list); if (count++) snd_iprintf(buffer, ", "); snd_iprintf(buffer, "%d:%d", is_src ? s->info.dest.client : s->info.sender.client, is_src ? s->info.dest.port : s->info.sender.port); if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP) snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue); if (group->exclusive) snd_iprintf(buffer, "[ex]"); } snd_iprintf(buffer, "\n"); } #define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-') #define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-') #define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e') #define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-') static const char *port_direction_name(unsigned char dir) { static const char *names[4] = { "-", "In", "Out", "In/Out" }; if (dir > SNDRV_SEQ_PORT_DIR_BIDIRECTION) return "Invalid"; return names[dir]; } static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer, struct snd_seq_client *client) { struct snd_seq_client_port *p; guard(mutex)(&client->ports_mutex); list_for_each_entry(p, &client->ports_list_head, list) { if (p->capability & SNDRV_SEQ_PORT_CAP_INACTIVE) continue; snd_iprintf(buffer, " Port %3d : \"%s\" (%c%c%c%c) [%s]", p->addr.port, p->name, FLAG_PERM_RD(p->capability), FLAG_PERM_WR(p->capability), FLAG_PERM_EX(p->capability), FLAG_PERM_DUPLEX(p->capability), port_direction_name(p->direction)); #if IS_ENABLED(CONFIG_SND_SEQ_UMP) if (snd_seq_client_is_midi2(client) && p->is_midi1) snd_iprintf(buffer, " [MIDI1]"); #endif snd_iprintf(buffer, "\n"); snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, " Connecting To: "); snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, " Connected From: "); } } static const char *midi_version_string(unsigned int version) { switch (version) { case SNDRV_SEQ_CLIENT_LEGACY_MIDI: return "Legacy"; case SNDRV_SEQ_CLIENT_UMP_MIDI_1_0: return "UMP MIDI1"; case SNDRV_SEQ_CLIENT_UMP_MIDI_2_0: return "UMP MIDI2"; default: return "Unknown"; } } /* exported to seq_info.c */ void snd_seq_info_clients_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { int c; snd_iprintf(buffer, "Client info\n"); snd_iprintf(buffer, " cur clients : %d\n", client_usage.cur); snd_iprintf(buffer, " peak clients : %d\n", client_usage.peak); snd_iprintf(buffer, " max clients : %d\n", SNDRV_SEQ_MAX_CLIENTS); snd_iprintf(buffer, "\n"); /* list the client table */ for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) { struct snd_seq_client *client __free(snd_seq_client) = NULL; client = client_load_and_use_ptr(c); if (client == NULL) continue; if (client->type == NO_CLIENT) continue; guard(mutex)(&client->ioctl_mutex); snd_iprintf(buffer, "Client %3d : \"%s\" [%s %s]\n", c, client->name, client->type == USER_CLIENT ? 
"User" : "Kernel", midi_version_string(client->midi_version)); #if IS_ENABLED(CONFIG_SND_SEQ_UMP) dump_ump_info(buffer, client); #endif snd_seq_info_dump_ports(buffer, client); if (snd_seq_write_pool_allocated(client)) { snd_iprintf(buffer, " Output pool :\n"); snd_seq_info_pool(buffer, client->pool, " "); } if (client->type == USER_CLIENT && client->data.user.fifo && client->data.user.fifo->pool) { snd_iprintf(buffer, " Input pool :\n"); snd_seq_info_pool(buffer, client->data.user.fifo->pool, " "); } } } #endif /* CONFIG_SND_PROC_FS */ /*---------------------------------------------------------------------------*/ /* * REGISTRATION PART */ static const struct file_operations snd_seq_f_ops = { .owner = THIS_MODULE, .read = snd_seq_read, .write = snd_seq_write, .open = snd_seq_open, .release = snd_seq_release, .poll = snd_seq_poll, .unlocked_ioctl = snd_seq_ioctl, .compat_ioctl = snd_seq_ioctl_compat, }; static struct device *seq_dev; /* * register sequencer device */ int __init snd_sequencer_device_init(void) { int err; err = snd_device_alloc(&seq_dev, NULL); if (err < 0) return err; dev_set_name(seq_dev, "seq"); scoped_guard(mutex, &register_mutex) { err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0, &snd_seq_f_ops, NULL, seq_dev); } if (err < 0) { put_device(seq_dev); return err; } return 0; } /* * unregister sequencer device */ void snd_sequencer_device_done(void) { snd_unregister_device(seq_dev); put_device(seq_dev); }
// SPDX-License-Identifier: GPL-2.0 /* * fs/sysfs/file.c - sysfs regular (text) file implementation * * Copyright (c) 2001-3
Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * * Please see Documentation/filesystems/sysfs.rst for more information. */ #include <linux/module.h> #include <linux/kobject.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include <linux/mm.h> #include "sysfs.h" static struct kobject *sysfs_file_kobj(struct kernfs_node *kn) { guard(rcu)(); return rcu_dereference(kn->__parent)->priv; } /* * Determine ktype->sysfs_ops for the given kernfs_node. This function * must be called while holding an active reference. */ static const struct sysfs_ops *sysfs_file_ops(struct kernfs_node *kn) { struct kobject *kobj = sysfs_file_kobj(kn); if (kn->flags & KERNFS_LOCKDEP) lockdep_assert_held(kn); return kobj->ktype ? kobj->ktype->sysfs_ops : NULL; } /* * Reads on sysfs are handled through seq_file, which takes care of hairy * details like buffering and seeking. The following function pipes * sysfs_ops->show() result through seq_file. */ static int sysfs_kf_seq_show(struct seq_file *sf, void *v) { struct kernfs_open_file *of = sf->private; struct kobject *kobj = sysfs_file_kobj(of->kn); const struct sysfs_ops *ops = sysfs_file_ops(of->kn); ssize_t count; char *buf; if (WARN_ON_ONCE(!ops->show)) return -EINVAL; /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */ count = seq_get_buf(sf, &buf); if (count < PAGE_SIZE) { seq_commit(sf, -1); return 0; } memset(buf, 0, PAGE_SIZE); count = ops->show(kobj, of->kn->priv, buf); if (count < 0) return count; /* * The code works fine with PAGE_SIZE return but it's likely to * indicate truncated result or overflow in normal use cases. */ if (count >= (ssize_t)PAGE_SIZE) { printk("fill_read_buffer: %pS returned bad count\n", ops->show); /* Try to struggle along */ count = PAGE_SIZE - 1; } seq_commit(sf, count); return 0; } static ssize_t sysfs_kf_bin_read(struct kernfs_open_file *of, char *buf, size_t count, loff_t pos) { const struct bin_attribute *battr = of->kn->priv; struct kobject *kobj = sysfs_file_kobj(of->kn); loff_t size = file_inode(of->file)->i_size; if (!count) return 0; if (size) { if (pos >= size) return 0; if (pos + count > size) count = size - pos; } if (!battr->read) return -EIO; return battr->read(of->file, kobj, battr, buf, pos, count); } /* kernfs read callback for regular sysfs files with pre-alloc */ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf, size_t count, loff_t pos) { const struct sysfs_ops *ops = sysfs_file_ops(of->kn); struct kobject *kobj = sysfs_file_kobj(of->kn); ssize_t len; /* * If buf != of->prealloc_buf, we don't know how * large it is, so cannot safely pass it to ->show */ if (WARN_ON_ONCE(buf != of->prealloc_buf)) return 0; len = ops->show(kobj, of->kn->priv, buf); if (len < 0) return len; if (pos) { if (len <= pos) return 0; len -= pos; memmove(buf, buf + pos, len); } return min_t(ssize_t, count, len); } /* kernfs write callback for regular sysfs files */ static ssize_t sysfs_kf_write(struct kernfs_open_file *of, char *buf, size_t count, loff_t pos) { const struct sysfs_ops *ops = sysfs_file_ops(of->kn); struct kobject *kobj = sysfs_file_kobj(of->kn); if (!count) return 0; return ops->store(kobj, of->kn->priv, buf, count); } /* kernfs write callback for bin sysfs files */ static ssize_t sysfs_kf_bin_write(struct kernfs_open_file *of, char *buf, size_t count, loff_t pos) { const struct bin_attribute *battr = of->kn->priv; struct kobject *kobj = sysfs_file_kobj(of->kn); loff_t size = 
file_inode(of->file)->i_size; if (size) { if (size <= pos) return -EFBIG; count = min_t(ssize_t, count, size - pos); } if (!count) return 0; if (!battr->write) return -EIO; return battr->write(of->file, kobj, battr, buf, pos, count); } static int sysfs_kf_bin_mmap(struct kernfs_open_file *of, struct vm_area_struct *vma) { const struct bin_attribute *battr = of->kn->priv; struct kobject *kobj = sysfs_file_kobj(of->kn); return battr->mmap(of->file, kobj, battr, vma); } static loff_t sysfs_kf_bin_llseek(struct kernfs_open_file *of, loff_t offset, int whence) { const struct bin_attribute *battr = of->kn->priv; struct kobject *kobj = sysfs_file_kobj(of->kn); if (battr->llseek) return battr->llseek(of->file, kobj, battr, offset, whence); else return generic_file_llseek(of->file, offset, whence); } static int sysfs_kf_bin_open(struct kernfs_open_file *of) { const struct bin_attribute *battr = of->kn->priv; if (battr->f_mapping) of->file->f_mapping = battr->f_mapping(); return 0; } void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { struct kernfs_node *kn = kobj->sd, *tmp; if (kn && dir) kn = kernfs_find_and_get(kn, dir); else kernfs_get(kn); if (kn && attr) { tmp = kernfs_find_and_get(kn, attr); kernfs_put(kn); kn = tmp; } if (kn) { kernfs_notify(kn); kernfs_put(kn); } } EXPORT_SYMBOL_GPL(sysfs_notify); static const struct kernfs_ops sysfs_file_kfops_empty = { }; static const struct kernfs_ops sysfs_file_kfops_ro = { .seq_show = sysfs_kf_seq_show, }; static const struct kernfs_ops sysfs_file_kfops_wo = { .write = sysfs_kf_write, }; static const struct kernfs_ops sysfs_file_kfops_rw = { .seq_show = sysfs_kf_seq_show, .write = sysfs_kf_write, }; static const struct kernfs_ops sysfs_prealloc_kfops_ro = { .read = sysfs_kf_read, .prealloc = true, }; static const struct kernfs_ops sysfs_prealloc_kfops_wo = { .write = sysfs_kf_write, .prealloc = true, }; static const struct kernfs_ops sysfs_prealloc_kfops_rw = { .read = sysfs_kf_read, .write = sysfs_kf_write, .prealloc = true, }; static const struct kernfs_ops sysfs_bin_kfops_ro = { .read = sysfs_kf_bin_read, }; static const struct kernfs_ops sysfs_bin_kfops_wo = { .write = sysfs_kf_bin_write, }; static const struct kernfs_ops sysfs_bin_kfops_rw = { .read = sysfs_kf_bin_read, .write = sysfs_kf_bin_write, }; static const struct kernfs_ops sysfs_bin_kfops_mmap = { .read = sysfs_kf_bin_read, .write = sysfs_kf_bin_write, .mmap = sysfs_kf_bin_mmap, .open = sysfs_kf_bin_open, .llseek = sysfs_kf_bin_llseek, }; int sysfs_add_file_mode_ns(struct kernfs_node *parent, const struct attribute *attr, umode_t mode, kuid_t uid, kgid_t gid, const void *ns) { struct kobject *kobj = parent->priv; const struct sysfs_ops *sysfs_ops = kobj->ktype->sysfs_ops; struct lock_class_key *key = NULL; const struct kernfs_ops *ops = NULL; struct kernfs_node *kn; /* every kobject with an attribute needs a ktype assigned */ if (WARN(!sysfs_ops, KERN_ERR "missing sysfs attribute operations for kobject: %s\n", kobject_name(kobj))) return -EINVAL; if (mode & SYSFS_PREALLOC) { if (sysfs_ops->show && sysfs_ops->store) ops = &sysfs_prealloc_kfops_rw; else if (sysfs_ops->show) ops = &sysfs_prealloc_kfops_ro; else if (sysfs_ops->store) ops = &sysfs_prealloc_kfops_wo; } else { if (sysfs_ops->show && sysfs_ops->store) ops = &sysfs_file_kfops_rw; else if (sysfs_ops->show) ops = &sysfs_file_kfops_ro; else if (sysfs_ops->store) ops = &sysfs_file_kfops_wo; } if (!ops) ops = &sysfs_file_kfops_empty; #ifdef CONFIG_DEBUG_LOCK_ALLOC if (!attr->ignore_lockdep) key = attr->key ?: 
(struct lock_class_key *)&attr->skey; #endif kn = __kernfs_create_file(parent, attr->name, mode & 0777, uid, gid, PAGE_SIZE, ops, (void *)attr, ns, key); if (IS_ERR(kn)) { if (PTR_ERR(kn) == -EEXIST) sysfs_warn_dup(parent, attr->name); return PTR_ERR(kn); } return 0; } int sysfs_add_bin_file_mode_ns(struct kernfs_node *parent, const struct bin_attribute *battr, umode_t mode, size_t size, kuid_t uid, kgid_t gid, const void *ns) { const struct attribute *attr = &battr->attr; struct lock_class_key *key = NULL; const struct kernfs_ops *ops; struct kernfs_node *kn; if (battr->mmap) ops = &sysfs_bin_kfops_mmap; else if (battr->read && battr->write) ops = &sysfs_bin_kfops_rw; else if (battr->read) ops = &sysfs_bin_kfops_ro; else if (battr->write) ops = &sysfs_bin_kfops_wo; else ops = &sysfs_file_kfops_empty; #ifdef CONFIG_DEBUG_LOCK_ALLOC if (!attr->ignore_lockdep) key = attr->key ?: (struct lock_class_key *)&attr->skey; #endif kn = __kernfs_create_file(parent, attr->name, mode & 0777, uid, gid, size, ops, (void *)attr, ns, key); if (IS_ERR(kn)) { if (PTR_ERR(kn) == -EEXIST) sysfs_warn_dup(parent, attr->name); return PTR_ERR(kn); } return 0; } /** * sysfs_create_file_ns - create an attribute file for an object with custom ns * @kobj: object we're creating for * @attr: attribute descriptor * @ns: namespace the new file should belong to */ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) { kuid_t uid; kgid_t gid; if (WARN_ON(!kobj || !kobj->sd || !attr)) return -EINVAL; kobject_get_ownership(kobj, &uid, &gid); return sysfs_add_file_mode_ns(kobj->sd, attr, attr->mode, uid, gid, ns); } EXPORT_SYMBOL_GPL(sysfs_create_file_ns); int sysfs_create_files(struct kobject *kobj, const struct attribute * const *ptr) { int err = 0; int i; for (i = 0; ptr[i] && !err; i++) err = sysfs_create_file(kobj, ptr[i]); if (err) while (--i >= 0) sysfs_remove_file(kobj, ptr[i]); return err; } EXPORT_SYMBOL_GPL(sysfs_create_files); /** * sysfs_add_file_to_group - add an attribute file to a pre-existing group. * @kobj: object we're acting for. * @attr: attribute descriptor. * @group: group name. */ int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group) { struct kernfs_node *parent; kuid_t uid; kgid_t gid; int error; if (group) { parent = kernfs_find_and_get(kobj->sd, group); } else { parent = kobj->sd; kernfs_get(parent); } if (!parent) return -ENOENT; kobject_get_ownership(kobj, &uid, &gid); error = sysfs_add_file_mode_ns(parent, attr, attr->mode, uid, gid, NULL); kernfs_put(parent); return error; } EXPORT_SYMBOL_GPL(sysfs_add_file_to_group); /** * sysfs_chmod_file - update the modified mode value on an object attribute. * @kobj: object we're acting for. * @attr: attribute descriptor. * @mode: file permissions. * */ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode) { struct kernfs_node *kn; struct iattr newattrs; int rc; kn = kernfs_find_and_get(kobj->sd, attr->name); if (!kn) return -ENOENT; newattrs.ia_mode = (mode & S_IALLUGO) | (kn->mode & ~S_IALLUGO); newattrs.ia_valid = ATTR_MODE; rc = kernfs_setattr(kn, &newattrs); kernfs_put(kn); return rc; } EXPORT_SYMBOL_GPL(sysfs_chmod_file); /** * sysfs_break_active_protection - break "active" protection * @kobj: The kernel object @attr is associated with. * @attr: The attribute to break the "active" protection for. 
* * With sysfs, just like kernfs, deletion of an attribute is postponed until * all active .show() and .store() callbacks have finished unless this function * is called. Hence this function is useful in methods that implement self * deletion. */ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, const struct attribute *attr) { struct kernfs_node *kn; kobject_get(kobj); kn = kernfs_find_and_get(kobj->sd, attr->name); if (kn) kernfs_break_active_protection(kn); else kobject_put(kobj); return kn; } EXPORT_SYMBOL_GPL(sysfs_break_active_protection); /** * sysfs_unbreak_active_protection - restore "active" protection * @kn: Pointer returned by sysfs_break_active_protection(). * * Undo the effects of sysfs_break_active_protection(). Since this function * calls kernfs_put() on the kernfs node that corresponds to the 'attr' * argument passed to sysfs_break_active_protection() that attribute may have * been removed between the sysfs_break_active_protection() and * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after * this function has returned. */ void sysfs_unbreak_active_protection(struct kernfs_node *kn) { struct kobject *kobj = sysfs_file_kobj(kn); kernfs_unbreak_active_protection(kn); kernfs_put(kn); kobject_put(kobj); } EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection); /** * sysfs_remove_file_ns - remove an object attribute with a custom ns tag * @kobj: object we're acting for * @attr: attribute descriptor * @ns: namespace tag of the file to remove * * Hash the attribute name and namespace tag and kill the victim. */ void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) { struct kernfs_node *parent = kobj->sd; kernfs_remove_by_name_ns(parent, attr->name, ns); } EXPORT_SYMBOL_GPL(sysfs_remove_file_ns); /** * sysfs_remove_file_self - remove an object attribute from its own method * @kobj: object we're acting for * @attr: attribute descriptor * * See kernfs_remove_self() for details. */ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr) { struct kernfs_node *parent = kobj->sd; struct kernfs_node *kn; bool ret; kn = kernfs_find_and_get(parent, attr->name); if (WARN_ON_ONCE(!kn)) return false; ret = kernfs_remove_self(kn); kernfs_put(kn); return ret; } EXPORT_SYMBOL_GPL(sysfs_remove_file_self); void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr) { int i; for (i = 0; ptr[i]; i++) sysfs_remove_file(kobj, ptr[i]); } EXPORT_SYMBOL_GPL(sysfs_remove_files); /** * sysfs_remove_file_from_group - remove an attribute file from a group. * @kobj: object we're acting for. * @attr: attribute descriptor. * @group: group name. */ void sysfs_remove_file_from_group(struct kobject *kobj, const struct attribute *attr, const char *group) { struct kernfs_node *parent; if (group) { parent = kernfs_find_and_get(kobj->sd, group); } else { parent = kobj->sd; kernfs_get(parent); } if (parent) { kernfs_remove_by_name(parent, attr->name); kernfs_put(parent); } } EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group); /** * sysfs_create_bin_file - create binary file for object. * @kobj: object. * @attr: attribute descriptor. 
*/ int sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr) { kuid_t uid; kgid_t gid; if (WARN_ON(!kobj || !kobj->sd || !attr)) return -EINVAL; kobject_get_ownership(kobj, &uid, &gid); return sysfs_add_bin_file_mode_ns(kobj->sd, attr, attr->attr.mode, attr->size, uid, gid, NULL); } EXPORT_SYMBOL_GPL(sysfs_create_bin_file); /** * sysfs_remove_bin_file - remove binary file for object. * @kobj: object. * @attr: attribute descriptor. */ void sysfs_remove_bin_file(struct kobject *kobj, const struct bin_attribute *attr) { kernfs_remove_by_name(kobj->sd, attr->attr.name); } EXPORT_SYMBOL_GPL(sysfs_remove_bin_file); static int internal_change_owner(struct kernfs_node *kn, kuid_t kuid, kgid_t kgid) { struct iattr newattrs = { .ia_valid = ATTR_UID | ATTR_GID, .ia_uid = kuid, .ia_gid = kgid, }; return kernfs_setattr(kn, &newattrs); } /** * sysfs_link_change_owner - change owner of a sysfs file. * @kobj: object of the kernfs_node the symlink is located in. * @targ: object of the kernfs_node the symlink points to. * @name: name of the link. * @kuid: new owner's kuid * @kgid: new owner's kgid * * This function looks up the sysfs symlink entry @name under @kobj and changes * the ownership to @kuid/@kgid. The symlink is looked up in the namespace of * @targ. * * Returns 0 on success or error code on failure. */ int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, const char *name, kuid_t kuid, kgid_t kgid) { struct kernfs_node *kn = NULL; int error; if (!name || !kobj->state_in_sysfs || !targ->state_in_sysfs) return -EINVAL; error = -ENOENT; kn = kernfs_find_and_get_ns(kobj->sd, name, targ->sd->ns); if (!kn) goto out; error = -EINVAL; if (kernfs_type(kn) != KERNFS_LINK) goto out; if (kn->symlink.target_kn->priv != targ) goto out; error = internal_change_owner(kn, kuid, kgid); out: kernfs_put(kn); return error; } /** * sysfs_file_change_owner - change owner of a sysfs file. * @kobj: object. * @name: name of the file to change. * @kuid: new owner's kuid * @kgid: new owner's kgid * * This function looks up the sysfs entry @name under @kobj and changes the * ownership to @kuid/@kgid. * * Returns 0 on success or error code on failure. */ int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, kgid_t kgid) { struct kernfs_node *kn; int error; if (!name) return -EINVAL; if (!kobj->state_in_sysfs) return -EINVAL; kn = kernfs_find_and_get(kobj->sd, name); if (!kn) return -ENOENT; error = internal_change_owner(kn, kuid, kgid); kernfs_put(kn); return error; } EXPORT_SYMBOL_GPL(sysfs_file_change_owner); /** * sysfs_change_owner - change owner of the given object. * @kobj: object. * @kuid: new owner's kuid * @kgid: new owner's kgid * * Change the owner of the default directory, files, groups, and attributes of * @kobj to @kuid/@kgid. Note that sysfs_change_owner mirrors how the sysfs * entries for a kobject are added by driver core. In summary, * sysfs_change_owner() takes care of the default directory entry for @kobj, * the default attributes associated with the ktype of @kobj and the default * attributes associated with the ktype of @kobj. * Additional properties not added by driver core have to be changed by the * driver or subsystem which created them. This is similar to how * driver/subsystem specific entries are removed. * * Returns 0 on success or error code on failure. 
*/ int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid) { int error; const struct kobj_type *ktype; if (!kobj->state_in_sysfs) return -EINVAL; /* Change the owner of the kobject itself. */ error = internal_change_owner(kobj->sd, kuid, kgid); if (error) return error; ktype = get_ktype(kobj); if (ktype) { /* * Change owner of the default groups associated with the * ktype of @kobj. */ error = sysfs_groups_change_owner(kobj, ktype->default_groups, kuid, kgid); if (error) return error; } return 0; } EXPORT_SYMBOL_GPL(sysfs_change_owner); /** * sysfs_emit - scnprintf equivalent, aware of PAGE_SIZE buffer. * @buf: start of PAGE_SIZE buffer. * @fmt: format * @...: optional arguments to @format * * * Returns number of characters written to @buf. */ int sysfs_emit(char *buf, const char *fmt, ...) { va_list args; int len; if (WARN(!buf || offset_in_page(buf), "invalid sysfs_emit: buf:%p\n", buf)) return 0; va_start(args, fmt); len = vscnprintf(buf, PAGE_SIZE, fmt, args); va_end(args); return len; } EXPORT_SYMBOL_GPL(sysfs_emit); /** * sysfs_emit_at - scnprintf equivalent, aware of PAGE_SIZE buffer. * @buf: start of PAGE_SIZE buffer. * @at: offset in @buf to start write in bytes * @at must be >= 0 && < PAGE_SIZE * @fmt: format * @...: optional arguments to @fmt * * * Returns number of characters written starting at &@buf[@at]. */ int sysfs_emit_at(char *buf, int at, const char *fmt, ...) { va_list args; int len; if (WARN(!buf || offset_in_page(buf) || at < 0 || at >= PAGE_SIZE, "invalid sysfs_emit_at: buf:%p at:%d\n", buf, at)) return 0; va_start(args, fmt); len = vscnprintf(buf + at, PAGE_SIZE - at, fmt, args); va_end(args); return len; } EXPORT_SYMBOL_GPL(sysfs_emit_at); /** * sysfs_bin_attr_simple_read - read callback to simply copy from memory. * @file: attribute file which is being read. * @kobj: object to which the attribute belongs. * @attr: attribute descriptor. * @buf: destination buffer. * @off: offset in bytes from which to read. * @count: maximum number of bytes to read. * * Simple ->read() callback for bin_attributes backed by a buffer in memory. * The @private and @size members in struct bin_attribute must be set to the * buffer's location and size before the bin_attribute is created in sysfs. * * Bounds check for @off and @count is done in sysfs_kf_bin_read(). * Negative value check for @off is done in vfs_setpos() and default_llseek(). * * Returns number of bytes written to @buf. */ ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { memcpy(buf, attr->private + off, count); return count; } EXPORT_SYMBOL_GPL(sysfs_bin_attr_simple_read);
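/*
 * Illustrative sketch (not part of the original file): a minimal read/write
 * device attribute whose ->show() fills the PAGE_SIZE sysfs buffer with
 * sysfs_emit(), as recommended above, and whose ->store() parses the value
 * back.  The example_* names and the example_value variable are hypothetical.
 */
#include <linux/device.h>
#include <linux/kstrtox.h>

static unsigned int example_value;

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	/* sysfs_emit() knows the buffer is PAGE_SIZE and guards against overflow */
	return sysfs_emit(buf, "%u\n", example_value);
}

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned int val;
	int err;

	err = kstrtouint(buf, 0, &val);
	if (err)
		return err;
	example_value = val;
	return count;	/* report that the whole write was consumed */
}
static DEVICE_ATTR_RW(example);

/* typically registered with device_create_file(dev, &dev_attr_example) */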
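/*
 * Illustrative sketch (not part of the original file): exposing a read-only
 * binary blob through sysfs_bin_attr_simple_read().  As the kernel-doc above
 * requires, @private and @size point at the in-memory buffer before the
 * attribute is created in sysfs.  The example_* names are hypothetical, and
 * whether the callback is assigned to .read or to a const-taking .read_new
 * member depends on the kernel version.
 */
static const u8 example_blob[4] = { 0xde, 0xad, 0xbe, 0xef };

static const struct bin_attribute example_blob_attr = {
	.attr		= { .name = "example_blob", .mode = 0444 },
	.size		= sizeof(example_blob),
	.private	= (void *)example_blob,
	.read		= sysfs_bin_attr_simple_read,
};

/* typically registered with sysfs_create_bin_file(&dev->kobj, &example_blob_attr) */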
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2023 Bootlin * */ #include "common.h" #include "netlink.h" #include <linux/phy.h> #include <linux/phy_link_topology.h> #include <linux/sfp.h> #include <net/netdev_lock.h> struct phy_req_info { struct ethnl_req_info base; }; struct phy_reply_data { struct ethnl_reply_data base; u32 phyindex; char *drvname; char *name; unsigned int upstream_type; char *upstream_sfp_name; unsigned int upstream_index; char *downstream_sfp_name; }; #define PHY_REPDATA(__reply_base) \ container_of(__reply_base, struct phy_reply_data, base) const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = { [ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), }; static int phy_reply_size(const struct ethnl_req_info *req_info, const struct ethnl_reply_data *reply_data) { struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); size_t size = 0; /* ETHTOOL_A_PHY_INDEX */ size += nla_total_size(sizeof(u32)); /* ETHTOOL_A_DRVNAME */ if (rep_data->drvname) size += nla_total_size(strlen(rep_data->drvname) + 1); /* ETHTOOL_A_NAME */ size += nla_total_size(strlen(rep_data->name) + 1); /* ETHTOOL_A_PHY_UPSTREAM_TYPE */ size += nla_total_size(sizeof(u32)); /* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */ if (rep_data->upstream_sfp_name) size += nla_total_size(strlen(rep_data->upstream_sfp_name) + 1); /* ETHTOOL_A_PHY_UPSTREAM_INDEX */ if (rep_data->upstream_index) size += nla_total_size(sizeof(u32)); /* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */ if (rep_data->downstream_sfp_name) size += nla_total_size(strlen(rep_data->downstream_sfp_name) + 1); return size; } static int phy_prepare_data(const struct ethnl_req_info *req_info, struct ethnl_reply_data *reply_data, const struct genl_info *info) { struct phy_link_topology *topo = reply_data->dev->link_topo; struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); struct nlattr **tb = info->attrs; struct phy_device_node *pdn; struct phy_device *phydev; /* RTNL is held by the caller */ phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PHY_HEADER, info->extack); if (IS_ERR_OR_NULL(phydev)) return -EOPNOTSUPP; pdn = xa_load(&topo->phys, phydev->phyindex); if (!pdn) return -EOPNOTSUPP; rep_data->phyindex = phydev->phyindex; rep_data->name = kstrdup(dev_name(&phydev->mdio.dev), GFP_KERNEL); rep_data->drvname = kstrdup(phydev->drv->name, GFP_KERNEL); rep_data->upstream_type = pdn->upstream_type; if (pdn->upstream_type == PHY_UPSTREAM_PHY) { struct phy_device *upstream = pdn->upstream.phydev; rep_data->upstream_index = upstream->phyindex; } if (pdn->parent_sfp_bus) rep_data->upstream_sfp_name = kstrdup(sfp_get_name(pdn->parent_sfp_bus), GFP_KERNEL); if (phydev->sfp_bus) rep_data->downstream_sfp_name = kstrdup(sfp_get_name(phydev->sfp_bus), GFP_KERNEL); return 0; } static int phy_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_info, const struct ethnl_reply_data *reply_data) { struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); if (nla_put_u32(skb, 
ETHTOOL_A_PHY_INDEX, rep_data->phyindex) || nla_put_string(skb, ETHTOOL_A_PHY_NAME, rep_data->name) || nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, rep_data->upstream_type)) return -EMSGSIZE; if (rep_data->drvname && nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, rep_data->drvname)) return -EMSGSIZE; if (rep_data->upstream_index && nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, rep_data->upstream_index)) return -EMSGSIZE; if (rep_data->upstream_sfp_name && nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME, rep_data->upstream_sfp_name)) return -EMSGSIZE; if (rep_data->downstream_sfp_name && nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME, rep_data->downstream_sfp_name)) return -EMSGSIZE; return 0; } static void phy_cleanup_data(struct ethnl_reply_data *reply_data) { struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); kfree(rep_data->drvname); kfree(rep_data->name); kfree(rep_data->upstream_sfp_name); kfree(rep_data->downstream_sfp_name); } const struct ethnl_request_ops ethnl_phy_request_ops = { .request_cmd = ETHTOOL_MSG_PHY_GET, .reply_cmd = ETHTOOL_MSG_PHY_GET_REPLY, .hdr_attr = ETHTOOL_A_PHY_HEADER, .req_info_size = sizeof(struct phy_req_info), .reply_data_size = sizeof(struct phy_reply_data), .prepare_data = phy_prepare_data, .reply_size = phy_reply_size, .fill_reply = phy_fill_reply, .cleanup_data = phy_cleanup_data, };
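/*
 * Illustrative sketch (not part of the original file): the reply_size() and
 * fill_reply() callbacks of an ethnl_request_ops pair must stay in sync --
 * every attribute emitted with nla_put*() in fill_reply() needs a matching
 * nla_total_size() contribution in reply_size(), as phy_reply_size() and
 * phy_fill_reply() above do, otherwise the reply skb can run out of room and
 * fill_reply() fails with -EMSGSIZE.  The example_* names and EXAMPLE_A_*
 * attributes are hypothetical.
 */
enum {
	EXAMPLE_A_SPEED = 1,
	EXAMPLE_A_LABEL = 2,
};

struct example_reply_data {
	struct ethnl_reply_data	base;
	u32			speed;
	char			*label;		/* optional, may be NULL */
};

static int example_reply_size(const struct ethnl_req_info *req_info,
			      const struct ethnl_reply_data *reply_base)
{
	const struct example_reply_data *data =
		container_of(reply_base, struct example_reply_data, base);
	size_t len = 0;

	len += nla_total_size(sizeof(u32));			/* EXAMPLE_A_SPEED */
	if (data->label)					/* EXAMPLE_A_LABEL */
		len += nla_total_size(strlen(data->label) + 1);
	return len;
}

static int example_fill_reply(struct sk_buff *skb,
			      const struct ethnl_req_info *req_info,
			      const struct ethnl_reply_data *reply_base)
{
	const struct example_reply_data *data =
		container_of(reply_base, struct example_reply_data, base);

	if (nla_put_u32(skb, EXAMPLE_A_SPEED, data->speed))
		return -EMSGSIZE;
	if (data->label &&
	    nla_put_string(skb, EXAMPLE_A_LABEL, data->label))
		return -EMSGSIZE;
	return 0;
}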
// SPDX-License-Identifier: GPL-2.0-only /* * A virtual codec example device. * * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. * * This is a virtual codec device driver for testing the codec framework. * It simulates a device that uses memory buffers for both source and * destination and encodes or decodes the data. 
*/ #include <linux/module.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <media/v4l2-mem2mem.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/videobuf2-vmalloc.h> #include "codec-v4l2-fwht.h" MODULE_DESCRIPTION("Virtual codec device"); MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>"); MODULE_LICENSE("GPL v2"); static bool multiplanar; module_param(multiplanar, bool, 0444); MODULE_PARM_DESC(multiplanar, " use multi-planar API instead of single-planar API"); static unsigned int debug; module_param(debug, uint, 0644); MODULE_PARM_DESC(debug, " activates debug info"); #define VICODEC_NAME "vicodec" #define MAX_WIDTH 4096U #define MIN_WIDTH 640U #define MAX_HEIGHT 2160U #define MIN_HEIGHT 360U /* Recommended number of buffers for the stateful codecs */ #define VICODEC_REC_BUFS 2 #define dprintk(dev, fmt, arg...) \ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) struct pixfmt_info { u32 id; unsigned int bytesperline_mult; unsigned int sizeimage_mult; unsigned int sizeimage_div; unsigned int luma_step; unsigned int chroma_step; /* Chroma plane subsampling */ unsigned int width_div; unsigned int height_div; }; static const struct v4l2_fwht_pixfmt_info pixfmt_fwht = { V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0, 1 }; static const struct v4l2_fwht_pixfmt_info pixfmt_stateless_fwht = { V4L2_PIX_FMT_FWHT_STATELESS, 0, 3, 1, 1, 1, 1, 1, 0, 1 }; static void vicodec_dev_release(struct device *dev) { } static struct platform_device vicodec_pdev = { .name = VICODEC_NAME, .dev.release = vicodec_dev_release, }; /* Per-queue, driver-specific private data */ struct vicodec_q_data { unsigned int coded_width; unsigned int coded_height; unsigned int visible_width; unsigned int visible_height; unsigned int sizeimage; unsigned int vb2_sizeimage; unsigned int sequence; const struct v4l2_fwht_pixfmt_info *info; }; enum { V4L2_M2M_SRC = 0, V4L2_M2M_DST = 1, }; struct vicodec_dev_instance { struct video_device vfd; struct mutex mutex; spinlock_t lock; struct v4l2_m2m_dev *m2m_dev; }; struct vicodec_dev { struct v4l2_device v4l2_dev; struct vicodec_dev_instance stateful_enc; struct vicodec_dev_instance stateful_dec; struct vicodec_dev_instance stateless_dec; #ifdef CONFIG_MEDIA_CONTROLLER struct media_device mdev; #endif }; struct vicodec_ctx { struct v4l2_fh fh; struct vicodec_dev *dev; bool is_enc; bool is_stateless; spinlock_t *lock; struct v4l2_ctrl_handler hdl; /* Source and destination queue data */ struct vicodec_q_data q_data[2]; struct v4l2_fwht_state state; u32 cur_buf_offset; u32 comp_max_size; u32 comp_size; u32 header_size; u32 comp_magic_cnt; bool comp_has_frame; bool comp_has_next_frame; bool first_source_change_sent; bool source_changed; }; static const struct v4l2_event vicodec_eos_event = { .type = V4L2_EVENT_EOS }; static inline struct vicodec_ctx *file2ctx(struct file *file) { return container_of(file_to_v4l2_fh(file), struct vicodec_ctx, fh); } static struct vicodec_q_data *get_q_data(struct vicodec_ctx *ctx, enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: return &ctx->q_data[V4L2_M2M_SRC]; case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: return &ctx->q_data[V4L2_M2M_DST]; default: break; } return NULL; } static void copy_cap_to_ref(const u8 *cap, const struct v4l2_fwht_pixfmt_info *info, struct 
v4l2_fwht_state *state) { int plane_idx; u8 *p_ref = state->ref_frame.buf; unsigned int cap_stride = state->stride; unsigned int ref_stride = state->ref_stride; for (plane_idx = 0; plane_idx < info->planes_num; plane_idx++) { int i; unsigned int h_div = (plane_idx == 1 || plane_idx == 2) ? info->height_div : 1; const u8 *row_cap = cap; u8 *row_ref = p_ref; if (info->planes_num == 3 && plane_idx == 1) { cap_stride /= 2; ref_stride /= 2; } if (plane_idx == 1 && (info->id == V4L2_PIX_FMT_NV24 || info->id == V4L2_PIX_FMT_NV42)) { cap_stride *= 2; ref_stride *= 2; } for (i = 0; i < state->visible_height / h_div; i++) { memcpy(row_ref, row_cap, ref_stride); row_ref += ref_stride; row_cap += cap_stride; } cap += cap_stride * (state->coded_height / h_div); p_ref += ref_stride * (state->coded_height / h_div); } } static bool validate_by_version(unsigned int flags, unsigned int version) { if (!version || version > V4L2_FWHT_VERSION) return false; if (version >= 2) { unsigned int components_num = 1 + ((flags & V4L2_FWHT_FL_COMPONENTS_NUM_MSK) >> V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET); unsigned int pixenc = flags & V4L2_FWHT_FL_PIXENC_MSK; if (components_num == 0 || components_num > 4 || !pixenc) return false; } return true; } static bool validate_stateless_params_flags(const struct v4l2_ctrl_fwht_params *params, const struct v4l2_fwht_pixfmt_info *cur_info) { unsigned int width_div = (params->flags & V4L2_FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; unsigned int height_div = (params->flags & V4L2_FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2; unsigned int components_num = 3; unsigned int pixenc = 0; if (params->version < 3) return false; components_num = 1 + ((params->flags & V4L2_FWHT_FL_COMPONENTS_NUM_MSK) >> V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET); pixenc = (params->flags & V4L2_FWHT_FL_PIXENC_MSK); if (v4l2_fwht_validate_fmt(cur_info, width_div, height_div, components_num, pixenc)) return true; return false; } static void update_state_from_header(struct vicodec_ctx *ctx) { const struct fwht_cframe_hdr *p_hdr = &ctx->state.header; ctx->state.visible_width = ntohl(p_hdr->width); ctx->state.visible_height = ntohl(p_hdr->height); ctx->state.colorspace = ntohl(p_hdr->colorspace); ctx->state.xfer_func = ntohl(p_hdr->xfer_func); ctx->state.ycbcr_enc = ntohl(p_hdr->ycbcr_enc); ctx->state.quantization = ntohl(p_hdr->quantization); } static int device_process(struct vicodec_ctx *ctx, struct vb2_v4l2_buffer *src_vb, struct vb2_v4l2_buffer *dst_vb) { struct vicodec_dev *dev = ctx->dev; struct v4l2_fwht_state *state = &ctx->state; u8 *p_src, *p_dst; int ret = 0; if (ctx->is_enc || ctx->is_stateless) p_src = vb2_plane_vaddr(&src_vb->vb2_buf, 0); else p_src = state->compressed_frame; if (ctx->is_stateless) { struct media_request *src_req = src_vb->vb2_buf.req_obj.req; ret = v4l2_ctrl_request_setup(src_req, &ctx->hdl); if (ret) return ret; update_state_from_header(ctx); ctx->state.header.size = htonl(vb2_get_plane_payload(&src_vb->vb2_buf, 0)); /* * set the reference buffer from the reference timestamp * only if this is a P-frame */ if (!(ntohl(ctx->state.header.flags) & V4L2_FWHT_FL_I_FRAME)) { struct vb2_buffer *ref_vb2_buf; struct vb2_queue *vq_cap = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); ref_vb2_buf = vb2_find_buffer(vq_cap, ctx->state.ref_frame_ts); if (!ref_vb2_buf) return -EINVAL; if (ref_vb2_buf->state == VB2_BUF_STATE_ERROR) ret = -EINVAL; ctx->state.ref_frame.buf = vb2_plane_vaddr(ref_vb2_buf, 0); } else { ctx->state.ref_frame.buf = NULL; } } p_dst = vb2_plane_vaddr(&dst_vb->vb2_buf, 0); if (!p_src || 
!p_dst) { v4l2_err(&dev->v4l2_dev, "Acquiring kernel pointers to buffers failed\n"); return -EFAULT; } if (ctx->is_enc) { struct vicodec_q_data *q_src; int comp_sz_or_errcode; q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); state->info = q_src->info; comp_sz_or_errcode = v4l2_fwht_encode(state, p_src, p_dst); if (comp_sz_or_errcode < 0) return comp_sz_or_errcode; vb2_set_plane_payload(&dst_vb->vb2_buf, 0, comp_sz_or_errcode); } else { struct vicodec_q_data *q_dst; unsigned int comp_frame_size = ntohl(ctx->state.header.size); q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); if (comp_frame_size > ctx->comp_max_size) return -EINVAL; state->info = q_dst->info; ret = v4l2_fwht_decode(state, p_src, p_dst); if (ret < 0) return ret; if (!ctx->is_stateless) copy_cap_to_ref(p_dst, ctx->state.info, &ctx->state); vb2_set_plane_payload(&dst_vb->vb2_buf, 0, q_dst->sizeimage); if (ntohl(ctx->state.header.flags) & V4L2_FWHT_FL_I_FRAME) dst_vb->flags |= V4L2_BUF_FLAG_KEYFRAME; else dst_vb->flags |= V4L2_BUF_FLAG_PFRAME; } return ret; } /* * mem2mem callbacks */ static enum vb2_buffer_state get_next_header(struct vicodec_ctx *ctx, u8 **pp, u32 sz) { static const u8 magic[] = { 0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff }; u8 *p = *pp; u32 state; u8 *header = (u8 *)&ctx->state.header; state = VB2_BUF_STATE_DONE; if (!ctx->header_size) { state = VB2_BUF_STATE_ERROR; for (; p < *pp + sz; p++) { u32 copy; p = memchr(p, magic[ctx->comp_magic_cnt], *pp + sz - p); if (!p) { ctx->comp_magic_cnt = 0; p = *pp + sz; break; } copy = sizeof(magic) - ctx->comp_magic_cnt; if (*pp + sz - p < copy) copy = *pp + sz - p; memcpy(header + ctx->comp_magic_cnt, p, copy); ctx->comp_magic_cnt += copy; if (!memcmp(header, magic, ctx->comp_magic_cnt)) { p += copy; state = VB2_BUF_STATE_DONE; break; } ctx->comp_magic_cnt = 0; } if (ctx->comp_magic_cnt < sizeof(magic)) { *pp = p; return state; } ctx->header_size = sizeof(magic); } if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->header_size; if (*pp + sz - p < copy) copy = *pp + sz - p; memcpy(header + ctx->header_size, p, copy); p += copy; ctx->header_size += copy; } *pp = p; return state; } /* device_run() - prepares and starts the device */ static void device_run(void *priv) { struct vicodec_ctx *ctx = priv; struct vicodec_dev *dev = ctx->dev; struct vb2_v4l2_buffer *src_buf, *dst_buf; struct vicodec_q_data *q_src, *q_dst; u32 state; struct media_request *src_req; src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); src_req = src_buf->vb2_buf.req_obj.req; q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); state = VB2_BUF_STATE_DONE; if (device_process(ctx, src_buf, dst_buf)) state = VB2_BUF_STATE_ERROR; else dst_buf->sequence = q_dst->sequence++; dst_buf->flags &= ~V4L2_BUF_FLAG_LAST; v4l2_m2m_buf_copy_metadata(src_buf, dst_buf); spin_lock(ctx->lock); if (!ctx->comp_has_next_frame && v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src_buf)) { dst_buf->flags |= V4L2_BUF_FLAG_LAST; v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx); } if (ctx->is_enc || ctx->is_stateless) { src_buf->sequence = q_src->sequence++; src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_buf_done(src_buf, state); } else if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == ctx->cur_buf_offset) { src_buf->sequence = q_src->sequence++; src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); 
v4l2_m2m_buf_done(src_buf, state); ctx->cur_buf_offset = 0; ctx->comp_has_next_frame = false; } v4l2_m2m_buf_done(dst_buf, state); ctx->comp_size = 0; ctx->header_size = 0; ctx->comp_magic_cnt = 0; ctx->comp_has_frame = false; spin_unlock(ctx->lock); if (ctx->is_stateless && src_req) v4l2_ctrl_request_complete(src_req, &ctx->hdl); if (ctx->is_enc) v4l2_m2m_job_finish(dev->stateful_enc.m2m_dev, ctx->fh.m2m_ctx); else if (ctx->is_stateless) v4l2_m2m_job_finish(dev->stateless_dec.m2m_dev, ctx->fh.m2m_ctx); else v4l2_m2m_job_finish(dev->stateful_dec.m2m_dev, ctx->fh.m2m_ctx); } static void job_remove_src_buf(struct vicodec_ctx *ctx, u32 state) { struct vb2_v4l2_buffer *src_buf; struct vicodec_q_data *q_src; q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); spin_lock(ctx->lock); src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); src_buf->sequence = q_src->sequence++; v4l2_m2m_buf_done(src_buf, state); ctx->cur_buf_offset = 0; spin_unlock(ctx->lock); } static const struct v4l2_fwht_pixfmt_info * info_from_header(const struct fwht_cframe_hdr *p_hdr) { unsigned int flags = ntohl(p_hdr->flags); unsigned int width_div = (flags & V4L2_FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; unsigned int height_div = (flags & V4L2_FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2; unsigned int components_num = 3; unsigned int pixenc = 0; unsigned int version = ntohl(p_hdr->version); if (version >= 2) { components_num = 1 + ((flags & V4L2_FWHT_FL_COMPONENTS_NUM_MSK) >> V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET); pixenc = (flags & V4L2_FWHT_FL_PIXENC_MSK); } return v4l2_fwht_find_nth_fmt(width_div, height_div, components_num, pixenc, 0); } static bool is_header_valid(const struct fwht_cframe_hdr *p_hdr) { const struct v4l2_fwht_pixfmt_info *info; unsigned int w = ntohl(p_hdr->width); unsigned int h = ntohl(p_hdr->height); unsigned int version = ntohl(p_hdr->version); unsigned int flags = ntohl(p_hdr->flags); if (w < MIN_WIDTH || w > MAX_WIDTH || h < MIN_HEIGHT || h > MAX_HEIGHT) return false; if (!validate_by_version(flags, version)) return false; info = info_from_header(p_hdr); if (!info) return false; return true; } static void update_capture_data_from_header(struct vicodec_ctx *ctx) { struct vicodec_q_data *q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); const struct fwht_cframe_hdr *p_hdr = &ctx->state.header; const struct v4l2_fwht_pixfmt_info *info = info_from_header(p_hdr); unsigned int flags = ntohl(p_hdr->flags); unsigned int hdr_width_div = (flags & V4L2_FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; unsigned int hdr_height_div = (flags & V4L2_FWHT_FL_CHROMA_FULL_HEIGHT) ? 
1 : 2; /* * This function should not be used by a stateless codec since * it changes values in q_data that are not request specific */ WARN_ON(ctx->is_stateless); q_dst->info = info; q_dst->visible_width = ntohl(p_hdr->width); q_dst->visible_height = ntohl(p_hdr->height); q_dst->coded_width = vic_round_dim(q_dst->visible_width, hdr_width_div); q_dst->coded_height = vic_round_dim(q_dst->visible_height, hdr_height_div); q_dst->sizeimage = q_dst->coded_width * q_dst->coded_height * q_dst->info->sizeimage_mult / q_dst->info->sizeimage_div; ctx->state.colorspace = ntohl(p_hdr->colorspace); ctx->state.xfer_func = ntohl(p_hdr->xfer_func); ctx->state.ycbcr_enc = ntohl(p_hdr->ycbcr_enc); ctx->state.quantization = ntohl(p_hdr->quantization); } static void set_last_buffer(struct vb2_v4l2_buffer *dst_buf, const struct vb2_v4l2_buffer *src_buf, struct vicodec_ctx *ctx) { struct vicodec_q_data *q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0); dst_buf->sequence = q_dst->sequence++; v4l2_m2m_buf_copy_metadata(src_buf, dst_buf); dst_buf->flags |= V4L2_BUF_FLAG_LAST; v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); } static int job_ready(void *priv) { static const u8 magic[] = { 0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff }; struct vicodec_ctx *ctx = priv; struct vb2_v4l2_buffer *src_buf; u8 *p_src; u8 *p; u32 sz; u32 state; struct vicodec_q_data *q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); unsigned int flags; unsigned int hdr_width_div; unsigned int hdr_height_div; unsigned int max_to_copy; unsigned int comp_frame_size; if (ctx->source_changed) return 0; if (ctx->is_stateless || ctx->is_enc || ctx->comp_has_frame) return 1; restart: ctx->comp_has_next_frame = false; src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); if (!src_buf) return 0; p_src = vb2_plane_vaddr(&src_buf->vb2_buf, 0); sz = vb2_get_plane_payload(&src_buf->vb2_buf, 0); p = p_src + ctx->cur_buf_offset; state = VB2_BUF_STATE_DONE; if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { state = get_next_header(ctx, &p, p_src + sz - p); if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src_buf)) return 1; job_remove_src_buf(ctx, state); goto restart; } } comp_frame_size = ntohl(ctx->state.header.size); /* * The current scanned frame might be the first frame of a new * resolution so its size might be larger than ctx->comp_max_size. 
* In that case it is copied up to the current buffer capacity and * the copy will continue after allocating new large enough buffer * when restreaming */ max_to_copy = min(comp_frame_size, ctx->comp_max_size); if (ctx->comp_size < max_to_copy) { u32 copy = max_to_copy - ctx->comp_size; if (copy > p_src + sz - p) copy = p_src + sz - p; memcpy(ctx->state.compressed_frame + ctx->comp_size, p, copy); p += copy; ctx->comp_size += copy; if (ctx->comp_size < max_to_copy) { if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src_buf)) return 1; job_remove_src_buf(ctx, state); goto restart; } } ctx->cur_buf_offset = p - p_src; if (ctx->comp_size == comp_frame_size) ctx->comp_has_frame = true; ctx->comp_has_next_frame = false; if (ctx->comp_has_frame && sz - ctx->cur_buf_offset >= sizeof(struct fwht_cframe_hdr)) { struct fwht_cframe_hdr *p_hdr = (struct fwht_cframe_hdr *)p; u32 frame_size = ntohl(p_hdr->size); u32 remaining = sz - ctx->cur_buf_offset - sizeof(*p_hdr); if (!memcmp(p, magic, sizeof(magic))) ctx->comp_has_next_frame = remaining >= frame_size; } /* * if the header is invalid the device_run will just drop the frame * with an error */ if (!is_header_valid(&ctx->state.header) && ctx->comp_has_frame) return 1; flags = ntohl(ctx->state.header.flags); hdr_width_div = (flags & V4L2_FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; hdr_height_div = (flags & V4L2_FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2; if (ntohl(ctx->state.header.width) != q_dst->visible_width || ntohl(ctx->state.header.height) != q_dst->visible_height || !q_dst->info || hdr_width_div != q_dst->info->width_div || hdr_height_div != q_dst->info->height_div) { static const struct v4l2_event rs_event = { .type = V4L2_EVENT_SOURCE_CHANGE, .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, }; struct vb2_v4l2_buffer *dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); update_capture_data_from_header(ctx); v4l2_event_queue_fh(&ctx->fh, &rs_event); set_last_buffer(dst_buf, src_buf, ctx); ctx->source_changed = true; return 0; } return 1; } /* * video ioctls */ static const struct v4l2_fwht_pixfmt_info *find_fmt(u32 fmt) { const struct v4l2_fwht_pixfmt_info *info = v4l2_fwht_find_pixfmt(fmt); if (!info) info = v4l2_fwht_get_pixfmt(0); return info; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strscpy(cap->driver, VICODEC_NAME, sizeof(cap->driver)); strscpy(cap->card, VICODEC_NAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", VICODEC_NAME); return 0; } static int enum_fmt(struct v4l2_fmtdesc *f, struct vicodec_ctx *ctx, bool is_out) { bool is_uncomp = (ctx->is_enc && is_out) || (!ctx->is_enc && !is_out); if (V4L2_TYPE_IS_MULTIPLANAR(f->type) && !multiplanar) return -EINVAL; if (!V4L2_TYPE_IS_MULTIPLANAR(f->type) && multiplanar) return -EINVAL; if (is_uncomp) { const struct v4l2_fwht_pixfmt_info *info = get_q_data(ctx, f->type)->info; if (ctx->is_enc || !vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) info = v4l2_fwht_get_pixfmt(f->index); else info = v4l2_fwht_find_nth_fmt(info->width_div, info->height_div, info->components_num, info->pixenc, f->index); if (!info) return -EINVAL; f->pixelformat = info->id; } else { if (f->index) return -EINVAL; f->pixelformat = ctx->is_stateless ? 
V4L2_PIX_FMT_FWHT_STATELESS : V4L2_PIX_FMT_FWHT; if (!ctx->is_enc && !ctx->is_stateless) f->flags = V4L2_FMT_FLAG_DYN_RESOLUTION | V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM; } return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct vicodec_ctx *ctx = file2ctx(file); return enum_fmt(f, ctx, false); } static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct vicodec_ctx *ctx = file2ctx(file); return enum_fmt(f, ctx, true); } static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) { struct vicodec_q_data *q_data; struct v4l2_pix_format_mplane *pix_mp; struct v4l2_pix_format *pix; const struct v4l2_fwht_pixfmt_info *info; q_data = get_q_data(ctx, f->type); info = q_data->info; switch (f->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (multiplanar) return -EINVAL; pix = &f->fmt.pix; pix->width = q_data->coded_width; pix->height = q_data->coded_height; pix->field = V4L2_FIELD_NONE; pix->pixelformat = info->id; pix->bytesperline = q_data->coded_width * info->bytesperline_mult; pix->sizeimage = q_data->sizeimage; pix->colorspace = ctx->state.colorspace; pix->xfer_func = ctx->state.xfer_func; pix->ycbcr_enc = ctx->state.ycbcr_enc; pix->quantization = ctx->state.quantization; break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: if (!multiplanar) return -EINVAL; pix_mp = &f->fmt.pix_mp; pix_mp->width = q_data->coded_width; pix_mp->height = q_data->coded_height; pix_mp->field = V4L2_FIELD_NONE; pix_mp->pixelformat = info->id; pix_mp->num_planes = 1; pix_mp->plane_fmt[0].bytesperline = q_data->coded_width * info->bytesperline_mult; pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage; pix_mp->colorspace = ctx->state.colorspace; pix_mp->xfer_func = ctx->state.xfer_func; pix_mp->ycbcr_enc = ctx->state.ycbcr_enc; pix_mp->quantization = ctx->state.quantization; break; default: return -EINVAL; } return 0; } static int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { return vidioc_g_fmt(file2ctx(file), f); } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { return vidioc_g_fmt(file2ctx(file), f); } static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) { struct v4l2_pix_format_mplane *pix_mp; struct v4l2_pix_format *pix; struct v4l2_plane_pix_format *plane; const struct v4l2_fwht_pixfmt_info *info = ctx->is_stateless ? 
&pixfmt_stateless_fwht : &pixfmt_fwht; switch (f->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: pix = &f->fmt.pix; if (pix->pixelformat != V4L2_PIX_FMT_FWHT && pix->pixelformat != V4L2_PIX_FMT_FWHT_STATELESS) info = find_fmt(pix->pixelformat); pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH); pix->width = vic_round_dim(pix->width, info->width_div); pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT); pix->height = vic_round_dim(pix->height, info->height_div); pix->field = V4L2_FIELD_NONE; pix->bytesperline = pix->width * info->bytesperline_mult; pix->sizeimage = pix->width * pix->height * info->sizeimage_mult / info->sizeimage_div; if (pix->pixelformat == V4L2_PIX_FMT_FWHT) pix->sizeimage += sizeof(struct fwht_cframe_hdr); break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: pix_mp = &f->fmt.pix_mp; plane = pix_mp->plane_fmt; if (pix_mp->pixelformat != V4L2_PIX_FMT_FWHT && pix_mp->pixelformat != V4L2_PIX_FMT_FWHT_STATELESS) info = find_fmt(pix_mp->pixelformat); pix_mp->num_planes = 1; pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH); pix_mp->width = vic_round_dim(pix_mp->width, info->width_div); pix_mp->height = clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT); pix_mp->height = vic_round_dim(pix_mp->height, info->height_div); pix_mp->field = V4L2_FIELD_NONE; plane->bytesperline = pix_mp->width * info->bytesperline_mult; plane->sizeimage = pix_mp->width * pix_mp->height * info->sizeimage_mult / info->sizeimage_div; if (pix_mp->pixelformat == V4L2_PIX_FMT_FWHT) plane->sizeimage += sizeof(struct fwht_cframe_hdr); break; default: return -EINVAL; } return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vicodec_ctx *ctx = file2ctx(file); struct v4l2_pix_format_mplane *pix_mp; struct v4l2_pix_format *pix; switch (f->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (multiplanar) return -EINVAL; pix = &f->fmt.pix; pix->pixelformat = ctx->is_enc ? V4L2_PIX_FMT_FWHT : find_fmt(f->fmt.pix.pixelformat)->id; pix->colorspace = ctx->state.colorspace; pix->xfer_func = ctx->state.xfer_func; pix->ycbcr_enc = ctx->state.ycbcr_enc; pix->quantization = ctx->state.quantization; break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: if (!multiplanar) return -EINVAL; pix_mp = &f->fmt.pix_mp; pix_mp->pixelformat = ctx->is_enc ? 
V4L2_PIX_FMT_FWHT : find_fmt(pix_mp->pixelformat)->id; pix_mp->colorspace = ctx->state.colorspace; pix_mp->xfer_func = ctx->state.xfer_func; pix_mp->ycbcr_enc = ctx->state.ycbcr_enc; pix_mp->quantization = ctx->state.quantization; break; default: return -EINVAL; } return vidioc_try_fmt(ctx, f); } static int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vicodec_ctx *ctx = file2ctx(file); struct v4l2_pix_format_mplane *pix_mp; struct v4l2_pix_format *pix; switch (f->type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (multiplanar) return -EINVAL; pix = &f->fmt.pix; if (ctx->is_enc) pix->pixelformat = find_fmt(pix->pixelformat)->id; else if (ctx->is_stateless) pix->pixelformat = V4L2_PIX_FMT_FWHT_STATELESS; else pix->pixelformat = V4L2_PIX_FMT_FWHT; if (!pix->colorspace) pix->colorspace = V4L2_COLORSPACE_REC709; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: if (!multiplanar) return -EINVAL; pix_mp = &f->fmt.pix_mp; if (ctx->is_enc) pix_mp->pixelformat = find_fmt(pix_mp->pixelformat)->id; else if (ctx->is_stateless) pix_mp->pixelformat = V4L2_PIX_FMT_FWHT_STATELESS; else pix_mp->pixelformat = V4L2_PIX_FMT_FWHT; if (!pix_mp->colorspace) pix_mp->colorspace = V4L2_COLORSPACE_REC709; break; default: return -EINVAL; } return vidioc_try_fmt(ctx, f); } static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) { struct vicodec_q_data *q_data; struct vb2_queue *vq; bool fmt_changed = true; struct v4l2_pix_format_mplane *pix_mp; struct v4l2_pix_format *pix; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); q_data = get_q_data(ctx, f->type); if (!q_data) return -EINVAL; switch (f->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: pix = &f->fmt.pix; if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type)) fmt_changed = !q_data->info || q_data->info->id != pix->pixelformat || q_data->coded_width != pix->width || q_data->coded_height != pix->height; if (vb2_is_busy(vq) && fmt_changed) return -EBUSY; if (pix->pixelformat == V4L2_PIX_FMT_FWHT) q_data->info = &pixfmt_fwht; else if (pix->pixelformat == V4L2_PIX_FMT_FWHT_STATELESS) q_data->info = &pixfmt_stateless_fwht; else q_data->info = find_fmt(pix->pixelformat); q_data->coded_width = pix->width; q_data->coded_height = pix->height; q_data->sizeimage = pix->sizeimage; break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: pix_mp = &f->fmt.pix_mp; if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type)) fmt_changed = !q_data->info || q_data->info->id != pix_mp->pixelformat || q_data->coded_width != pix_mp->width || q_data->coded_height != pix_mp->height; if (vb2_is_busy(vq) && fmt_changed) return -EBUSY; if (pix_mp->pixelformat == V4L2_PIX_FMT_FWHT) q_data->info = &pixfmt_fwht; else if (pix_mp->pixelformat == V4L2_PIX_FMT_FWHT_STATELESS) q_data->info = &pixfmt_stateless_fwht; else q_data->info = find_fmt(pix_mp->pixelformat); q_data->coded_width = pix_mp->width; q_data->coded_height = pix_mp->height; q_data->sizeimage = pix_mp->plane_fmt[0].sizeimage; break; default: return -EINVAL; } dprintk(ctx->dev, "Setting format for type %d, coded wxh: %dx%d, fourcc: 0x%08x\n", f->type, q_data->coded_width, q_data->coded_height, q_data->info->id); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = vidioc_try_fmt_vid_cap(file, priv, f); if (ret) return ret; return vidioc_s_fmt(file2ctx(file), f); } static int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vicodec_ctx *ctx = 
file2ctx(file); struct vicodec_q_data *q_data; struct vicodec_q_data *q_data_cap; struct v4l2_pix_format *pix; struct v4l2_pix_format_mplane *pix_mp; u32 coded_w = 0, coded_h = 0; unsigned int size = 0; int ret; q_data = get_q_data(ctx, f->type); q_data_cap = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); ret = vidioc_try_fmt_vid_out(file, priv, f); if (ret) return ret; if (ctx->is_enc) { struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); struct vb2_queue *vq_cap = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); const struct v4l2_fwht_pixfmt_info *info = ctx->is_stateless ? &pixfmt_stateless_fwht : &pixfmt_fwht; if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { coded_w = f->fmt.pix.width; coded_h = f->fmt.pix.height; } else { coded_w = f->fmt.pix_mp.width; coded_h = f->fmt.pix_mp.height; } if (vb2_is_busy(vq) && (coded_w != q_data->coded_width || coded_h != q_data->coded_height)) return -EBUSY; size = coded_w * coded_h * info->sizeimage_mult / info->sizeimage_div; if (!ctx->is_stateless) size += sizeof(struct fwht_cframe_hdr); if (vb2_is_busy(vq_cap) && size > q_data_cap->sizeimage) return -EBUSY; } ret = vidioc_s_fmt(file2ctx(file), f); if (!ret) { if (ctx->is_enc) { q_data->visible_width = coded_w; q_data->visible_height = coded_h; q_data_cap->coded_width = coded_w; q_data_cap->coded_height = coded_h; q_data_cap->sizeimage = size; } switch (f->type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: pix = &f->fmt.pix; ctx->state.colorspace = pix->colorspace; ctx->state.xfer_func = pix->xfer_func; ctx->state.ycbcr_enc = pix->ycbcr_enc; ctx->state.quantization = pix->quantization; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: pix_mp = &f->fmt.pix_mp; ctx->state.colorspace = pix_mp->colorspace; ctx->state.xfer_func = pix_mp->xfer_func; ctx->state.ycbcr_enc = pix_mp->ycbcr_enc; ctx->state.quantization = pix_mp->quantization; break; default: break; } } return ret; } static int vidioc_g_selection(struct file *file, void *priv, struct v4l2_selection *s) { struct vicodec_ctx *ctx = file2ctx(file); struct vicodec_q_data *q_data; q_data = get_q_data(ctx, s->type); if (!q_data) return -EINVAL; /* * encoder supports only cropping on the OUTPUT buffer * decoder supports only composing on the CAPTURE buffer */ if (ctx->is_enc && s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { switch (s->target) { case V4L2_SEL_TGT_CROP: s->r.left = 0; s->r.top = 0; s->r.width = q_data->visible_width; s->r.height = q_data->visible_height; return 0; case V4L2_SEL_TGT_CROP_DEFAULT: case V4L2_SEL_TGT_CROP_BOUNDS: s->r.left = 0; s->r.top = 0; s->r.width = q_data->coded_width; s->r.height = q_data->coded_height; return 0; } } else if (!ctx->is_enc && s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { switch (s->target) { case V4L2_SEL_TGT_COMPOSE: s->r.left = 0; s->r.top = 0; s->r.width = q_data->visible_width; s->r.height = q_data->visible_height; return 0; case V4L2_SEL_TGT_COMPOSE_DEFAULT: case V4L2_SEL_TGT_COMPOSE_BOUNDS: s->r.left = 0; s->r.top = 0; s->r.width = q_data->coded_width; s->r.height = q_data->coded_height; return 0; } } return -EINVAL; } static int vidioc_s_selection(struct file *file, void *priv, struct v4l2_selection *s) { struct vicodec_ctx *ctx = file2ctx(file); struct vicodec_q_data *q_data; if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; q_data = get_q_data(ctx, s->type); if (!q_data) return -EINVAL; if (!ctx->is_enc || s->target != V4L2_SEL_TGT_CROP) return -EINVAL; s->r.left = 0; s->r.top = 0; q_data->visible_width = clamp(s->r.width, MIN_WIDTH, q_data->coded_width); s->r.width = 
q_data->visible_width; q_data->visible_height = clamp(s->r.height, MIN_HEIGHT, q_data->coded_height); s->r.height = q_data->visible_height; return 0; } static int vicodec_encoder_cmd(struct file *file, void *priv, struct v4l2_encoder_cmd *ec) { struct vicodec_ctx *ctx = file2ctx(file); int ret; ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, ec); if (ret < 0) return ret; if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q)) return 0; ret = v4l2_m2m_ioctl_encoder_cmd(file, priv, ec); if (ret < 0) return ret; if (ec->cmd == V4L2_ENC_CMD_STOP && v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); if (ec->cmd == V4L2_ENC_CMD_START && v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q); return 0; } static int vicodec_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *dc) { struct vicodec_ctx *ctx = file2ctx(file); int ret; /* * This ioctl should not be used with a stateless codec that doesn't * support holding buffers and the associated flush command. */ WARN_ON(ctx->is_stateless); ret = v4l2_m2m_ioctl_try_decoder_cmd(file, priv, dc); if (ret < 0) return ret; if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q)) return 0; ret = v4l2_m2m_ioctl_decoder_cmd(file, priv, dc); if (ret < 0) return ret; if (dc->cmd == V4L2_DEC_CMD_STOP && v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); if (dc->cmd == V4L2_DEC_CMD_START && v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q); return 0; } static int vicodec_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize) { switch (fsize->pixel_format) { case V4L2_PIX_FMT_FWHT_STATELESS: break; case V4L2_PIX_FMT_FWHT: break; default: if (find_fmt(fsize->pixel_format)->id == fsize->pixel_format) break; return -EINVAL; } if (fsize->index) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = MIN_WIDTH; fsize->stepwise.max_width = MAX_WIDTH; fsize->stepwise.step_width = 8; fsize->stepwise.min_height = MIN_HEIGHT; fsize->stepwise.max_height = MAX_HEIGHT; fsize->stepwise.step_height = 8; return 0; } static int vicodec_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { struct vicodec_ctx *ctx = container_of(fh, struct vicodec_ctx, fh); switch (sub->type) { case V4L2_EVENT_SOURCE_CHANGE: if (ctx->is_enc) return -EINVAL; fallthrough; case V4L2_EVENT_EOS: if (ctx->is_stateless) return -EINVAL; return v4l2_event_subscribe(fh, sub, 0, NULL); default: return v4l2_ctrl_subscribe_event(fh, sub); } } static const struct v4l2_ioctl_ops vicodec_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap, .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out, .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out, .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out, .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out, .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, .vidioc_qbuf = 
v4l2_m2m_ioctl_qbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs, .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_g_selection = vidioc_g_selection, .vidioc_s_selection = vidioc_s_selection, .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd, .vidioc_encoder_cmd = vicodec_encoder_cmd, .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd, .vidioc_decoder_cmd = vicodec_decoder_cmd, .vidioc_enum_framesizes = vicodec_enum_framesizes, .vidioc_subscribe_event = vicodec_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; /* * Queue operations */ static int vicodec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct vicodec_ctx *ctx = vb2_get_drv_priv(vq); struct vicodec_q_data *q_data = get_q_data(ctx, vq->type); unsigned int size = q_data->sizeimage; if (*nplanes) return sizes[0] < size ? -EINVAL : 0; *nplanes = 1; sizes[0] = size; q_data->vb2_sizeimage = size; return 0; } static int vicodec_buf_out_validate(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); vbuf->field = V4L2_FIELD_NONE; return 0; } static int vicodec_buf_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct vicodec_q_data *q_data; dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type); q_data = get_q_data(ctx, vb->vb2_queue->type); if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { if (vbuf->field == V4L2_FIELD_ANY) vbuf->field = V4L2_FIELD_NONE; if (vbuf->field != V4L2_FIELD_NONE) { dprintk(ctx->dev, "%s field isn't supported\n", __func__); return -EINVAL; } } if (vb2_plane_size(vb, 0) < q_data->vb2_sizeimage) { dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n", __func__, vb2_plane_size(vb, 0), (long)q_data->vb2_sizeimage); return -EINVAL; } return 0; } static void vicodec_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); unsigned int sz = vb2_get_plane_payload(&vbuf->vb2_buf, 0); u8 *p_src = vb2_plane_vaddr(&vbuf->vb2_buf, 0); u8 *p = p_src; struct vb2_queue *vq_out = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); struct vb2_queue *vq_cap = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); bool header_valid = false; static const struct v4l2_event rs_event = { .type = V4L2_EVENT_SOURCE_CHANGE, .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, }; if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) && vb2_is_streaming(vb->vb2_queue) && v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) { unsigned int i; for (i = 0; i < vb->num_planes; i++) vb2_set_plane_payload(vb, i, 0); vbuf->field = V4L2_FIELD_NONE; vbuf->sequence = get_q_data(ctx, vb->vb2_queue->type)->sequence++; v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf); v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); return; } /* buf_queue handles only the first source change event */ if (ctx->first_source_change_sent) { v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); return; } /* * if both queues are streaming, the source change event is * handled in job_ready */ if (vb2_is_streaming(vq_cap) && vb2_is_streaming(vq_out)) { v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); return; } /* * source 
change event is relevant only for the stateful decoder * in the compressed stream */ if (ctx->is_stateless || ctx->is_enc || V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type)) { v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); return; } do { enum vb2_buffer_state state = get_next_header(ctx, &p, p_src + sz - p); if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { v4l2_m2m_buf_done(vbuf, state); return; } header_valid = is_header_valid(&ctx->state.header); /* * p points right after the end of the header in the * buffer. If the header is invalid we set p to point * to the next byte after the start of the header */ if (!header_valid) { p = p - sizeof(struct fwht_cframe_hdr) + 1; if (p < p_src) p = p_src; ctx->header_size = 0; ctx->comp_magic_cnt = 0; } } while (!header_valid); ctx->cur_buf_offset = p - p_src; update_capture_data_from_header(ctx); ctx->first_source_change_sent = true; v4l2_event_queue_fh(&ctx->fh, &rs_event); v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } static void vicodec_return_bufs(struct vb2_queue *q, u32 state) { struct vicodec_ctx *ctx = vb2_get_drv_priv(q); struct vb2_v4l2_buffer *vbuf; for (;;) { if (V4L2_TYPE_IS_OUTPUT(q->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); else vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); if (vbuf == NULL) return; v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req, &ctx->hdl); spin_lock(ctx->lock); v4l2_m2m_buf_done(vbuf, state); spin_unlock(ctx->lock); } } static unsigned int total_frame_size(struct vicodec_q_data *q_data) { unsigned int size; unsigned int chroma_div; if (!q_data->info) { WARN_ON(1); return 0; } size = q_data->coded_width * q_data->coded_height; chroma_div = q_data->info->width_div * q_data->info->height_div; if (q_data->info->components_num == 4) return 2 * size + 2 * (size / chroma_div); else if (q_data->info->components_num == 3) return size + 2 * (size / chroma_div); return size; } static int vicodec_start_streaming(struct vb2_queue *q, unsigned int count) { struct vicodec_ctx *ctx = vb2_get_drv_priv(q); struct vicodec_q_data *q_data = get_q_data(ctx, q->type); struct v4l2_fwht_state *state = &ctx->state; const struct v4l2_fwht_pixfmt_info *info = q_data->info; unsigned int size = q_data->coded_width * q_data->coded_height; unsigned int chroma_div; unsigned int total_planes_size; u8 *new_comp_frame = NULL; chroma_div = info->width_div * info->height_div; q_data->sequence = 0; v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q); state->gop_cnt = 0; if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) || (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->is_enc)) return 0; if (info->id == V4L2_PIX_FMT_FWHT || info->id == V4L2_PIX_FMT_FWHT_STATELESS) { vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED); return -EINVAL; } total_planes_size = total_frame_size(q_data); ctx->comp_max_size = total_planes_size; state->visible_width = q_data->visible_width; state->visible_height = q_data->visible_height; state->coded_width = q_data->coded_width; state->coded_height = q_data->coded_height; state->stride = q_data->coded_width * info->bytesperline_mult; if (ctx->is_stateless) { state->ref_stride = state->stride; return 0; } state->ref_stride = q_data->coded_width * info->luma_alpha_step; state->ref_frame.buf = kvmalloc(total_planes_size, GFP_KERNEL); state->ref_frame.luma = state->ref_frame.buf; new_comp_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL); if (!state->ref_frame.luma || !new_comp_frame) { kvfree(state->ref_frame.luma); kvfree(new_comp_frame); vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED); return -ENOMEM; } /* * if 
state->compressed_frame was already allocated then * it contain data of the first frame of the new resolution */ if (state->compressed_frame) { if (ctx->comp_size > ctx->comp_max_size) ctx->comp_size = ctx->comp_max_size; memcpy(new_comp_frame, state->compressed_frame, ctx->comp_size); } kvfree(state->compressed_frame); state->compressed_frame = new_comp_frame; if (info->components_num < 3) { state->ref_frame.cb = NULL; state->ref_frame.cr = NULL; state->ref_frame.alpha = NULL; return 0; } state->ref_frame.cb = state->ref_frame.luma + size; state->ref_frame.cr = state->ref_frame.cb + size / chroma_div; if (info->components_num == 4) state->ref_frame.alpha = state->ref_frame.cr + size / chroma_div; else state->ref_frame.alpha = NULL; return 0; } static void vicodec_stop_streaming(struct vb2_queue *q) { struct vicodec_ctx *ctx = vb2_get_drv_priv(q); vicodec_return_bufs(q, VB2_BUF_STATE_ERROR); v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q); if (V4L2_TYPE_IS_OUTPUT(q->type) && v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event); if (!ctx->is_enc && V4L2_TYPE_IS_OUTPUT(q->type)) ctx->first_source_change_sent = false; if ((!V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) || (V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) { if (!ctx->is_stateless) kvfree(ctx->state.ref_frame.buf); ctx->state.ref_frame.buf = NULL; ctx->state.ref_frame.luma = NULL; ctx->comp_max_size = 0; ctx->source_changed = false; } if (V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) { ctx->cur_buf_offset = 0; ctx->comp_size = 0; ctx->header_size = 0; ctx->comp_magic_cnt = 0; ctx->comp_has_frame = false; ctx->comp_has_next_frame = false; } } static void vicodec_buf_request_complete(struct vb2_buffer *vb) { struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl); } static const struct vb2_ops vicodec_qops = { .queue_setup = vicodec_queue_setup, .buf_out_validate = vicodec_buf_out_validate, .buf_prepare = vicodec_buf_prepare, .buf_queue = vicodec_buf_queue, .buf_request_complete = vicodec_buf_request_complete, .start_streaming = vicodec_start_streaming, .stop_streaming = vicodec_stop_streaming, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct vicodec_ctx *ctx = priv; int ret; src_vq->type = (multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT); src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->ops = &vicodec_qops; src_vq->mem_ops = &vb2_vmalloc_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; if (ctx->is_enc) { src_vq->lock = &ctx->dev->stateful_enc.mutex; src_vq->min_reqbufs_allocation = VICODEC_REC_BUFS; } else if (ctx->is_stateless) { src_vq->lock = &ctx->dev->stateless_dec.mutex; } else { src_vq->lock = &ctx->dev->stateful_dec.mutex; } src_vq->supports_requests = ctx->is_stateless; src_vq->requires_requests = ctx->is_stateless; ret = vb2_queue_init(src_vq); if (ret) return ret; dst_vq->type = (multiplanar ? 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE); dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; dst_vq->max_num_buffers = 64; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->ops = &vicodec_qops; dst_vq->mem_ops = &vb2_vmalloc_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; dst_vq->lock = src_vq->lock; if (!ctx->is_stateless && !ctx->is_enc) dst_vq->min_reqbufs_allocation = VICODEC_REC_BUFS; return vb2_queue_init(dst_vq); } static int vicodec_try_ctrl(struct v4l2_ctrl *ctrl) { struct vicodec_ctx *ctx = container_of(ctrl->handler, struct vicodec_ctx, hdl); const struct v4l2_ctrl_fwht_params *params; struct vicodec_q_data *q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); switch (ctrl->id) { case V4L2_CID_STATELESS_FWHT_PARAMS: if (!q_dst->info) return -EINVAL; params = ctrl->p_new.p_fwht_params; if (params->width > q_dst->coded_width || params->width < MIN_WIDTH || params->height > q_dst->coded_height || params->height < MIN_HEIGHT) return -EINVAL; if (!validate_by_version(params->flags, params->version)) return -EINVAL; if (!validate_stateless_params_flags(params, q_dst->info)) return -EINVAL; return 0; default: return 0; } return 0; } static void update_header_from_stateless_params(struct vicodec_ctx *ctx, const struct v4l2_ctrl_fwht_params *params) { struct fwht_cframe_hdr *p_hdr = &ctx->state.header; p_hdr->magic1 = FWHT_MAGIC1; p_hdr->magic2 = FWHT_MAGIC2; p_hdr->version = htonl(params->version); p_hdr->width = htonl(params->width); p_hdr->height = htonl(params->height); p_hdr->flags = htonl(params->flags); p_hdr->colorspace = htonl(params->colorspace); p_hdr->xfer_func = htonl(params->xfer_func); p_hdr->ycbcr_enc = htonl(params->ycbcr_enc); p_hdr->quantization = htonl(params->quantization); } static int vicodec_s_ctrl(struct v4l2_ctrl *ctrl) { struct vicodec_ctx *ctx = container_of(ctrl->handler, struct vicodec_ctx, hdl); const struct v4l2_ctrl_fwht_params *params; switch (ctrl->id) { case V4L2_CID_MPEG_VIDEO_GOP_SIZE: ctx->state.gop_size = ctrl->val; return 0; case V4L2_CID_FWHT_I_FRAME_QP: ctx->state.i_frame_qp = ctrl->val; return 0; case V4L2_CID_FWHT_P_FRAME_QP: ctx->state.p_frame_qp = ctrl->val; return 0; case V4L2_CID_STATELESS_FWHT_PARAMS: params = ctrl->p_new.p_fwht_params; update_header_from_stateless_params(ctx, params); ctx->state.ref_frame_ts = params->backward_ref_ts; return 0; } return -EINVAL; } static const struct v4l2_ctrl_ops vicodec_ctrl_ops = { .s_ctrl = vicodec_s_ctrl, .try_ctrl = vicodec_try_ctrl, }; static const struct v4l2_ctrl_config vicodec_ctrl_stateless_state = { .ops = &vicodec_ctrl_ops, .id = V4L2_CID_STATELESS_FWHT_PARAMS, .elem_size = sizeof(struct v4l2_ctrl_fwht_params), }; /* * File operations */ static int vicodec_open(struct file *file) { const struct v4l2_fwht_pixfmt_info *info = v4l2_fwht_get_pixfmt(0); struct video_device *vfd = video_devdata(file); struct vicodec_dev *dev = video_drvdata(file); struct vicodec_ctx *ctx = NULL; struct v4l2_ctrl_handler *hdl; unsigned int raw_size; unsigned int comp_size; int rc = 0; if (mutex_lock_interruptible(vfd->lock)) return -ERESTARTSYS; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { rc = -ENOMEM; goto open_unlock; } if (vfd == &dev->stateful_enc.vfd) ctx->is_enc = true; else if (vfd == &dev->stateless_dec.vfd) ctx->is_stateless = true; v4l2_fh_init(&ctx->fh, video_devdata(file)); ctx->dev = dev; hdl = &ctx->hdl; v4l2_ctrl_handler_init(hdl, 5); v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE, 
1, 16, 1, 10); v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, V4L2_CID_FWHT_I_FRAME_QP, 1, 31, 1, 20); v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, V4L2_CID_FWHT_P_FRAME_QP, 1, 31, 1, 20); if (ctx->is_stateless) v4l2_ctrl_new_custom(hdl, &vicodec_ctrl_stateless_state, NULL); else v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, ctx->is_enc ? V4L2_CID_MIN_BUFFERS_FOR_OUTPUT : V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, VICODEC_REC_BUFS, VICODEC_REC_BUFS, 1, VICODEC_REC_BUFS); if (hdl->error) { rc = hdl->error; v4l2_ctrl_handler_free(hdl); kfree(ctx); goto open_unlock; } ctx->fh.ctrl_handler = hdl; v4l2_ctrl_handler_setup(hdl); if (ctx->is_enc) ctx->q_data[V4L2_M2M_SRC].info = info; else if (ctx->is_stateless) ctx->q_data[V4L2_M2M_SRC].info = &pixfmt_stateless_fwht; else ctx->q_data[V4L2_M2M_SRC].info = &pixfmt_fwht; ctx->q_data[V4L2_M2M_SRC].coded_width = 1280; ctx->q_data[V4L2_M2M_SRC].coded_height = 720; ctx->q_data[V4L2_M2M_SRC].visible_width = 1280; ctx->q_data[V4L2_M2M_SRC].visible_height = 720; raw_size = 1280 * 720 * info->sizeimage_mult / info->sizeimage_div; comp_size = 1280 * 720 * pixfmt_fwht.sizeimage_mult / pixfmt_fwht.sizeimage_div; if (ctx->is_enc) ctx->q_data[V4L2_M2M_SRC].sizeimage = raw_size; else if (ctx->is_stateless) ctx->q_data[V4L2_M2M_SRC].sizeimage = comp_size; else ctx->q_data[V4L2_M2M_SRC].sizeimage = comp_size + sizeof(struct fwht_cframe_hdr); ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC]; if (ctx->is_enc) { ctx->q_data[V4L2_M2M_DST].info = &pixfmt_fwht; ctx->q_data[V4L2_M2M_DST].sizeimage = comp_size + sizeof(struct fwht_cframe_hdr); } else { ctx->q_data[V4L2_M2M_DST].info = info; ctx->q_data[V4L2_M2M_DST].sizeimage = raw_size; } ctx->state.colorspace = V4L2_COLORSPACE_REC709; if (ctx->is_enc) { ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->stateful_enc.m2m_dev, ctx, &queue_init); ctx->lock = &dev->stateful_enc.lock; } else if (ctx->is_stateless) { ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->stateless_dec.m2m_dev, ctx, &queue_init); ctx->lock = &dev->stateless_dec.lock; } else { ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->stateful_dec.m2m_dev, ctx, &queue_init); ctx->lock = &dev->stateful_dec.lock; } if (IS_ERR(ctx->fh.m2m_ctx)) { rc = PTR_ERR(ctx->fh.m2m_ctx); v4l2_ctrl_handler_free(hdl); v4l2_fh_exit(&ctx->fh); kfree(ctx); goto open_unlock; } v4l2_fh_add(&ctx->fh, file); open_unlock: mutex_unlock(vfd->lock); return rc; } static int vicodec_release(struct file *file) { struct video_device *vfd = video_devdata(file); struct vicodec_ctx *ctx = file2ctx(file); mutex_lock(vfd->lock); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); mutex_unlock(vfd->lock); v4l2_fh_del(&ctx->fh, file); v4l2_fh_exit(&ctx->fh); v4l2_ctrl_handler_free(&ctx->hdl); kvfree(ctx->state.compressed_frame); kfree(ctx); return 0; } static int vicodec_request_validate(struct media_request *req) { struct media_request_object *obj; struct v4l2_ctrl_handler *parent_hdl, *hdl; struct vicodec_ctx *ctx = NULL; struct v4l2_ctrl *ctrl; unsigned int count; list_for_each_entry(obj, &req->objects, list) { struct vb2_buffer *vb; if (vb2_request_object_is_buffer(obj)) { vb = container_of(obj, struct vb2_buffer, req_obj); ctx = vb2_get_drv_priv(vb->vb2_queue); break; } } if (!ctx) { pr_err("No buffer was provided with the request\n"); return -ENOENT; } count = vb2_request_buffer_cnt(req); if (!count) { v4l2_info(&ctx->dev->v4l2_dev, "No buffer was provided with the request\n"); return -ENOENT; } else if (count > 1) { v4l2_info(&ctx->dev->v4l2_dev, "More than one buffer was provided with the request\n"); return -EINVAL; } parent_hdl = &ctx->hdl; 
hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl); if (!hdl) { v4l2_info(&ctx->dev->v4l2_dev, "Missing codec control\n"); return -ENOENT; } ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, vicodec_ctrl_stateless_state.id); v4l2_ctrl_request_hdl_put(hdl); if (!ctrl) { v4l2_info(&ctx->dev->v4l2_dev, "Missing required codec control\n"); return -ENOENT; } return vb2_request_validate(req); } static const struct v4l2_file_operations vicodec_fops = { .owner = THIS_MODULE, .open = vicodec_open, .release = vicodec_release, .poll = v4l2_m2m_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = v4l2_m2m_fop_mmap, }; static const struct video_device vicodec_videodev = { .name = VICODEC_NAME, .vfl_dir = VFL_DIR_M2M, .fops = &vicodec_fops, .ioctl_ops = &vicodec_ioctl_ops, .minor = -1, .release = video_device_release_empty, }; static const struct media_device_ops vicodec_m2m_media_ops = { .req_validate = vicodec_request_validate, .req_queue = v4l2_m2m_request_queue, }; static const struct v4l2_m2m_ops m2m_ops = { .device_run = device_run, .job_ready = job_ready, }; static int register_instance(struct vicodec_dev *dev, struct vicodec_dev_instance *dev_instance, const char *name, bool is_enc, bool is_stateless) { struct video_device *vfd; int ret; spin_lock_init(&dev_instance->lock); mutex_init(&dev_instance->mutex); dev_instance->m2m_dev = v4l2_m2m_init(&m2m_ops); if (IS_ERR(dev_instance->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to init vicodec enc device\n"); return PTR_ERR(dev_instance->m2m_dev); } dev_instance->vfd = vicodec_videodev; vfd = &dev_instance->vfd; vfd->lock = &dev_instance->mutex; vfd->v4l2_dev = &dev->v4l2_dev; strscpy(vfd->name, name, sizeof(vfd->name)); vfd->device_caps = V4L2_CAP_STREAMING | (multiplanar ? V4L2_CAP_VIDEO_M2M_MPLANE : V4L2_CAP_VIDEO_M2M); if (is_enc || is_stateless) { v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD); v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD); } if (!is_enc) { v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD); v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD); } video_set_drvdata(vfd, dev); ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device '%s'\n", name); v4l2_m2m_release(dev_instance->m2m_dev); return ret; } v4l2_info(&dev->v4l2_dev, "Device '%s' registered as /dev/video%d\n", name, vfd->num); return 0; } static void vicodec_v4l2_dev_release(struct v4l2_device *v4l2_dev) { struct vicodec_dev *dev = container_of(v4l2_dev, struct vicodec_dev, v4l2_dev); v4l2_device_unregister(&dev->v4l2_dev); v4l2_m2m_release(dev->stateful_enc.m2m_dev); v4l2_m2m_release(dev->stateful_dec.m2m_dev); v4l2_m2m_release(dev->stateless_dec.m2m_dev); #ifdef CONFIG_MEDIA_CONTROLLER media_device_cleanup(&dev->mdev); #endif kfree(dev); } static int vicodec_probe(struct platform_device *pdev) { struct vicodec_dev *dev; int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) goto free_dev; dev->v4l2_dev.release = vicodec_v4l2_dev_release; #ifdef CONFIG_MEDIA_CONTROLLER dev->mdev.dev = &pdev->dev; strscpy(dev->mdev.model, "vicodec", sizeof(dev->mdev.model)); strscpy(dev->mdev.bus_info, "platform:vicodec", sizeof(dev->mdev.bus_info)); media_device_init(&dev->mdev); dev->mdev.ops = &vicodec_m2m_media_ops; dev->v4l2_dev.mdev = &dev->mdev; #endif platform_set_drvdata(pdev, dev); ret = register_instance(dev, &dev->stateful_enc, "stateful-encoder", true, false); if (ret) goto unreg_dev; ret = register_instance(dev, &dev->stateful_dec, 
"stateful-decoder", false, false); if (ret) goto unreg_sf_enc; ret = register_instance(dev, &dev->stateless_dec, "stateless-decoder", false, true); if (ret) goto unreg_sf_dec; #ifdef CONFIG_MEDIA_CONTROLLER ret = v4l2_m2m_register_media_controller(dev->stateful_enc.m2m_dev, &dev->stateful_enc.vfd, MEDIA_ENT_F_PROC_VIDEO_ENCODER); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller for enc\n"); goto unreg_m2m; } ret = v4l2_m2m_register_media_controller(dev->stateful_dec.m2m_dev, &dev->stateful_dec.vfd, MEDIA_ENT_F_PROC_VIDEO_DECODER); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller for dec\n"); goto unreg_m2m_sf_enc_mc; } ret = v4l2_m2m_register_media_controller(dev->stateless_dec.m2m_dev, &dev->stateless_dec.vfd, MEDIA_ENT_F_PROC_VIDEO_DECODER); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller for stateless dec\n"); goto unreg_m2m_sf_dec_mc; } ret = media_device_register(&dev->mdev); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n"); goto unreg_m2m_sl_dec_mc; } #endif return 0; #ifdef CONFIG_MEDIA_CONTROLLER unreg_m2m_sl_dec_mc: v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev); unreg_m2m_sf_dec_mc: v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev); unreg_m2m_sf_enc_mc: v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev); unreg_m2m: video_unregister_device(&dev->stateless_dec.vfd); v4l2_m2m_release(dev->stateless_dec.m2m_dev); #endif unreg_sf_dec: video_unregister_device(&dev->stateful_dec.vfd); v4l2_m2m_release(dev->stateful_dec.m2m_dev); unreg_sf_enc: video_unregister_device(&dev->stateful_enc.vfd); v4l2_m2m_release(dev->stateful_enc.m2m_dev); unreg_dev: v4l2_device_unregister(&dev->v4l2_dev); free_dev: kfree(dev); return ret; } static void vicodec_remove(struct platform_device *pdev) { struct vicodec_dev *dev = platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " VICODEC_NAME); #ifdef CONFIG_MEDIA_CONTROLLER media_device_unregister(&dev->mdev); v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev); v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev); v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev); #endif video_unregister_device(&dev->stateful_enc.vfd); video_unregister_device(&dev->stateful_dec.vfd); video_unregister_device(&dev->stateless_dec.vfd); v4l2_device_put(&dev->v4l2_dev); } static struct platform_driver vicodec_pdrv = { .probe = vicodec_probe, .remove = vicodec_remove, .driver = { .name = VICODEC_NAME, }, }; static void __exit vicodec_exit(void) { platform_driver_unregister(&vicodec_pdrv); platform_device_unregister(&vicodec_pdev); } static int __init vicodec_init(void) { int ret; ret = platform_device_register(&vicodec_pdev); if (ret) return ret; ret = platform_driver_register(&vicodec_pdrv); if (ret) platform_device_unregister(&vicodec_pdev); return ret; } module_init(vicodec_init); module_exit(vicodec_exit);
// SPDX-License-Identifier: GPL-2.0-only /* * vhost transport for vsock * * Copyright (C) 2013-2015 Red Hat, Inc. * Author: Asias He <asias@redhat.com> * Stefan Hajnoczi <stefanha@redhat.com> */ #include <linux/miscdevice.h> #include <linux/atomic.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/vmalloc.h> #include <net/sock.h> #include <linux/virtio_vsock.h> #include <linux/vhost.h> #include <linux/hashtable.h> #include <net/af_vsock.h> #include "vhost.h" #define VHOST_VSOCK_DEFAULT_HOST_CID 2 /* Max number of bytes transferred before requeueing the job. * Using this limit prevents one virtqueue from starving others. */ #define VHOST_VSOCK_WEIGHT 0x80000 /* Max number of packets transferred before requeueing the job. * Using this limit prevents one virtqueue from starving others with * small pkts. */ #define VHOST_VSOCK_PKT_WEIGHT 256 static const int vhost_vsock_bits[] = { VHOST_FEATURES, VIRTIO_F_ACCESS_PLATFORM, VIRTIO_VSOCK_F_SEQPACKET }; #define VHOST_VSOCK_FEATURES VHOST_FEATURES_U64(vhost_vsock_bits, 0) enum { VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) }; /* Used to track all the vhost_vsock instances on the system. */ static DEFINE_MUTEX(vhost_vsock_mutex); static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8); struct vhost_vsock { struct vhost_dev dev; struct vhost_virtqueue vqs[2]; /* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */ struct hlist_node hash; struct vhost_work send_pkt_work; struct sk_buff_head send_pkt_queue; /* host->guest pending packets */ atomic_t queued_replies; u32 guest_cid; bool seqpacket_allow; }; static u32 vhost_transport_get_local_cid(void) { return VHOST_VSOCK_DEFAULT_HOST_CID; } /* Callers must be in an RCU read section or hold the vhost_vsock_mutex. * The return value can only be dereferenced while within the section.
*/ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) { struct vhost_vsock *vsock; hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid, lockdep_is_held(&vhost_vsock_mutex)) { u32 other_cid = vsock->guest_cid; /* Skip instances that have no CID yet */ if (other_cid == 0) continue; if (other_cid == guest_cid) return vsock; } return NULL; } static void vhost_transport_do_send_pkt(struct vhost_vsock *vsock, struct vhost_virtqueue *vq) { struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; int pkts = 0, total_len = 0; bool added = false; bool restart_tx = false; mutex_lock(&vq->mutex); if (!vhost_vq_get_backend(vq)) goto out; if (!vq_meta_prefetch(vq)) goto out; /* Avoid further vmexits, we're already processing the virtqueue */ vhost_disable_notify(&vsock->dev, vq); do { struct virtio_vsock_hdr *hdr; size_t iov_len, payload_len; struct iov_iter iov_iter; u32 flags_to_restore = 0; struct sk_buff *skb; unsigned out, in; size_t nbytes; u32 offset; int head; skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue); if (!skb) { vhost_enable_notify(&vsock->dev, vq); break; } head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in, NULL, NULL); if (head < 0) { virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb); break; } if (head == vq->num) { virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb); /* We cannot finish yet if more buffers snuck in while * re-enabling notify. */ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { vhost_disable_notify(&vsock->dev, vq); continue; } break; } if (out) { kfree_skb(skb); vq_err(vq, "Expected 0 output buffers, got %u\n", out); break; } iov_len = iov_length(&vq->iov[out], in); if (iov_len < sizeof(*hdr)) { kfree_skb(skb); vq_err(vq, "Buffer len [%zu] too small\n", iov_len); break; } iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len); offset = VIRTIO_VSOCK_SKB_CB(skb)->offset; payload_len = skb->len - offset; hdr = virtio_vsock_hdr(skb); /* If the packet is greater than the space available in the * buffer, we split it using multiple buffers. */ if (payload_len > iov_len - sizeof(*hdr)) { payload_len = iov_len - sizeof(*hdr); /* As we are copying pieces of large packet's buffer to * small rx buffers, headers of packets in rx queue are * created dynamically and are initialized with header * of current packet(except length). But in case of * SOCK_SEQPACKET, we also must clear message delimeter * bit (VIRTIO_VSOCK_SEQ_EOM) and MSG_EOR bit * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise, * there will be sequence of packets with these * bits set. After initialized header will be copied to * rx buffer, these required bits will be restored. */ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) { hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM); flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM; if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) { hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR); flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR; } } } /* Set the correct length in the header */ hdr->len = cpu_to_le32(payload_len); nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter); if (nbytes != sizeof(*hdr)) { kfree_skb(skb); vq_err(vq, "Faulted on copying pkt hdr\n"); break; } if (skb_copy_datagram_iter(skb, offset, &iov_iter, payload_len)) { kfree_skb(skb); vq_err(vq, "Faulted on copying pkt buf\n"); break; } /* Deliver to monitoring devices all packets that we * will transmit. 
*/ virtio_transport_deliver_tap_pkt(skb); vhost_add_used(vq, head, sizeof(*hdr) + payload_len); added = true; VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len; total_len += payload_len; /* If we didn't send all the payload we can requeue the packet * to send it with the next available buffer. */ if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) { hdr->flags |= cpu_to_le32(flags_to_restore); /* We are queueing the same skb to handle * the remaining bytes, and we want to deliver it * to monitoring devices in the next iteration. */ virtio_vsock_skb_clear_tap_delivered(skb); virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb); } else { if (virtio_vsock_skb_reply(skb)) { int val; val = atomic_dec_return(&vsock->queued_replies); /* Do we have resources to resume tx * processing? */ if (val + 1 == tx_vq->num) restart_tx = true; } virtio_transport_consume_skb_sent(skb, true); } } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); if (added) vhost_signal(&vsock->dev, vq); out: mutex_unlock(&vq->mutex); if (restart_tx) vhost_poll_queue(&tx_vq->poll); } static void vhost_transport_send_pkt_work(struct vhost_work *work) { struct vhost_virtqueue *vq; struct vhost_vsock *vsock; vsock = container_of(work, struct vhost_vsock, send_pkt_work); vq = &vsock->vqs[VSOCK_VQ_RX]; vhost_transport_do_send_pkt(vsock, vq); } static int vhost_transport_send_pkt(struct sk_buff *skb) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); struct vhost_vsock *vsock; int len = skb->len; rcu_read_lock(); /* Find the vhost_vsock according to guest context id */ vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid)); if (!vsock) { rcu_read_unlock(); kfree_skb(skb); return -ENODEV; } if (virtio_vsock_skb_reply(skb)) atomic_inc(&vsock->queued_replies); virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb); vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work); rcu_read_unlock(); return len; } static int vhost_transport_cancel_pkt(struct vsock_sock *vsk) { struct vhost_vsock *vsock; int cnt = 0; int ret = -ENODEV; rcu_read_lock(); /* Find the vhost_vsock according to guest context id */ vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); if (!vsock) goto out; cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue); if (cnt) { struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; int new_cnt; new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num) vhost_poll_queue(&tx_vq->poll); } ret = 0; out: rcu_read_unlock(); return ret; } static struct sk_buff * vhost_vsock_alloc_skb(struct vhost_virtqueue *vq, unsigned int out, unsigned int in) { struct virtio_vsock_hdr *hdr; struct iov_iter iov_iter; struct sk_buff *skb; size_t payload_len; size_t nbytes; size_t len; if (in != 0) { vq_err(vq, "Expected 0 input buffers, got %u\n", in); return NULL; } len = iov_length(vq->iov, out); if (len < VIRTIO_VSOCK_SKB_HEADROOM || len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM) return NULL; /* len contains both payload and hdr */ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL); if (!skb) return NULL; iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len); hdr = virtio_vsock_hdr(skb); nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter); if (nbytes != sizeof(*hdr)) { vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n", sizeof(*hdr), nbytes); kfree_skb(skb); return NULL; } payload_len = le32_to_cpu(hdr->len); /* No payload */ if (!payload_len) return skb; /* The pkt is too big or the length in the header is invalid */ if 
(payload_len + sizeof(*hdr) > len) { kfree_skb(skb); return NULL; } virtio_vsock_skb_put(skb, payload_len); if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) { vq_err(vq, "Failed to copy %zu byte payload\n", payload_len); kfree_skb(skb); return NULL; } return skb; } /* Is there space left for replies to rx packets? */ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock) { struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; int val; smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */ val = atomic_read(&vsock->queued_replies); return val < vq->num; } static bool vhost_transport_msgzerocopy_allow(void) { return true; } static bool vhost_transport_seqpacket_allow(u32 remote_cid); static struct virtio_transport vhost_transport = { .transport = { .module = THIS_MODULE, .get_local_cid = vhost_transport_get_local_cid, .init = virtio_transport_do_socket_init, .destruct = virtio_transport_destruct, .release = virtio_transport_release, .connect = virtio_transport_connect, .shutdown = virtio_transport_shutdown, .cancel_pkt = vhost_transport_cancel_pkt, .dgram_enqueue = virtio_transport_dgram_enqueue, .dgram_dequeue = virtio_transport_dgram_dequeue, .dgram_bind = virtio_transport_dgram_bind, .dgram_allow = virtio_transport_dgram_allow, .stream_enqueue = virtio_transport_stream_enqueue, .stream_dequeue = virtio_transport_stream_dequeue, .stream_has_data = virtio_transport_stream_has_data, .stream_has_space = virtio_transport_stream_has_space, .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, .stream_is_active = virtio_transport_stream_is_active, .stream_allow = virtio_transport_stream_allow, .seqpacket_dequeue = virtio_transport_seqpacket_dequeue, .seqpacket_enqueue = virtio_transport_seqpacket_enqueue, .seqpacket_allow = vhost_transport_seqpacket_allow, .seqpacket_has_data = virtio_transport_seqpacket_has_data, .msgzerocopy_allow = vhost_transport_msgzerocopy_allow, .notify_poll_in = virtio_transport_notify_poll_in, .notify_poll_out = virtio_transport_notify_poll_out, .notify_recv_init = virtio_transport_notify_recv_init, .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, .notify_send_init = virtio_transport_notify_send_init, .notify_send_pre_block = virtio_transport_notify_send_pre_block, .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue, .notify_buffer_size = virtio_transport_notify_buffer_size, .notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat, .unsent_bytes = virtio_transport_unsent_bytes, .read_skb = virtio_transport_read_skb, }, .send_pkt = vhost_transport_send_pkt, }; static bool vhost_transport_seqpacket_allow(u32 remote_cid) { struct vhost_vsock *vsock; bool seqpacket_allow = false; rcu_read_lock(); vsock = vhost_vsock_get(remote_cid); if (vsock) seqpacket_allow = vsock->seqpacket_allow; rcu_read_unlock(); return seqpacket_allow; } static void vhost_vsock_handle_tx_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, poll.work); struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, dev); int head, pkts = 0, total_len = 0; unsigned int out, in; struct sk_buff *skb; bool added = false; mutex_lock(&vq->mutex); if (!vhost_vq_get_backend(vq)) goto out; if (!vq_meta_prefetch(vq)) goto out; vhost_disable_notify(&vsock->dev, vq); do { struct 
virtio_vsock_hdr *hdr; if (!vhost_vsock_more_replies(vsock)) { /* Stop tx until the device processes already * pending replies. Leave tx virtqueue * callbacks disabled. */ goto no_more_replies; } head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in, NULL, NULL); if (head < 0) break; if (head == vq->num) { if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { vhost_disable_notify(&vsock->dev, vq); continue; } break; } skb = vhost_vsock_alloc_skb(vq, out, in); if (!skb) { vq_err(vq, "Faulted on pkt\n"); continue; } total_len += sizeof(*hdr) + skb->len; /* Deliver to monitoring devices all received packets */ virtio_transport_deliver_tap_pkt(skb); hdr = virtio_vsock_hdr(skb); /* Only accept correctly addressed packets */ if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid && le64_to_cpu(hdr->dst_cid) == vhost_transport_get_local_cid()) virtio_transport_recv_pkt(&vhost_transport, skb); else kfree_skb(skb); vhost_add_used(vq, head, 0); added = true; } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); no_more_replies: if (added) vhost_signal(&vsock->dev, vq); out: mutex_unlock(&vq->mutex); } static void vhost_vsock_handle_rx_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, poll.work); struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, dev); vhost_transport_do_send_pkt(vsock, vq); } static int vhost_vsock_start(struct vhost_vsock *vsock) { struct vhost_virtqueue *vq; size_t i; int ret; mutex_lock(&vsock->dev.mutex); ret = vhost_dev_check_owner(&vsock->dev); if (ret) goto err; for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); if (!vhost_vq_access_ok(vq)) { ret = -EFAULT; goto err_vq; } if (!vhost_vq_get_backend(vq)) { vhost_vq_set_backend(vq, vsock); ret = vhost_vq_init_access(vq); if (ret) goto err_vq; } mutex_unlock(&vq->mutex); } /* Some packets may have been queued before the device was started, * let's kick the send worker to send them. */ vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work); mutex_unlock(&vsock->dev.mutex); return 0; err_vq: vhost_vq_set_backend(vq, NULL); mutex_unlock(&vq->mutex); for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); vhost_vq_set_backend(vq, NULL); mutex_unlock(&vq->mutex); } err: mutex_unlock(&vsock->dev.mutex); return ret; } static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) { size_t i; int ret = 0; mutex_lock(&vsock->dev.mutex); if (check_owner) { ret = vhost_dev_check_owner(&vsock->dev); if (ret) goto err; } for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { struct vhost_virtqueue *vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); vhost_vq_set_backend(vq, NULL); mutex_unlock(&vq->mutex); } err: mutex_unlock(&vsock->dev.mutex); return ret; } static void vhost_vsock_free(struct vhost_vsock *vsock) { kvfree(vsock); } static int vhost_vsock_dev_open(struct inode *inode, struct file *file) { struct vhost_virtqueue **vqs; struct vhost_vsock *vsock; int ret; /* This struct is large and allocation could fail, fall back to vmalloc * if there is no other way. 
*/ vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!vsock) return -ENOMEM; vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL); if (!vqs) { ret = -ENOMEM; goto out; } vsock->guest_cid = 0; /* no CID assigned yet */ vsock->seqpacket_allow = false; atomic_set(&vsock->queued_replies, 0); vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX]; vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX]; vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT, VHOST_VSOCK_WEIGHT, true, NULL); file->private_data = vsock; skb_queue_head_init(&vsock->send_pkt_queue); vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work); return 0; out: vhost_vsock_free(vsock); return ret; } static void vhost_vsock_flush(struct vhost_vsock *vsock) { vhost_dev_flush(&vsock->dev); } static void vhost_vsock_reset_orphans(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); /* vmci_transport.c doesn't take sk_lock here either. At least we're * under vsock_table_lock so the sock cannot disappear while we're * executing. */ rcu_read_lock(); /* If the peer is still valid, no need to reset connection */ if (vhost_vsock_get(vsk->remote_addr.svm_cid)) { rcu_read_unlock(); return; } rcu_read_unlock(); /* If the close timeout is pending, let it expire. This avoids races * with the timeout callback. */ if (vsk->close_work_scheduled) return; sock_set_flag(sk, SOCK_DONE); vsk->peer_shutdown = SHUTDOWN_MASK; sk->sk_state = SS_UNCONNECTED; sk->sk_err = ECONNRESET; sk_error_report(sk); } static int vhost_vsock_dev_release(struct inode *inode, struct file *file) { struct vhost_vsock *vsock = file->private_data; mutex_lock(&vhost_vsock_mutex); if (vsock->guest_cid) hash_del_rcu(&vsock->hash); mutex_unlock(&vhost_vsock_mutex); /* Wait for other CPUs to finish using vsock */ synchronize_rcu(); /* Iterating over all connections for all CIDs to find orphans is * inefficient. Room for improvement here. */ vsock_for_each_connected_socket(&vhost_transport.transport, vhost_vsock_reset_orphans); /* Don't check the owner, because we are in the release path, so we * need to stop the vsock device in any case. * vhost_vsock_stop() can not fail in this case, so we don't need to * check the return code. */ vhost_vsock_stop(vsock, false); vhost_vsock_flush(vsock); vhost_dev_stop(&vsock->dev); virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue); vhost_dev_cleanup(&vsock->dev); kfree(vsock->dev.vqs); vhost_vsock_free(vsock); return 0; } static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) { struct vhost_vsock *other; /* Refuse reserved CIDs */ if (guest_cid <= VMADDR_CID_HOST || guest_cid == U32_MAX) return -EINVAL; /* 64-bit CIDs are not yet supported */ if (guest_cid > U32_MAX) return -EINVAL; /* Refuse if CID is assigned to the guest->host transport (i.e. nested * VM), to make the loopback work. 
*/ if (vsock_find_cid(guest_cid)) return -EADDRINUSE; /* Refuse if CID is already in use */ mutex_lock(&vhost_vsock_mutex); other = vhost_vsock_get(guest_cid); if (other && other != vsock) { mutex_unlock(&vhost_vsock_mutex); return -EADDRINUSE; } if (vsock->guest_cid) hash_del_rcu(&vsock->hash); vsock->guest_cid = guest_cid; hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid); mutex_unlock(&vhost_vsock_mutex); return 0; } static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features) { struct vhost_virtqueue *vq; int i; if (features & ~VHOST_VSOCK_FEATURES) return -EOPNOTSUPP; mutex_lock(&vsock->dev.mutex); if ((features & (1 << VHOST_F_LOG_ALL)) && !vhost_log_access_ok(&vsock->dev)) { goto err; } if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) { if (vhost_init_device_iotlb(&vsock->dev)) goto err; } vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET); for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); vq->acked_features = features; mutex_unlock(&vq->mutex); } mutex_unlock(&vsock->dev.mutex); return 0; err: mutex_unlock(&vsock->dev.mutex); return -EFAULT; } static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { struct vhost_vsock *vsock = f->private_data; void __user *argp = (void __user *)arg; u64 guest_cid; u64 features; int start; int r; switch (ioctl) { case VHOST_VSOCK_SET_GUEST_CID: if (copy_from_user(&guest_cid, argp, sizeof(guest_cid))) return -EFAULT; return vhost_vsock_set_cid(vsock, guest_cid); case VHOST_VSOCK_SET_RUNNING: if (copy_from_user(&start, argp, sizeof(start))) return -EFAULT; if (start) return vhost_vsock_start(vsock); else return vhost_vsock_stop(vsock, true); case VHOST_GET_FEATURES: features = VHOST_VSOCK_FEATURES; if (copy_to_user(argp, &features, sizeof(features))) return -EFAULT; return 0; case VHOST_SET_FEATURES: if (copy_from_user(&features, argp, sizeof(features))) return -EFAULT; return vhost_vsock_set_features(vsock, features); case VHOST_GET_BACKEND_FEATURES: features = VHOST_VSOCK_BACKEND_FEATURES; if (copy_to_user(argp, &features, sizeof(features))) return -EFAULT; return 0; case VHOST_SET_BACKEND_FEATURES: if (copy_from_user(&features, argp, sizeof(features))) return -EFAULT; if (features & ~VHOST_VSOCK_BACKEND_FEATURES) return -EOPNOTSUPP; vhost_set_backend_features(&vsock->dev, features); return 0; default: mutex_lock(&vsock->dev.mutex); r = vhost_dev_ioctl(&vsock->dev, ioctl, argp); if (r == -ENOIOCTLCMD) r = vhost_vring_ioctl(&vsock->dev, ioctl, argp); else vhost_vsock_flush(vsock); mutex_unlock(&vsock->dev.mutex); return r; } } static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct vhost_vsock *vsock = file->private_data; struct vhost_dev *dev = &vsock->dev; int noblock = file->f_flags & O_NONBLOCK; return vhost_chr_read_iter(dev, to, noblock); } static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct vhost_vsock *vsock = file->private_data; struct vhost_dev *dev = &vsock->dev; return vhost_chr_write_iter(dev, from); } static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait) { struct vhost_vsock *vsock = file->private_data; struct vhost_dev *dev = &vsock->dev; return vhost_chr_poll(file, dev, wait); } static const struct file_operations vhost_vsock_fops = { .owner = THIS_MODULE, .open = vhost_vsock_dev_open, .release = vhost_vsock_dev_release, .llseek = noop_llseek, 
.unlocked_ioctl = vhost_vsock_dev_ioctl, .compat_ioctl = compat_ptr_ioctl, .read_iter = vhost_vsock_chr_read_iter, .write_iter = vhost_vsock_chr_write_iter, .poll = vhost_vsock_chr_poll, }; static struct miscdevice vhost_vsock_misc = { .minor = VHOST_VSOCK_MINOR, .name = "vhost-vsock", .fops = &vhost_vsock_fops, }; static int __init vhost_vsock_init(void) { int ret; ret = vsock_core_register(&vhost_transport.transport, VSOCK_TRANSPORT_F_H2G); if (ret < 0) return ret; ret = misc_register(&vhost_vsock_misc); if (ret) { vsock_core_unregister(&vhost_transport.transport); return ret; } return 0; }; static void __exit vhost_vsock_exit(void) { misc_deregister(&vhost_vsock_misc); vsock_core_unregister(&vhost_transport.transport); }; module_init(vhost_vsock_init); module_exit(vhost_vsock_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Asias He"); MODULE_DESCRIPTION("vhost transport for vsock "); MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR); MODULE_ALIAS("devname:vhost-vsock");
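The ioctls dispatched by vhost_vsock_dev_ioctl() above are normally driven by a VMM. A minimal, hedged userspace sketch of that flow follows; it is not part of the file above, and it omits error details and the VHOST_SET_VRING_* virtqueue setup a real VMM must do.

/* Sketch: open the misc device and start a vhost-vsock instance. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

static int vhost_vsock_setup(uint64_t guest_cid)
{
	int fd = open("/dev/vhost-vsock", O_RDWR);
	int running = 1;

	if (fd < 0)
		return -1;
	/* Take ownership of the device before any other ioctl. */
	if (ioctl(fd, VHOST_SET_OWNER) < 0)
		goto err;
	/* Handled by vhost_vsock_set_cid(): hashes the instance by CID. */
	if (ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid) < 0)
		goto err;
	/* Handled by vhost_vsock_start(); a real VMM configures the TX/RX
	 * virtqueues with VHOST_SET_VRING_* before this point. */
	if (ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running) < 0)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}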
// SPDX-License-Identifier: GPL-2.0 /* XDP sockets monitoring support * * Copyright(c) 2019 Intel Corporation. * * Author: Björn Töpel <bjorn.topel@intel.com> */ #include <linux/module.h> #include <net/xdp_sock.h> #include <linux/xdp_diag.h> #include <linux/sock_diag.h> #include "xsk_queue.h" #include "xsk.h" static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb) { struct xdp_diag_info di = {}; di.ifindex = xs->dev ? xs->dev->ifindex : 0; di.queue_id = xs->queue_id; return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di); } static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type, struct sk_buff *nlskb) { struct xdp_diag_ring dr = {}; dr.entries = queue->nentries; return nla_put(nlskb, nl_type, sizeof(dr), &dr); } static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs, struct sk_buff *nlskb) { int err = 0; if (xs->rx) err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb); if (!err && xs->tx) err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb); return err; } static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb) { struct xsk_buff_pool *pool = xs->pool; struct xdp_umem *umem = xs->umem; struct xdp_diag_umem du = {}; int err; if (!umem) return 0; du.id = umem->id; du.size = umem->size; du.num_pages = umem->npgs; du.chunk_size = umem->chunk_size; du.headroom = umem->headroom; du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0; du.queue_id = pool ? pool->queue_id : 0; du.flags = 0; if (umem->zc) du.flags |= XDP_DU_F_ZEROCOPY; du.refs = refcount_read(&umem->users); err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du); if (!err && pool && pool->fq) err = xsk_diag_put_ring(pool->fq, XDP_DIAG_UMEM_FILL_RING, nlskb); if (!err && pool && pool->cq) err = xsk_diag_put_ring(pool->cq, XDP_DIAG_UMEM_COMPLETION_RING, nlskb); return err; } static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb) { struct xdp_diag_stats du = {}; du.n_rx_dropped = xs->rx_dropped; du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx); du.n_rx_full = xs->rx_queue_full; du.n_fill_ring_empty = xs->pool ?
xskq_nb_queue_empty_descs(xs->pool->fq) : 0; du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx); du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx); return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du); } static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, struct xdp_diag_req *req, struct user_namespace *user_ns, u32 portid, u32 seq, u32 flags, int sk_ino) { struct xdp_sock *xs = xdp_sk(sk); struct xdp_diag_msg *msg; struct nlmsghdr *nlh; nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg), flags); if (!nlh) return -EMSGSIZE; msg = nlmsg_data(nlh); memset(msg, 0, sizeof(*msg)); msg->xdiag_family = AF_XDP; msg->xdiag_type = sk->sk_type; msg->xdiag_ino = sk_ino; sock_diag_save_cookie(sk, msg->xdiag_cookie); mutex_lock(&xs->mutex); if (READ_ONCE(xs->state) == XSK_UNBOUND) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb)) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_INFO) && nla_put_u32(nlskb, XDP_DIAG_UID, from_kuid_munged(user_ns, sk_uid(sk)))) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_RING_CFG) && xsk_diag_put_rings_cfg(xs, nlskb)) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_UMEM) && xsk_diag_put_umem(xs, nlskb)) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_MEMINFO) && sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO)) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_STATS) && xsk_diag_put_stats(xs, nlskb)) goto out_nlmsg_trim; mutex_unlock(&xs->mutex); nlmsg_end(nlskb, nlh); return 0; out_nlmsg_trim: mutex_unlock(&xs->mutex); nlmsg_cancel(nlskb, nlh); return -EMSGSIZE; } static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb) { struct xdp_diag_req *req = nlmsg_data(cb->nlh); struct net *net = sock_net(nlskb->sk); int num = 0, s_num = cb->args[0]; struct sock *sk; mutex_lock(&net->xdp.lock); sk_for_each(sk, &net->xdp.list) { if (!net_eq(sock_net(sk), net)) continue; if (num++ < s_num) continue; if (xsk_diag_fill(sk, nlskb, req, sk_user_ns(NETLINK_CB(cb->skb).sk), NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, sock_i_ino(sk)) < 0) { num--; break; } } mutex_unlock(&net->xdp.lock); cb->args[0] = num; return nlskb->len; } static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr) { struct netlink_dump_control c = { .dump = xsk_diag_dump }; int hdrlen = sizeof(struct xdp_diag_req); struct net *net = sock_net(nlskb->sk); if (nlmsg_len(hdr) < hdrlen) return -EINVAL; if (!(hdr->nlmsg_flags & NLM_F_DUMP)) return -EOPNOTSUPP; return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c); } static const struct sock_diag_handler xsk_diag_handler = { .owner = THIS_MODULE, .family = AF_XDP, .dump = xsk_diag_handler_dump, }; static int __init xsk_diag_init(void) { return sock_diag_register(&xsk_diag_handler); } static void __exit xsk_diag_exit(void) { sock_diag_unregister(&xsk_diag_handler); } module_init(xsk_diag_init); module_exit(xsk_diag_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XDP socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);
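xsk_diag_handler_dump() above answers SOCK_DIAG_BY_FAMILY dump requests for AF_XDP over NETLINK_SOCK_DIAG. A hedged sketch of the matching userspace request (reply parsing is omitted; this is not part of the file above, and it assumes AF_XDP is defined by the installed libc/kernel headers):

#include <sys/socket.h>
#include <unistd.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/xdp_diag.h>

static int xsk_diag_request(void)
{
	struct {
		struct nlmsghdr nlh;
		struct xdp_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			/* NLM_F_DUMP is required, see xsk_diag_handler_dump() */
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_XDP,
			.xdiag_show = XDP_SHOW_INFO | XDP_SHOW_RING_CFG |
				      XDP_SHOW_UMEM | XDP_SHOW_STATS,
		},
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;
	if (send(fd, &msg, sizeof(msg), 0) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* caller recv()s NLM_F_MULTI records until NLMSG_DONE */
}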
// SPDX-License-Identifier: GPL-2.0-only /* * fs/anon_inodes.c * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * * Thanks to Arnd Bergmann for code review and suggestions. * More changes for Thomas Gleixner suggestions. * */ #include <linux/cred.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/magic.h> #include <linux/anon_inodes.h> #include <linux/pseudo_fs.h> #include <linux/uaccess.h> #include "internal.h" static struct vfsmount *anon_inode_mnt __ro_after_init; static struct inode *anon_inode_inode __ro_after_init; /* * User space expects anonymous inodes to have no file type in st_mode. * * In particular, 'lsof' has this legacy logic: * * type = s->st_mode & S_IFMT; * switch (type) { * ... * case 0: * if (!strcmp(p, "anon_inode")) * Lf->ntype = Ntype = N_ANON_INODE; * * to detect our old anon_inode logic. * * Rather than mess with our internal sane inode data, just fix it * up here in getattr() by masking off the format bits. */ int anon_inode_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); stat->mode &= ~S_IFMT; return 0; } int anon_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { return -EOPNOTSUPP; } static const struct inode_operations anon_inode_operations = { .getattr = anon_inode_getattr, .setattr = anon_inode_setattr, }; /* * anon_inodefs_dname() is called from d_path().
*/ static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(buffer, buflen, "anon_inode:%s", dentry->d_name.name); } static const struct dentry_operations anon_inodefs_dentry_operations = { .d_dname = anon_inodefs_dname, }; static int anon_inodefs_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx = init_pseudo(fc, ANON_INODE_FS_MAGIC); if (!ctx) return -ENOMEM; fc->s_iflags |= SB_I_NOEXEC; fc->s_iflags |= SB_I_NODEV; ctx->dops = &anon_inodefs_dentry_operations; return 0; } static struct file_system_type anon_inode_fs_type = { .name = "anon_inodefs", .init_fs_context = anon_inodefs_init_fs_context, .kill_sb = kill_anon_super, }; /** * anon_inode_make_secure_inode - allocate an anonymous inode with security context * @sb: [in] Superblock to allocate from * @name: [in] Name of the class of the newfile (e.g., "secretmem") * @context_inode: * [in] Optional parent inode for security inheritance * * The function ensures proper security initialization through the LSM hook * security_inode_init_security_anon(). * * Return: Pointer to new inode on success, ERR_PTR on failure. */ struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name, const struct inode *context_inode) { struct inode *inode; int error; inode = alloc_anon_inode(sb); if (IS_ERR(inode)) return inode; inode->i_flags &= ~S_PRIVATE; inode->i_op = &anon_inode_operations; error = security_inode_init_security_anon(inode, &QSTR(name), context_inode); if (error) { iput(inode); return ERR_PTR(error); } return inode; } EXPORT_SYMBOL_FOR_MODULES(anon_inode_make_secure_inode, "kvm"); static struct file *__anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode, bool make_inode) { struct inode *inode; struct file *file; if (fops->owner && !try_module_get(fops->owner)) return ERR_PTR(-ENOENT); if (make_inode) { inode = anon_inode_make_secure_inode(anon_inode_mnt->mnt_sb, name, context_inode); if (IS_ERR(inode)) { file = ERR_CAST(inode); goto err; } } else { inode = anon_inode_inode; if (IS_ERR(inode)) { file = ERR_PTR(-ENODEV); goto err; } /* * We know the anon_inode inode count is always * greater than zero, so ihold() is safe. */ ihold(inode); } file = alloc_file_pseudo(inode, anon_inode_mnt, name, flags & (O_ACCMODE | O_NONBLOCK), fops); if (IS_ERR(file)) goto err_iput; file->f_mapping = inode->i_mapping; file->private_data = priv; return file; err_iput: iput(inode); err: module_put(fops->owner); return file; } /** * anon_inode_getfile - creates a new file instance by hooking it up to an * anonymous inode, and a dentry that describe the "class" * of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * * Creates a new file by hooking it on a single inode. This is useful for files * that do not need to have a full-fledged inode in order to operate correctly. * All the files created with anon_inode_getfile() will share a single inode, * hence saving memory and avoiding code duplication for the file/inode/dentry * setup. Returns the newly created file* or an error pointer. 
*/ struct file *anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags) { return __anon_inode_getfile(name, fops, priv, flags, NULL, false); } EXPORT_SYMBOL_GPL(anon_inode_getfile); /** * anon_inode_getfile_fmode - creates a new file instance by hooking it up to an * anonymous inode, and a dentry that describe the "class" * of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * @f_mode: [in] fmode * * Creates a new file by hooking it on a single inode. This is useful for files * that do not need to have a full-fledged inode in order to operate correctly. * All the files created with anon_inode_getfile() will share a single inode, * hence saving memory and avoiding code duplication for the file/inode/dentry * setup. Allows setting the fmode. Returns the newly created file* or an error * pointer. */ struct file *anon_inode_getfile_fmode(const char *name, const struct file_operations *fops, void *priv, int flags, fmode_t f_mode) { struct file *file; file = __anon_inode_getfile(name, fops, priv, flags, NULL, false); if (!IS_ERR(file)) file->f_mode |= f_mode; return file; } EXPORT_SYMBOL_GPL(anon_inode_getfile_fmode); /** * anon_inode_create_getfile - Like anon_inode_getfile(), but creates a new * !S_PRIVATE anon inode rather than reuse the * singleton anon inode and calls the * inode_init_security_anon() LSM hook. * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * @context_inode: * [in] the logical relationship with the new inode (optional) * * Create a new anonymous inode and file pair. This can be done for two * reasons: * * - for the inode to have its own security context, so that LSMs can enforce * policy on the inode's creation; * * - if the caller needs a unique inode, for example in order to customize * the size returned by fstat() * * The LSM may use @context_inode in inode_init_security_anon(), but a * reference to it is not held. * * Returns the newly created file* or an error pointer. */ struct file *anon_inode_create_getfile(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode) { return __anon_inode_getfile(name, fops, priv, flags, context_inode, true); } EXPORT_SYMBOL_GPL(anon_inode_create_getfile); static int __anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode, bool make_inode) { return FD_ADD(flags, __anon_inode_getfile(name, fops, priv, flags, context_inode, make_inode)); } /** * anon_inode_getfd - creates a new file instance by hooking it up to * an anonymous inode and a dentry that describe * the "class" of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * * Creates a new file by hooking it on a single inode. This is * useful for files that do not need to have a full-fledged inode in * order to operate correctly. All the files created with * anon_inode_getfd() will use the same singleton inode, reducing * memory use and avoiding code duplication for the file/inode/dentry * setup. Returns a newly created file descriptor or an error code. 
*/ int anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags) { return __anon_inode_getfd(name, fops, priv, flags, NULL, false); } EXPORT_SYMBOL_GPL(anon_inode_getfd); /** * anon_inode_create_getfd - Like anon_inode_getfd(), but creates a new * !S_PRIVATE anon inode rather than reuse the singleton anon inode, and calls * the inode_init_security_anon() LSM hook. * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * @context_inode: * [in] the logical relationship with the new inode (optional) * * Create a new anonymous inode and file pair. This can be done for two * reasons: * * - for the inode to have its own security context, so that LSMs can enforce * policy on the inode's creation; * * - if the caller needs a unique inode, for example in order to customize * the size returned by fstat() * * The LSM may use @context_inode in inode_init_security_anon(), but a * reference to it is not held. * * Returns a newly created file descriptor or an error code. */ int anon_inode_create_getfd(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode) { return __anon_inode_getfd(name, fops, priv, flags, context_inode, true); } static int __init anon_inode_init(void) { anon_inode_mnt = kern_mount(&anon_inode_fs_type); if (IS_ERR(anon_inode_mnt)) panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt)); anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); if (IS_ERR(anon_inode_inode)) panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode)); anon_inode_inode->i_op = &anon_inode_operations; return 0; } fs_initcall(anon_inode_init);
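anon_inode_getfd() above is the interface drivers use to hand a private object to userspace as a file descriptor backed by the shared anonymous inode. A hedged sketch of a typical caller; the driver, the foo_* names, and the struct are hypothetical and not taken from this file:

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo_ctx { int dummy; };		/* hypothetical per-fd state */

static int foo_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations foo_fops = {
	.owner   = THIS_MODULE,
	.release = foo_release,
};

static int foo_create_fd(void)
{
	struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	int fd;

	if (!ctx)
		return -ENOMEM;
	/* priv ends up in file->private_data; the singleton inode is reused */
	fd = anon_inode_getfd("[foo]", &foo_fops, ctx, O_RDWR | O_CLOEXEC);
	if (fd < 0)
		kfree(ctx);
	return fd;
}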
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */ #include <linux/file.h> #include <linux/interval_tree.h> #include <linux/iommu.h> #include <linux/iommufd.h> #include <linux/slab.h> #include <linux/vfio.h> #include <uapi/linux/vfio.h> #include <uapi/linux/iommufd.h> #include "iommufd_private.h" static struct iommufd_ioas *get_compat_ioas(struct iommufd_ctx *ictx) { struct iommufd_ioas *ioas = ERR_PTR(-ENODEV); xa_lock(&ictx->objects); if (!ictx->vfio_ioas || !iommufd_lock_obj(&ictx->vfio_ioas->obj)) goto out_unlock; ioas = ictx->vfio_ioas; out_unlock: xa_unlock(&ictx->objects); return ioas; } /** * iommufd_vfio_compat_ioas_get_id - Ensure a compat IOAS exists * @ictx: Context to operate on * @out_ioas_id: The IOAS ID of the compatibility IOAS * * Return the ID of the current compatibility IOAS. The ID can be passed into * other functions that take an ioas_id. */ int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id) { struct iommufd_ioas *ioas; ioas = get_compat_ioas(ictx); if (IS_ERR(ioas)) return PTR_ERR(ioas); *out_ioas_id = ioas->obj.id; iommufd_put_object(ictx, &ioas->obj); return 0; } EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_get_id, "IOMMUFD_VFIO"); /** * iommufd_vfio_compat_set_no_iommu - Called when a no-iommu device is attached * @ictx: Context to operate on * * This allows selecting the VFIO_NOIOMMU_IOMMU and blocks normal types.
*/ int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx) { int ret; xa_lock(&ictx->objects); if (!ictx->vfio_ioas) { ictx->no_iommu_mode = 1; ret = 0; } else { ret = -EINVAL; } xa_unlock(&ictx->objects); return ret; } EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_set_no_iommu, "IOMMUFD_VFIO"); /** * iommufd_vfio_compat_ioas_create - Ensure the compat IOAS is created * @ictx: Context to operate on * * The compatibility IOAS is the IOAS that the vfio compatibility ioctls operate * on since they do not have an IOAS ID input in their ABI. Only attaching a * group should cause a default creation of the internal ioas, this does nothing * if an existing ioas has already been assigned somehow. */ int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx) { struct iommufd_ioas *ioas = NULL; int ret; ioas = iommufd_ioas_alloc(ictx); if (IS_ERR(ioas)) return PTR_ERR(ioas); xa_lock(&ictx->objects); /* * VFIO won't allow attaching a container to both iommu and no iommu * operation */ if (ictx->no_iommu_mode) { ret = -EINVAL; goto out_abort; } if (ictx->vfio_ioas && iommufd_lock_obj(&ictx->vfio_ioas->obj)) { ret = 0; iommufd_put_object(ictx, &ictx->vfio_ioas->obj); goto out_abort; } ictx->vfio_ioas = ioas; xa_unlock(&ictx->objects); /* * An automatically created compat IOAS is treated as a userspace * created object. Userspace can learn the ID via IOMMU_VFIO_IOAS_GET, * and if not manually destroyed it will be destroyed automatically * at iommufd release. */ iommufd_object_finalize(ictx, &ioas->obj); return 0; out_abort: xa_unlock(&ictx->objects); iommufd_object_abort(ictx, &ioas->obj); return ret; } EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_create, "IOMMUFD_VFIO"); int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd) { struct iommu_vfio_ioas *cmd = ucmd->cmd; struct iommufd_ioas *ioas; if (cmd->__reserved) return -EOPNOTSUPP; switch (cmd->op) { case IOMMU_VFIO_IOAS_GET: ioas = get_compat_ioas(ucmd->ictx); if (IS_ERR(ioas)) return PTR_ERR(ioas); cmd->ioas_id = ioas->obj.id; iommufd_put_object(ucmd->ictx, &ioas->obj); return iommufd_ucmd_respond(ucmd, sizeof(*cmd)); case IOMMU_VFIO_IOAS_SET: ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); if (IS_ERR(ioas)) return PTR_ERR(ioas); xa_lock(&ucmd->ictx->objects); ucmd->ictx->vfio_ioas = ioas; xa_unlock(&ucmd->ictx->objects); iommufd_put_object(ucmd->ictx, &ioas->obj); return 0; case IOMMU_VFIO_IOAS_CLEAR: xa_lock(&ucmd->ictx->objects); ucmd->ictx->vfio_ioas = NULL; xa_unlock(&ucmd->ictx->objects); return 0; default: return -EOPNOTSUPP; } } static int iommufd_vfio_map_dma(struct iommufd_ctx *ictx, unsigned int cmd, void __user *arg) { u32 supported_flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE; size_t minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); struct vfio_iommu_type1_dma_map map; int iommu_prot = IOMMU_CACHE; struct iommufd_ioas *ioas; unsigned long iova; int rc; if (copy_from_user(&map, arg, minsz)) return -EFAULT; if (map.argsz < minsz || map.flags & ~supported_flags) return -EINVAL; if (map.flags & VFIO_DMA_MAP_FLAG_READ) iommu_prot |= IOMMU_READ; if (map.flags & VFIO_DMA_MAP_FLAG_WRITE) iommu_prot |= IOMMU_WRITE; ioas = get_compat_ioas(ictx); if (IS_ERR(ioas)) return PTR_ERR(ioas); /* * Maps created through the legacy interface always use VFIO compatible * rlimit accounting. If the user wishes to use the faster user based * rlimit accounting then they must use the new interface. 
*/ iova = map.iova; rc = iopt_map_user_pages(ictx, &ioas->iopt, &iova, u64_to_user_ptr(map.vaddr), map.size, iommu_prot, 0); iommufd_put_object(ictx, &ioas->obj); return rc; } static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd, void __user *arg) { size_t minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); /* * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP is obsoleted by the new * dirty tracking direction: * https://lore.kernel.org/kvm/20220731125503.142683-1-yishaih@nvidia.com/ * https://lore.kernel.org/kvm/20220428210933.3583-1-joao.m.martins@oracle.com/ */ u32 supported_flags = VFIO_DMA_UNMAP_FLAG_ALL; struct vfio_iommu_type1_dma_unmap unmap; unsigned long unmapped = 0; struct iommufd_ioas *ioas; int rc; if (copy_from_user(&unmap, arg, minsz)) return -EFAULT; if (unmap.argsz < minsz || unmap.flags & ~supported_flags) return -EINVAL; ioas = get_compat_ioas(ictx); if (IS_ERR(ioas)) return PTR_ERR(ioas); if (unmap.flags & VFIO_DMA_UNMAP_FLAG_ALL) { if (unmap.iova != 0 || unmap.size != 0) { rc = -EINVAL; goto err_put; } rc = iopt_unmap_all(&ioas->iopt, &unmapped); } else { if (READ_ONCE(ioas->iopt.disable_large_pages)) { /* * Create cuts at the start and last of the requested * range. If the start IOVA is 0 then it doesn't need to * be cut. */ unsigned long iovas[] = { unmap.iova + unmap.size - 1, unmap.iova - 1 }; rc = iopt_cut_iova(&ioas->iopt, iovas, unmap.iova ? 2 : 1); if (rc) goto err_put; } rc = iopt_unmap_iova(&ioas->iopt, unmap.iova, unmap.size, &unmapped); } unmap.size = unmapped; if (copy_to_user(arg, &unmap, minsz)) rc = -EFAULT; err_put: iommufd_put_object(ictx, &ioas->obj); return rc; } static int iommufd_vfio_cc_iommu(struct iommufd_ctx *ictx) { struct iommufd_hwpt_paging *hwpt_paging; struct iommufd_ioas *ioas; int rc = 1; ioas = get_compat_ioas(ictx); if (IS_ERR(ioas)) return PTR_ERR(ioas); mutex_lock(&ioas->mutex); list_for_each_entry(hwpt_paging, &ioas->hwpt_list, hwpt_item) { if (!hwpt_paging->enforce_cache_coherency) { rc = 0; break; } } mutex_unlock(&ioas->mutex); iommufd_put_object(ictx, &ioas->obj); return rc; } static int iommufd_vfio_check_extension(struct iommufd_ctx *ictx, unsigned long type) { switch (type) { case VFIO_TYPE1_IOMMU: case VFIO_TYPE1v2_IOMMU: case VFIO_UNMAP_ALL: return 1; case VFIO_NOIOMMU_IOMMU: return IS_ENABLED(CONFIG_VFIO_NOIOMMU); case VFIO_DMA_CC_IOMMU: return iommufd_vfio_cc_iommu(ictx); case __VFIO_RESERVED_TYPE1_NESTING_IOMMU: return 0; /* * VFIO_DMA_MAP_FLAG_VADDR * https://lore.kernel.org/kvm/1611939252-7240-1-git-send-email-steven.sistare@oracle.com/ * https://lore.kernel.org/all/Yz777bJZjTyLrHEQ@nvidia.com/ * * It is hard to see how this could be implemented safely. */ case VFIO_UPDATE_VADDR: default: return 0; } } static int iommufd_vfio_set_iommu(struct iommufd_ctx *ictx, unsigned long type) { bool no_iommu_mode = READ_ONCE(ictx->no_iommu_mode); struct iommufd_ioas *ioas = NULL; int rc = 0; /* * Emulation for NOIOMMU is imperfect in that VFIO blocks almost all * other ioctls. We let them keep working but they mostly fail since no * IOAS should exist. 
*/ if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) && type == VFIO_NOIOMMU_IOMMU && no_iommu_mode) { if (!capable(CAP_SYS_RAWIO)) return -EPERM; return 0; } if ((type != VFIO_TYPE1_IOMMU && type != VFIO_TYPE1v2_IOMMU) || no_iommu_mode) return -EINVAL; /* VFIO fails the set_iommu if there is no group */ ioas = get_compat_ioas(ictx); if (IS_ERR(ioas)) return PTR_ERR(ioas); /* * The difference between TYPE1 and TYPE1v2 is the ability to unmap in * the middle of mapped ranges. This is complicated by huge page support * which creates single large IOPTEs that cannot be split by the iommu * driver. TYPE1 is very old at this point and likely nothing uses it, * however it is simple enough to emulate by simply disabling the * problematic large IOPTEs. Then we can safely unmap within any range. */ if (type == VFIO_TYPE1_IOMMU) rc = iopt_disable_large_pages(&ioas->iopt); iommufd_put_object(ictx, &ioas->obj); return rc; } static unsigned long iommufd_get_pagesizes(struct iommufd_ioas *ioas) { struct io_pagetable *iopt = &ioas->iopt; unsigned long pgsize_bitmap = ULONG_MAX; struct iommu_domain *domain; unsigned long index; down_read(&iopt->domains_rwsem); xa_for_each(&iopt->domains, index, domain) pgsize_bitmap &= domain->pgsize_bitmap; /* See vfio_update_pgsize_bitmap() */ if (pgsize_bitmap & ~PAGE_MASK) { pgsize_bitmap &= PAGE_MASK; pgsize_bitmap |= PAGE_SIZE; } pgsize_bitmap = max(pgsize_bitmap, ioas->iopt.iova_alignment); up_read(&iopt->domains_rwsem); return pgsize_bitmap; } static int iommufd_fill_cap_iova(struct iommufd_ioas *ioas, struct vfio_info_cap_header __user *cur, size_t avail) { struct vfio_iommu_type1_info_cap_iova_range __user *ucap_iovas = container_of(cur, struct vfio_iommu_type1_info_cap_iova_range __user, header); struct vfio_iommu_type1_info_cap_iova_range cap_iovas = { .header = { .id = VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, .version = 1, }, }; struct interval_tree_span_iter span; interval_tree_for_each_span(&span, &ioas->iopt.reserved_itree, 0, ULONG_MAX) { struct vfio_iova_range range; if (!span.is_hole) continue; range.start = span.start_hole; range.end = span.last_hole; if (avail >= struct_size(&cap_iovas, iova_ranges, cap_iovas.nr_iovas + 1) && copy_to_user(&ucap_iovas->iova_ranges[cap_iovas.nr_iovas], &range, sizeof(range))) return -EFAULT; cap_iovas.nr_iovas++; } if (avail >= struct_size(&cap_iovas, iova_ranges, cap_iovas.nr_iovas) && copy_to_user(ucap_iovas, &cap_iovas, sizeof(cap_iovas))) return -EFAULT; return struct_size(&cap_iovas, iova_ranges, cap_iovas.nr_iovas); } static int iommufd_fill_cap_dma_avail(struct iommufd_ioas *ioas, struct vfio_info_cap_header __user *cur, size_t avail) { struct vfio_iommu_type1_info_dma_avail cap_dma = { .header = { .id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL, .version = 1, }, /* * iommufd's limit is based on the cgroup's memory limit. * Normally vfio would return U16_MAX here, and provide a module * parameter to adjust it. Since S390 qemu userspace actually * pays attention and needs a value bigger than U16_MAX return * U32_MAX. 
*/ .avail = U32_MAX, }; if (avail >= sizeof(cap_dma) && copy_to_user(cur, &cap_dma, sizeof(cap_dma))) return -EFAULT; return sizeof(cap_dma); } static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx, void __user *arg) { typedef int (*fill_cap_fn)(struct iommufd_ioas *ioas, struct vfio_info_cap_header __user *cur, size_t avail); static const fill_cap_fn fill_fns[] = { iommufd_fill_cap_dma_avail, iommufd_fill_cap_iova, }; size_t minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes); struct vfio_info_cap_header __user *last_cap = NULL; struct vfio_iommu_type1_info info = {}; struct iommufd_ioas *ioas; size_t total_cap_size; int rc; int i; if (copy_from_user(&info, arg, minsz)) return -EFAULT; if (info.argsz < minsz) return -EINVAL; minsz = min_t(size_t, info.argsz, sizeof(info)); ioas = get_compat_ioas(ictx); if (IS_ERR(ioas)) return PTR_ERR(ioas); info.flags = VFIO_IOMMU_INFO_PGSIZES; info.iova_pgsizes = iommufd_get_pagesizes(ioas); info.cap_offset = 0; down_read(&ioas->iopt.iova_rwsem); total_cap_size = sizeof(info); for (i = 0; i != ARRAY_SIZE(fill_fns); i++) { int cap_size; if (info.argsz > total_cap_size) cap_size = fill_fns[i](ioas, arg + total_cap_size, info.argsz - total_cap_size); else cap_size = fill_fns[i](ioas, NULL, 0); if (cap_size < 0) { rc = cap_size; goto out_put; } cap_size = ALIGN(cap_size, sizeof(u64)); if (last_cap && info.argsz >= total_cap_size && put_user(total_cap_size, &last_cap->next)) { rc = -EFAULT; goto out_put; } last_cap = arg + total_cap_size; total_cap_size += cap_size; } /* * If the user did not provide enough space then only some caps are * returned and the argsz will be updated to the correct amount to get * all caps. */ if (info.argsz >= total_cap_size) info.cap_offset = sizeof(info); info.argsz = total_cap_size; info.flags |= VFIO_IOMMU_INFO_CAPS; if (copy_to_user(arg, &info, minsz)) { rc = -EFAULT; goto out_put; } rc = 0; out_put: up_read(&ioas->iopt.iova_rwsem); iommufd_put_object(ictx, &ioas->obj); return rc; } int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd, unsigned long arg) { void __user *uarg = (void __user *)arg; switch (cmd) { case VFIO_GET_API_VERSION: return VFIO_API_VERSION; case VFIO_SET_IOMMU: return iommufd_vfio_set_iommu(ictx, arg); case VFIO_CHECK_EXTENSION: return iommufd_vfio_check_extension(ictx, arg); case VFIO_IOMMU_GET_INFO: return iommufd_vfio_iommu_get_info(ictx, uarg); case VFIO_IOMMU_MAP_DMA: return iommufd_vfio_map_dma(ictx, cmd, uarg); case VFIO_IOMMU_UNMAP_DMA: return iommufd_vfio_unmap_dma(ictx, cmd, uarg); case VFIO_IOMMU_DIRTY_PAGES: default: return -ENOIOCTLCMD; } return -ENOIOCTLCMD; }
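iommufd_vfio_ioctl() above emulates the legacy VFIO type1 container ioctls. A hedged userspace sketch of that legacy flow; it is not part of the file above, it assumes an iommufd-backed container fd (e.g. CONFIG_IOMMUFD_VFIO_CONTAINER), and group/device attachment and error details are omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int legacy_type1_map(int container, void *vaddr, uint64_t iova,
			    uint64_t size)
{
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)vaddr,
		.iova  = iova,
		.size  = size,
	};

	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
		return -1;
	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
		return -1;
	/* Served by iommufd_vfio_set_iommu(); requires the compat IOAS,
	 * i.e. a group already attached to the container. */
	if (ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU) < 0)
		return -1;
	/* Ends up in iommufd_vfio_map_dma() -> iopt_map_user_pages(). */
	return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
}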
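The page-size bitmap fix-up in iommufd_get_pagesizes() above (the branch referencing vfio_update_pgsize_bitmap()) is plain bit arithmetic, so it can be shown stand-alone. The 4 KiB page size below is an assumption made only for this sketch.

/* Minimal user-space illustration of the pgsize_bitmap normalisation:
 * granules smaller than the CPU page size are hidden and PAGE_SIZE is
 * advertised instead, mirroring the kernel code above. */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL
#define EXAMPLE_PAGE_MASK (~(EXAMPLE_PAGE_SIZE - 1))

static unsigned long fixup_pgsizes(unsigned long pgsize_bitmap)
{
	if (pgsize_bitmap & ~EXAMPLE_PAGE_MASK) {
		pgsize_bitmap &= EXAMPLE_PAGE_MASK;
		pgsize_bitmap |= EXAMPLE_PAGE_SIZE;
	}
	return pgsize_bitmap;
}

int main(void)
{
	/* An IOMMU advertising 1 KiB, 4 KiB and 2 MiB granules ... */
	unsigned long raw = 0x400UL | 0x1000UL | 0x200000UL;

	/* ... is reported as 0x201000: 1 KiB dropped, 4 KiB and 2 MiB kept. */
	printf("0x%lx -> 0x%lx\n", raw, fixup_pgsizes(raw));
	return 0;
}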
854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> */ #ifndef _NET_IPV6_H #define _NET_IPV6_H #include <linux/ipv6.h> #include <linux/hardirq.h> #include <linux/jhash.h> #include <linux/refcount.h> #include <linux/jump_label_ratelimit.h> #include <net/if_inet6.h> #include <net/flow.h> #include <net/flow_dissector.h> #include <net/inet_dscp.h> #include <net/snmp.h> #include <net/netns/hash.h> struct ip_tunnel_info; #define SIN6_LEN_RFC2133 24 #define IPV6_MAXPLEN 65535 /* * NextHeader field of IPv6 header */ #define NEXTHDR_HOP 0 /* Hop-by-hop option header. */ #define NEXTHDR_IPV4 4 /* IPv4 in IPv6 */ #define NEXTHDR_TCP 6 /* TCP segment. */ #define NEXTHDR_UDP 17 /* UDP message. */ #define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */ #define NEXTHDR_ROUTING 43 /* Routing header. */ #define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */ #define NEXTHDR_GRE 47 /* GRE header. */ #define NEXTHDR_ESP 50 /* Encapsulating security payload. */ #define NEXTHDR_AUTH 51 /* Authentication header. 
*/ #define NEXTHDR_ICMP 58 /* ICMP for IPv6. */ #define NEXTHDR_NONE 59 /* No next header */ #define NEXTHDR_DEST 60 /* Destination options header. */ #define NEXTHDR_SCTP 132 /* SCTP message. */ #define NEXTHDR_MOBILITY 135 /* Mobility header. */ #define NEXTHDR_MAX 255 #define IPV6_DEFAULT_HOPLIMIT 64 #define IPV6_DEFAULT_MCASTHOPS 1 /* Limits on Hop-by-Hop and Destination options. * * Per RFC8200 there is no limit on the maximum number or lengths of options in * Hop-by-Hop or Destination options other then the packet must fit in an MTU. * We allow configurable limits in order to mitigate potential denial of * service attacks. * * There are three limits that may be set: * - Limit the number of options in a Hop-by-Hop or Destination options * extension header * - Limit the byte length of a Hop-by-Hop or Destination options extension * header * - Disallow unknown options * * The limits are expressed in corresponding sysctls: * * ipv6.sysctl.max_dst_opts_cnt * ipv6.sysctl.max_hbh_opts_cnt * ipv6.sysctl.max_dst_opts_len * ipv6.sysctl.max_hbh_opts_len * * max_*_opts_cnt is the number of TLVs that are allowed for Destination * options or Hop-by-Hop options. If the number is less than zero then unknown * TLVs are disallowed and the number of known options that are allowed is the * absolute value. Setting the value to INT_MAX indicates no limit. * * max_*_opts_len is the length limit in bytes of a Destination or * Hop-by-Hop options extension header. Setting the value to INT_MAX * indicates no length limit. * * If a limit is exceeded when processing an extension header the packet is * silently discarded. */ /* Default limits for Hop-by-Hop and Destination options */ #define IP6_DEFAULT_MAX_DST_OPTS_CNT 8 #define IP6_DEFAULT_MAX_HBH_OPTS_CNT 8 #define IP6_DEFAULT_MAX_DST_OPTS_LEN INT_MAX /* No limit */ #define IP6_DEFAULT_MAX_HBH_OPTS_LEN INT_MAX /* No limit */ /* * Addr type * * type - unicast | multicast * scope - local | site | global * v4 - compat * v4mapped * any * loopback */ #define IPV6_ADDR_ANY 0x0000U #define IPV6_ADDR_UNICAST 0x0001U #define IPV6_ADDR_MULTICAST 0x0002U #define IPV6_ADDR_LOOPBACK 0x0010U #define IPV6_ADDR_LINKLOCAL 0x0020U #define IPV6_ADDR_SITELOCAL 0x0040U #define IPV6_ADDR_COMPATv4 0x0080U #define IPV6_ADDR_SCOPE_MASK 0x00f0U #define IPV6_ADDR_MAPPED 0x1000U /* * Addr scopes */ #define IPV6_ADDR_MC_SCOPE(a) \ ((a)->s6_addr[1] & 0x0f) /* nonstandard */ #define __IPV6_ADDR_SCOPE_INVALID -1 #define IPV6_ADDR_SCOPE_NODELOCAL 0x01 #define IPV6_ADDR_SCOPE_LINKLOCAL 0x02 #define IPV6_ADDR_SCOPE_SITELOCAL 0x05 #define IPV6_ADDR_SCOPE_ORGLOCAL 0x08 #define IPV6_ADDR_SCOPE_GLOBAL 0x0e /* * Addr flags */ #define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \ ((a)->s6_addr[1] & 0x10) #define IPV6_ADDR_MC_FLAG_PREFIX(a) \ ((a)->s6_addr[1] & 0x20) #define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \ ((a)->s6_addr[1] & 0x40) /* * fragmentation header */ struct frag_hdr { __u8 nexthdr; __u8 reserved; __be16 frag_off; __be32 identification; }; /* * Jumbo payload option, as described in RFC 2675 2. 
*/ struct hop_jumbo_hdr { u8 nexthdr; u8 hdrlen; u8 tlv_type; /* IPV6_TLV_JUMBO, 0xC2 */ u8 tlv_len; /* 4 */ __be32 jumbo_payload_len; }; #define IP6_MF 0x0001 #define IP6_OFFSET 0xFFF8 struct ip6_fraglist_iter { struct ipv6hdr *tmp_hdr; struct sk_buff *frag; int offset; unsigned int hlen; __be32 frag_id; u8 nexthdr; }; int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr, u8 nexthdr, __be32 frag_id, struct ip6_fraglist_iter *iter); void ip6_fraglist_prepare(struct sk_buff *skb, struct ip6_fraglist_iter *iter); static inline struct sk_buff *ip6_fraglist_next(struct ip6_fraglist_iter *iter) { struct sk_buff *skb = iter->frag; iter->frag = skb->next; skb_mark_not_on_list(skb); return skb; } struct ip6_frag_state { u8 *prevhdr; unsigned int hlen; unsigned int mtu; unsigned int left; int offset; int ptr; int hroom; int troom; __be32 frag_id; u8 nexthdr; }; void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu, unsigned short needed_tailroom, int hdr_room, u8 *prevhdr, u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state); struct sk_buff *ip6_frag_next(struct sk_buff *skb, struct ip6_frag_state *state); #define IP6_REPLY_MARK(net, mark) \ ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0) #include <net/sock.h> /* sysctls */ extern int sysctl_mld_max_msf; extern int sysctl_mld_qrv; #define _DEVINC(net, statname, mod, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\ mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\ }) /* per device counters are atomic_long_t */ #define _DEVINCATOMIC(net, statname, mod, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \ mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\ }) /* per device and per net counters are atomic_long_t */ #define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \ SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\ }) #define _DEVADD(net, statname, mod, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ unsigned long _field = (field); \ unsigned long _val = (val); \ if (likely(_idev != NULL)) \ mod##SNMP_ADD_STATS((_idev)->stats.statname, _field, _val); \ mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, _field, _val);\ }) #define _DEVUPD(net, statname, mod, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ unsigned long _val = (val); \ if (likely(_idev != NULL)) \ mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, _val); \ mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, _val);\ }) /* MIBs */ #define IP6_INC_STATS(net, idev,field) \ _DEVINC(net, ipv6, , idev, field) #define __IP6_INC_STATS(net, idev,field) \ _DEVINC(net, ipv6, __, idev, field) #define IP6_ADD_STATS(net, idev,field,val) \ _DEVADD(net, ipv6, , idev, field, val) #define __IP6_ADD_STATS(net, idev,field,val) \ _DEVADD(net, ipv6, __, idev, field, val) #define IP6_UPD_PO_STATS(net, idev,field,val) \ _DEVUPD(net, ipv6, , idev, field, val) #define __IP6_UPD_PO_STATS(net, idev,field,val) \ _DEVUPD(net, ipv6, __, idev, field, val) #define ICMP6_INC_STATS(net, idev, field) \ _DEVINCATOMIC(net, icmpv6, , idev, field) #define __ICMP6_INC_STATS(net, idev, field) \ _DEVINCATOMIC(net, icmpv6, __, idev, field) #define 
ICMP6MSGOUT_INC_STATS(net, idev, field) \ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) #define ICMP6MSGIN_INC_STATS(net, idev, field) \ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field) struct ip6_ra_chain { struct ip6_ra_chain *next; struct sock *sk; int sel; void (*destructor)(struct sock *); }; extern struct ip6_ra_chain *ip6_ra_chain; extern rwlock_t ip6_ra_lock; /* This structure is prepared by protocol, when parsing ancillary data and passed to IPv6. */ struct ipv6_txoptions { refcount_t refcnt; /* Length of this structure */ int tot_len; /* length of extension headers */ __u16 opt_flen; /* after fragment hdr */ __u16 opt_nflen; /* before fragment hdr */ struct ipv6_opt_hdr *hopopt; struct ipv6_opt_hdr *dst0opt; struct ipv6_rt_hdr *srcrt; /* Routing Header */ struct ipv6_opt_hdr *dst1opt; struct rcu_head rcu; /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */ }; /* flowlabel_reflect sysctl values */ enum flowlabel_reflect { FLOWLABEL_REFLECT_ESTABLISHED = 1, FLOWLABEL_REFLECT_TCP_RESET = 2, FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, }; struct ip6_flowlabel { struct ip6_flowlabel __rcu *next; __be32 label; atomic_t users; struct in6_addr dst; struct ipv6_txoptions *opt; unsigned long linger; struct rcu_head rcu; u8 share; union { struct pid *pid; kuid_t uid; } owner; unsigned long lastuse; unsigned long expires; struct net *fl_net; }; #define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF) #define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF) #define IPV6_FLOWLABEL_STATELESS_FLAG cpu_to_be32(0x00080000) #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK) #define IPV6_TCLASS_SHIFT 20 struct ipv6_fl_socklist { struct ipv6_fl_socklist __rcu *next; struct ip6_flowlabel *fl; struct rcu_head rcu; }; struct ipcm6_cookie { struct sockcm_cookie sockc; __s16 hlimit; __s16 tclass; __u16 gso_size; __s8 dontfrag; struct ipv6_txoptions *opt; }; static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, const struct sock *sk) { *ipc6 = (struct ipcm6_cookie) { .hlimit = -1, .tclass = inet6_sk(sk)->tclass, .dontfrag = inet6_test_bit(DONTFRAG, sk), }; sockcm_init(&ipc6->sockc, sk); } static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) { struct ipv6_txoptions *opt; rcu_read_lock(); opt = rcu_dereference(np->opt); if (opt) { if (!refcount_inc_not_zero(&opt->refcnt)) opt = NULL; else opt = rcu_pointer_handoff(opt); } rcu_read_unlock(); return opt; } static inline void txopt_put(struct ipv6_txoptions *opt) { if (opt && refcount_dec_and_test(&opt->refcnt)) kfree_rcu(opt, rcu); } #if IS_ENABLED(CONFIG_IPV6) struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label); extern struct static_key_false_deferred ipv6_flowlabel_exclusive; static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) { if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) && READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl)) return __fl6_sock_lookup(sk, label) ? 
: ERR_PTR(-ENOENT); return NULL; } #endif struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, struct ipv6_txoptions *fopt); void fl6_free_socklist(struct sock *sk); int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen); int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int flags); int ip6_flowlabel_init(void); void ip6_flowlabel_cleanup(void); bool ip6_autoflowlabel(struct net *net, const struct sock *sk); static inline void fl6_sock_release(struct ip6_flowlabel *fl) { if (fl) atomic_dec(&fl->users); } enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info); void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len); int ip6_ra_control(struct sock *sk, int sel); int ipv6_parse_hopopts(struct sk_buff *skb); struct ipv6_txoptions *ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt); struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, int newtype, struct ipv6_opt_hdr *newopt); struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt); static inline struct ipv6_txoptions * ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt) { if (!opt) return NULL; return __ipv6_fixup_options(opt_space, opt); } bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb, const struct inet6_skb_parm *opt); struct ipv6_txoptions *ipv6_update_options(struct sock *sk, struct ipv6_txoptions *opt); /* This helper is specialized for BIG TCP needs. * It assumes the hop_jumbo_hdr will immediately follow the IPV6 header. * It assumes headers are already in skb->head. * Returns: 0, or IPPROTO_TCP if a BIG TCP packet is there. */ static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb) { const struct hop_jumbo_hdr *jhdr; const struct ipv6hdr *nhdr; if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) return 0; if (skb->protocol != htons(ETH_P_IPV6)) return 0; if (skb_network_offset(skb) + sizeof(struct ipv6hdr) + sizeof(struct hop_jumbo_hdr) > skb_headlen(skb)) return 0; nhdr = ipv6_hdr(skb); if (nhdr->nexthdr != NEXTHDR_HOP) return 0; jhdr = (const struct hop_jumbo_hdr *) (nhdr + 1); if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || jhdr->nexthdr != IPPROTO_TCP) return 0; return jhdr->nexthdr; } /* Return 0 if HBH header is successfully removed * Or if HBH removal is unnecessary (packet is not big TCP) * Return error to indicate dropping the packet */ static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb) { const int hophdr_len = sizeof(struct hop_jumbo_hdr); int nexthdr = ipv6_has_hopopt_jumbo(skb); struct ipv6hdr *h6; if (!nexthdr) return 0; if (skb_cow_head(skb, 0)) return -1; /* Remove the HBH header. * Layout: [Ethernet header][IPv6 header][HBH][L4 Header] */ memmove(skb_mac_header(skb) + hophdr_len, skb_mac_header(skb), skb_network_header(skb) - skb_mac_header(skb) + sizeof(struct ipv6hdr)); __skb_pull(skb, hophdr_len); skb->network_header += hophdr_len; skb->mac_header += hophdr_len; h6 = ipv6_hdr(skb); h6->nexthdr = nexthdr; return 0; } static inline bool ipv6_accept_ra(const struct inet6_dev *idev) { s32 accept_ra = READ_ONCE(idev->cnf.accept_ra); /* If forwarding is enabled, RA are not accepted unless the special * hybrid mode (accept_ra=2) is enabled. */ return READ_ONCE(idev->cnf.forwarding) ? 
accept_ra == 2 : accept_ra; } #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */ #define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */ #define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */ int __ipv6_addr_type(const struct in6_addr *addr); static inline int ipv6_addr_type(const struct in6_addr *addr) { return __ipv6_addr_type(addr) & 0xffff; } static inline int ipv6_addr_scope(const struct in6_addr *addr) { return __ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK; } static inline int __ipv6_addr_src_scope(int type) { return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16); } static inline int ipv6_addr_src_scope(const struct in6_addr *addr) { return __ipv6_addr_src_scope(__ipv6_addr_type(addr)); } static inline bool __ipv6_addr_needs_scope_id(int type) { return type & IPV6_ADDR_LINKLOCAL || (type & IPV6_ADDR_MULTICAST && (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL))); } static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface) { return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0; } static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2) { return memcmp(a1, a2, sizeof(struct in6_addr)); } static inline bool ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, const struct in6_addr *a2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul1 = (const unsigned long *)a1; const unsigned long *ulm = (const unsigned long *)m; const unsigned long *ul2 = (const unsigned long *)a2; return !!(((ul1[0] ^ ul2[0]) & ulm[0]) | ((ul1[1] ^ ul2[1]) & ulm[1])); #else return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) | ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) | ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) | ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3])); #endif } static inline void ipv6_addr_prefix(struct in6_addr *pfx, const struct in6_addr *addr, int plen) { /* caller must guarantee 0 <= plen <= 128 */ int o = plen >> 3, b = plen & 0x7; memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr)); memcpy(pfx->s6_addr, addr, o); if (b != 0) pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b); } static inline void ipv6_addr_prefix_copy(struct in6_addr *addr, const struct in6_addr *pfx, int plen) { /* caller must guarantee 0 <= plen <= 128 */ int o = plen >> 3, b = plen & 0x7; memcpy(addr->s6_addr, pfx, o); if (b != 0) { addr->s6_addr[o] &= ~(0xff00 >> b); addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b)); } } static inline void __ipv6_addr_set_half(__be32 *addr, __be32 wh, __be32 wl) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 #if defined(__BIG_ENDIAN) if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) { *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl)); return; } #elif defined(__LITTLE_ENDIAN) if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) { *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh)); return; } #endif #endif addr[0] = wh; addr[1] = wl; } static inline void ipv6_addr_set(struct in6_addr *addr, __be32 w1, __be32 w2, __be32 w3, __be32 w4) { __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2); __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4); } static inline bool ipv6_addr_equal(const struct in6_addr *a1, const struct in6_addr *a2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul1 = (const unsigned long *)a1; const unsigned long *ul2 = (const unsigned 
long *)a2; return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL; #else return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0; #endif } #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 static inline bool __ipv6_prefix_equal64_half(const __be64 *a1, const __be64 *a2, unsigned int len) { if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len)))) return false; return true; } static inline bool ipv6_prefix_equal(const struct in6_addr *addr1, const struct in6_addr *addr2, unsigned int prefixlen) { const __be64 *a1 = (const __be64 *)addr1; const __be64 *a2 = (const __be64 *)addr2; if (prefixlen >= 64) { if (a1[0] ^ a2[0]) return false; return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64); } return __ipv6_prefix_equal64_half(a1, a2, prefixlen); } #else static inline bool ipv6_prefix_equal(const struct in6_addr *addr1, const struct in6_addr *addr2, unsigned int prefixlen) { const __be32 *a1 = addr1->s6_addr32; const __be32 *a2 = addr2->s6_addr32; unsigned int pdw, pbi; /* check complete u32 in prefix */ pdw = prefixlen >> 5; if (pdw && memcmp(a1, a2, pdw << 2)) return false; /* check incomplete u32 in prefix */ pbi = prefixlen & 0x1f; if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi)))) return false; return true; } #endif static inline bool ipv6_addr_any(const struct in6_addr *a) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul = (const unsigned long *)a; return (ul[0] | ul[1]) == 0UL; #else return (a->s6_addr32[0] | a->s6_addr32[1] | a->s6_addr32[2] | a->s6_addr32[3]) == 0; #endif } static inline u32 ipv6_addr_hash(const struct in6_addr *a) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul = (const unsigned long *)a; unsigned long x = ul[0] ^ ul[1]; return (u32)(x ^ (x >> 32)); #else return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^ a->s6_addr32[2] ^ a->s6_addr32[3]); #endif } /* more secured version of ipv6_addr_hash() */ static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval) { return jhash2((__force const u32 *)a->s6_addr32, ARRAY_SIZE(a->s6_addr32), initval); } static inline bool ipv6_addr_loopback(const struct in6_addr *a) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const __be64 *be = (const __be64 *)a; return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL; #else return (a->s6_addr32[0] | a->s6_addr32[1] | a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0; #endif } /* * Note that we must __force cast these to unsigned long to make sparse happy, * since all of the endian-annotated types are fixed size regardless of arch. 
*/ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a) { return ( #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 *(unsigned long *)a | #else (__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) | #endif (__force unsigned long)(a->s6_addr32[2] ^ cpu_to_be32(0x0000ffff))) == 0UL; } static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a) { return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]); } static inline u32 ipv6_portaddr_hash(const struct net *net, const struct in6_addr *addr6, unsigned int port) { unsigned int hash, mix = net_hash_mix(net); if (ipv6_addr_any(addr6)) hash = jhash_1word(0, mix); else if (ipv6_addr_v4mapped(addr6)) hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix); else hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix); return hash ^ port; } /* * Check for a RFC 4843 ORCHID address * (Overlay Routable Cryptographic Hash Identifiers) */ static inline bool ipv6_addr_orchid(const struct in6_addr *a) { return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010); } static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr) { return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000); } static inline void ipv6_addr_set_v4mapped(const __be32 addr, struct in6_addr *v4mapped) { ipv6_addr_set(v4mapped, 0, 0, htonl(0x0000FFFF), addr); } /* * find the first different bit between two addresses * length of address must be a multiple of 32bits */ static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen) { const __be32 *a1 = token1, *a2 = token2; int i; addrlen >>= 2; for (i = 0; i < addrlen; i++) { __be32 xb = a1[i] ^ a2[i]; if (xb) return i * 32 + 31 - __fls(ntohl(xb)); } /* * we should *never* get to this point since that * would mean the addrs are equal * * However, we do get to it 8) And exactly, when * addresses are equal 8) * * ip route add 1111::/128 via ... * ip route add 1111::/64 via ... * and we are here. * * Ideally, this function should stop comparison * at prefix length. It does not, but it is still OK, * if returned value is greater than prefix length. 
* --ANK (980803) */ return addrlen << 5; } #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen) { const __be64 *a1 = token1, *a2 = token2; int i; addrlen >>= 3; for (i = 0; i < addrlen; i++) { __be64 xb = a1[i] ^ a2[i]; if (xb) return i * 64 + 63 - __fls(be64_to_cpu(xb)); } return addrlen << 6; } #endif static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 if (__builtin_constant_p(addrlen) && !(addrlen & 7)) return __ipv6_addr_diff64(token1, token2, addrlen); #endif return __ipv6_addr_diff32(token1, token2, addrlen); } static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2) { return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } __be32 ipv6_select_ident(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr); __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); int ip6_dst_hoplimit(struct dst_entry *dst); static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, struct dst_entry *dst) { int hlimit; if (ipv6_addr_is_multicast(&fl6->daddr)) hlimit = READ_ONCE(np->mcast_hops); else hlimit = READ_ONCE(np->hop_limit); if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); return hlimit; } /* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store * Equivalent to : flow->v6addrs.src = iph->saddr; * flow->v6addrs.dst = iph->daddr; */ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow, const struct ipv6hdr *iph) { BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) != offsetof(typeof(flow->addrs), v6addrs.src) + sizeof(flow->addrs.v6addrs.src)); memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs)); flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } #if IS_ENABLED(CONFIG_IPV6) static inline bool ipv6_can_nonlocal_bind(struct net *net, struct inet_sock *inet) { return net->ipv6.sysctl.ip_nonlocal_bind || test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) || test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags); } /* Sysctl settings for net ipv6.auto_flowlabels */ #define IP6_AUTO_FLOW_LABEL_OFF 0 #define IP6_AUTO_FLOW_LABEL_OPTOUT 1 #define IP6_AUTO_FLOW_LABEL_OPTIN 2 #define IP6_AUTO_FLOW_LABEL_FORCED 3 #define IP6_AUTO_FLOW_LABEL_MAX IP6_AUTO_FLOW_LABEL_FORCED #define IP6_DEFAULT_AUTO_FLOW_LABELS IP6_AUTO_FLOW_LABEL_OPTOUT static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, __be32 flowlabel, bool autolabel, struct flowi6 *fl6) { u32 hash; /* @flowlabel may include more than a flow label, eg, the traffic class. * Here we want only the flow label value. */ flowlabel &= IPV6_FLOWLABEL_MASK; if (flowlabel || net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || (!autolabel && net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED)) return flowlabel; hash = skb_get_hash_flowi6(skb, fl6); /* Since this is being sent on the wire obfuscate hash a bit * to minimize possibility that any useful information to an * attacker is leaked. Only lower 20 bits are relevant. 
*/ hash = rol32(hash, 16); flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; if (net->ipv6.sysctl.flowlabel_state_ranges) flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG; return flowlabel; } static inline int ip6_default_np_autolabel(struct net *net) { switch (net->ipv6.sysctl.auto_flowlabels) { case IP6_AUTO_FLOW_LABEL_OFF: case IP6_AUTO_FLOW_LABEL_OPTIN: default: return 0; case IP6_AUTO_FLOW_LABEL_OPTOUT: case IP6_AUTO_FLOW_LABEL_FORCED: return 1; } } #else static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, __be32 flowlabel, bool autolabel, struct flowi6 *fl6) { return flowlabel; } static inline int ip6_default_np_autolabel(struct net *net) { return 0; } #endif #if IS_ENABLED(CONFIG_IPV6) static inline int ip6_multipath_hash_policy(const struct net *net) { return net->ipv6.sysctl.multipath_hash_policy; } static inline u32 ip6_multipath_hash_fields(const struct net *net) { return net->ipv6.sysctl.multipath_hash_fields; } #else static inline int ip6_multipath_hash_policy(const struct net *net) { return 0; } static inline u32 ip6_multipath_hash_fields(const struct net *net) { return 0; } #endif /* * Header manipulation */ static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass, __be32 flowlabel) { *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel; } static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr) { return *(__be32 *)hdr & IPV6_FLOWINFO_MASK; } static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr) { return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK; } static inline u8 ip6_tclass(__be32 flowinfo) { return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT; } static inline dscp_t ip6_dscp(__be32 flowinfo) { return inet_dsfield_to_dscp(ip6_tclass(flowinfo)); } static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel) { return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel; } static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6) { return fl6->flowlabel & IPV6_FLOWLABEL_MASK; } /* * Prototypes exported by ipv6 */ /* * rcv function (called from netdevice level) */ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, struct net_device *orig_dev); int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); /* * upper-layer output functions */ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags); int ip6_push_pending_frames(struct sock *sk); void ip6_flush_pending_frames(struct sock *sk); int ip6_send_skb(struct sk_buff *skb); struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue, struct inet_cork_full *cork, struct inet6_cork *v6_cork); struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct rt6_info *rt, unsigned int flags, struct inet_cork_full *cork); static inline struct sk_buff *ip6_finish_skb(struct sock *sk) { return __ip6_make_skb(sk, &sk->sk_write_queue, 
&inet_sk(sk)->cork, &inet6_sk(sk)->cork); } int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool connected); struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *orig_dst); /* * skb processing functions */ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb); int ip6_forward(struct sk_buff *skb); int ip6_input(struct sk_buff *skb); int ip6_mc_input(struct sk_buff *skb); void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr, bool have_final); int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); /* * Extension header (options) processing */ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto, struct in6_addr **daddr_p, struct in6_addr *saddr); void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto); int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp, __be16 *frag_offp); bool ipv6_ext_hdr(u8 nexthdr); enum { IP6_FH_F_FRAG = (1 << 0), IP6_FH_F_AUTH = (1 << 1), IP6_FH_F_SKIP_RH = (1 << 2), }; /* find specified header and get offset to it */ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, unsigned short *fragoff, int *fragflg); int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type); struct in6_addr *fl6_update_dst(struct flowi6 *fl6, const struct ipv6_txoptions *opt, struct in6_addr *orig); /* * socket options (ipv6_sockglue.c) */ DECLARE_STATIC_KEY_FALSE(ip6_min_hopcount); int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int do_ipv6_getsockopt(struct sock *sk, int level, int optname, sockptr_t optval, sockptr_t optlen); int ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int __ip6_datagram_connect(struct sock *sk, struct sockaddr_unsized *addr, int addr_len); int ip6_datagram_connect(struct sock *sk, struct sockaddr_unsized *addr, int addr_len); int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr_unsized *addr, int addr_len); int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr); void ip6_datagram_release_cb(struct sock *sk); int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, int *addr_len); void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload); void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu); void inet6_cleanup_sock(struct sock *sk); void inet6_sock_destruct(struct sock *sk); int inet6_release(struct socket *sock); int inet6_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len); int inet6_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len); int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int peer); int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int inet6_compat_ioctl(struct 
socket *sock, unsigned int cmd, unsigned long arg); int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk); int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); /* * reassembly.c */ extern const struct proto_ops inet6_stream_ops; extern const struct proto_ops inet6_dgram_ops; extern const struct proto_ops inet6_sockraw_ops; struct group_source_req; struct group_filter; int ip6_mc_source(int add, int omode, struct sock *sk, struct group_source_req *pgsr); int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf, struct sockaddr_storage *list); int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, sockptr_t optval, size_t ss_offset); #ifdef CONFIG_PROC_FS int ac6_proc_init(struct net *net); void ac6_proc_exit(struct net *net); int raw6_proc_init(void); void raw6_proc_exit(void); int tcp6_proc_init(struct net *net); void tcp6_proc_exit(struct net *net); int udp6_proc_init(struct net *net); void udp6_proc_exit(struct net *net); int udplite6_proc_init(void); void udplite6_proc_exit(void); int ipv6_misc_proc_init(void); void ipv6_misc_proc_exit(void); int snmp6_register_dev(struct inet6_dev *idev); int snmp6_unregister_dev(struct inet6_dev *idev); #else static inline int ac6_proc_init(struct net *net) { return 0; } static inline void ac6_proc_exit(struct net *net) { } static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; } static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } #endif #ifdef CONFIG_SYSCTL struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); size_t ipv6_icmp_sysctl_table_size(void); struct ctl_table *ipv6_route_sysctl_init(struct net *net); size_t ipv6_route_sysctl_table_size(struct net *net); int ipv6_sysctl_register(void); void ipv6_sysctl_unregister(void); #endif int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, const struct in6_addr *addr, unsigned int mode); int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); static inline int ip6_sock_set_v6only(struct sock *sk) { if (inet_sk(sk)->inet_num) return -EINVAL; lock_sock(sk); sk->sk_ipv6only = true; release_sock(sk); return 0; } static inline void ip6_sock_set_recverr(struct sock *sk) { inet6_set_bit(RECVERR6, sk); } #define IPV6_PREFER_SRC_MASK (IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_PUBLIC | \ IPV6_PREFER_SRC_COA) static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val) { unsigned int prefmask = ~IPV6_PREFER_SRC_MASK; unsigned int pref = 0; /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */ switch (val & (IPV6_PREFER_SRC_PUBLIC | IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_PUBTMP_DEFAULT)) { case IPV6_PREFER_SRC_PUBLIC: pref |= IPV6_PREFER_SRC_PUBLIC; prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | IPV6_PREFER_SRC_TMP); break; case IPV6_PREFER_SRC_TMP: pref |= IPV6_PREFER_SRC_TMP; prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | IPV6_PREFER_SRC_TMP); break; case IPV6_PREFER_SRC_PUBTMP_DEFAULT: prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | IPV6_PREFER_SRC_TMP); break; case 0: break; default: return -EINVAL; } /* check HOME/COA conflicts */ switch (val & (IPV6_PREFER_SRC_HOME | IPV6_PREFER_SRC_COA)) { case IPV6_PREFER_SRC_HOME: prefmask &= ~IPV6_PREFER_SRC_COA; break; case IPV6_PREFER_SRC_COA: pref |= IPV6_PREFER_SRC_COA; break; case 0: break; default: return -EINVAL; } /* check CGA/NONCGA conflicts */ switch (val & 
	    (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
	case IPV6_PREFER_SRC_CGA:
	case IPV6_PREFER_SRC_NONCGA:
	case 0:
		break;
	default:
		return -EINVAL;
	}

	WRITE_ONCE(inet6_sk(sk)->srcprefs,
		   (READ_ONCE(inet6_sk(sk)->srcprefs) & prefmask) | pref);
	return 0;
}

static inline void ip6_sock_set_recvpktinfo(struct sock *sk)
{
	lock_sock(sk);
	inet6_sk(sk)->rxopt.bits.rxinfo = true;
	release_sock(sk);
}

#define IPV6_ADDR_WORDS 4

static inline void ipv6_addr_cpu_to_be32(__be32 *dst, const u32 *src)
{
	cpu_to_be32_array(dst, src, IPV6_ADDR_WORDS);
}

static inline void ipv6_addr_be32_to_cpu(u32 *dst, const __be32 *src)
{
	be32_to_cpu_array(dst, src, IPV6_ADDR_WORDS);
}
#endif /* _NET_IPV6_H */
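The first 32 bits of every IPv6 header carry the version, traffic class and flow label, and ip6_flow_hdr(), ip6_tclass() and ip6_flowlabel() above are thin wrappers around that packing. The stand-alone sketch below redoes the same arithmetic in host byte order; the kernel helpers operate on a network-order __be32 and convert with htonl()/ntohl(). The traffic class and flow label values are arbitrary examples.

/* Pack and unpack the Version/Traffic Class/Flow Label word of an IPv6
 * header, mirroring ip6_flow_hdr()/ip6_tclass()/ip6_flowlabel() above. */
#include <stdio.h>
#include <stdint.h>

#define EX_TCLASS_SHIFT		20
#define EX_FLOWLABEL_MASK	0x000FFFFFu

static uint32_t make_flow_word(uint8_t tclass, uint32_t flowlabel)
{
	/* Version 6 in the top nibble, then 8 bits of tclass, 20 of label. */
	return 0x60000000u | ((uint32_t)tclass << EX_TCLASS_SHIFT) |
	       (flowlabel & EX_FLOWLABEL_MASK);
}

int main(void)
{
	uint32_t w = make_flow_word(0xb8 /* DSCP EF(46), ECN 0 */, 0xbeef);

	printf("flow word: 0x%08x\n", w);		/* prints 0x6b80beef */
	printf("tclass:    0x%02x\n", (w >> EX_TCLASS_SHIFT) & 0xff);
	printf("flowlabel: 0x%05x\n", w & EX_FLOWLABEL_MASK);
	return 0;
}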
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * 25-Jul-1998 Major changes to allow for ip chain table * * 3-Jan-2000 Named tables to allow packet selection for different uses. */ /* * Format of an IP firewall descriptor * * src, dst, src_mask, dst_mask are always stored in network byte order. * flags are stored in host byte order (of course). * Port numbers are stored in HOST byte order. */ #ifndef _UAPI_IPTABLES_H #define _UAPI_IPTABLES_H #include <linux/types.h> #include <linux/compiler.h> #include <linux/if.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter/x_tables.h> #ifndef __KERNEL__ #define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN #define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN #define ipt_match xt_match #define ipt_target xt_target #define ipt_table xt_table #define ipt_get_revision xt_get_revision #define ipt_entry_match xt_entry_match #define ipt_entry_target xt_entry_target #define ipt_standard_target xt_standard_target #define ipt_error_target xt_error_target #define ipt_counters xt_counters #define IPT_CONTINUE XT_CONTINUE #define IPT_RETURN XT_RETURN /* This group is older than old (iptables < v1.4.0-rc1~89) */ #include <linux/netfilter/xt_tcpudp.h> #define ipt_udp xt_udp #define ipt_tcp xt_tcp #define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT #define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT #define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS #define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION #define IPT_TCP_INV_MASK XT_TCP_INV_MASK #define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT #define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT #define IPT_UDP_INV_MASK XT_UDP_INV_MASK /* The argument to IPT_SO_ADD_COUNTERS. */ #define ipt_counters_info xt_counters_info /* Standard return verdict, or do jump. */ #define IPT_STANDARD_TARGET XT_STANDARD_TARGET /* Error verdict. */ #define IPT_ERROR_TARGET XT_ERROR_TARGET /* fn returns 0 to continue iteration */ #define IPT_MATCH_ITERATE(e, fn, args...) \ XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args) /* fn returns 0 to continue iteration */ #define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) #endif /* Yes, Virginia, you have to zero the padding. */ struct ipt_ip { /* Source and destination IP addr */ struct in_addr src, dst; /* Mask for src and dest IP addr */ struct in_addr smsk, dmsk; char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; /* Protocol, 0 = ANY */ __u16 proto; /* Flags word */ __u8 flags; /* Inverse flags */ __u8 invflags; }; /* Values for "flag" field in struct ipt_ip (general ip structure). 
*/ #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ #define IPT_F_MASK 0x03 /* All possible flag bits mask. */ /* Values for "inv" field in struct ipt_ip. */ #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ #define IPT_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */ #define IPT_INV_TOS 0x04 /* Invert the sense of TOS. */ #define IPT_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */ #define IPT_INV_DSTIP 0x10 /* Invert the sense of DST OP. */ #define IPT_INV_FRAG 0x20 /* Invert the sense of FRAG. */ #define IPT_INV_PROTO XT_INV_PROTO #define IPT_INV_MASK 0x7F /* All possible flag bits mask. */ /* This structure defines each of the firewall rules. Consists of 3 parts which are 1) general IP header stuff 2) match specific stuff 3) the target to perform if the rule matches */ struct ipt_entry { struct ipt_ip ip; /* Mark with fields that we care about. */ unsigned int nfcache; /* Size of ipt_entry + matches */ __u16 target_offset; /* Size of ipt_entry + matches + target */ __u16 next_offset; /* Back pointer */ unsigned int comefrom; /* Packet and byte counters. */ struct xt_counters counters; /* The matches (if any), then the target. */ unsigned char elems[]; }; /* * New IP firewall options for [gs]etsockopt at the RAW IP level. * Unlike BSD Linux inherits IP options so you don't have to use a raw * socket for this. Instead we check rights in the calls. * * ATTENTION: check linux/in.h before adding new number here. */ #define IPT_BASE_CTL 64 #define IPT_SO_SET_REPLACE (IPT_BASE_CTL) #define IPT_SO_SET_ADD_COUNTERS (IPT_BASE_CTL + 1) #define IPT_SO_SET_MAX IPT_SO_SET_ADD_COUNTERS #define IPT_SO_GET_INFO (IPT_BASE_CTL) #define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1) #define IPT_SO_GET_REVISION_MATCH (IPT_BASE_CTL + 2) #define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3) #define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET /* ICMP matching stuff */ struct ipt_icmp { __u8 type; /* type to match */ __u8 code[2]; /* range of code */ __u8 invflags; /* Inverse flags */ }; /* Values for "inv" field for struct ipt_icmp. */ #define IPT_ICMP_INV 0x01 /* Invert the sense of type/code test */ /* The argument to IPT_SO_GET_INFO */ struct ipt_getinfo { /* Which table: caller fills this in. */ char name[XT_TABLE_MAXNAMELEN]; /* Kernel fills these in. */ /* Which hook entry points are valid: bitmask */ unsigned int valid_hooks; /* Hook entry points: one per netfilter hook. */ unsigned int hook_entry[NF_INET_NUMHOOKS]; /* Underflow points. */ unsigned int underflow[NF_INET_NUMHOOKS]; /* Number of entries */ unsigned int num_entries; /* Size of entries. */ unsigned int size; }; /* The argument to IPT_SO_SET_REPLACE. */ struct ipt_replace { /* Which table. */ char name[XT_TABLE_MAXNAMELEN]; /* Which hook entry points are valid: bitmask. You can't change this. */ unsigned int valid_hooks; /* Number of entries */ unsigned int num_entries; /* Total size of new entries */ unsigned int size; /* Hook entry points. */ unsigned int hook_entry[NF_INET_NUMHOOKS]; /* Underflow points. */ unsigned int underflow[NF_INET_NUMHOOKS]; /* Information about old entries: */ /* Number of counters (must be equal to current number of entries). */ unsigned int num_counters; /* The old entries' counters. */ struct xt_counters __user *counters; /* The entries (hang off end: not really an array). */ struct ipt_entry entries[]; }; /* The argument to IPT_SO_GET_ENTRIES. */ struct ipt_get_entries { /* Which table: user fills this in. 
 */
	char name[XT_TABLE_MAXNAMELEN];

	/* User fills this in: total entry size. */
	unsigned int size;

	/* The entries. */
	struct ipt_entry entrytable[];
};

/* Helper functions */
static __inline__ struct xt_entry_target *
ipt_get_target(struct ipt_entry *e)
{
	return (struct xt_entry_target *)((char *)e + e->target_offset);
}

/*
 * Main firewall chains definitions and global var's definitions.
 */
#endif /* _UAPI_IPTABLES_H */
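A hedged sketch of how the legacy binary interface declared above is consumed from user space (roughly what the iptables-legacy tools do): IPT_SO_GET_INFO learns the size of a table's ruleset blob, IPT_SO_GET_ENTRIES fetches it, and the rules are then walked with next_offset while ipt_get_target() locates each rule's target. This needs CAP_NET_ADMIN; the "filter" table is just an example and error handling is abbreviated.

/* Illustrative only: dump the protocol and target name of each rule in
 * the "filter" table via the getsockopt() interface defined above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_tables.h>

int main(void)
{
	struct ipt_getinfo info = {0};
	struct ipt_get_entries *entries;
	socklen_t len;
	unsigned int off;
	int fd;

	fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
	if (fd < 0)
		return 1;

	/* Step 1: how big is the ruleset blob for this table? */
	strcpy(info.name, "filter");
	len = sizeof(info);
	if (getsockopt(fd, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len))
		return 1;

	/* Step 2: fetch the entries themselves. */
	entries = calloc(1, sizeof(*entries) + info.size);
	strcpy(entries->name, "filter");
	entries->size = info.size;
	len = sizeof(*entries) + info.size;
	if (getsockopt(fd, IPPROTO_IP, IPT_SO_GET_ENTRIES, entries, &len))
		return 1;

	/* Step 3: next_offset chains one ipt_entry to the next;
	 * target_offset (via ipt_get_target()) finds the target blob. */
	for (off = 0; off < entries->size; ) {
		struct ipt_entry *e =
			(struct ipt_entry *)((char *)entries->entrytable + off);
		struct xt_entry_target *t = ipt_get_target(e);

		printf("entry at %u: proto %u, target '%s'\n",
		       off, e->ip.proto, t->u.user.name);
		off += e->next_offset;
	}
	free(entries);
	return 0;
}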
846 847 848 849 850 851 852 853 854 855 /* * net/tipc/msg.c: TIPC message header routines * * Copyright (c) 2000-2006, 2014-2015, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <net/sock.h> #include "core.h" #include "msg.h" #include "addr.h" #include "name_table.h" #include "crypto.h" #define BUF_ALIGN(x) ALIGN(x, 4) #define MAX_FORWARD_SIZE 1024 #ifdef CONFIG_TIPC_CRYPTO #define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16) #define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE) #else #define BUF_HEADROOM (LL_MAX_HEADER + 48) #define BUF_OVERHEAD BUF_HEADROOM #endif const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); /** * tipc_buf_acquire - creates a TIPC message buffer * @size: message size (including TIPC header) * @gfp: memory allocation flags * * Return: a new buffer with data pointers set to the specified size. * * NOTE: * Headroom is reserved to allow prepending of a data link header. * There may also be unrequested tailroom present at the buffer's end. 
*/ struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp) { struct sk_buff *skb; skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp); if (skb) { skb_reserve(skb, BUF_HEADROOM); skb_put(skb, size); skb->next = NULL; } return skb; } void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type, u32 hsize, u32 dnode) { memset(m, 0, hsize); msg_set_version(m); msg_set_user(m, user); msg_set_hdr_sz(m, hsize); msg_set_size(m, hsize); msg_set_prevnode(m, own_node); msg_set_type(m, type); if (hsize > SHORT_H_SIZE) { msg_set_orignode(m, own_node); msg_set_destnode(m, dnode); } } struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, uint data_sz, u32 dnode, u32 onode, u32 dport, u32 oport, int errcode) { struct tipc_msg *msg; struct sk_buff *buf; buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC); if (unlikely(!buf)) return NULL; msg = buf_msg(buf); tipc_msg_init(onode, msg, user, type, hdr_sz, dnode); msg_set_size(msg, hdr_sz + data_sz); msg_set_origport(msg, oport); msg_set_destport(msg, dport); msg_set_errcode(msg, errcode); return buf; } /* tipc_buf_append(): Append a buffer to the fragment list of another buffer * @*headbuf: in: NULL for first frag, otherwise value returned from prev call * out: set when successful non-complete reassembly, otherwise NULL * @*buf: in: the buffer to append. Always defined * out: head buf after successful complete reassembly, otherwise NULL * Returns 1 when reassembly complete, otherwise 0 */ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) { struct sk_buff *head = *headbuf; struct sk_buff *frag = *buf; struct sk_buff *tail = NULL; struct tipc_msg *msg; u32 fragid; int delta; bool headstolen; if (!frag) goto err; msg = buf_msg(frag); fragid = msg_type(msg); frag->next = NULL; skb_pull(frag, msg_hdr_sz(msg)); if (fragid == FIRST_FRAGMENT) { if (unlikely(head)) goto err; if (skb_has_frag_list(frag) && __skb_linearize(frag)) goto err; *buf = NULL; frag = skb_unshare(frag, GFP_ATOMIC); if (unlikely(!frag)) goto err; head = *headbuf = frag; TIPC_SKB_CB(head)->tail = NULL; return 0; } if (!head) goto err; /* Either the input skb ownership is transferred to headskb * or the input skb is freed, clear the reference to avoid * bad access on error path. */ *buf = NULL; if (skb_try_coalesce(head, frag, &headstolen, &delta)) { kfree_skb_partial(frag, headstolen); } else { tail = TIPC_SKB_CB(head)->tail; if (!skb_has_frag_list(head)) skb_shinfo(head)->frag_list = frag; else tail->next = frag; head->truesize += frag->truesize; head->data_len += frag->len; head->len += frag->len; TIPC_SKB_CB(head)->tail = frag; } if (fragid == LAST_FRAGMENT) { TIPC_SKB_CB(head)->validated = 0; if (unlikely(!tipc_msg_validate(&head))) goto err; *buf = head; TIPC_SKB_CB(head)->tail = NULL; *headbuf = NULL; return 1; } return 0; err: kfree_skb(*buf); kfree_skb(*headbuf); *buf = *headbuf = NULL; return 0; } /** * tipc_msg_append(): Append data to tail of an existing buffer queue * @_hdr: header to be used * @m: the data to be appended * @mss: max allowable size of buffer * @dlen: size of data to be appended * @txq: queue to append to * * Return: the number of 1k blocks appended or errno value */ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen, int mss, struct sk_buff_head *txq) { struct sk_buff *skb; int accounted, total, curr; int mlen, cpy, rem = dlen; struct tipc_msg *hdr; skb = skb_peek_tail(txq); accounted = skb ? 
msg_blocks(buf_msg(skb)) : 0; total = accounted; do { if (!skb || skb->len >= mss) { skb = tipc_buf_acquire(mss, GFP_KERNEL); if (unlikely(!skb)) return -ENOMEM; skb_orphan(skb); skb_trim(skb, MIN_H_SIZE); hdr = buf_msg(skb); skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE); msg_set_hdr_sz(hdr, MIN_H_SIZE); msg_set_size(hdr, MIN_H_SIZE); __skb_queue_tail(txq, skb); total += 1; } hdr = buf_msg(skb); curr = msg_blocks(hdr); mlen = msg_size(hdr); cpy = min_t(size_t, rem, mss - mlen); if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) return -EFAULT; msg_set_size(hdr, mlen + cpy); skb_put(skb, cpy); rem -= cpy; total += msg_blocks(hdr) - curr; } while (rem > 0); return total - accounted; } /* tipc_msg_validate - validate basic format of received message * * This routine ensures a TIPC message has an acceptable header, and at least * as much data as the header indicates it should. The routine also ensures * that the entire message header is stored in the main fragment of the message * buffer, to simplify future access to message header fields. * * Note: Having extra info present in the message header or data areas is OK. * TIPC will ignore the excess, under the assumption that it is optional info * introduced by a later release of the protocol. */ bool tipc_msg_validate(struct sk_buff **_skb) { struct sk_buff *skb = *_skb; struct tipc_msg *hdr; int msz, hsz; /* Ensure that flow control ratio condition is satisfied */ if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) { skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC); if (!skb) return false; kfree_skb(*_skb); *_skb = skb; } if (unlikely(TIPC_SKB_CB(skb)->validated)) return true; if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE))) return false; hsz = msg_hdr_sz(buf_msg(skb)); if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE)) return false; if (unlikely(!pskb_may_pull(skb, hsz))) return false; hdr = buf_msg(skb); if (unlikely(msg_version(hdr) != TIPC_VERSION)) return false; msz = msg_size(hdr); if (unlikely(msz < hsz)) return false; if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE)) return false; if (unlikely(skb->len < msz)) return false; TIPC_SKB_CB(skb)->validated = 1; return true; } /** * tipc_msg_fragment - build a fragment skb list for TIPC message * * @skb: TIPC message skb * @hdr: internal msg header to be put on the top of the fragments * @pktmax: max size of a fragment incl. the header * @frags: returned fragment skb list * * Return: 0 if the fragmentation is successful, otherwise: -EINVAL * or -ENOMEM */ int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr, int pktmax, struct sk_buff_head *frags) { int pktno, nof_fragms, dsz, dmax, eat; struct tipc_msg *_hdr; struct sk_buff *_skb; u8 *data; /* Non-linear buffer? 
*/ if (skb_linearize(skb)) return -ENOMEM; data = (u8 *)skb->data; dsz = msg_size(buf_msg(skb)); dmax = pktmax - INT_H_SIZE; if (dsz <= dmax || !dmax) return -EINVAL; nof_fragms = dsz / dmax + 1; for (pktno = 1; pktno <= nof_fragms; pktno++) { if (pktno < nof_fragms) eat = dmax; else eat = dsz % dmax; /* Allocate a new fragment */ _skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC); if (!_skb) goto error; skb_orphan(_skb); __skb_queue_tail(frags, _skb); /* Copy header & data to the fragment */ skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE); skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat); data += eat; /* Update the fragment's header */ _hdr = buf_msg(_skb); msg_set_fragm_no(_hdr, pktno); msg_set_nof_fragms(_hdr, nof_fragms); msg_set_size(_hdr, INT_H_SIZE + eat); } return 0; error: __skb_queue_purge(frags); __skb_queue_head_init(frags); return -ENOMEM; } /** * tipc_msg_build - create buffer chain containing specified header and data * @mhdr: Message header, to be prepended to data * @m: User message * @offset: buffer offset for fragmented messages (FIXME) * @dsz: Total length of user data * @pktmax: Max packet size that can be used * @list: Buffer or chain of buffers to be returned to caller * * Note that the recursive call we are making here is safe, since it can * logically go only one further level down. * * Return: message data size or errno: -ENOMEM, -EFAULT */ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, int dsz, int pktmax, struct sk_buff_head *list) { int mhsz = msg_hdr_sz(mhdr); struct tipc_msg pkthdr; int msz = mhsz + dsz; int pktrem = pktmax; struct sk_buff *skb; int drem = dsz; int pktno = 1; char *pktpos; int pktsz; int rc; msg_set_size(mhdr, msz); /* No fragmentation needed? */ if (likely(msz <= pktmax)) { skb = tipc_buf_acquire(msz, GFP_KERNEL); /* Fall back to smaller MTU if node local message */ if (unlikely(!skb)) { if (pktmax != MAX_MSG_SIZE) return -ENOMEM; rc = tipc_msg_build(mhdr, m, offset, dsz, one_page_mtu, list); if (rc != dsz) return rc; if (tipc_msg_assemble(list)) return dsz; return -ENOMEM; } skb_orphan(skb); __skb_queue_tail(list, skb); skb_copy_to_linear_data(skb, mhdr, mhsz); pktpos = skb->data + mhsz; if (copy_from_iter_full(pktpos, dsz, &m->msg_iter)) return dsz; rc = -EFAULT; goto error; } /* Prepare reusable fragment header */ tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr)); msg_set_size(&pkthdr, pktmax); msg_set_fragm_no(&pkthdr, pktno); msg_set_importance(&pkthdr, msg_importance(mhdr)); /* Prepare first fragment */ skb = tipc_buf_acquire(pktmax, GFP_KERNEL); if (!skb) return -ENOMEM; skb_orphan(skb); __skb_queue_tail(list, skb); pktpos = skb->data; skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); pktpos += INT_H_SIZE; pktrem -= INT_H_SIZE; skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz); pktpos += mhsz; pktrem -= mhsz; do { if (drem < pktrem) pktrem = drem; if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) { rc = -EFAULT; goto error; } drem -= pktrem; if (!drem) break; /* Prepare new fragment: */ if (drem < (pktmax - INT_H_SIZE)) pktsz = drem + INT_H_SIZE; else pktsz = pktmax; skb = tipc_buf_acquire(pktsz, GFP_KERNEL); if (!skb) { rc = -ENOMEM; goto error; } skb_orphan(skb); __skb_queue_tail(list, skb); msg_set_type(&pkthdr, FRAGMENT); msg_set_size(&pkthdr, pktsz); msg_set_fragm_no(&pkthdr, ++pktno); skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); pktpos = skb->data + INT_H_SIZE; pktrem = pktsz - INT_H_SIZE; } while (1); 
msg_set_type(buf_msg(skb), LAST_FRAGMENT); return dsz; error: __skb_queue_purge(list); __skb_queue_head_init(list); return rc; } /** * tipc_msg_bundle - Append contents of a buffer to tail of an existing one * @bskb: the bundle buffer to append to * @msg: message to be appended * @max: max allowable size for the bundle buffer * * Return: "true" if bundling has been performed, otherwise "false" */ static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg, u32 max) { struct tipc_msg *bmsg = buf_msg(bskb); u32 msz, bsz, offset, pad; msz = msg_size(msg); bsz = msg_size(bmsg); offset = BUF_ALIGN(bsz); pad = offset - bsz; if (unlikely(skb_tailroom(bskb) < (pad + msz))) return false; if (unlikely(max < (offset + msz))) return false; skb_put(bskb, pad + msz); skb_copy_to_linear_data_offset(bskb, offset, msg, msz); msg_set_size(bmsg, offset + msz); msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1); return true; } /** * tipc_msg_try_bundle - Try to bundle a new message to the last one * @tskb: the last/target message to which the new one will be appended * @skb: the new message skb pointer * @mss: max message size (header inclusive) * @dnode: destination node for the message * @new_bundle: if this call made a new bundle or not * * Return: "true" if the new message skb is potential for bundling this time or * later, in the case a bundling has been done this time, the skb is consumed * (the skb pointer = NULL). * Otherwise, "false" if the skb cannot be bundled at all. */ bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss, u32 dnode, bool *new_bundle) { struct tipc_msg *msg, *inner, *outer; u32 tsz; /* First, check if the new buffer is suitable for bundling */ msg = buf_msg(*skb); if (msg_user(msg) == MSG_FRAGMENTER) return false; if (msg_user(msg) == TUNNEL_PROTOCOL) return false; if (msg_user(msg) == BCAST_PROTOCOL) return false; if (mss <= INT_H_SIZE + msg_size(msg)) return false; /* Ok, but the last/target buffer can be empty? */ if (unlikely(!tskb)) return true; /* Is it a bundle already? Try to bundle the new message to it */ if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) { *new_bundle = false; goto bundle; } /* Make a new bundle of the two messages if possible */ tsz = msg_size(buf_msg(tskb)); if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg))) return true; if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE, GFP_ATOMIC))) return true; inner = buf_msg(tskb); skb_push(tskb, INT_H_SIZE); outer = buf_msg(tskb); tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE, dnode); msg_set_importance(outer, msg_importance(inner)); msg_set_size(outer, INT_H_SIZE + tsz); msg_set_msgcnt(outer, 1); *new_bundle = true; bundle: if (likely(tipc_msg_bundle(tskb, msg, mss))) { consume_skb(*skb); *skb = NULL; } return true; } /** * tipc_msg_extract(): extract bundled inner packet from buffer * @skb: buffer to be extracted from. * @iskb: extracted inner buffer, to be returned * @pos: position in outer message of msg to be extracted. * Returns position of next msg. 
* Consumes outer buffer when last packet extracted * Return: true when there is an extracted buffer, otherwise false */ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos) { struct tipc_msg *hdr, *ihdr; int imsz; *iskb = NULL; if (unlikely(skb_linearize(skb))) goto none; hdr = buf_msg(skb); if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE))) goto none; ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos); imsz = msg_size(ihdr); if ((*pos + imsz) > msg_data_sz(hdr)) goto none; *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC); if (!*iskb) goto none; skb_copy_to_linear_data(*iskb, ihdr, imsz); if (unlikely(!tipc_msg_validate(iskb))) goto none; *pos += BUF_ALIGN(imsz); return true; none: kfree_skb(skb); kfree_skb(*iskb); *iskb = NULL; return false; } /** * tipc_msg_reverse(): swap source and destination addresses and add error code * @own_node: originating node id for reversed message * @skb: buffer containing message to be reversed; will be consumed * @err: error code to be set in message, if any * Replaces consumed buffer with new one when successful * Return: true if success, otherwise false */ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) { struct sk_buff *_skb = *skb; struct tipc_msg *_hdr, *hdr; int hlen, dlen; if (skb_linearize(_skb)) goto exit; _hdr = buf_msg(_skb); dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE); hlen = msg_hdr_sz(_hdr); if (msg_dest_droppable(_hdr)) goto exit; if (msg_errcode(_hdr)) goto exit; /* Never return SHORT header */ if (hlen == SHORT_H_SIZE) hlen = BASIC_H_SIZE; /* Don't return data along with SYN+, - sender has a clone */ if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD) dlen = 0; /* Allocate new buffer to return */ *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC); if (!*skb) goto exit; memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr)); memcpy((*skb)->data + hlen, msg_data(_hdr), dlen); /* Build reverse header in new buffer */ hdr = buf_msg(*skb); msg_set_hdr_sz(hdr, hlen); msg_set_errcode(hdr, err); msg_set_non_seq(hdr, 0); msg_set_origport(hdr, msg_destport(_hdr)); msg_set_destport(hdr, msg_origport(_hdr)); msg_set_destnode(hdr, msg_prevnode(_hdr)); msg_set_prevnode(hdr, own_node); msg_set_orignode(hdr, own_node); msg_set_size(hdr, hlen + dlen); skb_orphan(_skb); kfree_skb(_skb); return true; exit: kfree_skb(_skb); *skb = NULL; return false; } bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy) { struct sk_buff *skb, *_skb; skb_queue_walk(msg, skb) { _skb = skb_clone(skb, GFP_ATOMIC); if (!_skb) { __skb_queue_purge(cpy); pr_err_ratelimited("Failed to clone buffer chain\n"); return false; } __skb_queue_tail(cpy, _skb); } return true; } /** * tipc_msg_lookup_dest(): try to find new destination for named message * @net: pointer to associated network namespace * @skb: the buffer containing the message. 
* @err: error code to be used by caller if lookup fails * Does not consume buffer * Return: true if a destination is found, false otherwise */ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) { struct tipc_msg *msg = buf_msg(skb); u32 scope = msg_lookup_scope(msg); u32 self = tipc_own_addr(net); u32 inst = msg_nameinst(msg); struct tipc_socket_addr sk; struct tipc_uaddr ua; if (!msg_isdata(msg)) return false; if (!msg_named(msg)) return false; if (msg_errcode(msg)) return false; *err = TIPC_ERR_NO_NAME; if (skb_linearize(skb)) return false; msg = buf_msg(skb); if (msg_reroute_cnt(msg)) return false; tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope, msg_nametype(msg), inst, inst); sk.node = tipc_scope2node(net, scope); if (!tipc_nametbl_lookup_anycast(net, &ua, &sk)) return false; msg_incr_reroute_cnt(msg); if (sk.node != self) msg_set_prevnode(msg, self); msg_set_destnode(msg, sk.node); msg_set_destport(msg, sk.ref); *err = TIPC_OK; return true; } /* tipc_msg_assemble() - assemble chain of fragments into one message */ bool tipc_msg_assemble(struct sk_buff_head *list) { struct sk_buff *skb, *tmp = NULL; if (skb_queue_len(list) == 1) return true; while ((skb = __skb_dequeue(list))) { skb->next = NULL; if (tipc_buf_append(&tmp, &skb)) { __skb_queue_tail(list, skb); return true; } if (!tmp) break; } __skb_queue_purge(list); __skb_queue_head_init(list); pr_warn("Failed do assemble buffer\n"); return false; } /* tipc_msg_reassemble() - clone a buffer chain of fragments and * reassemble the clones into one message */ bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq) { struct sk_buff *skb, *_skb; struct sk_buff *frag = NULL; struct sk_buff *head = NULL; int hdr_len; /* Copy header if single buffer */ if (skb_queue_len(list) == 1) { skb = skb_peek(list); hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb)); _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC); if (!_skb) return false; __skb_queue_tail(rcvq, _skb); return true; } /* Clone all fragments and reassemble */ skb_queue_walk(list, skb) { frag = skb_clone(skb, GFP_ATOMIC); if (!frag) goto error; frag->next = NULL; if (tipc_buf_append(&head, &frag)) break; if (!head) goto error; } __skb_queue_tail(rcvq, frag); return true; error: pr_warn("Failed do clone local mcast rcv buffer\n"); kfree_skb(head); return false; } bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg, struct sk_buff_head *cpy) { struct sk_buff *skb, *_skb; skb_queue_walk(msg, skb) { _skb = pskb_copy(skb, GFP_ATOMIC); if (!_skb) { __skb_queue_purge(cpy); return false; } msg_set_destnode(buf_msg(_skb), dst); __skb_queue_tail(cpy, _skb); } return true; } /* tipc_skb_queue_sorted(); sort pkt into list according to sequence number * @list: list to be appended to * @seqno: sequence number of buffer to add * @skb: buffer to add */ bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno, struct sk_buff *skb) { struct sk_buff *_skb, *tmp; if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) { __skb_queue_head(list, skb); return true; } if (more(seqno, buf_seqno(skb_peek_tail(list)))) { __skb_queue_tail(list, skb); return true; } skb_queue_walk_safe(list, _skb, tmp) { if (more(seqno, buf_seqno(_skb))) continue; if (seqno == buf_seqno(_skb)) break; __skb_queue_before(list, _skb, skb); return true; } kfree_skb(skb); return false; } void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb, struct sk_buff_head *xmitq) { if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) __skb_queue_tail(xmitq, skb); }
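tipc_msg_bundle() above appends a message at the next 4-byte boundary past the current bundle size (BUF_ALIGN) and only does so if both the skb tailroom and the link limit still allow it. The standalone sketch below reproduces just that offset/padding arithmetic outside the kernel; bundle_fits() is an invented helper name, not a TIPC function.

/* Standalone sketch of the 4-byte alignment math used when appending a
 * message to a bundle (mirrors BUF_ALIGN()/tipc_msg_bundle() above).
 * bundle_fits() is a made-up helper name for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define BUF_ALIGN(x) (((x) + 3u) & ~3u)		/* same as ALIGN(x, 4) */

static bool bundle_fits(unsigned int bsz, unsigned int msz,
			unsigned int tailroom, unsigned int max)
{
	unsigned int offset = BUF_ALIGN(bsz);	/* where the new msg starts */
	unsigned int pad = offset - bsz;	/* bytes spent on padding   */

	if (tailroom < pad + msz)		/* no room left in the skb  */
		return false;
	if (max < offset + msz)			/* would exceed bundle limit */
		return false;
	return true;
}

int main(void)
{
	/* A 70-byte bundle grows to offset 72, so a 100-byte message needs
	 * 2 bytes of padding plus 100 bytes of tailroom.
	 */
	printf("fits: %d\n", bundle_fits(70, 100, 150, 1500));
	printf("fits: %d\n", bundle_fits(70, 100, 90, 1500));
	return 0;
}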
3 3 3 3 3 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 // SPDX-License-Identifier: GPL-2.0-or-later /* Request key authorisation token key definition. * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * See Documentation/security/keys/request-key.rst */ #include <linux/sched.h> #include <linux/err.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "internal.h" #include <keys/request_key_auth-type.h> static int request_key_auth_preparse(struct key_preparsed_payload *); static void request_key_auth_free_preparse(struct key_preparsed_payload *); static int request_key_auth_instantiate(struct key *, struct key_preparsed_payload *); static void request_key_auth_describe(const struct key *, struct seq_file *); static void request_key_auth_revoke(struct key *); static void request_key_auth_destroy(struct key *); static long request_key_auth_read(const struct key *, char *, size_t); /* * The request-key authorisation key type definition. */ struct key_type key_type_request_key_auth = { .name = ".request_key_auth", .def_datalen = sizeof(struct request_key_auth), .preparse = request_key_auth_preparse, .free_preparse = request_key_auth_free_preparse, .instantiate = request_key_auth_instantiate, .describe = request_key_auth_describe, .revoke = request_key_auth_revoke, .destroy = request_key_auth_destroy, .read = request_key_auth_read, }; static int request_key_auth_preparse(struct key_preparsed_payload *prep) { return 0; } static void request_key_auth_free_preparse(struct key_preparsed_payload *prep) { } /* * Instantiate a request-key authorisation key. */ static int request_key_auth_instantiate(struct key *key, struct key_preparsed_payload *prep) { rcu_assign_keypointer(key, (struct request_key_auth *)prep->data); return 0; } /* * Describe an authorisation token. */ static void request_key_auth_describe(const struct key *key, struct seq_file *m) { struct request_key_auth *rka = dereference_key_rcu(key); if (!rka) return; seq_puts(m, "key:"); seq_puts(m, key->description); if (key_is_positive(key)) seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); } /* * Read the callout_info data (retrieves the callout information). 
* - the key's semaphore is read-locked */ static long request_key_auth_read(const struct key *key, char *buffer, size_t buflen) { struct request_key_auth *rka = dereference_key_locked(key); size_t datalen; long ret; if (!rka) return -EKEYREVOKED; datalen = rka->callout_len; ret = datalen; /* we can return the data as is */ if (buffer && buflen > 0) { if (buflen > datalen) buflen = datalen; memcpy(buffer, rka->callout_info, buflen); } return ret; } static void free_request_key_auth(struct request_key_auth *rka) { if (!rka) return; key_put(rka->target_key); key_put(rka->dest_keyring); if (rka->cred) put_cred(rka->cred); kfree(rka->callout_info); kfree(rka); } /* * Dispose of the request_key_auth record under RCU conditions */ static void request_key_auth_rcu_disposal(struct rcu_head *rcu) { struct request_key_auth *rka = container_of(rcu, struct request_key_auth, rcu); free_request_key_auth(rka); } /* * Handle revocation of an authorisation token key. * * Called with the key sem write-locked. */ static void request_key_auth_revoke(struct key *key) { struct request_key_auth *rka = dereference_key_locked(key); kenter("{%d}", key->serial); rcu_assign_keypointer(key, NULL); call_rcu(&rka->rcu, request_key_auth_rcu_disposal); } /* * Destroy an instantiation authorisation token key. */ static void request_key_auth_destroy(struct key *key) { struct request_key_auth *rka = rcu_access_pointer(key->payload.rcu_data0); kenter("{%d}", key->serial); if (rka) { rcu_assign_keypointer(key, NULL); call_rcu(&rka->rcu, request_key_auth_rcu_disposal); } } /* * Create an authorisation token for /sbin/request-key or whoever to gain * access to the caller's security data. */ struct key *request_key_auth_new(struct key *target, const char *op, const void *callout_info, size_t callout_len, struct key *dest_keyring) { struct request_key_auth *rka, *irka; const struct cred *cred = current_cred(); struct key *authkey = NULL; char desc[20]; int ret = -ENOMEM; kenter("%d,", target->serial); /* allocate a auth record */ rka = kzalloc(sizeof(*rka), GFP_KERNEL); if (!rka) goto error; rka->callout_info = kmemdup(callout_info, callout_len, GFP_KERNEL); if (!rka->callout_info) goto error_free_rka; rka->callout_len = callout_len; strscpy(rka->op, op, sizeof(rka->op)); /* see if the calling process is already servicing the key request of * another process */ if (cred->request_key_auth) { /* it is - use that instantiation context here too */ down_read(&cred->request_key_auth->sem); /* if the auth key has been revoked, then the key we're * servicing is already instantiated */ if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags)) { up_read(&cred->request_key_auth->sem); ret = -EKEYREVOKED; goto error_free_rka; } irka = cred->request_key_auth->payload.data[0]; rka->cred = get_cred(irka->cred); rka->pid = irka->pid; up_read(&cred->request_key_auth->sem); } else { /* it isn't - use this process as the context */ rka->cred = get_cred(cred); rka->pid = current->pid; } rka->target_key = key_get(target); rka->dest_keyring = key_get(dest_keyring); /* allocate the auth key */ sprintf(desc, "%x", target->serial); authkey = key_alloc(&key_type_request_key_auth, desc, cred->fsuid, cred->fsgid, cred, KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_POS_LINK | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL); if (IS_ERR(authkey)) { ret = PTR_ERR(authkey); goto error_free_rka; } /* construct the auth key */ ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL); if (ret < 0) goto error_put_authkey; kleave(" = {%d,%d}", 
authkey->serial, refcount_read(&authkey->usage)); return authkey; error_put_authkey: key_put(authkey); error_free_rka: free_request_key_auth(rka); error: kleave("= %d", ret); return ERR_PTR(ret); } /* * Search the current process's keyrings for the authorisation key for * instantiation of a key. */ struct key *key_get_instantiation_authkey(key_serial_t target_id) { char description[16]; struct keyring_search_context ctx = { .index_key.type = &key_type_request_key_auth, .index_key.description = description, .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = (KEYRING_SEARCH_DO_STATE_CHECK | KEYRING_SEARCH_RECURSE), }; struct key *authkey; key_ref_t authkey_ref; ctx.index_key.desc_len = sprintf(description, "%x", target_id); rcu_read_lock(); authkey_ref = search_process_keyrings_rcu(&ctx); rcu_read_unlock(); if (IS_ERR(authkey_ref)) { authkey = ERR_CAST(authkey_ref); if (authkey == ERR_PTR(-EAGAIN)) authkey = ERR_PTR(-ENOKEY); goto error; } authkey = key_ref_to_ptr(authkey_ref); if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) { key_put(authkey); authkey = ERR_PTR(-EKEYREVOKED); } error: return authkey; }
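request_key_auth_read() copies at most buflen bytes of the callout data but still returns the full callout_len, so a caller whose buffer was too small learns how much space to allocate for a retry. The userspace sketch below illustrates that convention; demo_read() is an invented name, not a kernel or keyutils interface.

/* Userspace sketch of the read convention used by request_key_auth_read():
 * copy at most buflen bytes, but always return the full data length so the
 * caller can detect truncation and retry with a larger buffer.
 * demo_read() is an invented name, not a kernel or keyutils API.
 */
#include <stdio.h>
#include <string.h>

static long demo_read(const char *data, size_t datalen,
		      char *buffer, size_t buflen)
{
	long ret = (long)datalen;	/* full length, even if truncated */

	if (buffer && buflen > 0) {
		if (buflen > datalen)
			buflen = datalen;
		memcpy(buffer, data, buflen);
	}
	return ret;
}

int main(void)
{
	const char payload[] = "callout information";
	char small[8];
	char big[32];
	long need;

	/* First call with a small buffer: copy is truncated, full length
	 * is still reported back.
	 */
	need = demo_read(payload, sizeof(payload), small, sizeof(small));
	printf("need %ld bytes, copied \"%.*s...\"\n",
	       need, (int)sizeof(small), small);

	/* Retry with a buffer sized from the returned length. */
	demo_read(payload, sizeof(payload), big, sizeof(big));
	printf("full payload: %s\n", big);
	return 0;
}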
1 903 905 2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 /* Copyright 2011, Siemens AG * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ /* Based on patches from Jon Smirl <jonsmirl@gmail.com> * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* Jon's code is based on 6lowpan implementation for Contiki which is: * Copyright (c) 2008, Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ieee802154.h> #include <linux/if_arp.h> #include <net/ipv6.h> #include <net/netdev_lock.h> #include "6lowpan_i.h" static int open_count; static const struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; static int lowpan_dev_init(struct net_device *ldev) { netdev_lockdep_set_classes(ldev); return 0; } static int lowpan_open(struct net_device *dev) { if (!open_count) lowpan_rx_init(); open_count++; return 0; } static int lowpan_stop(struct net_device *dev) { open_count--; if (!open_count) lowpan_rx_exit(); return 0; } static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n) { struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n)); /* default no short_addr is available for a neighbour */ neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC); return 0; } static int lowpan_get_iflink(const struct net_device *dev) { return READ_ONCE(lowpan_802154_dev(dev)->wdev->ifindex); } static const struct net_device_ops lowpan_netdev_ops = { .ndo_init = lowpan_dev_init, .ndo_start_xmit = lowpan_xmit, .ndo_open = lowpan_open, .ndo_stop = lowpan_stop, .ndo_neigh_construct = lowpan_neigh_construct, .ndo_get_iflink = lowpan_get_iflink, }; static void lowpan_setup(struct net_device *ldev) { memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN); /* We need an ipv6hdr as minimum len when calling xmit */ ldev->hard_header_len = sizeof(struct ipv6hdr); ldev->flags = IFF_BROADCAST | IFF_MULTICAST; ldev->priv_flags |= IFF_NO_QUEUE; ldev->netdev_ops = &lowpan_netdev_ops; ldev->header_ops = &lowpan_header_ops; ldev->needs_free_netdev = true; ldev->netns_immutable = true; } static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) return -EINVAL; } return 0; } static int lowpan_newlink(struct net_device *ldev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct nlattr **tb = params->tb; struct net_device *wdev; int ret; ASSERT_RTNL(); pr_debug("adding new link\n"); if (!tb[IFLA_LINK]) return -EINVAL; if (params->link_net && !net_eq(params->link_net, dev_net(ldev))) return -EINVAL; /* find and hold wpan device */ wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK])); if (!wdev) return -ENODEV; if (wdev->type != ARPHRD_IEEE802154) { dev_put(wdev); return -EINVAL; } if (wdev->ieee802154_ptr->lowpan_dev) { dev_put(wdev); return -EBUSY; } lowpan_802154_dev(ldev)->wdev = wdev; /* Set the lowpan hardware address to the wpan hardware address. */ __dev_addr_set(ldev, wdev->dev_addr, IEEE802154_ADDR_LEN); /* We need headroom for possible wpan_dev_hard_header call and tailroom * for encryption/fcs handling. The lowpan interface will replace * the IPv6 header with 6LoWPAN header. At worst case the 6LoWPAN * header has LOWPAN_IPHC_MAX_HEADER_LEN more bytes than the IPv6 * header. 
*/ ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN + wdev->needed_headroom; ldev->needed_tailroom = wdev->needed_tailroom; ldev->neigh_priv_len = sizeof(struct lowpan_802154_neigh); ret = lowpan_register_netdevice(ldev, LOWPAN_LLTYPE_IEEE802154); if (ret < 0) { dev_put(wdev); return ret; } wdev->ieee802154_ptr->lowpan_dev = ldev; return 0; } static void lowpan_dellink(struct net_device *ldev, struct list_head *head) { struct net_device *wdev = lowpan_802154_dev(ldev)->wdev; ASSERT_RTNL(); wdev->ieee802154_ptr->lowpan_dev = NULL; lowpan_unregister_netdevice(ldev); dev_put(wdev); } static struct rtnl_link_ops lowpan_link_ops __read_mostly = { .kind = "lowpan", .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)), .setup = lowpan_setup, .newlink = lowpan_newlink, .dellink = lowpan_dellink, .validate = lowpan_validate, }; static inline int __init lowpan_netlink_init(void) { return rtnl_link_register(&lowpan_link_ops); } static inline void lowpan_netlink_fini(void) { rtnl_link_unregister(&lowpan_link_ops); } static int lowpan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct wpan_dev *wpan_dev; if (ndev->type != ARPHRD_IEEE802154) return NOTIFY_DONE; wpan_dev = ndev->ieee802154_ptr; if (!wpan_dev) return NOTIFY_DONE; switch (event) { case NETDEV_UNREGISTER: /* Check if wpan interface is unregistered that we * also delete possible lowpan interfaces which belongs * to the wpan interface. */ if (wpan_dev->lowpan_dev) lowpan_dellink(wpan_dev->lowpan_dev, NULL); break; default: return NOTIFY_DONE; } return NOTIFY_OK; } static struct notifier_block lowpan_dev_notifier = { .notifier_call = lowpan_device_event, }; static int __init lowpan_init_module(void) { int err = 0; err = lowpan_net_frag_init(); if (err < 0) goto out; err = lowpan_netlink_init(); if (err < 0) goto out_frag; err = register_netdevice_notifier(&lowpan_dev_notifier); if (err < 0) goto out_pack; return 0; out_pack: lowpan_netlink_fini(); out_frag: lowpan_net_frag_exit(); out: return err; } static void __exit lowpan_cleanup_module(void) { lowpan_netlink_fini(); lowpan_net_frag_exit(); unregister_netdevice_notifier(&lowpan_dev_notifier); } module_init(lowpan_init_module); module_exit(lowpan_cleanup_module); MODULE_DESCRIPTION("IPv6 over Low power Wireless Personal Area Network IEEE 802.15.4 core"); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("lowpan");
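lowpan_open() and lowpan_stop() keep a module-wide open_count so the receive machinery is initialised only when the first 6LoWPAN interface comes up and torn down when the last one goes away (in the kernel these paths are serialised by the networking core). Below is a minimal standalone model of that first-open/last-close pattern; all names are invented stubs.

/* Standalone model of the first-open / last-close counting used by
 * lowpan_open()/lowpan_stop(): heavyweight setup happens only when the
 * first interface is opened, teardown only when the last one closes.
 * rx_init/rx_exit/demo_open/demo_stop are illustrative, not kernel APIs.
 */
#include <stdio.h>

static int open_count;

static void rx_init(void) { puts("rx_init: register packet handler"); }
static void rx_exit(void) { puts("rx_exit: unregister packet handler"); }

static void demo_open(void)
{
	if (!open_count)	/* first user brings the machinery up */
		rx_init();
	open_count++;
}

static void demo_stop(void)
{
	open_count--;
	if (!open_count)	/* last user tears it down again */
		rx_exit();
}

int main(void)
{
	demo_open();	/* prints rx_init */
	demo_open();	/* no output: already initialised */
	demo_stop();	/* no output: still one user left */
	demo_stop();	/* prints rx_exit */
	return 0;
}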
432 432 432 432 6 6 6 432 432 432 6 6 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 // SPDX-License-Identifier: GPL-2.0 /* * xfrm6_policy.c: based on xfrm4_policy.c * * Authors: * Mitsuru KANDA @USAGI * Kazunori MIYAZAWA @USAGI * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * IPv6 support * YOSHIFUJI Hideaki * Split up af-specific portion * */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <net/addrconf.h> #include <net/dst.h> #include <net/xfrm.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/l3mdev.h> static struct dst_entry *xfrm6_dst_lookup(const struct xfrm_dst_lookup_params *params) { struct flowi6 fl6; struct dst_entry *dst; int err; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_l3mdev = l3mdev_master_ifindex_by_index(params->net, params->oif); fl6.flowi6_mark = params->mark; memcpy(&fl6.daddr, params->daddr, sizeof(fl6.daddr)); if (params->saddr) memcpy(&fl6.saddr, params->saddr, sizeof(fl6.saddr)); fl6.flowi4_proto = params->ipproto; fl6.uli = params->uli; dst = ip6_route_output(params->net, NULL, &fl6); err = dst->error; if (dst->error) { dst_release(dst); dst = ERR_PTR(err); } return dst; } static int xfrm6_get_saddr(xfrm_address_t *saddr, const struct xfrm_dst_lookup_params *params) { struct dst_entry *dst; struct net_device *dev; struct inet6_dev *idev; dst = xfrm6_dst_lookup(params); if (IS_ERR(dst)) return -EHOSTUNREACH; idev = ip6_dst_idev(dst); if (!idev) { dst_release(dst); return -EHOSTUNREACH; } dev = idev->dev; ipv6_dev_get_saddr(dev_net(dev), dev, &params->daddr->in6, 0, &saddr->in6); dst_release(dst); return 0; } static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, const struct flowi *fl) { struct rt6_info *rt = dst_rt6_info(xdst->route); xdst->u.dst.dev = dev; netdev_hold(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC); xdst->u.rt6.rt6i_idev = in6_dev_get(dev); if (!xdst->u.rt6.rt6i_idev) { netdev_put(dev, &xdst->u.dst.dev_tracker); return -ENODEV; } /* Sheit... I remember I did this right. 
Apparently, * it was magically lost, so this code needs audit */ xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | RTF_LOCAL); xdst->route_cookie = rt6_get_cookie(rt); xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; xdst->u.rt6.rt6i_dst = rt->rt6i_dst; xdst->u.rt6.rt6i_src = rt->rt6i_src; rt6_uncached_list_add(&xdst->u.rt6); return 0; } static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu, bool confirm_neigh) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct dst_entry *path = xdst->route; path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh); } static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct dst_entry *path = xdst->route; path->ops->redirect(path, sk, skb); } static void xfrm6_dst_destroy(struct dst_entry *dst) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; dst_destroy_metrics_generic(dst); rt6_uncached_list_del(&xdst->u.rt6); if (likely(xdst->u.rt6.rt6i_idev)) in6_dev_put(xdst->u.rt6.rt6i_idev); xfrm_dst_destroy(xdst); } static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev) { struct xfrm_dst *xdst; xdst = (struct xfrm_dst *)dst; if (xdst->u.rt6.rt6i_idev->dev == dev) { struct inet6_dev *loopback_idev = in6_dev_get(dev_net(dev)->loopback_dev); do { in6_dev_put(xdst->u.rt6.rt6i_idev); xdst->u.rt6.rt6i_idev = loopback_idev; in6_dev_hold(loopback_idev); xdst = (struct xfrm_dst *)xfrm_dst_child(&xdst->u.dst); } while (xdst->u.dst.xfrm); __in6_dev_put(loopback_idev); } xfrm_dst_ifdown(dst, dev); } static struct dst_ops xfrm6_dst_ops_template = { .family = AF_INET6, .update_pmtu = xfrm6_update_pmtu, .redirect = xfrm6_redirect, .cow_metrics = dst_cow_metrics_generic, .destroy = xfrm6_dst_destroy, .ifdown = xfrm6_dst_ifdown, .local_out = __ip6_local_out, .gc_thresh = 32768, }; static const struct xfrm_policy_afinfo xfrm6_policy_afinfo = { .dst_ops = &xfrm6_dst_ops_template, .dst_lookup = xfrm6_dst_lookup, .get_saddr = xfrm6_get_saddr, .fill_dst = xfrm6_fill_dst, .blackhole_route = ip6_blackhole_route, }; static int __init xfrm6_policy_init(void) { return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo, AF_INET6); } static void xfrm6_policy_fini(void) { xfrm_policy_unregister_afinfo(&xfrm6_policy_afinfo); } #ifdef CONFIG_SYSCTL static struct ctl_table xfrm6_policy_table[] = { { .procname = "xfrm6_gc_thresh", .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, }; static int __net_init xfrm6_net_sysctl_init(struct net *net) { struct ctl_table *table; struct ctl_table_header *hdr; table = xfrm6_policy_table; if (!net_eq(net, &init_net)) { table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL); if (!table) goto err_alloc; table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh; } hdr = register_net_sysctl_sz(net, "net/ipv6", table, ARRAY_SIZE(xfrm6_policy_table)); if (!hdr) goto err_reg; net->ipv6.sysctl.xfrm6_hdr = hdr; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: return -ENOMEM; } static void __net_exit xfrm6_net_sysctl_exit(struct net *net) { const struct ctl_table *table; if (!net->ipv6.sysctl.xfrm6_hdr) return; table = net->ipv6.sysctl.xfrm6_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv6.sysctl.xfrm6_hdr); if (!net_eq(net, &init_net)) kfree(table); } #else /* CONFIG_SYSCTL */ static inline int xfrm6_net_sysctl_init(struct net *net) { return 0; } static inline void xfrm6_net_sysctl_exit(struct net *net) { } 
#endif static int __net_init xfrm6_net_init(struct net *net) { int ret; memcpy(&net->xfrm.xfrm6_dst_ops, &xfrm6_dst_ops_template, sizeof(xfrm6_dst_ops_template)); ret = dst_entries_init(&net->xfrm.xfrm6_dst_ops); if (ret) return ret; ret = xfrm6_net_sysctl_init(net); if (ret) dst_entries_destroy(&net->xfrm.xfrm6_dst_ops); return ret; } static void __net_exit xfrm6_net_exit(struct net *net) { xfrm6_net_sysctl_exit(net); dst_entries_destroy(&net->xfrm.xfrm6_dst_ops); } static struct pernet_operations xfrm6_net_ops = { .init = xfrm6_net_init, .exit = xfrm6_net_exit, }; int __init xfrm6_init(void) { int ret; ret = xfrm6_policy_init(); if (ret) goto out; ret = xfrm6_state_init(); if (ret) goto out_policy; ret = xfrm6_protocol_init(); if (ret) goto out_state; ret = register_pernet_subsys(&xfrm6_net_ops); if (ret) goto out_protocol; ret = xfrm_nat_keepalive_init(AF_INET6); if (ret) goto out_nat_keepalive; out: return ret; out_nat_keepalive: unregister_pernet_subsys(&xfrm6_net_ops); out_protocol: xfrm6_protocol_fini(); out_state: xfrm6_state_fini(); out_policy: xfrm6_policy_fini(); goto out; } void xfrm6_fini(void) { xfrm_nat_keepalive_fini(AF_INET6); unregister_pernet_subsys(&xfrm6_net_ops); xfrm6_protocol_fini(); xfrm6_policy_fini(); xfrm6_state_fini(); }
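xfrm6_init() registers several subsystems in turn and, on failure, unwinds only the ones already set up by jumping to goto labels ordered newest-first. The sketch below shows the same shape with stub init/fini functions; all names are illustrative and the third step is made to fail so the unwind path runs.

/* Sketch of the reverse-order goto unwinding used by xfrm6_init():
 * each successfully initialised subsystem gets a matching label, so a
 * later failure tears down exactly what was set up, newest first.
 * step_a/b/c and their _fini counterparts are illustrative stubs.
 */
#include <stdio.h>

static int step_a_init(void) { puts("a init"); return 0; }
static void step_a_fini(void) { puts("a fini"); }
static int step_b_init(void) { puts("b init"); return 0; }
static void step_b_fini(void) { puts("b fini"); }
static int step_c_init(void) { puts("c init failed"); return -1; }

static int demo_init(void)
{
	int ret;

	ret = step_a_init();
	if (ret)
		goto out;
	ret = step_b_init();
	if (ret)
		goto out_a;
	ret = step_c_init();
	if (ret)
		goto out_b;
out:
	return ret;

out_b:
	step_b_fini();
out_a:
	step_a_fini();
	goto out;
}

int main(void)
{
	printf("demo_init() = %d\n", demo_init());
	return 0;
}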
// SPDX-License-Identifier: GPL-2.0 /* Multipath TCP * * Copyright (c) 2020, Red Hat, Inc.
*/ #define pr_fmt(fmt) "MPTCP: " fmt #include "protocol.h" #include "mptcp_pm_gen.h" #define MPTCP_PM_CMD_GRP_OFFSET 0 #define MPTCP_PM_EV_GRP_OFFSET 1 static const struct genl_multicast_group mptcp_pm_mcgrps[] = { [MPTCP_PM_CMD_GRP_OFFSET] = { .name = MPTCP_PM_CMD_GRP_NAME, }, [MPTCP_PM_EV_GRP_OFFSET] = { .name = MPTCP_PM_EV_GRP_NAME, .flags = GENL_MCAST_CAP_NET_ADMIN, }, }; static int mptcp_pm_family_to_addr(int family) { #if IS_ENABLED(CONFIG_MPTCP_IPV6) if (family == AF_INET6) return MPTCP_PM_ADDR_ATTR_ADDR6; #endif return MPTCP_PM_ADDR_ATTR_ADDR4; } static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[], const struct nlattr *attr, struct genl_info *info, struct mptcp_addr_info *addr, bool require_family) { int err, addr_addr; if (!attr) { GENL_SET_ERR_MSG(info, "missing address info"); return -EINVAL; } /* no validation needed - was already done via nested policy */ err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, mptcp_pm_address_nl_policy, info->extack); if (err) return err; if (tb[MPTCP_PM_ADDR_ATTR_ID]) addr->id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]); if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) { if (!require_family) return 0; NL_SET_ERR_MSG_ATTR(info->extack, attr, "missing family"); return -EINVAL; } addr->family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]); if (addr->family != AF_INET #if IS_ENABLED(CONFIG_MPTCP_IPV6) && addr->family != AF_INET6 #endif ) { NL_SET_ERR_MSG_ATTR(info->extack, attr, "unknown address family"); return -EINVAL; } addr_addr = mptcp_pm_family_to_addr(addr->family); if (!tb[addr_addr]) { NL_SET_ERR_MSG_ATTR(info->extack, attr, "missing address data"); return -EINVAL; } #if IS_ENABLED(CONFIG_MPTCP_IPV6) if (addr->family == AF_INET6) addr->addr6 = nla_get_in6_addr(tb[addr_addr]); else #endif addr->addr.s_addr = nla_get_in_addr(tb[addr_addr]); if (tb[MPTCP_PM_ADDR_ATTR_PORT]) addr->port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT])); return 0; } int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info, struct mptcp_addr_info *addr) { struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; memset(addr, 0, sizeof(*addr)); return mptcp_pm_parse_pm_addr_attr(tb, attr, info, addr, true); } int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, bool require_family, struct mptcp_pm_addr_entry *entry) { struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; int err; memset(entry, 0, sizeof(*entry)); err = mptcp_pm_parse_pm_addr_attr(tb, attr, info, &entry->addr, require_family); if (err) return err; if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) { s32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]); entry->ifindex = val; } if (tb[MPTCP_PM_ADDR_ATTR_FLAGS]) entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]) & MPTCP_PM_ADDR_FLAGS_MASK; if (tb[MPTCP_PM_ADDR_ATTR_PORT]) entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT])); return 0; } static int mptcp_nl_fill_addr(struct sk_buff *skb, struct mptcp_pm_addr_entry *entry) { struct mptcp_addr_info *addr = &entry->addr; struct nlattr *attr; attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR); if (!attr) return -EMSGSIZE; if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family)) goto nla_put_failure; if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_PORT, ntohs(addr->port))) goto nla_put_failure; if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id)) goto nla_put_failure; if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags)) goto nla_put_failure; if (entry->ifindex && nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex)) goto nla_put_failure; if (addr->family == AF_INET && 
nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4, addr->addr.s_addr)) goto nla_put_failure; #if IS_ENABLED(CONFIG_MPTCP_IPV6) else if (addr->family == AF_INET6 && nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6)) goto nla_put_failure; #endif nla_nest_end(skb, attr); return 0; nla_put_failure: nla_nest_cancel(skb, attr); return -EMSGSIZE; } static int mptcp_pm_get_addr(u8 id, struct mptcp_pm_addr_entry *addr, struct genl_info *info) { if (info->attrs[MPTCP_PM_ATTR_TOKEN]) return mptcp_userspace_pm_get_addr(id, addr, info); return mptcp_pm_nl_get_addr(id, addr, info); } int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info) { struct mptcp_pm_addr_entry addr; struct nlattr *attr; struct sk_buff *msg; void *reply; int ret; if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ENDPOINT_ADDR)) return -EINVAL; attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; ret = mptcp_pm_parse_entry(attr, info, false, &addr); if (ret < 0) return ret; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, info->genlhdr->cmd); if (!reply) { GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); ret = -EMSGSIZE; goto fail; } ret = mptcp_pm_get_addr(addr.addr.id, &addr, info); if (ret) { NL_SET_ERR_MSG_ATTR(info->extack, attr, "address not found"); goto fail; } ret = mptcp_nl_fill_addr(msg, &addr); if (ret) goto fail; genlmsg_end(msg, reply); ret = genlmsg_reply(msg, info); return ret; fail: nlmsg_free(msg); return ret; } int mptcp_pm_genl_fill_addr(struct sk_buff *msg, struct netlink_callback *cb, struct mptcp_pm_addr_entry *entry) { void *hdr; hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &mptcp_genl_family, NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR); if (!hdr) return -EINVAL; if (mptcp_nl_fill_addr(msg, entry) < 0) { genlmsg_cancel(msg, hdr); return -EINVAL; } genlmsg_end(msg, hdr); return 0; } static int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb) { const struct genl_info *info = genl_info_dump(cb); if (info->attrs[MPTCP_PM_ATTR_TOKEN]) return mptcp_userspace_pm_dump_addr(msg, cb); return mptcp_pm_nl_dump_addr(msg, cb); } int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { return mptcp_pm_dump_addr(msg, cb); } static int mptcp_pm_set_flags(struct genl_info *info) { struct mptcp_pm_addr_entry loc = { .addr = { .family = AF_UNSPEC }, }; struct nlattr *attr_loc; int ret = -EINVAL; if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_ADDR)) return ret; attr_loc = info->attrs[MPTCP_PM_ATTR_ADDR]; ret = mptcp_pm_parse_entry(attr_loc, info, false, &loc); if (ret < 0) return ret; if (info->attrs[MPTCP_PM_ATTR_TOKEN]) return mptcp_userspace_pm_set_flags(&loc, info); return mptcp_pm_nl_set_flags(&loc, info); } int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info) { return mptcp_pm_set_flags(info); } static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp) { genlmsg_multicast_netns(&mptcp_genl_family, net, nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp); } bool mptcp_userspace_pm_active(const struct mptcp_sock *msk) { return genl_has_listeners(&mptcp_genl_family, sock_net((const struct sock *)msk), MPTCP_PM_EV_GRP_OFFSET); } static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk) { const struct inet_sock *issk = inet_sk(ssk); const struct mptcp_subflow_context *sf; if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family)) return -EMSGSIZE; switch (ssk->sk_family) { case AF_INET: if 
(nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr)) return -EMSGSIZE; if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, issk->inet_daddr)) return -EMSGSIZE; break; #if IS_ENABLED(CONFIG_MPTCP_IPV6) case AF_INET6: { if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &issk->pinet6->saddr)) return -EMSGSIZE; if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr)) return -EMSGSIZE; break; } #endif default: WARN_ON_ONCE(1); return -EMSGSIZE; } if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport)) return -EMSGSIZE; if (nla_put_be16(skb, MPTCP_ATTR_DPORT, issk->inet_dport)) return -EMSGSIZE; sf = mptcp_subflow_ctx(ssk); if (WARN_ON_ONCE(!sf)) return -EINVAL; if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf))) return -EMSGSIZE; if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id)) return -EMSGSIZE; return 0; } static int mptcp_event_put_token_and_ssk(struct sk_buff *skb, const struct mptcp_sock *msk, const struct sock *ssk) { const struct sock *sk = (const struct sock *)msk; const struct mptcp_subflow_context *sf; u8 sk_err; if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token))) return -EMSGSIZE; if (mptcp_event_add_subflow(skb, ssk)) return -EMSGSIZE; sf = mptcp_subflow_ctx(ssk); if (WARN_ON_ONCE(!sf)) return -EINVAL; if (nla_put_u8(skb, MPTCP_ATTR_BACKUP, sf->backup)) return -EMSGSIZE; if (ssk->sk_bound_dev_if && nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if)) return -EMSGSIZE; sk_err = READ_ONCE(ssk->sk_err); if (sk_err && sk->sk_state == TCP_ESTABLISHED && nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err)) return -EMSGSIZE; return 0; } static int mptcp_event_sub_established(struct sk_buff *skb, const struct mptcp_sock *msk, const struct sock *ssk) { return mptcp_event_put_token_and_ssk(skb, msk, ssk); } static int mptcp_event_sub_closed(struct sk_buff *skb, const struct mptcp_sock *msk, const struct sock *ssk) { const struct mptcp_subflow_context *sf; if (mptcp_event_put_token_and_ssk(skb, msk, ssk)) return -EMSGSIZE; sf = mptcp_subflow_ctx(ssk); if (!sf->reset_seen) return 0; if (nla_put_u32(skb, MPTCP_ATTR_RESET_REASON, sf->reset_reason)) return -EMSGSIZE; if (nla_put_u32(skb, MPTCP_ATTR_RESET_FLAGS, sf->reset_transient)) return -EMSGSIZE; return 0; } static int mptcp_event_created(struct sk_buff *skb, const struct mptcp_sock *msk, const struct sock *ssk) { int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)); u16 flags = 0; if (err) return err; if (READ_ONCE(msk->pm.server_side)) { flags |= MPTCP_PM_EV_FLAG_SERVER_SIDE; /* Deprecated, and only set when it is the server side */ if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, 1)) return -EMSGSIZE; } if (READ_ONCE(msk->pm.remote_deny_join_id0)) flags |= MPTCP_PM_EV_FLAG_DENY_JOIN_ID0; if (flags && nla_put_u16(skb, MPTCP_ATTR_FLAGS, flags)) return -EMSGSIZE; return mptcp_event_add_subflow(skb, ssk); } void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id) { struct net *net = sock_net((const struct sock *)msk); struct nlmsghdr *nlh; struct sk_buff *skb; if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) return; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED); if (!nlh) goto nla_put_failure; if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token))) goto nla_put_failure; if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id)) goto nla_put_failure; genlmsg_end(skb, nlh); mptcp_nl_mcast_send(net, skb, GFP_ATOMIC); return; nla_put_failure: nlmsg_free(skb); } void 
mptcp_event_addr_announced(const struct sock *ssk, const struct mptcp_addr_info *info) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct mptcp_sock *msk = mptcp_sk(subflow->conn); struct net *net = sock_net(ssk); struct nlmsghdr *nlh; struct sk_buff *skb; if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) return; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_ANNOUNCED); if (!nlh) goto nla_put_failure; if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token))) goto nla_put_failure; if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id)) goto nla_put_failure; if (nla_put_be16(skb, MPTCP_ATTR_DPORT, info->port == 0 ? inet_sk(ssk)->inet_dport : info->port)) goto nla_put_failure; switch (info->family) { case AF_INET: if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, info->addr.s_addr)) goto nla_put_failure; break; #if IS_ENABLED(CONFIG_MPTCP_IPV6) case AF_INET6: if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &info->addr6)) goto nla_put_failure; break; #endif default: WARN_ON_ONCE(1); goto nla_put_failure; } genlmsg_end(skb, nlh); mptcp_nl_mcast_send(net, skb, GFP_ATOMIC); return; nla_put_failure: nlmsg_free(skb); } void mptcp_event_pm_listener(const struct sock *ssk, enum mptcp_event_type event) { const struct inet_sock *issk = inet_sk(ssk); struct net *net = sock_net(ssk); struct nlmsghdr *nlh; struct sk_buff *skb; if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) return; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return; nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, event); if (!nlh) goto nla_put_failure; if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family)) goto nla_put_failure; if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport)) goto nla_put_failure; switch (ssk->sk_family) { case AF_INET: if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr)) goto nla_put_failure; break; #if IS_ENABLED(CONFIG_MPTCP_IPV6) case AF_INET6: { if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &issk->pinet6->saddr)) goto nla_put_failure; break; } #endif default: WARN_ON_ONCE(1); goto nla_put_failure; } genlmsg_end(skb, nlh); mptcp_nl_mcast_send(net, skb, GFP_KERNEL); return; nla_put_failure: nlmsg_free(skb); } void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp) { struct net *net = sock_net((const struct sock *)msk); struct nlmsghdr *nlh; struct sk_buff *skb; if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) return; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!skb) return; nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type); if (!nlh) goto nla_put_failure; switch (type) { case MPTCP_EVENT_UNSPEC: WARN_ON_ONCE(1); break; case MPTCP_EVENT_CREATED: case MPTCP_EVENT_ESTABLISHED: if (mptcp_event_created(skb, msk, ssk) < 0) goto nla_put_failure; break; case MPTCP_EVENT_CLOSED: if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0) goto nla_put_failure; break; case MPTCP_EVENT_ANNOUNCED: case MPTCP_EVENT_REMOVED: /* call mptcp_event_addr_announced()/removed instead */ WARN_ON_ONCE(1); break; case MPTCP_EVENT_SUB_ESTABLISHED: case MPTCP_EVENT_SUB_PRIORITY: if (mptcp_event_sub_established(skb, msk, ssk) < 0) goto nla_put_failure; break; case MPTCP_EVENT_SUB_CLOSED: if (mptcp_event_sub_closed(skb, msk, ssk) < 0) goto nla_put_failure; break; case MPTCP_EVENT_LISTENER_CREATED: case MPTCP_EVENT_LISTENER_CLOSED: break; } genlmsg_end(skb, nlh); 
mptcp_nl_mcast_send(net, skb, gfp); return; nla_put_failure: nlmsg_free(skb); } struct genl_family mptcp_genl_family __ro_after_init = { .name = MPTCP_PM_NAME, .version = MPTCP_PM_VER, .netnsok = true, .module = THIS_MODULE, .ops = mptcp_pm_nl_ops, .n_ops = ARRAY_SIZE(mptcp_pm_nl_ops), .resv_start_op = MPTCP_PM_CMD_SUBFLOW_DESTROY + 1, .mcgrps = mptcp_pm_mcgrps, .n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps), }; void __init mptcp_pm_nl_init(void) { if (genl_register_family(&mptcp_genl_family)) panic("Failed to register MPTCP PM netlink family\n"); }
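Every emitter above follows the same generic netlink shape: bail out early when nobody listens, allocate an skb, open the message with genlmsg_put(), add attributes with the nla_put_*() helpers (freeing the skb on any failure), then genlmsg_end() and multicast. The sketch below condenses that pattern; it is illustrative only — it puts a single token attribute and calls genlmsg_multicast_netns() directly, where the code above goes through its mptcp_nl_mcast_send() wrapper — and is written as if it lived in this same file, so the existing includes apply.

/* Condensed notification pattern shared by the mptcp_event_*() helpers above. */
static void emit_token_event(const struct genl_family *family, struct net *net,
			     unsigned int group, u8 cmd, u32 token, gfp_t gfp)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	if (!genl_has_listeners(family, net, group))
		return;				/* no subscribers: skip the allocation */

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, family, 0, cmd);
	if (!nlh)
		goto fail;

	if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, token))
		goto fail;			/* attribute did not fit: drop the event */

	genlmsg_end(skb, nlh);
	genlmsg_multicast_netns(family, net, skb, 0, group, gfp);
	return;

fail:
	nlmsg_free(skb);
}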
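On the receiving side, a monitoring process subscribes to the multicast group these helpers publish to. Below is a minimal, hypothetical libnl-3 listener, assuming the uapi names MPTCP_PM_NAME ("mptcp_pm") and MPTCP_PM_EV_GRP_NAME ("mptcp_pm_events") from <linux/mptcp.h>; error handling is mostly omitted and the callback only prints the token, so treat it as a sketch rather than a reference client.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/mptcp.h>
#include <stdio.h>

static int on_event(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *ghdr = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *tb[MPTCP_ATTR_MAX + 1];

	/* ghdr->cmd is the MPTCP_EVENT_* value passed to genlmsg_put() above */
	if (genlmsg_parse(nlmsg_hdr(msg), 0, tb, MPTCP_ATTR_MAX, NULL) < 0)
		return NL_SKIP;
	if (tb[MPTCP_ATTR_TOKEN])
		printf("event %u token %u\n", ghdr->cmd,
		       nla_get_u32(tb[MPTCP_ATTR_TOKEN]));
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	genl_connect(sk);
	grp = genl_ctrl_resolve_grp(sk, MPTCP_PM_NAME, MPTCP_PM_EV_GRP_NAME);
	nl_socket_add_membership(sk, grp);
	nl_socket_disable_seq_check(sk);	/* events are unsolicited notifications */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_event, NULL);
	for (;;)
		nl_recvmsgs_default(sk);
}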
872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 // SPDX-License-Identifier: GPL-2.0-or-later /* * ip6_flowlabel.c IPv6 flowlabel manager. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> */ #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/pid_namespace.h> #include <linux/jump_label_ratelimit.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/ipv6.h> #include <net/rawv6.h> #include <net/transp_v6.h> #include <linux/uaccess.h> #define FL_MIN_LINGER 6 /* Minimal linger. It is set to 6sec specified in old IPv6 RFC. Well, it was reasonable value. */ #define FL_MAX_LINGER 150 /* Maximal linger timeout */ /* FL hash table */ #define FL_MAX_PER_SOCK 32 #define FL_MAX_SIZE 4096 #define FL_HASH_MASK 255 #define FL_HASH(l) (ntohl(l)&FL_HASH_MASK) static atomic_t fl_size = ATOMIC_INIT(0); static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1]; static void ip6_fl_gc(struct timer_list *unused); static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc); /* FL hash table lock: it protects only of GC */ static DEFINE_SPINLOCK(ip6_fl_lock); /* Big socket sock */ static DEFINE_SPINLOCK(ip6_sk_fl_lock); DEFINE_STATIC_KEY_DEFERRED_FALSE(ipv6_flowlabel_exclusive, HZ); EXPORT_SYMBOL(ipv6_flowlabel_exclusive); #define for_each_fl_rcu(hash, fl) \ for (fl = rcu_dereference(fl_ht[(hash)]); \ fl != NULL; \ fl = rcu_dereference(fl->next)) #define for_each_fl_continue_rcu(fl) \ for (fl = rcu_dereference(fl->next); \ fl != NULL; \ fl = rcu_dereference(fl->next)) #define for_each_sk_fl_rcu(sk, sfl) \ for (sfl = rcu_dereference(inet_sk(sk)->ipv6_fl_list); \ sfl != NULL; \ sfl = rcu_dereference(sfl->next)) static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label) { struct ip6_flowlabel *fl; for_each_fl_rcu(FL_HASH(label), fl) { if (fl->label == label && net_eq(fl->fl_net, net)) return fl; } return NULL; } static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label) { struct ip6_flowlabel *fl; rcu_read_lock(); fl = __fl_lookup(net, label); if (fl && !atomic_inc_not_zero(&fl->users)) fl = NULL; rcu_read_unlock(); return fl; } static bool fl_shared_exclusive(struct ip6_flowlabel *fl) { return fl->share == IPV6_FL_S_EXCL || fl->share == IPV6_FL_S_PROCESS || fl->share == IPV6_FL_S_USER; } static void fl_free_rcu(struct rcu_head *head) { struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu); if (fl->share == IPV6_FL_S_PROCESS) put_pid(fl->owner.pid); kfree(fl->opt); kfree(fl); } static void fl_free(struct ip6_flowlabel *fl) { if (!fl) return; if (fl_shared_exclusive(fl) || fl->opt) static_branch_slow_dec_deferred(&ipv6_flowlabel_exclusive); call_rcu(&fl->rcu, fl_free_rcu); } static void fl_release(struct ip6_flowlabel *fl) { spin_lock_bh(&ip6_fl_lock); fl->lastuse = jiffies; if (atomic_dec_and_test(&fl->users)) { unsigned long ttd = fl->lastuse + fl->linger; if (time_after(ttd, fl->expires)) fl->expires = ttd; ttd = fl->expires; if (fl->opt && fl->share == IPV6_FL_S_EXCL) { struct ipv6_txoptions *opt = fl->opt; fl->opt = NULL; kfree(opt); } if (!timer_pending(&ip6_fl_gc_timer) || time_after(ip6_fl_gc_timer.expires, ttd)) mod_timer(&ip6_fl_gc_timer, ttd); } spin_unlock_bh(&ip6_fl_lock); } static void 
ip6_fl_gc(struct timer_list *unused) { int i; unsigned long now = jiffies; unsigned long sched = 0; spin_lock(&ip6_fl_lock); for (i = 0; i <= FL_HASH_MASK; i++) { struct ip6_flowlabel *fl; struct ip6_flowlabel __rcu **flp; flp = &fl_ht[i]; while ((fl = rcu_dereference_protected(*flp, lockdep_is_held(&ip6_fl_lock))) != NULL) { if (atomic_read(&fl->users) == 0) { unsigned long ttd = fl->lastuse + fl->linger; if (time_after(ttd, fl->expires)) fl->expires = ttd; ttd = fl->expires; if (time_after_eq(now, ttd)) { *flp = fl->next; fl_free(fl); atomic_dec(&fl_size); continue; } if (!sched || time_before(ttd, sched)) sched = ttd; } flp = &fl->next; } } if (!sched && atomic_read(&fl_size)) sched = now + FL_MAX_LINGER; if (sched) { mod_timer(&ip6_fl_gc_timer, sched); } spin_unlock(&ip6_fl_lock); } static void __net_exit ip6_fl_purge(struct net *net) { int i; spin_lock_bh(&ip6_fl_lock); for (i = 0; i <= FL_HASH_MASK; i++) { struct ip6_flowlabel *fl; struct ip6_flowlabel __rcu **flp; flp = &fl_ht[i]; while ((fl = rcu_dereference_protected(*flp, lockdep_is_held(&ip6_fl_lock))) != NULL) { if (net_eq(fl->fl_net, net) && atomic_read(&fl->users) == 0) { *flp = fl->next; fl_free(fl); atomic_dec(&fl_size); continue; } flp = &fl->next; } } spin_unlock_bh(&ip6_fl_lock); } static struct ip6_flowlabel *fl_intern(struct net *net, struct ip6_flowlabel *fl, __be32 label) { struct ip6_flowlabel *lfl; fl->label = label & IPV6_FLOWLABEL_MASK; rcu_read_lock(); spin_lock_bh(&ip6_fl_lock); if (label == 0) { for (;;) { fl->label = htonl(get_random_u32())&IPV6_FLOWLABEL_MASK; if (fl->label) { lfl = __fl_lookup(net, fl->label); if (!lfl) break; } } } else { /* * we dropper the ip6_fl_lock, so this entry could reappear * and we need to recheck with it. * * OTOH no need to search the active socket first, like it is * done in ipv6_flowlabel_opt - sock is locked, so new entry * with the same label can only appear on another sock */ lfl = __fl_lookup(net, fl->label); if (lfl) { atomic_inc(&lfl->users); spin_unlock_bh(&ip6_fl_lock); rcu_read_unlock(); return lfl; } } fl->lastuse = jiffies; fl->next = fl_ht[FL_HASH(fl->label)]; rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl); atomic_inc(&fl_size); spin_unlock_bh(&ip6_fl_lock); rcu_read_unlock(); return NULL; } /* Socket flowlabel lists */ struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label) { struct ipv6_fl_socklist *sfl; label &= IPV6_FLOWLABEL_MASK; rcu_read_lock(); for_each_sk_fl_rcu(sk, sfl) { struct ip6_flowlabel *fl = sfl->fl; if (fl->label == label && atomic_inc_not_zero(&fl->users)) { fl->lastuse = jiffies; rcu_read_unlock(); return fl; } } rcu_read_unlock(); return NULL; } EXPORT_SYMBOL_GPL(__fl6_sock_lookup); void fl6_free_socklist(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct ipv6_fl_socklist *sfl; if (!rcu_access_pointer(inet->ipv6_fl_list)) return; spin_lock_bh(&ip6_sk_fl_lock); while ((sfl = rcu_dereference_protected(inet->ipv6_fl_list, lockdep_is_held(&ip6_sk_fl_lock))) != NULL) { inet->ipv6_fl_list = sfl->next; spin_unlock_bh(&ip6_sk_fl_lock); fl_release(sfl->fl); kfree_rcu(sfl, rcu); spin_lock_bh(&ip6_sk_fl_lock); } spin_unlock_bh(&ip6_sk_fl_lock); } /* Service routines */ /* It is the only difficult place. flowlabel enforces equal headers before and including routing header, however user may supply options following rthdr. 
*/ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, struct ipv6_txoptions *fopt) { struct ipv6_txoptions *fl_opt = fl->opt; if (!fopt || fopt->opt_flen == 0) return fl_opt; if (fl_opt) { opt_space->hopopt = fl_opt->hopopt; opt_space->dst0opt = fl_opt->dst0opt; opt_space->srcrt = fl_opt->srcrt; opt_space->opt_nflen = fl_opt->opt_nflen; } else { if (fopt->opt_nflen == 0) return fopt; opt_space->hopopt = NULL; opt_space->dst0opt = NULL; opt_space->srcrt = NULL; opt_space->opt_nflen = 0; } opt_space->dst1opt = fopt->dst1opt; opt_space->opt_flen = fopt->opt_flen; opt_space->tot_len = fopt->tot_len; return opt_space; } EXPORT_SYMBOL_GPL(fl6_merge_options); static unsigned long check_linger(unsigned long ttl) { if (ttl < FL_MIN_LINGER) return FL_MIN_LINGER*HZ; if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN)) return 0; return ttl*HZ; } static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires) { linger = check_linger(linger); if (!linger) return -EPERM; expires = check_linger(expires); if (!expires) return -EPERM; spin_lock_bh(&ip6_fl_lock); fl->lastuse = jiffies; if (time_before(fl->linger, linger)) fl->linger = linger; if (time_before(expires, fl->linger)) expires = fl->linger; if (time_before(fl->expires, fl->lastuse + expires)) fl->expires = fl->lastuse + expires; spin_unlock_bh(&ip6_fl_lock); return 0; } static struct ip6_flowlabel * fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, sockptr_t optval, int optlen, int *err_p) { struct ip6_flowlabel *fl = NULL; int olen; int addr_type; int err; olen = optlen - CMSG_ALIGN(sizeof(*freq)); err = -EINVAL; if (olen > 64 * 1024) goto done; err = -ENOMEM; fl = kzalloc(sizeof(*fl), GFP_KERNEL); if (!fl) goto done; if (olen > 0) { struct msghdr msg; struct flowi6 flowi6; struct ipcm6_cookie ipc6; err = -ENOMEM; fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL); if (!fl->opt) goto done; memset(fl->opt, 0, sizeof(*fl->opt)); fl->opt->tot_len = sizeof(*fl->opt) + olen; err = -EFAULT; if (copy_from_sockptr_offset(fl->opt + 1, optval, CMSG_ALIGN(sizeof(*freq)), olen)) goto done; msg.msg_controllen = olen; msg.msg_control = (void *)(fl->opt+1); memset(&flowi6, 0, sizeof(flowi6)); ipc6.opt = fl->opt; err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6); if (err) goto done; err = -EINVAL; if (fl->opt->opt_flen) goto done; if (fl->opt->opt_nflen == 0) { kfree(fl->opt); fl->opt = NULL; } } fl->fl_net = net; fl->expires = jiffies; err = fl6_renew(fl, freq->flr_linger, freq->flr_expires); if (err) goto done; fl->share = freq->flr_share; addr_type = ipv6_addr_type(&freq->flr_dst); if ((addr_type & IPV6_ADDR_MAPPED) || addr_type == IPV6_ADDR_ANY) { err = -EINVAL; goto done; } fl->dst = freq->flr_dst; atomic_set(&fl->users, 1); switch (fl->share) { case IPV6_FL_S_EXCL: case IPV6_FL_S_ANY: break; case IPV6_FL_S_PROCESS: fl->owner.pid = get_task_pid(current, PIDTYPE_PID); break; case IPV6_FL_S_USER: fl->owner.uid = current_euid(); break; default: err = -EINVAL; goto done; } if (fl_shared_exclusive(fl) || fl->opt) { WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1); static_branch_deferred_inc(&ipv6_flowlabel_exclusive); } return fl; done: if (fl) { kfree(fl->opt); kfree(fl); } *err_p = err; return NULL; } static int mem_check(struct sock *sk) { int room = FL_MAX_SIZE - atomic_read(&fl_size); struct ipv6_fl_socklist *sfl; int count = 0; if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK) return 0; rcu_read_lock(); for_each_sk_fl_rcu(sk, sfl) count++; 
rcu_read_unlock(); if (room <= 0 || ((count >= FL_MAX_PER_SOCK || (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) && !capable(CAP_NET_ADMIN))) return -ENOBUFS; return 0; } static inline void fl_link(struct sock *sk, struct ipv6_fl_socklist *sfl, struct ip6_flowlabel *fl) { struct inet_sock *inet = inet_sk(sk); spin_lock_bh(&ip6_sk_fl_lock); sfl->fl = fl; sfl->next = inet->ipv6_fl_list; rcu_assign_pointer(inet->ipv6_fl_list, sfl); spin_unlock_bh(&ip6_sk_fl_lock); } int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int flags) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_fl_socklist *sfl; if (flags & IPV6_FL_F_REMOTE) { freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK; return 0; } if (inet6_test_bit(REPFLOW, sk)) { freq->flr_label = np->flow_label; return 0; } rcu_read_lock(); for_each_sk_fl_rcu(sk, sfl) { if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) { spin_lock_bh(&ip6_fl_lock); freq->flr_label = sfl->fl->label; freq->flr_dst = sfl->fl->dst; freq->flr_share = sfl->fl->share; freq->flr_expires = (sfl->fl->expires - jiffies) / HZ; freq->flr_linger = sfl->fl->linger / HZ; spin_unlock_bh(&ip6_fl_lock); rcu_read_unlock(); return 0; } } rcu_read_unlock(); return -ENOENT; } #define socklist_dereference(__sflp) \ rcu_dereference_protected(__sflp, lockdep_is_held(&ip6_sk_fl_lock)) static int ipv6_flowlabel_put(struct sock *sk, struct in6_flowlabel_req *freq) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_fl_socklist __rcu **sflp; struct ipv6_fl_socklist *sfl; if (freq->flr_flags & IPV6_FL_F_REFLECT) { if (sk->sk_protocol != IPPROTO_TCP) return -ENOPROTOOPT; if (!inet6_test_bit(REPFLOW, sk)) return -ESRCH; np->flow_label = 0; inet6_clear_bit(REPFLOW, sk); return 0; } spin_lock_bh(&ip6_sk_fl_lock); for (sflp = &inet_sk(sk)->ipv6_fl_list; (sfl = socklist_dereference(*sflp)) != NULL; sflp = &sfl->next) { if (sfl->fl->label == freq->flr_label) goto found; } spin_unlock_bh(&ip6_sk_fl_lock); return -ESRCH; found: if (freq->flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK)) np->flow_label &= ~IPV6_FLOWLABEL_MASK; *sflp = sfl->next; spin_unlock_bh(&ip6_sk_fl_lock); fl_release(sfl->fl); kfree_rcu(sfl, rcu); return 0; } static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq) { struct net *net = sock_net(sk); struct ipv6_fl_socklist *sfl; int err; rcu_read_lock(); for_each_sk_fl_rcu(sk, sfl) { if (sfl->fl->label == freq->flr_label) { err = fl6_renew(sfl->fl, freq->flr_linger, freq->flr_expires); rcu_read_unlock(); return err; } } rcu_read_unlock(); if (freq->flr_share == IPV6_FL_S_NONE && ns_capable(net->user_ns, CAP_NET_ADMIN)) { struct ip6_flowlabel *fl = fl_lookup(net, freq->flr_label); if (fl) { err = fl6_renew(fl, freq->flr_linger, freq->flr_expires); fl_release(fl); return err; } } return -ESRCH; } static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, sockptr_t optval, int optlen) { struct ipv6_fl_socklist *sfl, *sfl1 = NULL; struct ip6_flowlabel *fl, *fl1 = NULL; struct net *net = sock_net(sk); int err; if (freq->flr_flags & IPV6_FL_F_REFLECT) { if (net->ipv6.sysctl.flowlabel_consistency) { net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n"); return -EPERM; } if (sk->sk_protocol != IPPROTO_TCP) return -ENOPROTOOPT; inet6_set_bit(REPFLOW, sk); return 0; } if (freq->flr_label & ~IPV6_FLOWLABEL_MASK) return -EINVAL; if (net->ipv6.sysctl.flowlabel_state_ranges && (freq->flr_label & IPV6_FLOWLABEL_STATELESS_FLAG)) return -ERANGE; fl = 
fl_create(net, sk, freq, optval, optlen, &err); if (!fl) return err; sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); if (freq->flr_label) { err = -EEXIST; rcu_read_lock(); for_each_sk_fl_rcu(sk, sfl) { if (sfl->fl->label == freq->flr_label) { if (freq->flr_flags & IPV6_FL_F_EXCL) { rcu_read_unlock(); goto done; } fl1 = sfl->fl; if (!atomic_inc_not_zero(&fl1->users)) fl1 = NULL; break; } } rcu_read_unlock(); if (!fl1) fl1 = fl_lookup(net, freq->flr_label); if (fl1) { recheck: err = -EEXIST; if (freq->flr_flags&IPV6_FL_F_EXCL) goto release; err = -EPERM; if (fl1->share == IPV6_FL_S_EXCL || fl1->share != fl->share || ((fl1->share == IPV6_FL_S_PROCESS) && (fl1->owner.pid != fl->owner.pid)) || ((fl1->share == IPV6_FL_S_USER) && !uid_eq(fl1->owner.uid, fl->owner.uid))) goto release; err = -ENOMEM; if (!sfl1) goto release; if (fl->linger > fl1->linger) fl1->linger = fl->linger; if ((long)(fl->expires - fl1->expires) > 0) fl1->expires = fl->expires; fl_link(sk, sfl1, fl1); fl_free(fl); return 0; release: fl_release(fl1); goto done; } } err = -ENOENT; if (!(freq->flr_flags & IPV6_FL_F_CREATE)) goto done; err = -ENOMEM; if (!sfl1) goto done; err = mem_check(sk); if (err != 0) goto done; fl1 = fl_intern(net, fl, freq->flr_label); if (fl1) goto recheck; if (!freq->flr_label) { size_t offset = offsetof(struct in6_flowlabel_req, flr_label); if (copy_to_sockptr_offset(optval, offset, &fl->label, sizeof(fl->label))) { /* Intentionally ignore fault. */ } } fl_link(sk, sfl1, fl); return 0; done: fl_free(fl); kfree(sfl1); return err; } int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen) { struct in6_flowlabel_req freq; if (optlen < sizeof(freq)) return -EINVAL; if (copy_from_sockptr(&freq, optval, sizeof(freq))) return -EFAULT; switch (freq.flr_action) { case IPV6_FL_A_PUT: return ipv6_flowlabel_put(sk, &freq); case IPV6_FL_A_RENEW: return ipv6_flowlabel_renew(sk, &freq); case IPV6_FL_A_GET: return ipv6_flowlabel_get(sk, &freq, optval, optlen); default: return -EINVAL; } } #ifdef CONFIG_PROC_FS struct ip6fl_iter_state { struct seq_net_private p; struct pid_namespace *pid_ns; int bucket; }; #define ip6fl_seq_private(seq) ((struct ip6fl_iter_state *)(seq)->private) static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq) { struct ip6_flowlabel *fl = NULL; struct ip6fl_iter_state *state = ip6fl_seq_private(seq); struct net *net = seq_file_net(seq); for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) { for_each_fl_rcu(state->bucket, fl) { if (net_eq(fl->fl_net, net)) goto out; } } fl = NULL; out: return fl; } static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl) { struct ip6fl_iter_state *state = ip6fl_seq_private(seq); struct net *net = seq_file_net(seq); for_each_fl_continue_rcu(fl) { if (net_eq(fl->fl_net, net)) goto out; } try_again: if (++state->bucket <= FL_HASH_MASK) { for_each_fl_rcu(state->bucket, fl) { if (net_eq(fl->fl_net, net)) goto out; } goto try_again; } fl = NULL; out: return fl; } static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos) { struct ip6_flowlabel *fl = ip6fl_get_first(seq); if (fl) while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL) --pos; return pos ? NULL : fl; } static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct ip6fl_iter_state *state = ip6fl_seq_private(seq); state->pid_ns = proc_pid_ns(file_inode(seq->file)->i_sb); rcu_read_lock(); return *pos ? 
ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ip6_flowlabel *fl; if (v == SEQ_START_TOKEN) fl = ip6fl_get_first(seq); else fl = ip6fl_get_next(seq, v); ++*pos; return fl; } static void ip6fl_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int ip6fl_seq_show(struct seq_file *seq, void *v) { struct ip6fl_iter_state *state = ip6fl_seq_private(seq); if (v == SEQ_START_TOKEN) { seq_puts(seq, "Label S Owner Users Linger Expires Dst Opt\n"); } else { struct ip6_flowlabel *fl = v; seq_printf(seq, "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", (unsigned int)ntohl(fl->label), fl->share, ((fl->share == IPV6_FL_S_PROCESS) ? pid_nr_ns(fl->owner.pid, state->pid_ns) : ((fl->share == IPV6_FL_S_USER) ? from_kuid_munged(seq_user_ns(seq), fl->owner.uid) : 0)), atomic_read(&fl->users), fl->linger/HZ, (long)(fl->expires - jiffies)/HZ, &fl->dst, fl->opt ? fl->opt->opt_nflen : 0); } return 0; } static const struct seq_operations ip6fl_seq_ops = { .start = ip6fl_seq_start, .next = ip6fl_seq_next, .stop = ip6fl_seq_stop, .show = ip6fl_seq_show, }; static int __net_init ip6_flowlabel_proc_init(struct net *net) { if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net, &ip6fl_seq_ops, sizeof(struct ip6fl_iter_state))) return -ENOMEM; return 0; } static void __net_exit ip6_flowlabel_proc_fini(struct net *net) { remove_proc_entry("ip6_flowlabel", net->proc_net); } #else static inline int ip6_flowlabel_proc_init(struct net *net) { return 0; } static inline void ip6_flowlabel_proc_fini(struct net *net) { } #endif static void __net_exit ip6_flowlabel_net_exit(struct net *net) { ip6_fl_purge(net); ip6_flowlabel_proc_fini(net); } static struct pernet_operations ip6_flowlabel_net_ops = { .init = ip6_flowlabel_proc_init, .exit = ip6_flowlabel_net_exit, }; int ip6_flowlabel_init(void) { return register_pernet_subsys(&ip6_flowlabel_net_ops); } void ip6_flowlabel_cleanup(void) { static_key_deferred_flush(&ipv6_flowlabel_exclusive); timer_delete(&ip6_fl_gc_timer); unregister_pernet_subsys(&ip6_flowlabel_net_ops); }
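Everything in the file above services the IPV6_FLOWLABEL_MGR socket option: ipv6_flowlabel_opt() dispatches on flr_action, fl_create()/fl_intern() build and publish a label, and fl6_merge_options() splices the per-label options into the send path. A minimal, hypothetical userspace sketch of the GET path follows, using the uapi definitions from <linux/in6.h> on an AF_INET6 socket; the values are illustrative and header interplay between glibc and the kernel headers can vary by distribution.

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/in6.h>
#include <string.h>

/* Ask the manager for an exclusive label toward 'dst'. With flr_label == 0
 * the kernel picks a random label in fl_intern() and writes it back into
 * this very structure (see the copy_to_sockptr_offset() call above). */
static int request_flowlabel(int fd, const struct in6_addr *dst, __be32 *label)
{
	struct in6_flowlabel_req freq;

	memset(&freq, 0, sizeof(freq));
	freq.flr_label   = 0;			/* 0: let the kernel choose */
	freq.flr_action  = IPV6_FL_A_GET;
	freq.flr_flags   = IPV6_FL_F_CREATE;	/* create it when not found */
	freq.flr_share   = IPV6_FL_S_EXCL;	/* exclusive to this owner */
	freq.flr_linger  = 60;			/* seconds, clamped by check_linger() */
	freq.flr_expires = 60;
	freq.flr_dst     = *dst;

	if (setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
		       &freq, sizeof(freq)) < 0)
		return -1;

	*label = freq.flr_label;		/* label chosen by the kernel */
	return 0;
}

To actually stamp outgoing packets, the caller would then typically place the returned label in sin6_flowinfo and enable IPV6_FLOWINFO_SEND; that part of the flow is handled outside this file.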
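One detail worth calling out in the code above is the label lifetime protocol: lookups run under rcu_read_lock() and may only take a reference with atomic_inc_not_zero(), so an entry whose 'users' count already reached zero is treated as absent; fl_release() drops the last reference without freeing, and only the GC pass unlinks the entry and hands it to call_rcu(), which means concurrent readers can never touch freed memory. The fragment below is just the reader side of fl_lookup() again, with comments added; it is a paraphrase, not new code.

	rcu_read_lock();
	fl = __fl_lookup(net, label);			/* hash-bucket walk, no reference yet */
	if (fl && !atomic_inc_not_zero(&fl->users))	/* refuse entries already at zero users... */
		fl = NULL;				/* ...they are waiting for GC + RCU free */
	rcu_read_unlock();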
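The garbage collector above is driven by a single global timer armed to the earliest deadline: fl_release() pulls ip6_fl_gc_timer forward when a label becomes reclaimable, and ip6_fl_gc() frees expired zero-user entries and re-arms itself for the earliest survivor. A minimal sketch of that earliest-deadline rearm rule, using the same timer API, is shown below; the gc_scan/gc_schedule names and the table walk are illustrative, not taken from the file, and locking is omitted.

#include <linux/timer.h>
#include <linux/jiffies.h>

static void gc_scan(struct timer_list *unused);
static DEFINE_TIMER(gc_timer, gc_scan);

/* Called when an object may become reclaimable at 'deadline' (in jiffies):
 * only ever pull the timer forward, never push it out. */
static void gc_schedule(unsigned long deadline)
{
	if (!timer_pending(&gc_timer) ||
	    time_after(gc_timer.expires, deadline))
		mod_timer(&gc_timer, deadline);
}

static void gc_scan(struct timer_list *unused)
{
	unsigned long next = 0;

	/* ... walk the table, free entries whose deadline has passed,
	 * remember the earliest remaining deadline in 'next' ... */
	if (next)
		mod_timer(&gc_timer, next);
}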
1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 // SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner * * NOHZ implementation for low and high resolution timers * * Started by: Thomas Gleixner and Ingo Molnar */ #include <linux/compiler.h> #include <linux/cpu.h> #include <linux/err.h> #include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/percpu.h> #include <linux/nmi.h> #include <linux/profile.h> #include <linux/sched/signal.h> #include <linux/sched/clock.h> #include <linux/sched/stat.h> #include <linux/sched/nohz.h> #include <linux/sched/loadavg.h> #include <linux/module.h> #include <linux/irq_work.h> #include <linux/posix-timers.h> #include <linux/context_tracking.h> #include <linux/mm.h> #include <asm/irq_regs.h> #include "tick-internal.h" #include <trace/events/timer.h> /* * Per-CPU nohz control structure */ static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); struct tick_sched *tick_get_tick_sched(int cpu) { return &per_cpu(tick_cpu_sched, cpu); } /* * The time when the last jiffy update happened. Write access must hold * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a * consistent view of jiffies and last_jiffies_update. */ static ktime_t last_jiffies_update; /* * Must be called with interrupts disabled ! */ static void tick_do_update_jiffies64(ktime_t now) { unsigned long ticks = 1; ktime_t delta, nextp; /* * 64-bit can do a quick check without holding the jiffies lock and * without looking at the sequence count. The smp_load_acquire() * pairs with the update done later in this function. * * 32-bit cannot do that because the store of 'tick_next_period' * consists of two 32-bit stores, and the first store could be * moved by the CPU to a random point in the future. */ if (IS_ENABLED(CONFIG_64BIT)) { if (ktime_before(now, smp_load_acquire(&tick_next_period))) return; } else { unsigned int seq; /* * Avoid contention on 'jiffies_lock' and protect the quick * check with the sequence count. */ do { seq = read_seqcount_begin(&jiffies_seq); nextp = tick_next_period; } while (read_seqcount_retry(&jiffies_seq, seq)); if (ktime_before(now, nextp)) return; } /* Quick check failed, i.e. update is required. */ raw_spin_lock(&jiffies_lock); /* * Re-evaluate with the lock held. Another CPU might have done the * update already. */ if (ktime_before(now, tick_next_period)) { raw_spin_unlock(&jiffies_lock); return; } write_seqcount_begin(&jiffies_seq); delta = ktime_sub(now, tick_next_period); if (unlikely(delta >= TICK_NSEC)) { /* Slow path for long idle sleep times */ s64 incr = TICK_NSEC; ticks += ktime_divns(delta, incr); last_jiffies_update = ktime_add_ns(last_jiffies_update, incr * ticks); } else { last_jiffies_update = ktime_add_ns(last_jiffies_update, TICK_NSEC); } /* Advance jiffies to complete the 'jiffies_seq' protected job */ jiffies_64 += ticks; /* Keep the tick_next_period variable up to date */ nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC); if (IS_ENABLED(CONFIG_64BIT)) { /* * Pairs with smp_load_acquire() in the lockless quick * check above, and ensures that the update to 'jiffies_64' is * not reordered vs. 
the store to 'tick_next_period', neither * by the compiler nor by the CPU. */ smp_store_release(&tick_next_period, nextp); } else { /* * A plain store is good enough on 32-bit, as the quick check * above is protected by the sequence count. */ tick_next_period = nextp; } /* * Release the sequence count. calc_global_load() below is not * protected by it, but 'jiffies_lock' needs to be held to prevent * concurrent invocations. */ write_seqcount_end(&jiffies_seq); calc_global_load(); raw_spin_unlock(&jiffies_lock); update_wall_time(); } /* * Initialize and return retrieve the jiffies update. */ static ktime_t tick_init_jiffy_update(void) { ktime_t period; raw_spin_lock(&jiffies_lock); write_seqcount_begin(&jiffies_seq); /* Have we started the jiffies update yet ? */ if (last_jiffies_update == 0) { u32 rem; /* * Ensure that the tick is aligned to a multiple of * TICK_NSEC. */ div_u64_rem(tick_next_period, TICK_NSEC, &rem); if (rem) tick_next_period += TICK_NSEC - rem; last_jiffies_update = tick_next_period; } period = last_jiffies_update; write_seqcount_end(&jiffies_seq); raw_spin_unlock(&jiffies_lock); return period; } static inline int tick_sched_flag_test(struct tick_sched *ts, unsigned long flag) { return !!(ts->flags & flag); } static inline void tick_sched_flag_set(struct tick_sched *ts, unsigned long flag) { lockdep_assert_irqs_disabled(); ts->flags |= flag; } static inline void tick_sched_flag_clear(struct tick_sched *ts, unsigned long flag) { lockdep_assert_irqs_disabled(); ts->flags &= ~flag; } /* * Allow only one non-timekeeper CPU at a time update jiffies from * the timer tick. * * Returns true if update was run. */ static bool tick_limited_update_jiffies64(struct tick_sched *ts, ktime_t now) { static atomic_t in_progress; int inp; inp = atomic_read(&in_progress); if (inp || !atomic_try_cmpxchg(&in_progress, &inp, 1)) return false; if (ts->last_tick_jiffies == jiffies) tick_do_update_jiffies64(now); atomic_set(&in_progress, 0); return true; } #define MAX_STALLED_JIFFIES 5 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) { int tick_cpu, cpu = smp_processor_id(); /* * Check if the do_timer duty was dropped. We don't care about * concurrency: This happens only when the CPU in charge went * into a long sleep. If two CPUs happen to assign themselves to * this duty, then the jiffies update is still serialized by * 'jiffies_lock'. * * If nohz_full is enabled, this should not happen because the * 'tick_do_timer_cpu' CPU never relinquishes. */ tick_cpu = READ_ONCE(tick_do_timer_cpu); if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && unlikely(tick_cpu == TICK_DO_TIMER_NONE)) { #ifdef CONFIG_NO_HZ_FULL WARN_ON_ONCE(tick_nohz_full_running); #endif WRITE_ONCE(tick_do_timer_cpu, cpu); tick_cpu = cpu; } /* Check if jiffies need an update */ if (tick_cpu == cpu) tick_do_update_jiffies64(now); /* * If the jiffies update stalled for too long (timekeeper in stop_machine() * or VMEXIT'ed for several msecs), force an update. */ if (ts->last_tick_jiffies != jiffies) { ts->stalled_jiffies = 0; ts->last_tick_jiffies = READ_ONCE(jiffies); } else { if (++ts->stalled_jiffies >= MAX_STALLED_JIFFIES) { if (tick_limited_update_jiffies64(ts, now)) { ts->stalled_jiffies = 0; ts->last_tick_jiffies = READ_ONCE(jiffies); } } } if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) ts->got_idle_tick = 1; } static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) { /* * When we are idle and the tick is stopped, we have to touch * the watchdog as we might not schedule for a really long * time. 
This happens on completely idle SMP systems while * waiting on the login prompt. We also increment the "start of * idle" jiffy stamp so the idle accounting adjustment we do * when we go busy again does not account too many ticks. */ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { touch_softlockup_watchdog_sched(); if (is_idle_task(current)) ts->idle_jiffies++; /* * In case the current tick fired too early past its expected * expiration, make sure we don't bypass the next clock reprogramming * to the same deadline. */ ts->next_tick = 0; } update_process_times(user_mode(regs)); profile_tick(CPU_PROFILING); } /* * We rearm the timer until we get disabled by the idle code. * Called with interrupts disabled. */ static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer) { struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer); struct pt_regs *regs = get_irq_regs(); ktime_t now = ktime_get(); tick_sched_do_timer(ts, now); /* * Do not call when we are not in IRQ context and have * no valid 'regs' pointer */ if (regs) tick_sched_handle(ts, regs); else ts->next_tick = 0; /* * In dynticks mode, tick reprogram is deferred: * - to the idle task if in dynticks-idle * - to IRQ exit if in full-dynticks. */ if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED))) return HRTIMER_NORESTART; hrtimer_forward(timer, now, TICK_NSEC); return HRTIMER_RESTART; } #ifdef CONFIG_NO_HZ_FULL cpumask_var_t tick_nohz_full_mask; EXPORT_SYMBOL_GPL(tick_nohz_full_mask); bool tick_nohz_full_running; EXPORT_SYMBOL_GPL(tick_nohz_full_running); static atomic_t tick_dep_mask; static bool check_tick_dependency(atomic_t *dep) { int val = atomic_read(dep); if (val & TICK_DEP_MASK_POSIX_TIMER) { trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER); return true; } if (val & TICK_DEP_MASK_PERF_EVENTS) { trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS); return true; } if (val & TICK_DEP_MASK_SCHED) { trace_tick_stop(0, TICK_DEP_MASK_SCHED); return true; } if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) { trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE); return true; } if (val & TICK_DEP_MASK_RCU) { trace_tick_stop(0, TICK_DEP_MASK_RCU); return true; } if (val & TICK_DEP_MASK_RCU_EXP) { trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP); return true; } return false; } static bool can_stop_full_tick(int cpu, struct tick_sched *ts) { lockdep_assert_irqs_disabled(); if (unlikely(!cpu_online(cpu))) return false; if (check_tick_dependency(&tick_dep_mask)) return false; if (check_tick_dependency(&ts->tick_dep_mask)) return false; if (check_tick_dependency(&current->tick_dep_mask)) return false; if (check_tick_dependency(&current->signal->tick_dep_mask)) return false; return true; } static void nohz_full_kick_func(struct irq_work *work) { /* Empty, the tick restart happens on tick_nohz_irq_exit() */ } static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = IRQ_WORK_INIT_HARD(nohz_full_kick_func); /* * Kick this CPU if it's full dynticks in order to force it to * re-evaluate its dependency on the tick and restart it if necessary. * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(), * is NMI safe. */ static void tick_nohz_full_kick(void) { if (!tick_nohz_full_cpu(smp_processor_id())) return; irq_work_queue(this_cpu_ptr(&nohz_full_kick_work)); } /* * Kick the CPU if it's full dynticks in order to force it to * re-evaluate its dependency on the tick and restart it if necessary. 
*/ void tick_nohz_full_kick_cpu(int cpu) { if (!tick_nohz_full_cpu(cpu)) return; irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); } static void tick_nohz_kick_task(struct task_struct *tsk) { int cpu; /* * If the task is not running, run_posix_cpu_timers() * has nothing to elapse, and an IPI can then be optimized out. * * activate_task() STORE p->tick_dep_mask * STORE p->on_rq * __schedule() (switch to task 'p') smp_mb() (atomic_fetch_or()) * LOCK rq->lock LOAD p->on_rq * smp_mb__after_spin_lock() * tick_nohz_task_switch() * LOAD p->tick_dep_mask * * XXX given a task picks up the dependency on schedule(), should we * only care about tasks that are currently on the CPU instead of all * that are on the runqueue? * * That is, does this want to be: task_on_cpu() / task_curr()? */ if (!sched_task_on_rq(tsk)) return; /* * If the task concurrently migrates to another CPU, * we guarantee it sees the new tick dependency upon * schedule. * * set_task_cpu(p, cpu); * STORE p->cpu = @cpu * __schedule() (switch to task 'p') * LOCK rq->lock * smp_mb__after_spin_lock() STORE p->tick_dep_mask * tick_nohz_task_switch() smp_mb() (atomic_fetch_or()) * LOAD p->tick_dep_mask LOAD p->cpu */ cpu = task_cpu(tsk); preempt_disable(); if (cpu_online(cpu)) tick_nohz_full_kick_cpu(cpu); preempt_enable(); } /* * Kick all full dynticks CPUs in order to force these to re-evaluate * their dependency on the tick and restart it if necessary. */ static void tick_nohz_full_kick_all(void) { int cpu; if (!tick_nohz_full_running) return; preempt_disable(); for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask) tick_nohz_full_kick_cpu(cpu); preempt_enable(); } static void tick_nohz_dep_set_all(atomic_t *dep, enum tick_dep_bits bit) { int prev; prev = atomic_fetch_or(BIT(bit), dep); if (!prev) tick_nohz_full_kick_all(); } /* * Set a global tick dependency. Used by perf events that rely on freq and * unstable clocks. */ void tick_nohz_dep_set(enum tick_dep_bits bit) { tick_nohz_dep_set_all(&tick_dep_mask, bit); } void tick_nohz_dep_clear(enum tick_dep_bits bit) { atomic_andnot(BIT(bit), &tick_dep_mask); } /* * Set per-CPU tick dependency. Used by scheduler and perf events in order to * manage event-throttling. */ void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { int prev; struct tick_sched *ts; ts = per_cpu_ptr(&tick_cpu_sched, cpu); prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); if (!prev) { preempt_disable(); /* Perf needs local kick that is NMI safe */ if (cpu == smp_processor_id()) { tick_nohz_full_kick(); } else { /* Remote IRQ work not NMI-safe */ if (!WARN_ON_ONCE(in_nmi())) tick_nohz_full_kick_cpu(cpu); } preempt_enable(); } } EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu); void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); atomic_andnot(BIT(bit), &ts->tick_dep_mask); } EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu); /* * Set a per-task tick dependency. RCU needs this. Also posix CPU timers * in order to elapse per task timers. */ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) { if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) tick_nohz_kick_task(tsk); } EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task); void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) { atomic_andnot(BIT(bit), &tsk->tick_dep_mask); } EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task); /* * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse * per process timers. 
*/ void tick_nohz_dep_set_signal(struct task_struct *tsk, enum tick_dep_bits bit) { int prev; struct signal_struct *sig = tsk->signal; prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask); if (!prev) { struct task_struct *t; lockdep_assert_held(&tsk->sighand->siglock); __for_each_thread(sig, t) tick_nohz_kick_task(t); } } void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit) { atomic_andnot(BIT(bit), &sig->tick_dep_mask); } /* * Re-evaluate the need for the tick as we switch the current task. * It might need the tick due to per task/process properties: * perf events, posix CPU timers, ... */ void __tick_nohz_task_switch(void) { struct tick_sched *ts; if (!tick_nohz_full_cpu(smp_processor_id())) return; ts = this_cpu_ptr(&tick_cpu_sched); if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { if (atomic_read(&current->tick_dep_mask) || atomic_read(&current->signal->tick_dep_mask)) tick_nohz_full_kick(); } } /* Get the boot-time nohz CPU list from the kernel parameters. */ void __init tick_nohz_full_setup(cpumask_var_t cpumask) { alloc_bootmem_cpumask_var(&tick_nohz_full_mask); cpumask_copy(tick_nohz_full_mask, cpumask); tick_nohz_full_running = true; } bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { /* * The 'tick_do_timer_cpu' CPU handles housekeeping duty (unbound * timers, workqueues, timekeeping, ...) on behalf of full dynticks * CPUs. It must remain online when nohz full is enabled. */ if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu) return false; return true; } static int tick_nohz_cpu_down(unsigned int cpu) { return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY; } void __init tick_nohz_init(void) { int cpu, ret; if (!tick_nohz_full_running) return; /* * Full dynticks uses IRQ work to drive the tick rescheduling on safe * locking contexts. But then we need IRQ work to raise its own * interrupts to avoid circular dependency on the tick. */ if (!arch_irq_work_has_interrupt()) { pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n"); cpumask_clear(tick_nohz_full_mask); tick_nohz_full_running = false; return; } if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) && !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) { cpu = smp_processor_id(); if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { pr_warn("NO_HZ: Clearing %d from nohz_full range " "for timekeeping\n", cpu); cpumask_clear_cpu(cpu, tick_nohz_full_mask); } } for_each_cpu(cpu, tick_nohz_full_mask) ct_cpu_track_user(cpu); ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "kernel/nohz:predown", NULL, tick_nohz_cpu_down); WARN_ON(ret < 0); pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n", cpumask_pr_args(tick_nohz_full_mask)); } #endif /* #ifdef CONFIG_NO_HZ_FULL */ /* * NOHZ - aka dynamic tick functionality */ #ifdef CONFIG_NO_HZ_COMMON /* * NO HZ enabled ? 
*/ bool tick_nohz_enabled __read_mostly = true; unsigned long tick_nohz_active __read_mostly; /* * Enable / Disable tickless mode */ static int __init setup_tick_nohz(char *str) { return (kstrtobool(str, &tick_nohz_enabled) == 0); } __setup("nohz=", setup_tick_nohz); bool tick_nohz_tick_stopped(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); return tick_sched_flag_test(ts, TS_FLAG_STOPPED); } bool tick_nohz_tick_stopped_cpu(int cpu) { struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); return tick_sched_flag_test(ts, TS_FLAG_STOPPED); } /** * tick_nohz_update_jiffies - update jiffies when idle was interrupted * @now: current ktime_t * * Called from interrupt entry when the CPU was idle * * In case the sched_tick was stopped on this CPU, we have to check if jiffies * must be updated. Otherwise an interrupt handler could use a stale jiffy * value. We do this unconditionally on any CPU, as we don't know whether the * CPU, which has the update task assigned, is in a long sleep. */ static void tick_nohz_update_jiffies(ktime_t now) { unsigned long flags; __this_cpu_write(tick_cpu_sched.idle_waketime, now); local_irq_save(flags); tick_do_update_jiffies64(now); local_irq_restore(flags); touch_softlockup_watchdog_sched(); } static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now) { ktime_t delta; if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))) return; delta = ktime_sub(now, ts->idle_entrytime); write_seqcount_begin(&ts->idle_sleeptime_seq); if (nr_iowait_cpu(smp_processor_id()) > 0) ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); else ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); ts->idle_entrytime = now; tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE); write_seqcount_end(&ts->idle_sleeptime_seq); sched_clock_idle_wakeup_event(); } static void tick_nohz_start_idle(struct tick_sched *ts) { write_seqcount_begin(&ts->idle_sleeptime_seq); ts->idle_entrytime = ktime_get(); tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE); write_seqcount_end(&ts->idle_sleeptime_seq); sched_clock_idle_sleep_event(); } static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime, bool compute_delta, u64 *last_update_time) { ktime_t now, idle; unsigned int seq; if (!tick_nohz_active) return -1; now = ktime_get(); if (last_update_time) *last_update_time = ktime_to_us(now); do { seq = read_seqcount_begin(&ts->idle_sleeptime_seq); if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) { ktime_t delta = ktime_sub(now, ts->idle_entrytime); idle = ktime_add(*sleeptime, delta); } else { idle = *sleeptime; } } while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq)); return ktime_to_us(idle); } /** * get_cpu_idle_time_us - get the total idle time of a CPU * @cpu: CPU number to query * @last_update_time: variable to store update time in. Do not update * counters if NULL. * * Return the cumulative idle time (since boot) for a given * CPU, in microseconds. Note that this is partially broken due to * the counter of iowait tasks that can be remotely updated without * any synchronization. Therefore it is possible to observe backward * values within two consecutive reads. * * This time is measured via accounting rather than sampling, * and is as accurate as ktime_get() is. 
* * Return: -1 if NOHZ is not enabled, else total idle time of the @cpu */ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime, !nr_iowait_cpu(cpu), last_update_time); } EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); /** * get_cpu_iowait_time_us - get the total iowait time of a CPU * @cpu: CPU number to query * @last_update_time: variable to store update time in. Do not update * counters if NULL. * * Return the cumulative iowait time (since boot) for a given * CPU, in microseconds. Note this is partially broken due to * the counter of iowait tasks that can be remotely updated without * any synchronization. Therefore it is possible to observe backward * values within two consecutive reads. * * This time is measured via accounting rather than sampling, * and is as accurate as ktime_get() is. * * Return: -1 if NOHZ is not enabled, else total iowait time of @cpu */ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime, nr_iowait_cpu(cpu), last_update_time); } EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) { hrtimer_cancel(&ts->sched_timer); hrtimer_set_expires(&ts->sched_timer, ts->last_tick); /* Forward the time to expire in the future */ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); } else { tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); } /* * Reset to make sure the next tick stop doesn't get fooled by past * cached clock deadline. */ ts->next_tick = 0; } static inline bool local_timer_softirq_pending(void) { return local_timers_pending() & BIT(TIMER_SOFTIRQ); } /* * Read jiffies and the time when jiffies were updated last */ u64 get_jiffies_update(unsigned long *basej) { unsigned long basejiff; unsigned int seq; u64 basemono; do { seq = read_seqcount_begin(&jiffies_seq); basemono = last_jiffies_update; basejiff = jiffies; } while (read_seqcount_retry(&jiffies_seq, seq)); *basej = basejiff; return basemono; } /** * tick_nohz_next_event() - return the clock monotonic based next event * @ts: pointer to tick_sched struct * @cpu: CPU number * * Return: * *%0 - When the next event is a maximum of TICK_NSEC in the future * and the tick is not stopped yet * *%next_event - Next event based on clock monotonic */ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) { u64 basemono, next_tick, delta, expires; unsigned long basejiff; int tick_cpu; basemono = get_jiffies_update(&basejiff); ts->last_jiffies = basejiff; ts->timer_expires_base = basemono; /* * Keep the periodic tick, when RCU, architecture or irq_work * requests it. * Aside of that, check whether the local timer softirq is * pending. If so, its a bad idea to call get_next_timer_interrupt(), * because there is an already expired timer, so it will request * immediate expiry, which rearms the hardware timer with a * minimal delta, which brings us back to this place * immediately. Lather, rinse and repeat... */ if (rcu_needs_cpu() || arch_needs_cpu() || irq_work_needs_cpu() || local_timer_softirq_pending()) { next_tick = basemono + TICK_NSEC; } else { /* * Get the next pending timer. If high resolution * timers are enabled this only takes the timer wheel * timers into account. 
If high resolution timers are * disabled this also looks at the next expiring * hrtimer. */ next_tick = get_next_timer_interrupt(basejiff, basemono); ts->next_timer = next_tick; } /* Make sure next_tick is never before basemono! */ if (WARN_ON_ONCE(basemono > next_tick)) next_tick = basemono; /* * If the tick is due in the next period, keep it ticking or * force prod the timer. */ delta = next_tick - basemono; if (delta <= (u64)TICK_NSEC) { /* * We've not stopped the tick yet, and there's a timer in the * next period, so no point in stopping it either, bail. */ if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ts->timer_expires = 0; goto out; } } /* * If this CPU is the one which had the do_timer() duty last, we limit * the sleep time to the timekeeping 'max_deferment' value. * Otherwise we can sleep as long as we want. */ delta = timekeeping_max_deferment(); tick_cpu = READ_ONCE(tick_do_timer_cpu); if (tick_cpu != cpu && (tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST))) delta = KTIME_MAX; /* Calculate the next expiry time */ if (delta < (KTIME_MAX - basemono)) expires = basemono + delta; else expires = KTIME_MAX; ts->timer_expires = min_t(u64, expires, next_tick); out: return ts->timer_expires; } static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); unsigned long basejiff = ts->last_jiffies; u64 basemono = ts->timer_expires_base; bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED); int tick_cpu; u64 expires; /* Make sure we won't be trying to stop it twice in a row. */ ts->timer_expires_base = 0; /* * Now the tick should be stopped definitely - so the timer base needs * to be marked idle as well to not miss a newly queued timer. */ expires = timer_base_try_to_set_idle(basejiff, basemono, &timer_idle); if (expires > ts->timer_expires) { /* * This path could only happen when the first timer was removed * between calculating the possible sleep length and now (when * high resolution mode is not active, timer could also be a * hrtimer). * * We have to stick to the original calculated expiry value to * not stop the tick for too long with a shallow C-state (which * was programmed by cpuidle because of an early next expiration * value). */ expires = ts->timer_expires; } /* If the timer base is not idle, retain the not yet stopped tick. */ if (!timer_idle) return; /* * If this CPU is the one which updates jiffies, then give up * the assignment and let it be taken by the CPU which runs * the tick timer next, which might be this CPU as well. If we * don't drop this here, the jiffies might be stale and * do_timer() never gets invoked. Keep track of the fact that it * was the one which had the do_timer() duty last. 
*/ tick_cpu = READ_ONCE(tick_do_timer_cpu); if (tick_cpu == cpu) { WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE); tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST); } else if (tick_cpu != TICK_DO_TIMER_NONE) { tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST); } /* Skip reprogram of event if it's not changed */ if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) { /* Sanity check: make sure clockevent is actually programmed */ if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) return; WARN_ONCE(1, "basemono: %llu ts->next_tick: %llu dev->next_event: %llu " "timer->active: %d timer->expires: %llu\n", basemono, ts->next_tick, dev->next_event, hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer)); } /* * tick_nohz_stop_tick() can be called several times before * tick_nohz_restart_sched_tick() is called. This happens when * interrupts arrive which do not cause a reschedule. In the first * call we save the current tick time, so we can restart the * scheduler tick in tick_nohz_restart_sched_tick(). */ if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { calc_load_nohz_start(); quiet_vmstat(); ts->last_tick = hrtimer_get_expires(&ts->sched_timer); tick_sched_flag_set(ts, TS_FLAG_STOPPED); trace_tick_stop(1, TICK_DEP_MASK_NONE); } ts->next_tick = expires; /* * If the expiration time == KTIME_MAX, then we simply stop * the tick timer. */ if (unlikely(expires == KTIME_MAX)) { if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) hrtimer_cancel(&ts->sched_timer); else tick_program_event(KTIME_MAX, 1); return; } if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start(&ts->sched_timer, expires, HRTIMER_MODE_ABS_PINNED_HARD); } else { hrtimer_set_expires(&ts->sched_timer, expires); tick_program_event(expires, 1); } } static void tick_nohz_retain_tick(struct tick_sched *ts) { ts->timer_expires_base = 0; } #ifdef CONFIG_NO_HZ_FULL static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu) { if (tick_nohz_next_event(ts, cpu)) tick_nohz_stop_tick(ts, cpu); else tick_nohz_retain_tick(ts); } #endif /* CONFIG_NO_HZ_FULL */ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) { /* Update jiffies first */ tick_do_update_jiffies64(now); /* * Clear the timer idle flag, so we avoid IPIs on remote queueing and * the clock forward checks in the enqueue path: */ timer_clear_idle(); calc_load_nohz_stop(); touch_softlockup_watchdog_sched(); /* Cancel the scheduled timer and restore the tick: */ tick_sched_flag_clear(ts, TS_FLAG_STOPPED); tick_nohz_restart(ts, now); } static void __tick_nohz_full_update_tick(struct tick_sched *ts, ktime_t now) { #ifdef CONFIG_NO_HZ_FULL int cpu = smp_processor_id(); if (can_stop_full_tick(cpu, ts)) tick_nohz_full_stop_tick(ts, cpu); else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) tick_nohz_restart_sched_tick(ts, now); #endif } static void tick_nohz_full_update_tick(struct tick_sched *ts) { if (!tick_nohz_full_cpu(smp_processor_id())) return; if (!tick_sched_flag_test(ts, TS_FLAG_NOHZ)) return; __tick_nohz_full_update_tick(ts, ktime_get()); } /* * A pending softirq outside an IRQ (or softirq disabled section) context * should be waiting for ksoftirqd to handle it. Therefore we shouldn't * reach this code due to the need_resched() early check in can_stop_idle_tick(). 
* * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPU_TEARDOWN_CPU on the * cpu_down() process, softirqs can still be raised while ksoftirqd is parked, * triggering the code below, since wakep_softirqd() is ignored. * */ static bool report_idle_softirq(void) { static int ratelimit; unsigned int pending = local_softirq_pending(); if (likely(!pending)) return false; /* Some softirqs claim to be safe against hotplug and ksoftirqd parking */ if (!cpu_active(smp_processor_id())) { pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK; if (!pending) return false; } /* On RT, softirq handling may be waiting on some lock */ if (local_bh_blocked()) return false; if (ratelimit < 10) { pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n", pending); ratelimit++; } return true; } static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) { WARN_ON_ONCE(cpu_is_offline(cpu)); if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ))) return false; if (need_resched()) return false; if (unlikely(report_idle_softirq())) return false; if (tick_nohz_full_enabled()) { int tick_cpu = READ_ONCE(tick_do_timer_cpu); /* * Keep the tick alive to guarantee timekeeping progression * if there are full dynticks CPUs around */ if (tick_cpu == cpu) return false; /* Should not happen for nohz-full */ if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE)) return false; } return true; } /** * tick_nohz_idle_stop_tick - stop the idle tick from the idle task * * When the next event is more than a tick into the future, stop the idle tick */ void tick_nohz_idle_stop_tick(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); int cpu = smp_processor_id(); ktime_t expires; /* * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the * tick timer expiration time is known already. */ if (ts->timer_expires_base) expires = ts->timer_expires; else if (can_stop_idle_tick(cpu, ts)) expires = tick_nohz_next_event(ts, cpu); else return; ts->idle_calls++; if (expires > 0LL) { int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); tick_nohz_stop_tick(ts, cpu); ts->idle_sleeps++; ts->idle_expires = expires; if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ts->idle_jiffies = ts->last_jiffies; nohz_balance_enter_idle(cpu); } } else { tick_nohz_retain_tick(ts); } } void tick_nohz_idle_retain_tick(void) { tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched)); } /** * tick_nohz_idle_enter - prepare for entering idle on the current CPU * * Called when we start the idle loop. */ void tick_nohz_idle_enter(void) { struct tick_sched *ts; lockdep_assert_irqs_enabled(); local_irq_disable(); ts = this_cpu_ptr(&tick_cpu_sched); WARN_ON_ONCE(ts->timer_expires_base); tick_sched_flag_set(ts, TS_FLAG_INIDLE); tick_nohz_start_idle(ts); local_irq_enable(); } /** * tick_nohz_irq_exit - Notify the tick about IRQ exit * * A timer may have been added/modified/deleted either by the current IRQ, * or by another place using this IRQ as a notification. This IRQ may have * also updated the RCU callback list. These events may require a * re-evaluation of the next tick. Depending on the context: * * 1) If the CPU is idle and no resched is pending, just proceed with idle * time accounting. The next tick will be re-evaluated on the next idle * loop iteration. * * 2) If the CPU is nohz_full: * * 2.1) If there is any tick dependency, restart the tick if stopped. * * 2.2) If there is no tick dependency, (re-)evaluate the next tick and * stop/update it accordingly. 
*/ void tick_nohz_irq_exit(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) tick_nohz_start_idle(ts); else tick_nohz_full_update_tick(ts); } /** * tick_nohz_idle_got_tick - Check whether or not the tick handler has run * * Return: %true if the tick handler has run, otherwise %false */ bool tick_nohz_idle_got_tick(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (ts->got_idle_tick) { ts->got_idle_tick = 0; return true; } return false; } /** * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer * or the tick, whichever expires first. Note that, if the tick has been * stopped, it returns the next hrtimer. * * Called from power state control code with interrupts disabled * * Return: the next expiration time */ ktime_t tick_nohz_get_next_hrtimer(void) { return __this_cpu_read(tick_cpu_device.evtdev)->next_event; } /** * tick_nohz_get_sleep_length - return the expected length of the current sleep * @delta_next: duration until the next event if the tick cannot be stopped * * Called from power state control code with interrupts disabled. * * The return value of this function and/or the value returned by it through the * @delta_next pointer can be negative which must be taken into account by its * callers. * * Return: the expected length of the current sleep */ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); int cpu = smp_processor_id(); /* * The idle entry time is expected to be a sufficient approximation of * the current time at this point. */ ktime_t now = ts->idle_entrytime; ktime_t next_event; WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); *delta_next = ktime_sub(dev->next_event, now); if (!can_stop_idle_tick(cpu, ts)) return *delta_next; next_event = tick_nohz_next_event(ts, cpu); if (!next_event) return *delta_next; /* * If the next highres timer to expire is earlier than 'next_event', the * idle governor needs to know that. */ next_event = min_t(u64, next_event, hrtimer_next_event_without(&ts->sched_timer)); return ktime_sub(next_event, now); } /** * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value * for a particular CPU. * @cpu: target CPU number * * Called from the schedutil frequency scaling governor in scheduler context. * * Return: the current idle calls counter value for @cpu */ unsigned long tick_nohz_get_idle_calls_cpu(int cpu) { struct tick_sched *ts = tick_get_tick_sched(cpu); return ts->idle_calls; } static void tick_nohz_account_idle_time(struct tick_sched *ts, ktime_t now) { unsigned long ticks; ts->idle_exittime = now; if (vtime_accounting_enabled_this_cpu()) return; /* * We stopped the tick in idle. update_process_times() would miss the * time we slept, as it does only a 1 tick accounting. * Enforce that this is accounted to idle ! */ ticks = jiffies - ts->idle_jiffies; /* * We might be one off. Do not randomly account a huge number of ticks! 
*/ if (ticks && ticks < LONG_MAX) account_idle_ticks(ticks); } void tick_nohz_idle_restart_tick(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ktime_t now = ktime_get(); tick_nohz_restart_sched_tick(ts, now); tick_nohz_account_idle_time(ts, now); } } static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now) { if (tick_nohz_full_cpu(smp_processor_id())) __tick_nohz_full_update_tick(ts, now); else tick_nohz_restart_sched_tick(ts, now); tick_nohz_account_idle_time(ts, now); } /** * tick_nohz_idle_exit - Update the tick upon idle task exit * * When the idle task exits, update the tick depending on the * following situations: * * 1) If the CPU is not in nohz_full mode (most cases), then * restart the tick. * * 2) If the CPU is in nohz_full mode (corner case): * 2.1) If the tick can be kept stopped (no tick dependencies) * then re-evaluate the next tick and try to keep it stopped * as long as possible. * 2.2) If the tick has dependencies, restart the tick. * */ void tick_nohz_idle_exit(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); bool idle_active, tick_stopped; ktime_t now; local_irq_disable(); WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); WARN_ON_ONCE(ts->timer_expires_base); tick_sched_flag_clear(ts, TS_FLAG_INIDLE); idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE); tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); if (idle_active || tick_stopped) now = ktime_get(); if (idle_active) tick_nohz_stop_idle(ts, now); if (tick_stopped) tick_nohz_idle_update_tick(ts, now); local_irq_enable(); } /* * In low-resolution mode, the tick handler must be implemented directly * at the clockevent level. hrtimer can't be used instead, because its * infrastructure actually relies on the tick itself as a backend in * low-resolution mode (see hrtimer_run_queues()). */ static void tick_nohz_lowres_handler(struct clock_event_device *dev) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); dev->next_event = KTIME_MAX; if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART)) tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); } static inline void tick_nohz_activate(struct tick_sched *ts) { if (!tick_nohz_enabled) return; tick_sched_flag_set(ts, TS_FLAG_NOHZ); /* One update is enough */ if (!test_and_set_bit(0, &tick_nohz_active)) timers_update_nohz(); } /** * tick_nohz_switch_to_nohz - switch to NOHZ mode */ static void tick_nohz_switch_to_nohz(void) { if (!tick_nohz_enabled) return; if (tick_switch_to_oneshot(tick_nohz_lowres_handler)) return; /* * Recycle the hrtimer in 'ts', so we can share the * highres code. */ tick_setup_sched_timer(false); } static inline void tick_nohz_irq_enter(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); ktime_t now; if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE)) return; now = ktime_get(); if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)) tick_nohz_stop_idle(ts, now); /* * If all CPUs are idle we may need to update a stale jiffies value. * Note nohz_full is a special case: a timekeeper is guaranteed to stay * alive but it might be busy looping with interrupts disabled in some * rare case (typically stop machine). So we must make sure we have a * last resort. 
*/ if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) tick_nohz_update_jiffies(now); } #else static inline void tick_nohz_switch_to_nohz(void) { } static inline void tick_nohz_irq_enter(void) { } static inline void tick_nohz_activate(struct tick_sched *ts) { } #endif /* CONFIG_NO_HZ_COMMON */ /* * Called from irq_enter() to notify about the possible interruption of idle() */ void tick_irq_enter(void) { tick_check_oneshot_broadcast_this_cpu(); tick_nohz_irq_enter(); } static int sched_skew_tick; static int __init skew_tick(char *str) { get_option(&str, &sched_skew_tick); return 0; } early_param("skew_tick", skew_tick); /** * tick_setup_sched_timer - setup the tick emulation timer * @hrtimer: whether to use the hrtimer or not */ void tick_setup_sched_timer(bool hrtimer) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); /* Emulate tick processing via per-CPU hrtimers: */ hrtimer_setup(&ts->sched_timer, tick_nohz_handler, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer) tick_sched_flag_set(ts, TS_FLAG_HIGHRES); /* Get the next period (per-CPU) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); /* Offset the tick to avert 'jiffies_lock' contention. */ if (sched_skew_tick) { u64 offset = TICK_NSEC >> 1; do_div(offset, num_possible_cpus()); offset *= smp_processor_id(); hrtimer_add_expires_ns(&ts->sched_timer, offset); } hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer) hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); else tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); tick_nohz_activate(ts); } /* * Shut down the tick and make sure the CPU won't try to retake the timekeeping * duty before disabling IRQs in idle for the last time. */ void tick_sched_timer_dying(int cpu) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); ktime_t idle_sleeptime, iowait_sleeptime; unsigned long idle_calls, idle_sleeps; /* This must happen before hrtimers are migrated! */ if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) hrtimer_cancel(&ts->sched_timer); idle_sleeptime = ts->idle_sleeptime; iowait_sleeptime = ts->iowait_sleeptime; idle_calls = ts->idle_calls; idle_sleeps = ts->idle_sleeps; memset(ts, 0, sizeof(*ts)); ts->idle_sleeptime = idle_sleeptime; ts->iowait_sleeptime = iowait_sleeptime; ts->idle_calls = idle_calls; ts->idle_sleeps = idle_sleeps; } /* * Async notification about clocksource changes */ void tick_clock_notify(void) { int cpu; for_each_possible_cpu(cpu) set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); } /* * Async notification about clock event changes */ void tick_oneshot_notify(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); set_bit(0, &ts->check_clocks); } /* * Check if a change happened, which makes oneshot possible. * * Called cyclically from the hrtimer softirq (driven by the timer * softirq). 'allow_nohz' signals that we can switch into low-res NOHZ * mode, because high resolution timers are disabled (either compile * or runtime). Called with interrupts disabled. */ int tick_check_oneshot_change(int allow_nohz) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (!test_and_clear_bit(0, &ts->check_clocks)) return 0; if (tick_sched_flag_test(ts, TS_FLAG_NOHZ)) return 0; if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available()) return 0; if (!allow_nohz) return 1; tick_nohz_switch_to_nohz(); return 0; }
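/*
 * Illustrative userspace sketch (not part of tick-sched.c): on NO_HZ kernels
 * the cumulative counters maintained above by get_cpu_idle_time_us() and
 * get_cpu_iowait_time_us() are typically what /proc/stat ends up reporting
 * per CPU. The program below reads the idle and iowait fields for one CPU;
 * the field order (4th = idle, 5th = iowait, in USER_HZ ticks) is standard
 * /proc/stat layout, and the file/program names are made up for the demo.
 *
 *	cc -O2 -o idlestat idlestat.c && ./idlestat 0
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int cpu = argc > 1 ? atoi(argv[1]) : 0;
	long tick = sysconf(_SC_CLK_TCK);		/* USER_HZ, usually 100 */
	char tag[16], line[512];
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	snprintf(tag, sizeof(tag), "cpu%d ", cpu);

	while (fgets(line, sizeof(line), f)) {
		unsigned long long user, nice, sys, idle, iowait;

		if (strncmp(line, tag, strlen(tag)))
			continue;
		if (sscanf(line + strlen(tag), "%llu %llu %llu %llu %llu",
			   &user, &nice, &sys, &idle, &iowait) == 5)
			printf("cpu%d: idle %llu ms, iowait %llu ms\n", cpu,
			       idle * 1000 / tick, iowait * 1000 / tick);
		break;
	}
	fclose(f);
	return 0;
}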
57 53 53 8 154 151 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM huge_memory #if !defined(__HUGE_MEMORY_H) || defined(TRACE_HEADER_MULTI_READ) #define __HUGE_MEMORY_H #include <linux/tracepoint.h> #define SCAN_STATUS \ EM( SCAN_FAIL, "failed") \ EM( SCAN_SUCCEED, "succeeded") \ EM( SCAN_NO_PTE_TABLE, "no_pte_table") \ EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \ EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \ EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \ EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \ EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \ EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \ EM( SCAN_PAGE_NULL, "page_null") \ EM( SCAN_SCAN_ABORT, "scan_aborted") \ EM( SCAN_PAGE_COUNT, "not_suitable_page_count") \ EM( SCAN_PAGE_LRU, "page_not_in_lru") \ EM( SCAN_PAGE_LOCK, "page_locked") \ EM( SCAN_PAGE_ANON, "page_not_anon") \ EM( SCAN_PAGE_COMPOUND, "page_compound") \ EM( SCAN_ANY_PROCESS, "no_process_for_page") \ EM( SCAN_VMA_NULL, "vma_null") \ EM( SCAN_VMA_CHECK, "vma_check_failed") \ EM( SCAN_ADDRESS_RANGE, "not_suitable_address_range") \ EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ EM( SCAN_TRUNCATED, "truncated") \ EM( SCAN_PAGE_HAS_PRIVATE, "page_has_private") \ EM( SCAN_STORE_FAILED, "store_failed") \ EM( SCAN_COPY_MC, "copy_poisoned_page") \ EMe(SCAN_PAGE_FILLED, "page_filled") #undef EM #undef EMe #define EM(a, b) TRACE_DEFINE_ENUM(a); #define EMe(a, b) TRACE_DEFINE_ENUM(a); SCAN_STATUS #undef EM #undef EMe #define EM(a, b) {a, b}, #define EMe(a, b) {a, b} TRACE_EVENT(mm_khugepaged_scan_pmd, TP_PROTO(struct mm_struct *mm, struct folio *folio, int referenced, int none_or_zero, int status, int unmapped), TP_ARGS(mm, folio, referenced, none_or_zero, status, unmapped), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(unsigned long, pfn) __field(int, referenced) __field(int, none_or_zero) __field(int, status) __field(int, unmapped) ), TP_fast_assign( __entry->mm = mm; __entry->pfn = folio ? 
folio_pfn(folio) : -1; __entry->referenced = referenced; __entry->none_or_zero = none_or_zero; __entry->status = status; __entry->unmapped = unmapped; ), TP_printk("mm=%p, scan_pfn=0x%lx, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d", __entry->mm, __entry->pfn, __entry->referenced, __entry->none_or_zero, __print_symbolic(__entry->status, SCAN_STATUS), __entry->unmapped) ); TRACE_EVENT(mm_collapse_huge_page, TP_PROTO(struct mm_struct *mm, int isolated, int status), TP_ARGS(mm, isolated, status), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(int, isolated) __field(int, status) ), TP_fast_assign( __entry->mm = mm; __entry->isolated = isolated; __entry->status = status; ), TP_printk("mm=%p, isolated=%d, status=%s", __entry->mm, __entry->isolated, __print_symbolic(__entry->status, SCAN_STATUS)) ); TRACE_EVENT(mm_collapse_huge_page_isolate, TP_PROTO(struct folio *folio, int none_or_zero, int referenced, int status), TP_ARGS(folio, none_or_zero, referenced, status), TP_STRUCT__entry( __field(unsigned long, pfn) __field(int, none_or_zero) __field(int, referenced) __field(int, status) ), TP_fast_assign( __entry->pfn = folio ? folio_pfn(folio) : -1; __entry->none_or_zero = none_or_zero; __entry->referenced = referenced; __entry->status = status; ), TP_printk("scan_pfn=0x%lx, none_or_zero=%d, referenced=%d, status=%s", __entry->pfn, __entry->none_or_zero, __entry->referenced, __print_symbolic(__entry->status, SCAN_STATUS)) ); TRACE_EVENT(mm_collapse_huge_page_swapin, TP_PROTO(struct mm_struct *mm, int swapped_in, int referenced, int ret), TP_ARGS(mm, swapped_in, referenced, ret), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(int, swapped_in) __field(int, referenced) __field(int, ret) ), TP_fast_assign( __entry->mm = mm; __entry->swapped_in = swapped_in; __entry->referenced = referenced; __entry->ret = ret; ), TP_printk("mm=%p, swapped_in=%d, referenced=%d, ret=%d", __entry->mm, __entry->swapped_in, __entry->referenced, __entry->ret) ); TRACE_EVENT(mm_khugepaged_scan_file, TP_PROTO(struct mm_struct *mm, struct folio *folio, struct file *file, int present, int swap, int result), TP_ARGS(mm, folio, file, present, swap, result), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(unsigned long, pfn) __string(filename, file->f_path.dentry->d_iname) __field(int, present) __field(int, swap) __field(int, result) ), TP_fast_assign( __entry->mm = mm; __entry->pfn = folio ? folio_pfn(folio) : -1; __assign_str(filename); __entry->present = present; __entry->swap = swap; __entry->result = result; ), TP_printk("mm=%p, scan_pfn=0x%lx, filename=%s, present=%d, swap=%d, result=%s", __entry->mm, __entry->pfn, __get_str(filename), __entry->present, __entry->swap, __print_symbolic(__entry->result, SCAN_STATUS)) ); TRACE_EVENT(mm_khugepaged_collapse_file, TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index, unsigned long addr, bool is_shmem, struct file *file, int nr, int result), TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(unsigned long, hpfn) __field(pgoff_t, index) __field(unsigned long, addr) __field(bool, is_shmem) __string(filename, file->f_path.dentry->d_iname) __field(int, nr) __field(int, result) ), TP_fast_assign( __entry->mm = mm; __entry->hpfn = new_folio ? 
folio_pfn(new_folio) : -1; __entry->index = index; __entry->addr = addr; __entry->is_shmem = is_shmem; __assign_str(filename); __entry->nr = nr; __entry->result = result; ), TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%lx, is_shmem=%d, filename=%s, nr=%d, result=%s", __entry->mm, __entry->hpfn, __entry->index, __entry->addr, __entry->is_shmem, __get_str(filename), __entry->nr, __print_symbolic(__entry->result, SCAN_STATUS)) ); #endif /* __HUGE_MEMORY_H */ #include <trace/define_trace.h>
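/*
 * Illustrative sketch (not part of this trace header): the EM()/EMe() pairs
 * above are an X-macro. The kernel expands the single SCAN_STATUS list twice,
 * first as TRACE_DEFINE_ENUM() statements so the tracing core learns the enum
 * values, then as { value, "name" } pairs consumed by __print_symbolic().
 * The standalone program below shows the same two-pass trick on a made-up
 * status list (all DEMO_* names are hypothetical); the only difference is
 * that the first expansion here defines the enum itself rather than emitting
 * TRACE_DEFINE_ENUM().
 *
 *	cc -o xmacro xmacro.c && ./xmacro
 */
#include <stdio.h>

/* Single source of truth: one entry per status, like SCAN_STATUS above. */
#define DEMO_STATUS_LIST \
	EM(DEMO_FAIL,	"failed")	\
	EM(DEMO_RETRY,	"retried")	\
	EMe(DEMO_OK,	"succeeded")

/* First expansion: generate the enumerators. */
#define EM(a, b)	a,
#define EMe(a, b)	a
enum demo_status { DEMO_STATUS_LIST };
#undef EM
#undef EMe

/* Second expansion: generate the value -> string table. */
#define EM(a, b)	{ a, b },
#define EMe(a, b)	{ a, b }
static const struct { int val; const char *name; } demo_names[] = { DEMO_STATUS_LIST };
#undef EM
#undef EMe

static const char *demo_status_name(int val)
{
	for (unsigned int i = 0; i < sizeof(demo_names) / sizeof(demo_names[0]); i++)
		if (demo_names[i].val == val)
			return demo_names[i].name;
	return "unknown";
}

int main(void)
{
	printf("%d -> %s\n", DEMO_RETRY, demo_status_name(DEMO_RETRY));
	return 0;
}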
2 742 747 743 671 745 56 745 744 746 11 11 11 630 630 1630 628 2 630 627 627 24 24 24 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 // SPDX-License-Identifier: GPL-2.0-only /* * net/core/dst.c Protocol independent destination cache. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * */ #include <linux/bitops.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/workqueue.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/types.h> #include <net/net_namespace.h> #include <linux/sched.h> #include <linux/prefetch.h> #include <net/lwtunnel.h> #include <net/xfrm.h> #include <net/dst.h> #include <net/dst_metadata.h> int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) { kfree_skb(skb); return 0; } EXPORT_SYMBOL(dst_discard_out); const struct dst_metrics dst_default_metrics = { /* This initializer is needed to force linker to place this variable * into const section. Otherwise it might end into bss section. * We really want to avoid false sharing on this variable, and catch * any writes on it. 
*/ .refcnt = REFCOUNT_INIT(1), }; EXPORT_SYMBOL(dst_default_metrics); void dst_init(struct dst_entry *dst, struct dst_ops *ops, struct net_device *dev, int initial_obsolete, unsigned short flags) { dst->dev = dev; netdev_hold(dev, &dst->dev_tracker, GFP_ATOMIC); dst->ops = ops; dst_init_metrics(dst, dst_default_metrics.metrics, true); dst->expires = 0UL; #ifdef CONFIG_XFRM dst->xfrm = NULL; #endif dst->input = dst_discard; dst->output = dst_discard_out; dst->error = 0; dst->obsolete = initial_obsolete; dst->header_len = 0; dst->trailer_len = 0; #ifdef CONFIG_IP_ROUTE_CLASSID dst->tclassid = 0; #endif dst->lwtstate = NULL; rcuref_init(&dst->__rcuref, 1); INIT_LIST_HEAD(&dst->rt_uncached); dst->__use = 0; dst->lastuse = jiffies; dst->flags = flags; if (!(flags & DST_NOCOUNT)) dst_entries_add(ops, 1); } EXPORT_SYMBOL(dst_init); void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_obsolete, unsigned short flags) { struct dst_entry *dst; if (ops->gc && !(flags & DST_NOCOUNT) && dst_entries_get_fast(ops) > ops->gc_thresh) ops->gc(ops); dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC); if (!dst) return NULL; dst_init(dst, ops, dev, initial_obsolete, flags); return dst; } EXPORT_SYMBOL(dst_alloc); static void dst_destroy(struct dst_entry *dst) { struct dst_entry *child = NULL; smp_rmb(); #ifdef CONFIG_XFRM if (dst->xfrm) { struct xfrm_dst *xdst = (struct xfrm_dst *) dst; child = xdst->child; } #endif if (dst->ops->destroy) dst->ops->destroy(dst); netdev_put(dst->dev, &dst->dev_tracker); lwtstate_put(dst->lwtstate); if (dst->flags & DST_METADATA) metadata_dst_free((struct metadata_dst *)dst); else kmem_cache_free(dst->ops->kmem_cachep, dst); dst = child; if (dst) dst_release_immediate(dst); } static void dst_destroy_rcu(struct rcu_head *head) { struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head); dst_destroy(dst); } /* Operations to mark dst as DEAD and clean up the net device referenced * by dst: * 1. put the dst under blackhole interface and discard all tx/rx packets * on this route. * 2. release the net_device * This function should be called when removing routes from the fib tree * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to * make the next dst_ops->check() fail. 
*/ void dst_dev_put(struct dst_entry *dst) { struct net_device *dev = dst->dev; WRITE_ONCE(dst->obsolete, DST_OBSOLETE_DEAD); if (dst->ops->ifdown) dst->ops->ifdown(dst, dev); WRITE_ONCE(dst->input, dst_discard); WRITE_ONCE(dst->output, dst_discard_out); rcu_assign_pointer(dst->dev_rcu, blackhole_netdev); netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker, GFP_ATOMIC); } EXPORT_SYMBOL(dst_dev_put); static void dst_count_dec(struct dst_entry *dst) { if (!(dst->flags & DST_NOCOUNT)) dst_entries_add(dst->ops, -1); } void dst_release(struct dst_entry *dst) { if (dst && rcuref_put(&dst->__rcuref)) { #ifdef CONFIG_DST_CACHE if (dst->flags & DST_METADATA) { struct metadata_dst *md_dst = (struct metadata_dst *)dst; if (md_dst->type == METADATA_IP_TUNNEL) dst_cache_reset_now(&md_dst->u.tun_info.dst_cache); } #endif dst_count_dec(dst); call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu); } } EXPORT_SYMBOL(dst_release); void dst_release_immediate(struct dst_entry *dst) { if (dst && rcuref_put(&dst->__rcuref)) { dst_count_dec(dst); dst_destroy(dst); } } EXPORT_SYMBOL(dst_release_immediate); u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) { struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC); if (p) { struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old); unsigned long prev, new; refcount_set(&p->refcnt, 1); memcpy(p->metrics, old_p->metrics, sizeof(p->metrics)); new = (unsigned long) p; prev = cmpxchg(&dst->_metrics, old, new); if (prev != old) { kfree(p); p = (struct dst_metrics *)__DST_METRICS_PTR(prev); if (prev & DST_METRICS_READ_ONLY) p = NULL; } else if (prev & DST_METRICS_REFCOUNTED) { if (refcount_dec_and_test(&old_p->refcnt)) kfree(old_p); } } BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0); return (u32 *)p; } EXPORT_SYMBOL(dst_cow_metrics_generic); /* Caller asserts that dst_metrics_read_only(dst) is false. */ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) { unsigned long prev, new; new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY; prev = cmpxchg(&dst->_metrics, old, new); if (prev == old) kfree(__DST_METRICS_PTR(old)); } EXPORT_SYMBOL(__dst_destroy_metrics_generic); struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie) { return NULL; } u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old) { return NULL; } struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr) { return NULL; } void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu, bool confirm_neigh) { } EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu); void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) { } EXPORT_SYMBOL_GPL(dst_blackhole_redirect); unsigned int dst_blackhole_mtu(const struct dst_entry *dst) { unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); return mtu ? 
: dst_dev(dst)->mtu; } EXPORT_SYMBOL_GPL(dst_blackhole_mtu); static struct dst_ops dst_blackhole_ops = { .family = AF_UNSPEC, .neigh_lookup = dst_blackhole_neigh_lookup, .check = dst_blackhole_check, .cow_metrics = dst_blackhole_cow_metrics, .update_pmtu = dst_blackhole_update_pmtu, .redirect = dst_blackhole_redirect, .mtu = dst_blackhole_mtu, }; static void __metadata_dst_init(struct metadata_dst *md_dst, enum metadata_type type, u8 optslen) { struct dst_entry *dst; dst = &md_dst->dst; dst_init(dst, &dst_blackhole_ops, NULL, DST_OBSOLETE_NONE, DST_METADATA | DST_NOCOUNT); memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst)); md_dst->type = type; } struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type, gfp_t flags) { struct metadata_dst *md_dst; md_dst = kmalloc(struct_size(md_dst, u.tun_info.options, optslen), flags); if (!md_dst) return NULL; __metadata_dst_init(md_dst, type, optslen); return md_dst; } EXPORT_SYMBOL_GPL(metadata_dst_alloc); void metadata_dst_free(struct metadata_dst *md_dst) { #ifdef CONFIG_DST_CACHE if (md_dst->type == METADATA_IP_TUNNEL) dst_cache_destroy(&md_dst->u.tun_info.dst_cache); #endif if (md_dst->type == METADATA_XFRM) dst_release(md_dst->u.xfrm_info.dst_orig); kfree(md_dst); } EXPORT_SYMBOL_GPL(metadata_dst_free); struct metadata_dst __percpu * metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags) { int cpu; struct metadata_dst __percpu *md_dst; md_dst = __alloc_percpu_gfp(struct_size(md_dst, u.tun_info.options, optslen), __alignof__(struct metadata_dst), flags); if (!md_dst) return NULL; for_each_possible_cpu(cpu) __metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen); return md_dst; } EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu); void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst) { int cpu; for_each_possible_cpu(cpu) { struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu); #ifdef CONFIG_DST_CACHE if (one_md_dst->type == METADATA_IP_TUNNEL) dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache); #endif if (one_md_dst->type == METADATA_XFRM) dst_release(one_md_dst->u.xfrm_info.dst_orig); } free_percpu(md_dst); } EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);
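/*
 * Illustrative userspace sketch (not net/core/dst.c code): the core idea of
 * dst_cow_metrics_generic() is a race-tolerant copy-on-write switch from a
 * shared read-only metrics template to a private writable copy, installed
 * with a single compare-and-swap. The demo below uses C11 atomics instead of
 * the kernel's cmpxchg() and leaves out the READ_ONLY/REFCOUNTED bookkeeping
 * that the kernel keeps in the low bits of dst->_metrics; all names here are
 * made up for the example.
 *
 *	cc -std=c11 -o cowdemo cowdemo.c && ./cowdemo
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define N_METRICS 4

struct metrics {
	unsigned int values[N_METRICS];
};

/* Shared zeroed template, analogous to dst_default_metrics. */
static const struct metrics default_metrics;

struct entry {
	_Atomic(struct metrics *) metrics;	/* template or private copy */
};

/*
 * Return a writable metrics block for @e, installing a private copy on first
 * use. If another thread wins the compare-and-swap race, free our copy and
 * use theirs, mirroring what dst_cow_metrics_generic() does.
 */
static struct metrics *cow_metrics(struct entry *e)
{
	struct metrics *old = atomic_load(&e->metrics);
	struct metrics *p, *expected;

	if (old != (struct metrics *)&default_metrics)
		return old;			/* already private */

	p = malloc(sizeof(*p));
	if (!p)
		return NULL;
	memcpy(p, old, sizeof(*p));

	expected = old;
	if (!atomic_compare_exchange_strong(&e->metrics, &expected, p)) {
		free(p);			/* lost the race */
		p = expected;			/* winner's private copy */
	}
	return p;
}

int main(void)
{
	struct entry e = { .metrics = (struct metrics *)&default_metrics };
	struct metrics *m = cow_metrics(&e);

	m->values[0] = 1500;			/* e.g. a per-route MTU override */
	printf("metric[0] = %u\n", cow_metrics(&e)->values[0]);

	m = atomic_load(&e.metrics);
	if (m != (struct metrics *)&default_metrics)
		free(m);
	return 0;
}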
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 IBM Corporation
 *
 * Author:
 * Mimi Zohar <zohar@us.ibm.com>
 */

#include <linux/xattr.h>
#include <linux/evm.h>

int posix_xattr_acl(const char *xattr)
{
	int xattr_len = strlen(xattr);

	if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len)
	    && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0))
		return 1;
	if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len)
	    && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0))
		return 1;
	return 0;
}
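/*
 * Illustrative userspace sketch (not part of the EVM code above): the two
 * macros tested by posix_xattr_acl() expand to "system.posix_acl_access" and
 * "system.posix_acl_default", and the strlen()+strncmp() pair amounts to an
 * exact-name match. The program below lists a file's xattrs with listxattr()
 * and flags the POSIX ACL ones using the same exact comparison; the file and
 * program names are made up for the demo.
 *
 *	cc -o aclxattr aclxattr.c && ./aclxattr somefile
 */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

/* Userspace spellings of XATTR_NAME_POSIX_ACL_ACCESS/_DEFAULT. */
#define ACL_ACCESS	"system.posix_acl_access"
#define ACL_DEFAULT	"system.posix_acl_default"

static int is_posix_acl_xattr(const char *name)
{
	/* Exact-name match, equivalent to posix_xattr_acl() above. */
	return strcmp(name, ACL_ACCESS) == 0 || strcmp(name, ACL_DEFAULT) == 0;
}

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t len, off = 0;

	if (argc < 2)
		return 1;
	len = listxattr(argv[1], buf, sizeof(buf));
	if (len < 0) {
		perror("listxattr");
		return 1;
	}
	/* listxattr() fills the buffer with NUL-terminated names back to back. */
	while (off < len) {
		const char *name = buf + off;

		printf("%-30s%s\n", name,
		       is_posix_acl_xattr(name) ? " (POSIX ACL)" : "");
		off += strlen(name) + 1;
	}
	return 0;
}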
/*
 * Copyright © 2006 Keith Packard
 * Copyright © 2007-2008 Dave Airlie
 * Copyright © 2007-2008 Intel Corporation
 *   Jesse Barnes <jesse.barnes@intel.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
*/ #ifndef __DRM_CRTC_H__ #define __DRM_CRTC_H__ #include <linux/spinlock.h> #include <linux/types.h> #include <drm/drm_modeset_lock.h> #include <drm/drm_mode_object.h> #include <drm/drm_modes.h> #include <drm/drm_device.h> #include <drm/drm_plane.h> #include <drm/drm_debugfs_crc.h> #include <drm/drm_mode_config.h> struct drm_connector; struct drm_device; struct drm_framebuffer; struct drm_mode_set; struct drm_file; struct drm_printer; struct drm_self_refresh_data; struct device_node; struct edid; static inline int64_t U642I64(uint64_t val) { return (int64_t)*((int64_t *)&val); } static inline uint64_t I642U64(int64_t val) { return (uint64_t)*((uint64_t *)&val); } struct drm_crtc; struct drm_pending_vblank_event; struct drm_plane; struct drm_bridge; struct drm_atomic_state; struct drm_crtc_helper_funcs; struct drm_plane_helper_funcs; /** * struct drm_crtc_state - mutable CRTC state * * Note that the distinction between @enable and @active is rather subtle: * Flipping @active while @enable is set without changing anything else may * never return in a failure from the &drm_mode_config_funcs.atomic_check * callback. Userspace assumes that a DPMS On will always succeed. In other * words: @enable controls resource assignment, @active controls the actual * hardware state. * * The three booleans active_changed, connectors_changed and mode_changed are * intended to indicate whether a full modeset is needed, rather than strictly * describing what has changed in a commit. See also: * drm_atomic_crtc_needs_modeset() */ struct drm_crtc_state { /** @crtc: backpointer to the CRTC */ struct drm_crtc *crtc; /** * @enable: Whether the CRTC should be enabled, gates all other state. * This controls reservations of shared resources. Actual hardware state * is controlled by @active. */ bool enable; /** * @active: Whether the CRTC is actively displaying (used for DPMS). * Implies that @enable is set. The driver must not release any shared * resources if @active is set to false but @enable still true, because * userspace expects that a DPMS ON always succeeds. * * Hence drivers must not consult @active in their various * &drm_mode_config_funcs.atomic_check callback to reject an atomic * commit. They can consult it to aid in the computation of derived * hardware state, since even in the DPMS OFF state the display hardware * should be as much powered down as when the CRTC is completely * disabled through setting @enable to false. */ bool active; /** * @planes_changed: Planes on this crtc are updated. Used by the atomic * helpers and drivers to steer the atomic commit control flow. */ bool planes_changed : 1; /** * @mode_changed: @mode or @enable has been changed. Used by the atomic * helpers and drivers to steer the atomic commit control flow. See also * drm_atomic_crtc_needs_modeset(). * * Drivers are supposed to set this for any CRTC state changes that * require a full modeset. They can also reset it to false if e.g. a * @mode change can be done without a full modeset by only changing * scaler settings. */ bool mode_changed : 1; /** * @active_changed: @active has been toggled. Used by the atomic * helpers and drivers to steer the atomic commit control flow. See also * drm_atomic_crtc_needs_modeset(). */ bool active_changed : 1; /** * @connectors_changed: Connectors to this crtc have been updated, * either in their state or routing. Used by the atomic * helpers and drivers to steer the atomic commit control flow. See also * drm_atomic_crtc_needs_modeset(). 
* * Drivers are supposed to set this as-needed from their own atomic * check code, e.g. from &drm_encoder_helper_funcs.atomic_check */ bool connectors_changed : 1; /** * @zpos_changed: zpos values of planes on this crtc have been updated. * Used by the atomic helpers and drivers to steer the atomic commit * control flow. */ bool zpos_changed : 1; /** * @color_mgmt_changed: Color management properties have changed * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and * drivers to steer the atomic commit control flow. */ bool color_mgmt_changed : 1; /** * @no_vblank: * * Reflects the ability of a CRTC to send VBLANK events. This state * usually depends on the pipeline configuration. If set to true, DRM * atomic helpers will send out a fake VBLANK event during display * updates after all hardware changes have been committed. This is * implemented in drm_atomic_helper_fake_vblank(). * * One usage is for drivers and/or hardware without support for VBLANK * interrupts. Such drivers typically do not initialize vblanking * (i.e., call drm_vblank_init() with the number of CRTCs). For CRTCs * without initialized vblanking, this field is set to true in * drm_atomic_helper_check_modeset(), and a fake VBLANK event will be * send out on each update of the display pipeline by * drm_atomic_helper_fake_vblank(). * * Another usage is CRTCs feeding a writeback connector operating in * oneshot mode. In this case the fake VBLANK event is only generated * when a job is queued to the writeback connector, and we want the * core to fake VBLANK events when this part of the pipeline hasn't * changed but others had or when the CRTC and connectors are being * disabled. * * __drm_atomic_helper_crtc_duplicate_state() will not reset the value * from the current state, the CRTC driver is then responsible for * updating this field when needed. * * Note that the combination of &drm_crtc_state.event == NULL and * &drm_crtc_state.no_blank == true is valid and usually used when the * writeback connector attached to the CRTC has a new job queued. In * this case the driver will send the VBLANK event on its own when the * writeback job is complete. */ bool no_vblank; /** * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to * this CRTC. */ u32 plane_mask; /** * @connector_mask: Bitmask of drm_connector_mask(connector) of * connectors attached to this CRTC. */ u32 connector_mask; /** * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders * attached to this CRTC. */ u32 encoder_mask; /** * @adjusted_mode: * * Internal display timings which can be used by the driver to handle * differences between the mode requested by userspace in @mode and what * is actually programmed into the hardware. * * For drivers using &drm_bridge, this stores hardware display timings * used between the CRTC and the first bridge. For other drivers, the * meaning of the adjusted_mode field is purely driver implementation * defined information, and will usually be used to store the hardware * display timings used between the CRTC and encoder blocks. */ struct drm_display_mode adjusted_mode; /** * @mode: * * Display timings requested by userspace. The driver should try to * match the refresh rate as close as possible (but note that it's * undefined what exactly is close enough, e.g. some of the HDMI modes * only differ in less than 1% of the refresh rate). The active width * and height as observed by userspace for positioning planes must match * exactly. 
* * For external connectors where the sink isn't fixed (like with a * built-in panel), this mode here should match the physical mode on the * wire to the last details (i.e. including sync polarities and * everything). */ struct drm_display_mode mode; /** * @mode_blob: &drm_property_blob for @mode, for exposing the mode to * atomic userspace. */ struct drm_property_blob *mode_blob; /** * @degamma_lut: * * Lookup table for converting framebuffer pixel data before apply the * color conversion matrix @ctm. See drm_crtc_enable_color_mgmt(). The * blob (if not NULL) is an array of &struct drm_color_lut. */ struct drm_property_blob *degamma_lut; /** * @ctm: * * Color transformation matrix. See drm_crtc_enable_color_mgmt(). The * blob (if not NULL) is a &struct drm_color_ctm. */ struct drm_property_blob *ctm; /** * @gamma_lut: * * Lookup table for converting pixel data after the color conversion * matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not * NULL) is an array of &struct drm_color_lut. * * Note that for mostly historical reasons stemming from Xorg heritage, * this is also used to store the color map (also sometimes color lut, * CLUT or color palette) for indexed formats like DRM_FORMAT_C8. */ struct drm_property_blob *gamma_lut; /** * @target_vblank: * * Target vertical blank period when a page flip * should take effect. */ u32 target_vblank; /** * @async_flip: * * This is set when DRM_MODE_PAGE_FLIP_ASYNC is set in the legacy * PAGE_FLIP IOCTL. It's not wired up for the atomic IOCTL itself yet. */ bool async_flip; /** * @vrr_enabled: * * Indicates if variable refresh rate should be enabled for the CRTC. * Support for the requested vrr state will depend on driver and * hardware capabiltiy - lacking support is not treated as failure. */ bool vrr_enabled; /** * @self_refresh_active: * * Used by the self refresh helpers to denote when a self refresh * transition is occurring. This will be set on enable/disable callbacks * when self refresh is being enabled or disabled. In some cases, it may * not be desirable to fully shut off the crtc during self refresh. * CRTC's can inspect this flag and determine the best course of action. */ bool self_refresh_active; /** * @scaling_filter: * * Scaling filter to be applied */ enum drm_scaling_filter scaling_filter; /** * @sharpness_strength: * * Used by the user to set the sharpness intensity. * The value ranges from 0-255. * Default value is 0 which disable the sharpness feature. * Any value greater than 0 enables sharpening with the * specified strength. */ u8 sharpness_strength; /** * @event: * * Optional pointer to a DRM event to signal upon completion of the * state update. The driver must send out the event when the atomic * commit operation completes. There are two cases: * * - The event is for a CRTC which is being disabled through this * atomic commit. In that case the event can be send out any time * after the hardware has stopped scanning out the current * framebuffers. It should contain the timestamp and counter for the * last vblank before the display pipeline was shut off. The simplest * way to achieve that is calling drm_crtc_send_vblank_event() * somewhen after drm_crtc_vblank_off() has been called. * * - For a CRTC which is enabled at the end of the commit (even when it * undergoes an full modeset) the vblank timestamp and counter must * be for the vblank right before the first frame that scans out the * new set of buffers. Again the event can only be sent out after the * hardware has stopped scanning out the old buffers. 
* * - Events for disabled CRTCs are not allowed, and drivers can ignore * that case. * * For very simple hardware without VBLANK interrupt, enabling * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers * send a fake VBLANK event at the end of the display update after all * hardware changes have been applied. See * drm_atomic_helper_fake_vblank(). * * For more complex hardware this * can be handled by the drm_crtc_send_vblank_event() function, * which the driver should call on the provided event upon completion of * the atomic commit. Note that if the driver supports vblank signalling * and timestamping the vblank counters and timestamps must agree with * the ones returned from page flip events. With the current vblank * helper infrastructure this can be achieved by holding a vblank * reference while the page flip is pending, acquired through * drm_crtc_vblank_get() and released with drm_crtc_vblank_put(). * Drivers are free to implement their own vblank counter and timestamp * tracking though, e.g. if they have accurate timestamp registers in * hardware. * * For hardware which supports some means to synchronize vblank * interrupt delivery with committing display state there's also * drm_crtc_arm_vblank_event(). See the documentation of that function * for a detailed discussion of the constraints it needs to be used * safely. * * If the device can't notify of flip completion in a race-free way * at all, then the event should be armed just after the page flip is * committed. In the worst case the driver will send the event to * userspace one frame too late. This doesn't allow for a real atomic * update, but it should avoid tearing. */ struct drm_pending_vblank_event *event; /** * @commit: * * This tracks how the commit for this update proceeds through the * various phases. This is never cleared, except when we destroy the * state, so that subsequent commits can synchronize with previous ones. */ struct drm_crtc_commit *commit; /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; }; /** * struct drm_crtc_funcs - control CRTCs for a given device * * The drm_crtc_funcs structure is the central CRTC management structure * in the DRM. Each CRTC controls one or more connectors (note that the name * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc. * connectors, not just CRTs). * * Each driver is responsible for filling out this structure at startup time, * in addition to providing other modesetting features, like i2c and DDC * bus accessors. */ struct drm_crtc_funcs { /** * @reset: * * Reset CRTC hardware and software state to off. This function isn't * called by the core directly, only through drm_mode_config_reset(). * It's not a helper hook only for historical reasons. * * Atomic drivers can use drm_atomic_helper_crtc_reset() to reset * atomic state using this hook. */ void (*reset)(struct drm_crtc *crtc); /** * @cursor_set: * * Update the cursor image. The cursor position is relative to the CRTC * and can be partially or fully outside of the visible area. * * Note that contrary to all other KMS functions the legacy cursor entry * points don't take a framebuffer object, but instead take directly a * raw buffer object id from the driver's buffer manager (which is * either GEM or TTM for current drivers). * * This entry point is deprecated, drivers should instead implement * universal plane support and register a proper cursor plane using * drm_crtc_init_with_planes(). 
* * This callback is optional * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height); /** * @cursor_set2: * * Update the cursor image, including hotspot information. The hotspot * must not affect the cursor position in CRTC coordinates, but is only * meant as a hint for virtualized display hardware to coordinate the * guests and hosts cursor position. The cursor hotspot is relative to * the cursor image. Otherwise this works exactly like @cursor_set. * * This entry point is deprecated, drivers should instead implement * universal plane support and register a proper cursor plane using * drm_crtc_init_with_planes(). * * This callback is optional. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y); /** * @cursor_move: * * Update the cursor position. The cursor does not need to be visible * when this hook is called. * * This entry point is deprecated, drivers should instead implement * universal plane support and register a proper cursor plane using * drm_crtc_init_with_planes(). * * This callback is optional. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*cursor_move)(struct drm_crtc *crtc, int x, int y); /** * @gamma_set: * * Set gamma on the CRTC. * * This callback is optional. * * Atomic drivers who want to support gamma tables should implement the * atomic color management support, enabled by calling * drm_crtc_enable_color_mgmt(), which then supports the legacy gamma * interface through the drm_atomic_helper_legacy_gamma_set() * compatibility implementation. */ int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size, struct drm_modeset_acquire_ctx *ctx); /** * @destroy: * * Clean up CRTC resources. This is only called at driver unload time * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged * in DRM. */ void (*destroy)(struct drm_crtc *crtc); /** * @set_config: * * This is the main legacy entry point to change the modeset state on a * CRTC. All the details of the desired configuration are passed in a * &struct drm_mode_set - see there for details. * * Drivers implementing atomic modeset should use * drm_atomic_helper_set_config() to implement this hook. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*set_config)(struct drm_mode_set *set, struct drm_modeset_acquire_ctx *ctx); /** * @page_flip: * * Legacy entry point to schedule a flip to the given framebuffer. * * Page flipping is a synchronization mechanism that replaces the frame * buffer being scanned out by the CRTC with a new frame buffer during * vertical blanking, avoiding tearing (except when requested otherwise * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application * requests a page flip the DRM core verifies that the new frame buffer * is large enough to be scanned out by the CRTC in the currently * configured mode and then calls this hook with a pointer to the new * frame buffer. * * The driver must wait for any pending rendering to the new framebuffer * to complete before executing the flip. It should also wait for any * pending rendering from other drivers if the underlying buffer is a * shared dma-buf. * * An application can request to be notified when the page flip has * completed. 
The drm core will supply a &struct drm_event in the event * parameter in this case. This can be handled by the * drm_crtc_send_vblank_event() function, which the driver should call on * the provided event upon completion of the flip. Note that if * the driver supports vblank signalling and timestamping the vblank * counters and timestamps must agree with the ones returned from page * flip events. With the current vblank helper infrastructure this can * be achieved by holding a vblank reference while the page flip is * pending, acquired through drm_crtc_vblank_get() and released with * drm_crtc_vblank_put(). Drivers are free to implement their own vblank * counter and timestamp tracking though, e.g. if they have accurate * timestamp registers in hardware. * * This callback is optional. * * NOTE: * * Very early versions of the KMS ABI mandated that the driver must * block (but not reject) any rendering to the old framebuffer until the * flip operation has completed and the old framebuffer is no longer * visible. This requirement has been lifted, and userspace is instead * expected to request delivery of an event and wait with recycling old * buffers until such has been received. * * RETURNS: * * 0 on success or a negative error code on failure. Note that if a * page flip operation is already pending the callback should return * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode * or just runtime disabled through DPMS respectively the new atomic * "ACTIVE" state) should result in an -EINVAL error code. Note that * drm_atomic_helper_page_flip() checks this already for atomic drivers. */ int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags, struct drm_modeset_acquire_ctx *ctx); /** * @page_flip_target: * * Same as @page_flip but with an additional parameter specifying the * absolute target vertical blank period (as reported by * drm_crtc_vblank_count()) when the flip should take effect. * * Note that the core code calls drm_crtc_vblank_get before this entry * point, and will call drm_crtc_vblank_put if this entry point returns * any non-0 error code. It's the driver's responsibility to call * drm_crtc_vblank_put after this entry point returns 0, typically when * the flip completes. */ int (*page_flip_target)(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags, uint32_t target, struct drm_modeset_acquire_ctx *ctx); /** * @set_property: * * This is the legacy entry point to update a property attached to the * CRTC. * * This callback is optional if the driver does not support any legacy * driver-private properties. For atomic drivers it is not used because * property handling is done entirely in the DRM core. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*set_property)(struct drm_crtc *crtc, struct drm_property *property, uint64_t val); /** * @atomic_duplicate_state: * * Duplicate the current atomic state for this CRTC and return it. * The core and helpers guarantee that any atomic state duplicated with * this hook and still owned by the caller (i.e. not transferred to the * driver by calling &drm_mode_config_funcs.atomic_commit) will be * cleaned up by calling the @atomic_destroy_state hook in this * structure. * * This callback is mandatory for atomic drivers. * * Atomic drivers which don't subclass &struct drm_crtc_state should use * drm_atomic_helper_crtc_duplicate_state(). 
Drivers that subclass the * state structure to extend it with driver-private state should use * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is * duplicated in a consistent fashion across drivers. * * It is an error to call this hook before &drm_crtc.state has been * initialized correctly. * * NOTE: * * If the duplicate state references refcounted resources this hook must * acquire a reference for each of them. The driver must release these * references again in @atomic_destroy_state. * * RETURNS: * * Duplicated atomic state or NULL when the allocation failed. */ struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); /** * @atomic_destroy_state: * * Destroy a state duplicated with @atomic_duplicate_state and release * or unreference all resources it references * * This callback is mandatory for atomic drivers. */ void (*atomic_destroy_state)(struct drm_crtc *crtc, struct drm_crtc_state *state); /** * @atomic_set_property: * * Decode a driver-private property value and store the decoded value * into the passed-in state structure. Since the atomic core decodes all * standardized properties (even for extensions beyond the core set of * properties which might not be implemented by all drivers) this * requires drivers to subclass the state structure. * * Such driver-private properties should really only be implemented for * truly hardware/vendor specific state. Instead it is preferred to * standardize atomic extension and decode the properties used to expose * such an extension in the core. * * Do not call this function directly, use * drm_atomic_crtc_set_property() instead. * * This callback is optional if the driver does not support any * driver-private atomic properties. * * NOTE: * * This function is called in the state assembly phase of atomic * modesets, which can be aborted for any reason (including on * userspace's request to just check whether a configuration would be * possible). Drivers MUST NOT touch any persistent state (hardware or * software) or data structures except the passed in @state parameter. * * Also since userspace controls in which order properties are set this * function must not do any input validation (since the state update is * incomplete and hence likely inconsistent). Instead any such input * validation must be done in the various atomic_check callbacks. * * RETURNS: * * 0 if the property has been found, -EINVAL if the property isn't * implemented by the driver (which should never happen, the core only * asks for properties attached to this CRTC). No other validation is * allowed by the driver. The core already checks that the property * value is within the range (integer, valid enum value, ...) the driver * set when registering the property. */ int (*atomic_set_property)(struct drm_crtc *crtc, struct drm_crtc_state *state, struct drm_property *property, uint64_t val); /** * @atomic_get_property: * * Reads out the decoded driver-private property. This is used to * implement the GETCRTC IOCTL. * * Do not call this function directly, use * drm_atomic_crtc_get_property() instead. * * This callback is optional if the driver does not support any * driver-private atomic properties. * * RETURNS: * * 0 on success, -EINVAL if the property isn't implemented by the * driver (which should never happen, the core only asks for * properties attached to this CRTC). 
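 *
 * As a rough sketch of the subclassed-state pattern this hook relies on
 * (hypothetical foo_* driver code, not part of this header):
 *
 *     struct foo_crtc_state {
 *             struct drm_crtc_state base;
 *             u32 dither;                     /* driver-private value */
 *     };
 *
 *     static int foo_atomic_get_property(struct drm_crtc *crtc,
 *                                        const struct drm_crtc_state *state,
 *                                        struct drm_property *property,
 *                                        uint64_t *val)
 *     {
 *             const struct foo_crtc_state *foo_state =
 *                     container_of(state, struct foo_crtc_state, base);
 *
 *             if (property == foo_dither_property)    /* hypothetical */
 *                     *val = foo_state->dither;
 *             else
 *                     return -EINVAL;
 *             return 0;
 *     }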
*/ int (*atomic_get_property)(struct drm_crtc *crtc, const struct drm_crtc_state *state, struct drm_property *property, uint64_t *val); /** * @late_register: * * This optional hook can be used to register additional userspace * interfaces attached to the crtc like debugfs interfaces. * It is called late in the driver load sequence from drm_dev_register(). * Everything added from this callback should be unregistered in * the early_unregister callback. * * Returns: * * 0 on success, or a negative error code on failure. */ int (*late_register)(struct drm_crtc *crtc); /** * @early_unregister: * * This optional hook should be used to unregister the additional * userspace interfaces attached to the crtc from * @late_register. It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. */ void (*early_unregister)(struct drm_crtc *crtc); /** * @set_crc_source: * * Changes the source of CRC checksums of frames at the request of * userspace, typically for testing purposes. The sources available are * specific of each driver and a %NULL value indicates that CRC * generation is to be switched off. * * When CRC generation is enabled, the driver should call * drm_crtc_add_crc_entry() at each frame, providing any information * that characterizes the frame contents in the crcN arguments, as * provided from the configured source. Drivers must accept an "auto" * source name that will select a default source for this CRTC. * * This may trigger an atomic modeset commit if necessary, to enable CRC * generation. * * Note that "auto" can depend upon the current modeset configuration, * e.g. it could pick an encoder or output specific CRC sampling point. * * This callback is optional if the driver does not support any CRC * generation functionality. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*set_crc_source)(struct drm_crtc *crtc, const char *source); /** * @verify_crc_source: * * verifies the source of CRC checksums of frames before setting the * source for CRC and during crc open. Source parameter can be NULL * while disabling crc source. * * This callback is optional if the driver does not support any CRC * generation functionality. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*verify_crc_source)(struct drm_crtc *crtc, const char *source, size_t *values_cnt); /** * @get_crc_sources: * * Driver callback for getting a list of all the available sources for * CRC generation. This callback depends upon verify_crc_source, So * verify_crc_source callback should be implemented before implementing * this. Driver can pass full list of available crc sources, this * callback does the verification on each crc-source before passing it * to userspace. * * This callback is optional if the driver does not support exporting of * possible CRC sources list. * * RETURNS: * * a constant character pointer to the list of all the available CRC * sources. On failure driver should return NULL. count should be * updated with number of sources in list. if zero we don't process any * source from the list. */ const char *const *(*get_crc_sources)(struct drm_crtc *crtc, size_t *count); /** * @atomic_print_state: * * If driver subclasses &struct drm_crtc_state, it should implement * this optional hook for printing additional driver specific state. * * Do not call this directly, use drm_atomic_crtc_print_state() * instead. 
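 *
 * A minimal sketch (hypothetical driver code, assuming a subclassed
 * foo_crtc_state with a driver-private "dither" member):
 *
 *     static void foo_atomic_print_state(struct drm_printer *p,
 *                                        const struct drm_crtc_state *state)
 *     {
 *             const struct foo_crtc_state *foo_state =
 *                     container_of(state, struct foo_crtc_state, base);
 *
 *             drm_printf(p, "\tdither=%u\n", foo_state->dither);
 *     }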
*/ void (*atomic_print_state)(struct drm_printer *p, const struct drm_crtc_state *state); /** * @get_vblank_counter: * * Driver callback for fetching a raw hardware vblank counter for the * CRTC. It's meant to be used by new drivers as the replacement of * &drm_driver.get_vblank_counter hook. * * This callback is optional. If a device doesn't have a hardware * counter, the driver can simply leave the hook as NULL. The DRM core * will account for missed vblank events while interrupts where disabled * based on system timestamps. * * Wraparound handling and loss of events due to modesetting is dealt * with in the DRM core code, as long as drivers call * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or * enabling a CRTC. * * See also &drm_device.vblank_disable_immediate and * &drm_device.max_vblank_count. * * Returns: * * Raw vblank counter value. */ u32 (*get_vblank_counter)(struct drm_crtc *crtc); /** * @enable_vblank: * * Enable vblank interrupts for the CRTC. It's meant to be used by * new drivers as the replacement of &drm_driver.enable_vblank hook. * * Returns: * * Zero on success, appropriate errno if the vblank interrupt cannot * be enabled. */ int (*enable_vblank)(struct drm_crtc *crtc); /** * @disable_vblank: * * Disable vblank interrupts for the CRTC. It's meant to be used by * new drivers as the replacement of &drm_driver.disable_vblank hook. */ void (*disable_vblank)(struct drm_crtc *crtc); /** * @get_vblank_timestamp: * * Called by drm_get_last_vbltimestamp(). Should return a precise * timestamp when the most recent vblank interval ended or will end. * * Specifically, the timestamp in @vblank_time should correspond as * closely as possible to the time when the first video scanline of * the video frame after the end of vblank will start scanning out, * the time immediately after end of the vblank interval. If the * @crtc is currently inside vblank, this will be a time in the future. * If the @crtc is currently scanning out a frame, this will be the * past start time of the current scanout. This is meant to adhere * to the OpenML OML_sync_control extension specification. * * Parameters: * * crtc: * CRTC for which timestamp should be returned. * max_error: * Maximum allowable timestamp error in nanoseconds. * Implementation should strive to provide timestamp * with an error of at most max_error nanoseconds. * Returns true upper bound on error for timestamp. * vblank_time: * Target location for returned vblank timestamp. * in_vblank_irq: * True when called from drm_crtc_handle_vblank(). Some drivers * need to apply some workarounds for gpu-specific vblank irq quirks * if flag is set. * * Returns: * * True on success, false on failure, which means the core should * fallback to a simple timestamp taken in drm_crtc_handle_vblank(). */ bool (*get_vblank_timestamp)(struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time, bool in_vblank_irq); }; /** * struct drm_crtc - central CRTC control structure * * Each CRTC may have one or more connectors associated with it. This structure * allows the CRTC to be controlled. */ struct drm_crtc { /** @dev: parent DRM device */ struct drm_device *dev; /** @port: OF node used by drm_of_find_possible_crtcs(). */ struct device_node *port; /** * @head: * * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list. * Invariant over the lifetime of @dev and therefore does not need * locking. 
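 *
 * For illustration (not part of this header), walking this list is
 * normally done with the drm_for_each_crtc() macro declared further
 * down in this header:
 *
 *     struct drm_crtc *crtc;
 *
 *     drm_for_each_crtc(crtc, dev)
 *             drm_dbg_kms(dev, "CRTC %s has index %u\n", crtc->name,
 *                         drm_crtc_index(crtc));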
*/ struct list_head head; /** @name: human readable name, can be overwritten by the driver */ char *name; /** * @mutex: * * This provides a read lock for the overall CRTC state (mode, dpms * state, ...) and a write lock for everything which can be updated * without a full modeset (fb, cursor data, CRTC properties ...). A full * modeset also needs to grab &drm_mode_config.connection_mutex. * * For atomic drivers specifically this protects @state. */ struct drm_modeset_lock mutex; /** @base: base KMS object for ID tracking etc. */ struct drm_mode_object base; /** * @primary: * Primary plane for this CRTC. Note that this is only * relevant for legacy IOCTL, it specifies the plane implicitly used by * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance * beyond that. */ struct drm_plane *primary; /** * @cursor: * Cursor plane for this CRTC. Note that this is only relevant for * legacy IOCTL, it specifies the plane implicitly used by the SETCURSOR * and SETCURSOR2 IOCTLs. It does not have any significance * beyond that. */ struct drm_plane *cursor; /** * @index: Position inside the mode_config.list, can be used as an array * index. It is invariant over the lifetime of the CRTC. */ unsigned index; /** * @cursor_x: Current x position of the cursor, used for universal * cursor planes because the SETCURSOR IOCTL only can update the * framebuffer without supplying the coordinates. Drivers should not use * this directly, atomic drivers should look at &drm_plane_state.crtc_x * of the cursor plane instead. */ int cursor_x; /** * @cursor_y: Current y position of the cursor, used for universal * cursor planes because the SETCURSOR IOCTL only can update the * framebuffer without supplying the coordinates. Drivers should not use * this directly, atomic drivers should look at &drm_plane_state.crtc_y * of the cursor plane instead. */ int cursor_y; /** * @enabled: * * Is this CRTC enabled? Should only be used by legacy drivers, atomic * drivers should instead consult &drm_crtc_state.enable and * &drm_crtc_state.active. Atomic drivers can update this by calling * drm_atomic_helper_update_legacy_modeset_state(). */ bool enabled; /** * @mode: * * Current mode timings. Should only be used by legacy drivers, atomic * drivers should instead consult &drm_crtc_state.mode. Atomic drivers * can update this by calling * drm_atomic_helper_update_legacy_modeset_state(). */ struct drm_display_mode mode; /** * @hwmode: * * Programmed mode in hw, after adjustments for encoders, crtc, panel * scaling etc. Should only be used by legacy drivers, for high * precision vblank timestamps in * drm_crtc_vblank_helper_get_vblank_timestamp(). * * Note that atomic drivers should not use this, but instead use * &drm_crtc_state.adjusted_mode. And for high-precision timestamps * drm_crtc_vblank_helper_get_vblank_timestamp() uses * &drm_vblank_crtc.hwmode, * which is filled out by calling drm_calc_timestamping_constants(). */ struct drm_display_mode hwmode; /** * @x: * x position on screen. Should only be used by legacy drivers, atomic * drivers should look at &drm_plane_state.crtc_x of the primary plane * instead. Updated by calling * drm_atomic_helper_update_legacy_modeset_state(). */ int x; /** * @y: * y position on screen. Should only be used by legacy drivers, atomic * drivers should look at &drm_plane_state.crtc_y of the primary plane * instead. Updated by calling * drm_atomic_helper_update_legacy_modeset_state().
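 *
 * For illustration (hypothetical snippet, not part of this header), an
 * atomic driver that needs the scanout position would read the primary
 * plane state rather than @x and @y:
 *
 *     const struct drm_plane_state *pstate = crtc->primary->state;
 *     int x = pstate->crtc_x, y = pstate->crtc_y;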
*/ int y; /** @funcs: CRTC control functions */ const struct drm_crtc_funcs *funcs; /** * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up * by calling drm_mode_crtc_set_gamma_size(). * * Note that atomic drivers need to instead use * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt(). */ uint32_t gamma_size; /** * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and * GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size(). * * Note that atomic drivers need to instead use * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt(). */ uint16_t *gamma_store; /** @helper_private: mid-layer private data */ const struct drm_crtc_helper_funcs *helper_private; /** @properties: property tracking for this CRTC */ struct drm_object_properties properties; /** * @scaling_filter_property: property to apply a particular filter while * scaling. */ struct drm_property *scaling_filter_property; /** * @sharpness_strength_property: property to apply * the intensity of the sharpness requested. */ struct drm_property *sharpness_strength_property; /** * @state: * * Current atomic state for this CRTC. * * This is protected by @mutex. Note that nonblocking atomic commits * access the current CRTC state without taking locks. Either by going * through the &struct drm_atomic_state pointers, see * for_each_oldnew_crtc_in_state(), for_each_old_crtc_in_state() and * for_each_new_crtc_in_state(). Or through careful ordering of atomic * commit operations as implemented in the atomic helpers, see * &struct drm_crtc_commit. */ struct drm_crtc_state *state; /** * @commit_list: * * List of &drm_crtc_commit structures tracking pending commits. * Protected by @commit_lock. This list holds its own full reference, * as does the ongoing commit. * * "Note that the commit for a state change is also tracked in * &drm_crtc_state.commit. For accessing the immediately preceding * commit in an atomic update it is recommended to just use that * pointer in the old CRTC state, since accessing that doesn't need * any locking or list-walking. @commit_list should only be used to * stall for framebuffer cleanup that's signalled through * &drm_crtc_commit.cleanup_done." */ struct list_head commit_list; /** * @commit_lock: * * Spinlock to protect @commit_list. */ spinlock_t commit_lock; /** * @debugfs_entry: * * Debugfs directory for this CRTC. */ struct dentry *debugfs_entry; /** * @crc: * * Configuration settings of CRC capture. */ struct drm_crtc_crc crc; /** * @fence_context: * * timeline context used for fence operations. */ unsigned int fence_context; /** * @fence_lock: * * spinlock to protect the fences in the fence_context. */ spinlock_t fence_lock; /** * @fence_seqno: * * Seqno variable used as monotonic counter for the fences * created on the CRTC's timeline. */ unsigned long fence_seqno; /** * @timeline_name: * * The name of the CRTC's fence timeline. */ char timeline_name[32]; /** * @self_refresh_data: Holds the state for the self refresh helpers * * Initialized via drm_self_refresh_helper_init(). 
*/ struct drm_self_refresh_data *self_refresh_data; }; /** * struct drm_mode_set - new values for a CRTC config change * @fb: framebuffer to use for new config * @crtc: CRTC whose configuration we're about to change * @mode: mode timings to use * @x: position of this CRTC relative to @fb * @y: position of this CRTC relative to @fb * @connectors: array of connectors to drive with this CRTC if possible * @num_connectors: size of @connectors array * * This represents a modeset configuration for the legacy SETCRTC ioctl and is * also used internally. Atomic drivers instead use &drm_atomic_state. */ struct drm_mode_set { struct drm_framebuffer *fb; struct drm_crtc *crtc; struct drm_display_mode *mode; uint32_t x; uint32_t y; struct drm_connector **connectors; size_t num_connectors; }; #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) __printf(6, 7) int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor, const struct drm_crtc_funcs *funcs, const char *name, ...); __printf(6, 7) int drmm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor, const struct drm_crtc_funcs *funcs, const char *name, ...); void drm_crtc_cleanup(struct drm_crtc *crtc); __printf(7, 8) void *__drmm_crtc_alloc_with_planes(struct drm_device *dev, size_t size, size_t offset, struct drm_plane *primary, struct drm_plane *cursor, const struct drm_crtc_funcs *funcs, const char *name, ...); /** * drmm_crtc_alloc_with_planes - Allocate and initialize a new CRTC object with * specified primary and cursor planes. * @dev: DRM device * @type: the type of the struct which contains struct &drm_crtc * @member: the name of the &drm_crtc within @type. * @primary: Primary plane for CRTC * @cursor: Cursor plane for CRTC * @funcs: callbacks for the new CRTC * @name: printf style format string for the CRTC name, or NULL for default name * * Allocates and initializes a new crtc object. Cleanup is automatically * handled through registering drmm_crtc_cleanup() with drmm_add_action(). * * The @drm_crtc_funcs.destroy hook must be NULL. * * Returns: * Pointer to new crtc, or ERR_PTR on failure. */ #define drmm_crtc_alloc_with_planes(dev, type, member, primary, cursor, funcs, name, ...) \ ((type *)__drmm_crtc_alloc_with_planes(dev, sizeof(type), \ offsetof(type, member), \ primary, cursor, funcs, \ name, ##__VA_ARGS__)) /** * drm_crtc_index - find the index of a registered CRTC * @crtc: CRTC to find index for * * Given a registered CRTC, return the index of that CRTC within a DRM * device's list of CRTCs. */ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc) { return crtc->index; } /** * drm_crtc_mask - find the mask of a registered CRTC * @crtc: CRTC to find mask for * * Given a registered CRTC, return the mask bit of that CRTC for the * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields. */ static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc) { return 1 << drm_crtc_index(crtc); } int drm_mode_set_config_internal(struct drm_mode_set *set); struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx); /** * drm_crtc_find - look up a CRTC object from its ID * @dev: DRM device * @file_priv: drm file to check for lease against. * @id: &drm_mode_object ID * * This can be used to look up a CRTC from its userspace ID. 
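 *
 * A minimal illustration (hypothetical ioctl-style code, args->crtc_id
 * is an assumed userspace-supplied ID):
 *
 *     struct drm_crtc *crtc;
 *
 *     crtc = drm_crtc_find(dev, file_priv, args->crtc_id);
 *     if (!crtc)
 *             return -ENOENT;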
Only used by * drivers for legacy IOCTLs and interface, nowadays extensions to the KMS * userspace interface should be done using &drm_property. */ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC); return mo ? obj_to_crtc(mo) : NULL; } /** * drm_for_each_crtc - iterate over all CRTCs * @crtc: a &struct drm_crtc as the loop cursor * @dev: the &struct drm_device * * Iterate over all CRTCs of @dev. */ #define drm_for_each_crtc(crtc, dev) \ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) /** * drm_for_each_crtc_reverse - iterate over all CRTCs in reverse order * @crtc: a &struct drm_crtc as the loop cursor * @dev: the &struct drm_device * * Iterate over all CRTCs of @dev. */ #define drm_for_each_crtc_reverse(crtc, dev) \ list_for_each_entry_reverse(crtc, &(dev)->mode_config.crtc_list, head) int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc, unsigned int supported_filters); bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state); int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc); #endif /* __DRM_CRTC_H__ */
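As a rough, hedged sketch of how the pieces declared in this header fit together, the snippet below shows a hypothetical atomic driver filling in a &struct drm_crtc_funcs with the stock atomic helpers mentioned above and allocating its CRTC with drmm_crtc_alloc_with_planes(); all foo_* identifiers are assumptions for illustration only, not taken from any real driver.

#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>

struct foo_crtc {
	struct drm_crtc base;
	/* driver-private fields would live here */
};

/* .destroy is deliberately left NULL: drmm_crtc_alloc_with_planes()
 * requires that, since cleanup is handled by the managed release action.
 */
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.reset			= drm_atomic_helper_crtc_reset,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};

static struct foo_crtc *foo_crtc_create(struct drm_device *dev,
					struct drm_plane *primary,
					struct drm_plane *cursor)
{
	struct foo_crtc *foo;

	/* Returns an ERR_PTR() on failure, so callers must use IS_ERR(). */
	foo = drmm_crtc_alloc_with_planes(dev, struct foo_crtc, base,
					  primary, cursor, &foo_crtc_funcs,
					  "crtc-%d", 0);
	return foo;
}

A driver whose hardware has a vblank interrupt would additionally wire up the enable_vblank/disable_vblank (and optionally get_vblank_counter and get_vblank_timestamp) hooks documented above.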
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet * & Swedish University of Agricultural Sciences. * * Jens Laas <jens.laas@data.slu.se> Swedish University of * Agricultural Sciences. * * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet * * This work is based on the LPC-trie which is originally described in: * * An experimental study of compression methods for dynamic tries * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. * https://www.csc.kth.se/~snilsson/software/dyntrie2/ * * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 * * Code from fib_hash has been reused which includes the following header: * * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IPv4 FIB: lookup engine and maintenance routines. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Substantial contributions to this work comes from: * * David S. Miller, <davem@davemloft.net> * Stephen Hemminger <shemminger@osdl.org> * Paul E.
McKenney <paulmck@us.ibm.com> * Patrick McHardy <kaber@trash.net> */ #include <linux/cache.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/rcupdate_wait.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/vmalloc.h> #include <linux/notifier.h> #include <net/net_namespace.h> #include <net/inet_dscp.h> #include <net/ip.h> #include <net/protocol.h> #include <net/route.h> #include <net/tcp.h> #include <net/sock.h> #include <net/ip_fib.h> #include <net/fib_notifier.h> #include <trace/events/fib.h> #include "fib_lookup.h" static int call_fib_entry_notifier(struct notifier_block *nb, enum fib_event_type event_type, u32 dst, int dst_len, struct fib_alias *fa, struct netlink_ext_ack *extack) { struct fib_entry_notifier_info info = { .info.extack = extack, .dst = dst, .dst_len = dst_len, .fi = fa->fa_info, .dscp = fa->fa_dscp, .type = fa->fa_type, .tb_id = fa->tb_id, }; return call_fib4_notifier(nb, event_type, &info.info); } static int call_fib_entry_notifiers(struct net *net, enum fib_event_type event_type, u32 dst, int dst_len, struct fib_alias *fa, struct netlink_ext_ack *extack) { struct fib_entry_notifier_info info = { .info.extack = extack, .dst = dst, .dst_len = dst_len, .fi = fa->fa_info, .dscp = fa->fa_dscp, .type = fa->fa_type, .tb_id = fa->tb_id, }; return call_fib4_notifiers(net, event_type, &info.info); } #define MAX_STAT_DEPTH 32 #define KEYLENGTH (8*sizeof(t_key)) #define KEY_MAX ((t_key)~0) typedef unsigned int t_key; #define IS_TRIE(n) ((n)->pos >= KEYLENGTH) #define IS_TNODE(n) ((n)->bits) #define IS_LEAF(n) (!(n)->bits) struct key_vector { t_key key; unsigned char pos; /* 2log(KEYLENGTH) bits needed */ unsigned char bits; /* 2log(KEYLENGTH) bits needed */ unsigned char slen; union { /* This list pointer if valid if (pos | bits) == 0 (LEAF) */ struct hlist_head leaf; /* This array is valid if (pos | bits) > 0 (TNODE) */ DECLARE_FLEX_ARRAY(struct key_vector __rcu *, tnode); }; }; struct tnode { struct rcu_head rcu; t_key empty_children; /* KEYLENGTH bits needed */ t_key full_children; /* KEYLENGTH bits needed */ struct key_vector __rcu *parent; struct key_vector kv[1]; #define tn_bits kv[0].bits }; #define TNODE_SIZE(n) offsetof(struct tnode, kv[0].tnode[n]) #define LEAF_SIZE TNODE_SIZE(1) #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats { unsigned int gets; unsigned int backtrack; unsigned int semantic_match_passed; unsigned int semantic_match_miss; unsigned int null_node_hit; unsigned int resize_node_skipped; }; #endif struct trie_stat { unsigned int totdepth; unsigned int maxdepth; unsigned int tnodes; unsigned int leaves; unsigned int nullpointers; unsigned int prefixes; unsigned int nodesizes[MAX_STAT_DEPTH]; }; struct trie { struct key_vector kv[1]; #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats __percpu *stats; #endif }; static struct key_vector *resize(struct trie *t, struct key_vector *tn); static unsigned int tnode_free_size; /* * synchronize_rcu after call_rcu for outstanding dirty memory; it should be * especially useful before resizing the root node with 
PREEMPT_NONE configs; * the value was obtained experimentally, aiming to avoid visible slowdown. */ unsigned int sysctl_fib_sync_mem = 512 * 1024; unsigned int sysctl_fib_sync_mem_min = 64 * 1024; unsigned int sysctl_fib_sync_mem_max = 64 * 1024 * 1024; static struct kmem_cache *fn_alias_kmem __ro_after_init; static struct kmem_cache *trie_leaf_kmem __ro_after_init; static inline struct tnode *tn_info(struct key_vector *kv) { return container_of(kv, struct tnode, kv[0]); } /* caller must hold RTNL */ #define node_parent(tn) rtnl_dereference(tn_info(tn)->parent) #define get_child(tn, i) rtnl_dereference((tn)->tnode[i]) /* caller must hold RCU read lock or RTNL */ #define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent) #define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i]) /* wrapper for rcu_assign_pointer */ static inline void node_set_parent(struct key_vector *n, struct key_vector *tp) { if (n) rcu_assign_pointer(tn_info(n)->parent, tp); } #define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p) /* This provides us with the number of children in this node, in the case of a * leaf this will return 0 meaning none of the children are accessible. */ static inline unsigned long child_length(const struct key_vector *tn) { return (1ul << tn->bits) & ~(1ul); } #define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos) static inline unsigned long get_index(t_key key, struct key_vector *kv) { unsigned long index = key ^ kv->key; if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos)) return 0; return index >> kv->pos; } /* To understand this stuff, an understanding of keys and all their bits is * necessary. Every node in the trie has a key associated with it, but not * all of the bits in that key are significant. * * Consider a node 'n' and its parent 'tp'. * * If n is a leaf, every bit in its key is significant. Its presence is * necessitated by path compression, since during a tree traversal (when * searching for a leaf - unless we are doing an insertion) we will completely * ignore all skipped bits we encounter. Thus we need to verify, at the end of * a potentially successful search, that we have indeed been walking the * correct key path. * * Note that we can never "miss" the correct key in the tree if present by * following the wrong path. Path compression ensures that segments of the key * that are the same for all keys with a given prefix are skipped, but the * skipped part *is* identical for each node in the subtrie below the skipped * bit! trie_insert() in this implementation takes care of that. * * if n is an internal node - a 'tnode' here, the various parts of its key * have many different meanings. * * Example: * _________________________________________________________________ * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C | * ----------------------------------------------------------------- * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 * * _________________________________________________________________ * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u | * ----------------------------------------------------------------- * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 * * tp->pos = 22 * tp->bits = 3 * n->pos = 13 * n->bits = 4 * * First, let's just ignore the bits that come before the parent tp, that is * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this * point we do not use them for anything. 
* * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the * index into the parent's child array. That is, they will be used to find * 'n' among tp's children. * * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits * for the node n. * * All the bits we have seen so far are significant to the node n. The rest * of the bits are really not needed or indeed known in n->key. * * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into * n's child array, and will of course be different for each child. * * The rest of the bits, from 0 to (n->pos -1) - "u" - are completely unknown * at this point. */ static const int halve_threshold = 25; static const int inflate_threshold = 50; static const int halve_threshold_root = 15; static const int inflate_threshold_root = 30; static inline void alias_free_mem_rcu(struct fib_alias *fa) { kfree_rcu(fa, rcu); } #define TNODE_VMALLOC_MAX \ ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *)) static void __node_free_rcu(struct rcu_head *head) { struct tnode *n = container_of(head, struct tnode, rcu); if (!n->tn_bits) kmem_cache_free(trie_leaf_kmem, n); else kvfree(n); } #define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu) static struct tnode *tnode_alloc(int bits) { size_t size; /* verify bits is within bounds */ if (bits > TNODE_VMALLOC_MAX) return NULL; /* determine size and verify it is non-zero and didn't overflow */ size = TNODE_SIZE(1ul << bits); if (size <= PAGE_SIZE) return kzalloc(size, GFP_KERNEL); else return vzalloc(size); } static inline void empty_child_inc(struct key_vector *n) { tn_info(n)->empty_children++; if (!tn_info(n)->empty_children) tn_info(n)->full_children++; } static inline void empty_child_dec(struct key_vector *n) { if (!tn_info(n)->empty_children) tn_info(n)->full_children--; tn_info(n)->empty_children--; } static struct key_vector *leaf_new(t_key key, struct fib_alias *fa) { struct key_vector *l; struct tnode *kv; kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); if (!kv) return NULL; /* initialize key vector */ l = kv->kv; l->key = key; l->pos = 0; l->bits = 0; l->slen = fa->fa_slen; /* link leaf to fib alias */ INIT_HLIST_HEAD(&l->leaf); hlist_add_head(&fa->fa_list, &l->leaf); return l; } static struct key_vector *tnode_new(t_key key, int pos, int bits) { unsigned int shift = pos + bits; struct key_vector *tn; struct tnode *tnode; /* verify bits and pos their msb bits clear and values are valid */ BUG_ON(!bits || (shift > KEYLENGTH)); tnode = tnode_alloc(bits); if (!tnode) return NULL; pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0), sizeof(struct key_vector *) << bits); if (bits == KEYLENGTH) tnode->full_children = 1; else tnode->empty_children = 1ul << bits; tn = tnode->kv; tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0; tn->pos = pos; tn->bits = bits; tn->slen = pos; return tn; } /* Check whether a tnode 'n' is "full", i.e. it is an internal node * and no bits are skipped. See discussion in dyntree paper p. 6 */ static inline int tnode_full(struct key_vector *tn, struct key_vector *n) { return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n); } /* Add a child at position i overwriting the old value. * Update the value of full_children and empty_children. 
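 *
 * For example (illustrative walk-through of the accounting below):
 * overwriting an empty slot (NULL) with a full tnode decrements
 * empty_children and increments full_children, while overwriting a
 * full child with a leaf leaves empty_children untouched and only
 * decrements full_children.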
*/ static void put_child(struct key_vector *tn, unsigned long i, struct key_vector *n) { struct key_vector *chi = get_child(tn, i); int isfull, wasfull; BUG_ON(i >= child_length(tn)); /* update emptyChildren, overflow into fullChildren */ if (!n && chi) empty_child_inc(tn); if (n && !chi) empty_child_dec(tn); /* update fullChildren */ wasfull = tnode_full(tn, chi); isfull = tnode_full(tn, n); if (wasfull && !isfull) tn_info(tn)->full_children--; else if (!wasfull && isfull) tn_info(tn)->full_children++; if (n && (tn->slen < n->slen)) tn->slen = n->slen; rcu_assign_pointer(tn->tnode[i], n); } static void update_children(struct key_vector *tn) { unsigned long i; /* update all of the child parent pointers */ for (i = child_length(tn); i;) { struct key_vector *inode = get_child(tn, --i); if (!inode) continue; /* Either update the children of a tnode that * already belongs to us or update the child * to point to ourselves. */ if (node_parent(inode) == tn) update_children(inode); else node_set_parent(inode, tn); } } static inline void put_child_root(struct key_vector *tp, t_key key, struct key_vector *n) { if (IS_TRIE(tp)) rcu_assign_pointer(tp->tnode[0], n); else put_child(tp, get_index(key, tp), n); } static inline void tnode_free_init(struct key_vector *tn) { tn_info(tn)->rcu.next = NULL; } static inline void tnode_free_append(struct key_vector *tn, struct key_vector *n) { tn_info(n)->rcu.next = tn_info(tn)->rcu.next; tn_info(tn)->rcu.next = &tn_info(n)->rcu; } static void tnode_free(struct key_vector *tn) { struct callback_head *head = &tn_info(tn)->rcu; while (head) { head = head->next; tnode_free_size += TNODE_SIZE(1ul << tn->bits); node_free(tn); tn = container_of(head, struct tnode, rcu)->kv; } if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) { tnode_free_size = 0; synchronize_net(); } } static struct key_vector *replace(struct trie *t, struct key_vector *oldtnode, struct key_vector *tn) { struct key_vector *tp = node_parent(oldtnode); unsigned long i; /* setup the parent pointer out of and back into this node */ NODE_INIT_PARENT(tn, tp); put_child_root(tp, tn->key, tn); /* update all of the child parent pointers */ update_children(tn); /* all pointers should be clean so we are done */ tnode_free(oldtnode); /* resize children now that oldtnode is freed */ for (i = child_length(tn); i;) { struct key_vector *inode = get_child(tn, --i); /* resize child node */ if (tnode_full(tn, inode)) tn = resize(t, inode); } return tp; } static struct key_vector *inflate(struct trie *t, struct key_vector *oldtnode) { struct key_vector *tn; unsigned long i; t_key m; pr_debug("In inflate\n"); tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1); if (!tn) goto notnode; /* prepare oldtnode to be freed */ tnode_free_init(oldtnode); /* Assemble all of the pointers in our cluster, in this case that * represents all of the pointers out of our allocated nodes that * point to existing tnodes and the links between our allocated * nodes. 
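 *
 * As a concrete illustration: inflating a tnode with pos = 10 and
 * bits = 2 (4 child slots) yields a replacement tnode with pos = 9 and
 * bits = 3 (8 child slots), i.e. one more index bit is consumed from
 * the key.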
*/ for (i = child_length(oldtnode), m = 1u << tn->pos; i;) { struct key_vector *inode = get_child(oldtnode, --i); struct key_vector *node0, *node1; unsigned long j, k; /* An empty child */ if (!inode) continue; /* A leaf or an internal node with skipped bits */ if (!tnode_full(oldtnode, inode)) { put_child(tn, get_index(inode->key, tn), inode); continue; } /* drop the node in the old tnode free list */ tnode_free_append(oldtnode, inode); /* An internal node with two children */ if (inode->bits == 1) { put_child(tn, 2 * i + 1, get_child(inode, 1)); put_child(tn, 2 * i, get_child(inode, 0)); continue; } /* We will replace this node 'inode' with two new * ones, 'node0' and 'node1', each with half of the * original children. The two new nodes will have * a position one bit further down the key and this * means that the "significant" part of their keys * (see the discussion near the top of this file) * will differ by one bit, which will be "0" in * node0's key and "1" in node1's key. Since we are * moving the key position by one step, the bit that * we are moving away from - the bit at position * (tn->pos) - is the one that will differ between * node0 and node1. So... we synthesize that bit in the * two new keys. */ node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1); if (!node1) goto nomem; node0 = tnode_new(inode->key, inode->pos, inode->bits - 1); tnode_free_append(tn, node1); if (!node0) goto nomem; tnode_free_append(tn, node0); /* populate child pointers in new nodes */ for (k = child_length(inode), j = k / 2; j;) { put_child(node1, --j, get_child(inode, --k)); put_child(node0, j, get_child(inode, j)); put_child(node1, --j, get_child(inode, --k)); put_child(node0, j, get_child(inode, j)); } /* link new nodes to parent */ NODE_INIT_PARENT(node1, tn); NODE_INIT_PARENT(node0, tn); /* link parent to nodes */ put_child(tn, 2 * i + 1, node1); put_child(tn, 2 * i, node0); } /* setup the parent pointers into and out of this node */ return replace(t, oldtnode, tn); nomem: /* all pointers should be clean so we are done */ tnode_free(tn); notnode: return NULL; } static struct key_vector *halve(struct trie *t, struct key_vector *oldtnode) { struct key_vector *tn; unsigned long i; pr_debug("In halve\n"); tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1); if (!tn) goto notnode; /* prepare oldtnode to be freed */ tnode_free_init(oldtnode); /* Assemble all of the pointers in our cluster, in this case that * represents all of the pointers out of our allocated nodes that * point to existing tnodes and the links between our allocated * nodes. */ for (i = child_length(oldtnode); i;) { struct key_vector *node1 = get_child(oldtnode, --i); struct key_vector *node0 = get_child(oldtnode, --i); struct key_vector *inode; /* At least one of the children is empty */ if (!node1 || !node0) { put_child(tn, i / 2, node1 ? 
: node0); continue; } /* Two nonempty children */ inode = tnode_new(node0->key, oldtnode->pos, 1); if (!inode) goto nomem; tnode_free_append(tn, inode); /* initialize pointers out of node */ put_child(inode, 1, node1); put_child(inode, 0, node0); NODE_INIT_PARENT(inode, tn); /* link parent to node */ put_child(tn, i / 2, inode); } /* setup the parent pointers into and out of this node */ return replace(t, oldtnode, tn); nomem: /* all pointers should be clean so we are done */ tnode_free(tn); notnode: return NULL; } static struct key_vector *collapse(struct trie *t, struct key_vector *oldtnode) { struct key_vector *n, *tp; unsigned long i; /* scan the tnode looking for that one child that might still exist */ for (n = NULL, i = child_length(oldtnode); !n && i;) n = get_child(oldtnode, --i); /* compress one level */ tp = node_parent(oldtnode); put_child_root(tp, oldtnode->key, n); node_set_parent(n, tp); /* drop dead node */ node_free(oldtnode); return tp; } static unsigned char update_suffix(struct key_vector *tn) { unsigned char slen = tn->pos; unsigned long stride, i; unsigned char slen_max; /* only vector 0 can have a suffix length greater than or equal to * tn->pos + tn->bits, the second highest node will have a suffix * length at most of tn->pos + tn->bits - 1 */ slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen); /* search though the list of children looking for nodes that might * have a suffix greater than the one we currently have. This is * why we start with a stride of 2 since a stride of 1 would * represent the nodes with suffix length equal to tn->pos */ for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) { struct key_vector *n = get_child(tn, i); if (!n || (n->slen <= slen)) continue; /* update stride and slen based on new value */ stride <<= (n->slen - slen); slen = n->slen; i &= ~(stride - 1); /* stop searching if we have hit the maximum possible value */ if (slen >= slen_max) break; } tn->slen = slen; return slen; } /* From "Implementing a dynamic compressed trie" by Stefan Nilsson of * the Helsinki University of Technology and Matti Tikkanen of Nokia * Telecommunications, page 6: * "A node is doubled if the ratio of non-empty children to all * children in the *doubled* node is at least 'high'." * * 'high' in this instance is the variable 'inflate_threshold'. It * is expressed as a percentage, so we multiply it with * child_length() and instead of multiplying by 2 (since the * child array will be doubled by inflate()) and multiplying * the left-hand side by 100 (to handle the percentage thing) we * multiply the left-hand side by 50. * * The left-hand side may look a bit weird: child_length(tn) * - tn->empty_children is of course the number of non-null children * in the current node. tn->full_children is the number of "full" * children, that is non-null tnodes with a skip value of 0. * All of those will be doubled in the resulting inflated tnode, so * we just count them one extra time here. * * A clearer way to write this would be: * * to_be_doubled = tn->full_children; * not_to_be_doubled = child_length(tn) - tn->empty_children - * tn->full_children; * * new_child_length = child_length(tn) * 2; * * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) / * new_child_length; * if (new_fill_factor >= inflate_threshold) * * ...and so on, tho it would mess up the while () loop. 
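 *
 * (Worked example with made-up numbers, assuming an inflate_threshold
 *  of 50: for child_length(tn) = 16, empty_children = 4 and
 *  full_children = 6 we get not_to_be_doubled = 16 - 4 - 6 = 6,
 *  to_be_doubled = 6 and new_child_length = 32, so
 *  new_fill_factor = 100 * (6 + 2 * 6) / 32 = 1800 / 32, roughly 56,
 *  which is >= 50 and the node would be inflated.)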
* * anyway, * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >= * inflate_threshold * * avoid a division: * 100 * (not_to_be_doubled + 2*to_be_doubled) >= * inflate_threshold * new_child_length * * expand not_to_be_doubled and to_be_doubled, and shorten: * 100 * (child_length(tn) - tn->empty_children + * tn->full_children) >= inflate_threshold * new_child_length * * expand new_child_length: * 100 * (child_length(tn) - tn->empty_children + * tn->full_children) >= * inflate_threshold * child_length(tn) * 2 * * shorten again: * 50 * (tn->full_children + child_length(tn) - * tn->empty_children) >= inflate_threshold * * child_length(tn) * */ static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn) { unsigned long used = child_length(tn); unsigned long threshold = used; /* Keep root node larger */ threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold; used -= tn_info(tn)->empty_children; used += tn_info(tn)->full_children; /* if bits == KEYLENGTH then pos = 0, and will fail below */ return (used > 1) && tn->pos && ((50 * used) >= threshold); } static inline bool should_halve(struct key_vector *tp, struct key_vector *tn) { unsigned long used = child_length(tn); unsigned long threshold = used; /* Keep root node larger */ threshold *= IS_TRIE(tp) ? halve_threshold_root : halve_threshold; used -= tn_info(tn)->empty_children; /* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */ return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold); } static inline bool should_collapse(struct key_vector *tn) { unsigned long used = child_length(tn); used -= tn_info(tn)->empty_children; /* account for bits == KEYLENGTH case */ if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children) used -= KEY_MAX; /* One child or none, time to drop us from the trie */ return used < 2; } #define MAX_WORK 10 static struct key_vector *resize(struct trie *t, struct key_vector *tn) { #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats __percpu *stats = t->stats; #endif struct key_vector *tp = node_parent(tn); unsigned long cindex = get_index(tn->key, tp); int max_work = MAX_WORK; pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n", tn, inflate_threshold, halve_threshold); /* track the tnode via the pointer from the parent instead of * doing it ourselves. This way we can let RCU fully do its * thing without us interfering */ BUG_ON(tn != get_child(tp, cindex)); /* Double as long as the resulting node has a number of * nonempty nodes that are above the threshold. */ while (should_inflate(tp, tn) && max_work) { tp = inflate(t, tn); if (!tp) { #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->resize_node_skipped); #endif break; } max_work--; tn = get_child(tp, cindex); } /* update parent in case inflate failed */ tp = node_parent(tn); /* Return if at least one inflate is run */ if (max_work != MAX_WORK) return tp; /* Halve as long as the number of empty children in this * node is above threshold. 
*/ while (should_halve(tp, tn) && max_work) { tp = halve(t, tn); if (!tp) { #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->resize_node_skipped); #endif break; } max_work--; tn = get_child(tp, cindex); } /* Only one child remains */ if (should_collapse(tn)) return collapse(t, tn); /* update parent in case halve failed */ return node_parent(tn); } static void node_pull_suffix(struct key_vector *tn, unsigned char slen) { unsigned char node_slen = tn->slen; while ((node_slen > tn->pos) && (node_slen > slen)) { slen = update_suffix(tn); if (node_slen == slen) break; tn = node_parent(tn); node_slen = tn->slen; } } static void node_push_suffix(struct key_vector *tn, unsigned char slen) { while (tn->slen < slen) { tn->slen = slen; tn = node_parent(tn); } } /* rcu_read_lock needs to be hold by caller from readside */ static struct key_vector *fib_find_node(struct trie *t, struct key_vector **tp, u32 key) { struct key_vector *pn, *n = t->kv; unsigned long index = 0; do { pn = n; n = get_child_rcu(n, index); if (!n) break; index = get_cindex(key, n); /* This bit of code is a bit tricky but it combines multiple * checks into a single check. The prefix consists of the * prefix plus zeros for the bits in the cindex. The index * is the difference between the key and this value. From * this we can actually derive several pieces of data. * if (index >= (1ul << bits)) * we have a mismatch in skip bits and failed * else * we know the value is cindex * * This check is safe even if bits == KEYLENGTH due to the * fact that we can only allocate a node with 32 bits if a * long is greater than 32 bits. */ if (index >= (1ul << n->bits)) { n = NULL; break; } /* keep searching until we find a perfect match leaf or NULL */ } while (IS_TNODE(n)); *tp = pn; return n; } /* Return the first fib alias matching DSCP with * priority less than or equal to PRIO. * If 'find_first' is set, return the first matching * fib alias, regardless of DSCP and priority. */ static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen, dscp_t dscp, u32 prio, u32 tb_id, bool find_first) { struct fib_alias *fa; if (!fah) return NULL; hlist_for_each_entry(fa, fah, fa_list) { /* Avoid Sparse warning when using dscp_t in inequalities */ u8 __fa_dscp = inet_dscp_to_dsfield(fa->fa_dscp); u8 __dscp = inet_dscp_to_dsfield(dscp); if (fa->fa_slen < slen) continue; if (fa->fa_slen != slen) break; if (fa->tb_id > tb_id) continue; if (fa->tb_id != tb_id) break; if (find_first) return fa; if (__fa_dscp > __dscp) continue; if (fa->fa_info->fib_priority >= prio || __fa_dscp < __dscp) return fa; } return NULL; } static struct fib_alias * fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri) { u8 slen = KEYLENGTH - fri->dst_len; struct key_vector *l, *tp; struct fib_table *tb; struct fib_alias *fa; struct trie *t; tb = fib_get_table(net, fri->tb_id); if (!tb) return NULL; t = (struct trie *)tb->tb_data; l = fib_find_node(t, &tp, be32_to_cpu(fri->dst)); if (!l) return NULL; hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { if (fa->fa_slen == slen && fa->tb_id == fri->tb_id && fa->fa_dscp == fri->dscp && fa->fa_info == fri->fi && fa->fa_type == fri->type) return fa; } return NULL; } void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri) { u8 fib_notify_on_flag_change; struct fib_alias *fa_match; struct sk_buff *skb; int err; rcu_read_lock(); fa_match = fib_find_matching_alias(net, fri); if (!fa_match) goto out; /* These are paired with the WRITE_ONCE() happening in this function. 
* The reason is that we are only protected by RCU at this point. */ if (READ_ONCE(fa_match->offload) == fri->offload && READ_ONCE(fa_match->trap) == fri->trap && READ_ONCE(fa_match->offload_failed) == fri->offload_failed) goto out; WRITE_ONCE(fa_match->offload, fri->offload); WRITE_ONCE(fa_match->trap, fri->trap); fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change); /* 2 means send notifications only if offload_failed was changed. */ if (fib_notify_on_flag_change == 2 && READ_ONCE(fa_match->offload_failed) == fri->offload_failed) goto out; WRITE_ONCE(fa_match->offload_failed, fri->offload_failed); if (!fib_notify_on_flag_change) goto out; skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC); if (!skb) { err = -ENOBUFS; goto errout; } err = fib_dump_info(skb, 0, 0, RTM_NEWROUTE, fri, 0); if (err < 0) { /* -EMSGSIZE implies BUG in fib_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_IPV4_ROUTE, NULL, GFP_ATOMIC); goto out; errout: rtnl_set_sk_err(net, RTNLGRP_IPV4_ROUTE, err); out: rcu_read_unlock(); } EXPORT_SYMBOL_GPL(fib_alias_hw_flags_set); static void trie_rebalance(struct trie *t, struct key_vector *tn) { while (!IS_TRIE(tn)) tn = resize(t, tn); } static int fib_insert_node(struct trie *t, struct key_vector *tp, struct fib_alias *new, t_key key) { struct key_vector *n, *l; l = leaf_new(key, new); if (!l) goto noleaf; /* retrieve child from parent node */ n = get_child(tp, get_index(key, tp)); /* Case 2: n is a LEAF or a TNODE and the key doesn't match. * * Add a new tnode here * first tnode need some special handling * leaves us in position for handling as case 3 */ if (n) { struct key_vector *tn; tn = tnode_new(key, __fls(key ^ n->key), 1); if (!tn) goto notnode; /* initialize routes out of node */ NODE_INIT_PARENT(tn, tp); put_child(tn, get_index(key, tn) ^ 1, n); /* start adding routes into the node */ put_child_root(tp, key, tn); node_set_parent(n, tn); /* parent now has a NULL spot where the leaf can go */ tp = tn; } /* Case 3: n is NULL, and will just insert a new leaf */ node_push_suffix(tp, new->fa_slen); NODE_INIT_PARENT(l, tp); put_child_root(tp, key, l); trie_rebalance(t, tp); return 0; notnode: node_free(l); noleaf: return -ENOMEM; } static int fib_insert_alias(struct trie *t, struct key_vector *tp, struct key_vector *l, struct fib_alias *new, struct fib_alias *fa, t_key key) { if (!l) return fib_insert_node(t, tp, new, key); if (fa) { hlist_add_before_rcu(&new->fa_list, &fa->fa_list); } else { struct fib_alias *last; hlist_for_each_entry(last, &l->leaf, fa_list) { if (new->fa_slen < last->fa_slen) break; if ((new->fa_slen == last->fa_slen) && (new->tb_id > last->tb_id)) break; fa = last; } if (fa) hlist_add_behind_rcu(&new->fa_list, &fa->fa_list); else hlist_add_head_rcu(&new->fa_list, &l->leaf); } /* if we added to the tail node then we need to update slen */ if (l->slen < new->fa_slen) { l->slen = new->fa_slen; node_push_suffix(tp, new->fa_slen); } return 0; } static void fib_remove_alias(struct trie *t, struct key_vector *tp, struct key_vector *l, struct fib_alias *old); /* Caller must hold RTNL. 
*/ int fib_table_insert(struct net *net, struct fib_table *tb, struct fib_config *cfg, struct netlink_ext_ack *extack) { struct trie *t = (struct trie *)tb->tb_data; struct fib_alias *fa, *new_fa; struct key_vector *l, *tp; u16 nlflags = NLM_F_EXCL; struct fib_info *fi; u8 plen = cfg->fc_dst_len; u8 slen = KEYLENGTH - plen; dscp_t dscp; u32 key; int err; key = ntohl(cfg->fc_dst); pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen); fi = fib_create_info(cfg, extack); if (IS_ERR(fi)) { err = PTR_ERR(fi); goto err; } dscp = cfg->fc_dscp; l = fib_find_node(t, &tp, key); fa = l ? fib_find_alias(&l->leaf, slen, dscp, fi->fib_priority, tb->tb_id, false) : NULL; /* Now fa, if non-NULL, points to the first fib alias * with the same keys [prefix,dscp,priority], if such key already * exists or to the node before which we will insert new one. * * If fa is NULL, we will need to allocate a new one and * insert to the tail of the section matching the suffix length * of the new alias. */ if (fa && fa->fa_dscp == dscp && fa->fa_info->fib_priority == fi->fib_priority) { struct fib_alias *fa_first, *fa_match; err = -EEXIST; if (cfg->fc_nlflags & NLM_F_EXCL) goto out; nlflags &= ~NLM_F_EXCL; /* We have 2 goals: * 1. Find exact match for type, scope, fib_info to avoid * duplicate routes * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it */ fa_match = NULL; fa_first = fa; hlist_for_each_entry_from(fa, fa_list) { if ((fa->fa_slen != slen) || (fa->tb_id != tb->tb_id) || (fa->fa_dscp != dscp)) break; if (fa->fa_info->fib_priority != fi->fib_priority) break; if (fa->fa_type == cfg->fc_type && fa->fa_info == fi) { fa_match = fa; break; } } if (cfg->fc_nlflags & NLM_F_REPLACE) { struct fib_info *fi_drop; u8 state; nlflags |= NLM_F_REPLACE; fa = fa_first; if (fa_match) { if (fa == fa_match) err = 0; goto out; } err = -ENOBUFS; new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (!new_fa) goto out; fi_drop = fa->fa_info; new_fa->fa_dscp = fa->fa_dscp; new_fa->fa_info = fi; new_fa->fa_type = cfg->fc_type; state = fa->fa_state; new_fa->fa_state = state & ~FA_S_ACCESSED; new_fa->fa_slen = fa->fa_slen; new_fa->tb_id = tb->tb_id; new_fa->fa_default = -1; new_fa->offload = 0; new_fa->trap = 0; new_fa->offload_failed = 0; hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list); if (fib_find_alias(&l->leaf, fa->fa_slen, 0, 0, tb->tb_id, true) == new_fa) { enum fib_event_type fib_event; fib_event = FIB_EVENT_ENTRY_REPLACE; err = call_fib_entry_notifiers(net, fib_event, key, plen, new_fa, extack); if (err) { hlist_replace_rcu(&new_fa->fa_list, &fa->fa_list); goto out_free_new_fa; } } rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, nlflags); alias_free_mem_rcu(fa); fib_release_info(fi_drop); if (state & FA_S_ACCESSED) rt_cache_flush(cfg->fc_nlinfo.nl_net); goto succeeded; } /* Error if we find a perfect match which * uses the same scope, type, and nexthop * information. */ if (fa_match) goto out; if (cfg->fc_nlflags & NLM_F_APPEND) nlflags |= NLM_F_APPEND; else fa = fa_first; } err = -ENOENT; if (!(cfg->fc_nlflags & NLM_F_CREATE)) goto out; nlflags |= NLM_F_CREATE; err = -ENOBUFS; new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (!new_fa) goto out; new_fa->fa_info = fi; new_fa->fa_dscp = dscp; new_fa->fa_type = cfg->fc_type; new_fa->fa_state = 0; new_fa->fa_slen = slen; new_fa->tb_id = tb->tb_id; new_fa->fa_default = -1; new_fa->offload = 0; new_fa->trap = 0; new_fa->offload_failed = 0; /* Insert new entry to the list. 
*/ err = fib_insert_alias(t, tp, l, new_fa, fa, key); if (err) goto out_free_new_fa; /* The alias was already inserted, so the node must exist. */ l = l ? l : fib_find_node(t, &tp, key); if (WARN_ON_ONCE(!l)) { err = -ENOENT; goto out_free_new_fa; } if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) == new_fa) { enum fib_event_type fib_event; fib_event = FIB_EVENT_ENTRY_REPLACE; err = call_fib_entry_notifiers(net, fib_event, key, plen, new_fa, extack); if (err) goto out_remove_new_fa; } if (!plen) tb->tb_num_default++; rt_cache_flush(cfg->fc_nlinfo.nl_net); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id, &cfg->fc_nlinfo, nlflags); succeeded: return 0; out_remove_new_fa: fib_remove_alias(t, tp, l, new_fa); out_free_new_fa: kmem_cache_free(fn_alias_kmem, new_fa); out: fib_release_info(fi); err: return err; } static inline t_key prefix_mismatch(t_key key, struct key_vector *n) { t_key prefix = n->key; return (key ^ prefix) & (prefix | -prefix); } bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, const struct flowi4 *flp) { if (nhc->nhc_flags & RTNH_F_DEAD) return false; if (ip_ignore_linkdown(nhc->nhc_dev) && nhc->nhc_flags & RTNH_F_LINKDOWN && !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) return false; if (flp->flowi4_oif && flp->flowi4_oif != nhc->nhc_oif) return false; return true; } /* should be called with rcu_read_lock */ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct fib_result *res, int fib_flags) { struct trie *t = (struct trie *) tb->tb_data; #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats __percpu *stats = t->stats; #endif const t_key key = ntohl(flp->daddr); struct key_vector *n, *pn; struct fib_alias *fa; unsigned long index; t_key cindex; pn = t->kv; cindex = 0; n = get_child_rcu(pn, cindex); if (!n) { trace_fib_table_lookup(tb->tb_id, flp, NULL, -EAGAIN); return -EAGAIN; } #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->gets); #endif /* Step 1: Travel to the longest prefix match in the trie */ for (;;) { index = get_cindex(key, n); /* This bit of code is a bit tricky but it combines multiple * checks into a single check. The prefix consists of the * prefix plus zeros for the "bits" in the prefix. The index * is the difference between the key and this value. From * this we can actually derive several pieces of data. * if (index >= (1ul << bits)) * we have a mismatch in skip bits and failed * else * we know the value is cindex * * This check is safe even if bits == KEYLENGTH due to the * fact that we can only allocate a node with 32 bits if a * long is greater than 32 bits. */ if (index >= (1ul << n->bits)) break; /* we have found a leaf. Prefixes have already been compared */ if (IS_LEAF(n)) goto found; /* only record pn and cindex if we are going to be chopping * bits later. Otherwise we are just wasting cycles. */ if (n->slen > n->pos) { pn = n; cindex = index; } n = get_child_rcu(n, index); if (unlikely(!n)) goto backtrace; } /* Step 2: Sort out leaves and begin backtracing for longest prefix */ for (;;) { /* record the pointer where our next node pointer is stored */ struct key_vector __rcu **cptr = n->tnode; /* This test verifies that none of the bits that differ * between the key and the prefix exist in the region of * the lsb and higher in the prefix. */ if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos)) goto backtrace; /* exit out and process leaf */ if (unlikely(IS_LEAF(n))) break; /* Don't bother recording parent info. 
Since we are in * prefix match mode we will have to come back to wherever * we started this traversal anyway */ while ((n = rcu_dereference(*cptr)) == NULL) { backtrace: #ifdef CONFIG_IP_FIB_TRIE_STATS if (!n) this_cpu_inc(stats->null_node_hit); #endif /* If we are at cindex 0 there are no more bits for * us to strip at this level so we must ascend back * up one level to see if there are any more bits to * be stripped there. */ while (!cindex) { t_key pkey = pn->key; /* If we don't have a parent then there is * nothing for us to do as we do not have any * further nodes to parse. */ if (IS_TRIE(pn)) { trace_fib_table_lookup(tb->tb_id, flp, NULL, -EAGAIN); return -EAGAIN; } #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->backtrack); #endif /* Get Child's index */ pn = node_parent_rcu(pn); cindex = get_index(pkey, pn); } /* strip the least significant bit from the cindex */ cindex &= cindex - 1; /* grab pointer for next child node */ cptr = &pn->tnode[cindex]; } } found: /* this line carries forward the xor from earlier in the function */ index = key ^ n->key; /* Step 3: Process the leaf, if that fails fall back to backtracing */ hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { struct fib_info *fi = fa->fa_info; struct fib_nh_common *nhc; int nhsel, err; if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) { if (index >= (1ul << fa->fa_slen)) continue; } if (fa->fa_dscp && !fib_dscp_masked_match(fa->fa_dscp, flp)) continue; /* Paired with WRITE_ONCE() in fib_release_info() */ if (READ_ONCE(fi->fib_dead)) continue; if (fa->fa_info->fib_scope < flp->flowi4_scope) continue; fib_alias_accessed(fa); err = fib_props[fa->fa_type].error; if (unlikely(err < 0)) { out_reject: #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->semantic_match_passed); #endif trace_fib_table_lookup(tb->tb_id, flp, NULL, err); return err; } if (fi->fib_flags & RTNH_F_DEAD) continue; if (unlikely(fi->nh)) { if (nexthop_is_blackhole(fi->nh)) { err = fib_props[RTN_BLACKHOLE].error; goto out_reject; } nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp, &nhsel); if (nhc) goto set_result; goto miss; } for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) { nhc = fib_info_nhc(fi, nhsel); if (!fib_lookup_good_nhc(nhc, fib_flags, flp)) continue; set_result: if (!(fib_flags & FIB_LOOKUP_NOREF)) refcount_inc(&fi->fib_clntref); res->prefix = htonl(n->key); res->prefixlen = KEYLENGTH - fa->fa_slen; res->nh_sel = nhsel; res->nhc = nhc; res->type = fa->fa_type; res->scope = fi->fib_scope; res->dscp = fa->fa_dscp; res->fi = fi; res->table = tb; res->fa_head = &n->leaf; #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->semantic_match_passed); #endif trace_fib_table_lookup(tb->tb_id, flp, nhc, err); return err; } } miss: #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->semantic_match_miss); #endif goto backtrace; } EXPORT_SYMBOL_GPL(fib_table_lookup); static void fib_remove_alias(struct trie *t, struct key_vector *tp, struct key_vector *l, struct fib_alias *old) { /* record the location of the previous list_info entry */ struct hlist_node **pprev = old->fa_list.pprev; struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next); /* remove the fib_alias from the list */ hlist_del_rcu(&old->fa_list); /* if we emptied the list this leaf will be freed and we can sort * out parent suffix lengths as a part of trie_rebalance */ if (hlist_empty(&l->leaf)) { if (tp->slen == l->slen) node_pull_suffix(tp, tp->pos); put_child_root(tp, l->key, NULL); node_free(l); trie_rebalance(t, tp); return; } /* only access fa if it is 
pointing at the last valid hlist_node */ if (*pprev) return; /* update the trie with the latest suffix length */ l->slen = fa->fa_slen; node_pull_suffix(tp, fa->fa_slen); } static void fib_notify_alias_delete(struct net *net, u32 key, struct hlist_head *fah, struct fib_alias *fa_to_delete, struct netlink_ext_ack *extack) { struct fib_alias *fa_next, *fa_to_notify; u32 tb_id = fa_to_delete->tb_id; u8 slen = fa_to_delete->fa_slen; enum fib_event_type fib_event; /* Do not notify if we do not care about the route. */ if (fib_find_alias(fah, slen, 0, 0, tb_id, true) != fa_to_delete) return; /* Determine if the route should be replaced by the next route in the * list. */ fa_next = hlist_entry_safe(fa_to_delete->fa_list.next, struct fib_alias, fa_list); if (fa_next && fa_next->fa_slen == slen && fa_next->tb_id == tb_id) { fib_event = FIB_EVENT_ENTRY_REPLACE; fa_to_notify = fa_next; } else { fib_event = FIB_EVENT_ENTRY_DEL; fa_to_notify = fa_to_delete; } call_fib_entry_notifiers(net, fib_event, key, KEYLENGTH - slen, fa_to_notify, extack); } /* Caller must hold RTNL. */ int fib_table_delete(struct net *net, struct fib_table *tb, struct fib_config *cfg, struct netlink_ext_ack *extack) { struct trie *t = (struct trie *) tb->tb_data; struct fib_alias *fa, *fa_to_delete; struct key_vector *l, *tp; u8 plen = cfg->fc_dst_len; u8 slen = KEYLENGTH - plen; dscp_t dscp; u32 key; key = ntohl(cfg->fc_dst); l = fib_find_node(t, &tp, key); if (!l) return -ESRCH; dscp = cfg->fc_dscp; fa = fib_find_alias(&l->leaf, slen, dscp, 0, tb->tb_id, false); if (!fa) return -ESRCH; pr_debug("Deleting %08x/%d dsfield=0x%02x t=%p\n", key, plen, inet_dscp_to_dsfield(dscp), t); fa_to_delete = NULL; hlist_for_each_entry_from(fa, fa_list) { struct fib_info *fi = fa->fa_info; if ((fa->fa_slen != slen) || (fa->tb_id != tb->tb_id) || (fa->fa_dscp != dscp)) break; if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) && (cfg->fc_scope == RT_SCOPE_NOWHERE || fa->fa_info->fib_scope == cfg->fc_scope) && (!cfg->fc_prefsrc || fi->fib_prefsrc == cfg->fc_prefsrc) && (!cfg->fc_protocol || fi->fib_protocol == cfg->fc_protocol) && fib_nh_match(net, cfg, fi, extack) == 0 && fib_metrics_match(cfg, fi)) { fa_to_delete = fa; break; } } if (!fa_to_delete) return -ESRCH; fib_notify_alias_delete(net, key, &l->leaf, fa_to_delete, extack); rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id, &cfg->fc_nlinfo, 0); if (!plen) tb->tb_num_default--; fib_remove_alias(t, tp, l, fa_to_delete); if (fa_to_delete->fa_state & FA_S_ACCESSED) rt_cache_flush(cfg->fc_nlinfo.nl_net); fib_release_info(fa_to_delete->fa_info); alias_free_mem_rcu(fa_to_delete); return 0; } /* Scan for the next leaf starting at the provided key value */ static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key) { struct key_vector *pn, *n = *tn; unsigned long cindex; /* this loop is meant to try and find the key in the trie */ do { /* record parent and next child index */ pn = n; cindex = (key > pn->key) ? 
get_index(key, pn) : 0; if (cindex >> pn->bits) break; /* descend into the next child */ n = get_child_rcu(pn, cindex++); if (!n) break; /* guarantee forward progress on the keys */ if (IS_LEAF(n) && (n->key >= key)) goto found; } while (IS_TNODE(n)); /* this loop will search for the next leaf with a greater key */ while (!IS_TRIE(pn)) { /* if we exhausted the parent node we will need to climb */ if (cindex >= (1ul << pn->bits)) { t_key pkey = pn->key; pn = node_parent_rcu(pn); cindex = get_index(pkey, pn) + 1; continue; } /* grab the next available node */ n = get_child_rcu(pn, cindex++); if (!n) continue; /* no need to compare keys since we bumped the index */ if (IS_LEAF(n)) goto found; /* Rescan start scanning in new node */ pn = n; cindex = 0; } *tn = pn; return NULL; /* Root of trie */ found: /* if we are at the limit for keys just return NULL for the tnode */ *tn = pn; return n; } static void fib_trie_free(struct fib_table *tb) { struct trie *t = (struct trie *)tb->tb_data; struct key_vector *pn = t->kv; unsigned long cindex = 1; struct hlist_node *tmp; struct fib_alias *fa; /* walk trie in reverse order and free everything */ for (;;) { struct key_vector *n; if (!(cindex--)) { t_key pkey = pn->key; if (IS_TRIE(pn)) break; n = pn; pn = node_parent(pn); /* drop emptied tnode */ put_child_root(pn, n->key, NULL); node_free(n); cindex = get_index(pkey, pn); continue; } /* grab the next available node */ n = get_child(pn, cindex); if (!n) continue; if (IS_TNODE(n)) { /* record pn and cindex for leaf walking */ pn = n; cindex = 1ul << n->bits; continue; } hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { hlist_del_rcu(&fa->fa_list); alias_free_mem_rcu(fa); } put_child_root(pn, n->key, NULL); node_free(n); } #ifdef CONFIG_IP_FIB_TRIE_STATS free_percpu(t->stats); #endif kfree(tb); } struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) { struct trie *ot = (struct trie *)oldtb->tb_data; struct key_vector *l, *tp = ot->kv; struct fib_table *local_tb; struct fib_alias *fa; struct trie *lt; t_key key = 0; if (oldtb->tb_data == oldtb->__data) return oldtb; local_tb = fib_trie_table(RT_TABLE_LOCAL, NULL); if (!local_tb) return NULL; lt = (struct trie *)local_tb->tb_data; while ((l = leaf_walk_rcu(&tp, key)) != NULL) { struct key_vector *local_l = NULL, *local_tp; hlist_for_each_entry(fa, &l->leaf, fa_list) { struct fib_alias *new_fa; if (local_tb->tb_id != fa->tb_id) continue; /* clone fa for new local table */ new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (!new_fa) goto out; memcpy(new_fa, fa, sizeof(*fa)); /* insert clone into table */ if (!local_l) local_l = fib_find_node(lt, &local_tp, l->key);