Total coverage: 454860 (26%) of 1806754
/* SPDX-License-Identifier: GPL-2.0 */ /* * sysfs.h - definitions for the device driver filesystem * * Copyright (c) 2001,2002 Patrick Mochel * Copyright (c) 2004 Silicon Graphics, Inc.
* Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * * Please see Documentation/filesystems/sysfs.rst for more information. */ #ifndef _SYSFS_H_ #define _SYSFS_H_ #include <linux/kernfs.h> #include <linux/compiler.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/kobject_ns.h> #include <linux/stat.h> #include <linux/atomic.h> struct kobject; struct module; struct bin_attribute; enum kobj_ns_type; struct attribute { const char *name; umode_t mode; #ifdef CONFIG_DEBUG_LOCK_ALLOC bool ignore_lockdep:1; struct lock_class_key *key; struct lock_class_key skey; #endif }; /** * sysfs_attr_init - initialize a dynamically allocated sysfs attribute * @attr: struct attribute to initialize * * Initialize a dynamically allocated struct attribute so we can * make lockdep happy. This is a new requirement for attributes * and initially this is only needed when lockdep is enabled. * Lockdep gives a nice error when your attribute is added to * sysfs if you don't have this. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC #define sysfs_attr_init(attr) \ do { \ static struct lock_class_key __key; \ \ (attr)->key = &__key; \ } while (0) #else #define sysfs_attr_init(attr) do {} while (0) #endif /** * struct attribute_group - data structure used to declare an attribute group. * @name: Optional: Attribute group name * If specified, the attribute group will be created in a * new subdirectory with this name. Additionally when a * group is named, @is_visible and @is_bin_visible may * return SYSFS_GROUP_INVISIBLE to control visibility of * the directory itself. * @is_visible: Optional: Function to return permissions associated with an * attribute of the group. Will be called repeatedly for * each non-binary attribute in the group. Only read/write * permissions as well as SYSFS_PREALLOC are accepted. Must * return 0 if an attribute is not visible. The returned * value will replace static permissions defined in struct * attribute. Use SYSFS_GROUP_VISIBLE() when assigning this * callback to specify separate _group_visible() and * _attr_visible() handlers. * @is_bin_visible: * Optional: Function to return permissions associated with a * binary attribute of the group. Will be called repeatedly * for each binary attribute in the group. Only read/write * permissions as well as SYSFS_PREALLOC (and the * visibility flags for named groups) are accepted. Must * return 0 if a binary attribute is not visible. The * returned value will replace static permissions defined * in struct bin_attribute. If @is_visible is not set, Use * SYSFS_GROUP_VISIBLE() when assigning this callback to * specify separate _group_visible() and _attr_visible() * handlers. * @bin_size: * Optional: Function to return the size of a binary attribute * of the group. Will be called repeatedly for each binary * attribute in the group. Overwrites the size field embedded * inside the attribute itself. * @attrs: Pointer to NULL terminated list of attributes. * @bin_attrs: Pointer to NULL terminated list of binary attributes. * Either attrs or bin_attrs or both must be provided. 
*/ struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, const struct bin_attribute *, int); size_t (*bin_size)(struct kobject *, const struct bin_attribute *, int); struct attribute **attrs; union { struct bin_attribute **bin_attrs; const struct bin_attribute *const *bin_attrs_new; }; }; #define SYSFS_PREALLOC 010000 #define SYSFS_GROUP_INVISIBLE 020000 /* * DEFINE_SYSFS_GROUP_VISIBLE(name): * A helper macro to pair with the assignment of ".is_visible = * SYSFS_GROUP_VISIBLE(name)", that arranges for the directory * associated with a named attribute_group to optionally be hidden. * This allows for static declaration of attribute_groups, and the * simplification of attribute visibility lifetime that implies, * without polluting sysfs with empty attribute directories. * Ex. * * static umode_t example_attr_visible(struct kobject *kobj, * struct attribute *attr, int n) * { * if (example_attr_condition) * return 0; * else if (ro_attr_condition) * return 0444; * return a->mode; * } * * static bool example_group_visible(struct kobject *kobj) * { * if (example_group_condition) * return false; * return true; * } * * DEFINE_SYSFS_GROUP_VISIBLE(example); * * static struct attribute_group example_group = { * .name = "example", * .is_visible = SYSFS_GROUP_VISIBLE(example), * .attrs = &example_attrs, * }; * * Note that it expects <name>_attr_visible and <name>_group_visible to * be defined. For cases where individual attributes do not need * separate visibility consideration, only entire group visibility at * once, see DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(). */ #define DEFINE_SYSFS_GROUP_VISIBLE(name) \ static inline umode_t sysfs_group_visible_##name( \ struct kobject *kobj, struct attribute *attr, int n) \ { \ if (n == 0 && !name##_group_visible(kobj)) \ return SYSFS_GROUP_INVISIBLE; \ return name##_attr_visible(kobj, attr, n); \ } /* * DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(name): * A helper macro to pair with SYSFS_GROUP_VISIBLE() that like * DEFINE_SYSFS_GROUP_VISIBLE() controls group visibility, but does * not require the implementation of a per-attribute visibility * callback. * Ex. * * static bool example_group_visible(struct kobject *kobj) * { * if (example_group_condition) * return false; * return true; * } * * DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(example); * * static struct attribute_group example_group = { * .name = "example", * .is_visible = SYSFS_GROUP_VISIBLE(example), * .attrs = &example_attrs, * }; */ #define DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(name) \ static inline umode_t sysfs_group_visible_##name( \ struct kobject *kobj, struct attribute *a, int n) \ { \ if (n == 0 && !name##_group_visible(kobj)) \ return SYSFS_GROUP_INVISIBLE; \ return a->mode; \ } /* * Same as DEFINE_SYSFS_GROUP_VISIBLE, but for groups with only binary * attributes. 
If an attribute_group defines both text and binary * attributes, the group visibility is determined by the function * specified to is_visible() not is_bin_visible() */ #define DEFINE_SYSFS_BIN_GROUP_VISIBLE(name) \ static inline umode_t sysfs_group_visible_##name( \ struct kobject *kobj, const struct bin_attribute *attr, int n) \ { \ if (n == 0 && !name##_group_visible(kobj)) \ return SYSFS_GROUP_INVISIBLE; \ return name##_attr_visible(kobj, attr, n); \ } #define DEFINE_SIMPLE_SYSFS_BIN_GROUP_VISIBLE(name) \ static inline umode_t sysfs_group_visible_##name( \ struct kobject *kobj, const struct bin_attribute *a, int n) \ { \ if (n == 0 && !name##_group_visible(kobj)) \ return SYSFS_GROUP_INVISIBLE; \ return a->mode; \ } #define SYSFS_GROUP_VISIBLE(fn) sysfs_group_visible_##fn /* * Use these macros to make defining attributes easier. * See include/linux/device.h for examples.. */ #define __ATTR(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ .show = _show, \ .store = _store, \ } #define __ATTR_PREALLOC(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), \ .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\ .show = _show, \ .store = _store, \ } #define __ATTR_RO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .show = _name##_show, \ } #define __ATTR_RO_MODE(_name, _mode) { \ .attr = { .name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ .show = _name##_show, \ } #define __ATTR_RW_MODE(_name, _mode) { \ .attr = { .name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ .show = _name##_show, \ .store = _name##_store, \ } #define __ATTR_WO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0200 }, \ .store = _name##_store, \ } #define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store) #define __ATTR_NULL { .attr = { .name = NULL } } #ifdef CONFIG_DEBUG_LOCK_ALLOC #define __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), .mode = _mode, \ .ignore_lockdep = true }, \ .show = _show, \ .store = _store, \ } #else #define __ATTR_IGNORE_LOCKDEP __ATTR #endif #define __ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group *_name##_groups[] = { \ &_name##_group, \ NULL, \ } #define ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group _name##_group = { \ .attrs = _name##_attrs, \ }; \ __ATTRIBUTE_GROUPS(_name) #define BIN_ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group _name##_group = { \ .bin_attrs_new = _name##_attrs, \ }; \ __ATTRIBUTE_GROUPS(_name) struct file; struct vm_area_struct; struct address_space; struct bin_attribute { struct attribute attr; size_t size; void *private; struct address_space *(*f_mapping)(void); ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, loff_t, int); int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *attr, struct vm_area_struct *vma); }; /** * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute * @attr: struct bin_attribute to initialize * * 
Initialize a dynamically allocated struct bin_attribute so we * can make lockdep happy. This is a new requirement for * attributes and initially this is only needed when lockdep is * enabled. Lockdep gives a nice error when your attribute is * added to sysfs if you don't have this. */ #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr) typedef ssize_t __sysfs_bin_rw_handler_new(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); /* macros to create static binary attributes easier */ #define __BIN_ATTR(_name, _mode, _read, _write, _size) { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ .read = _Generic(_read, \ __sysfs_bin_rw_handler_new * : NULL, \ default : _read \ ), \ .read_new = _Generic(_read, \ __sysfs_bin_rw_handler_new * : _read, \ default : NULL \ ), \ .write = _Generic(_write, \ __sysfs_bin_rw_handler_new * : NULL, \ default : _write \ ), \ .write_new = _Generic(_write, \ __sysfs_bin_rw_handler_new * : _write, \ default : NULL \ ), \ .size = _size, \ } #define __BIN_ATTR_RO(_name, _size) \ __BIN_ATTR(_name, 0444, _name##_read, NULL, _size) #define __BIN_ATTR_WO(_name, _size) \ __BIN_ATTR(_name, 0200, NULL, _name##_write, _size) #define __BIN_ATTR_RW(_name, _size) \ __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) #define __BIN_ATTR_NULL __ATTR_NULL #define BIN_ATTR(_name, _mode, _read, _write, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read, \ _write, _size) #define BIN_ATTR_RO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size) #define BIN_ATTR_WO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size) #define BIN_ATTR_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) #define __BIN_ATTR_ADMIN_RO(_name, _size) \ __BIN_ATTR(_name, 0400, _name##_read, NULL, _size) #define __BIN_ATTR_ADMIN_RW(_name, _size) \ __BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size) #define BIN_ATTR_ADMIN_RO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size) #define BIN_ATTR_ADMIN_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size) #define __BIN_ATTR_SIMPLE_RO(_name, _mode) \ __BIN_ATTR(_name, _mode, sysfs_bin_attr_simple_read, NULL, 0) #define BIN_ATTR_SIMPLE_RO(_name) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0444) #define BIN_ATTR_SIMPLE_ADMIN_RO(_name) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0400) struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); }; #ifdef CONFIG_SYSFS int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns); void sysfs_remove_dir(struct kobject *kobj); int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name, const void *new_ns); int __must_check sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj, const void *new_ns); int __must_check sysfs_create_mount_point(struct kobject *parent_kobj, const char *name); void sysfs_remove_mount_point(struct kobject *parent_kobj, const char *name); int __must_check sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); int __must_check sysfs_create_files(struct kobject *kobj, const struct attribute * const *attr); int __must_check sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, 
umode_t mode); struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, const struct attribute *attr); void sysfs_unbreak_active_protection(struct kernfs_node *kn); void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr); int __must_check sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr); void sysfs_remove_bin_file(struct kobject *kobj, const struct bin_attribute *attr); int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, const char *name); int __must_check sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, const char *name); void sysfs_remove_link(struct kobject *kobj, const char *name); int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target, const char *old_name, const char *new_name, const void *new_ns); void sysfs_delete_link(struct kobject *dir, struct kobject *targ, const char *name); int __must_check sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp); int __must_check sysfs_create_groups(struct kobject *kobj, const struct attribute_group **groups); int __must_check sysfs_update_groups(struct kobject *kobj, const struct attribute_group **groups); int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_remove_groups(struct kobject *kobj, const struct attribute_group **groups); int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group); void sysfs_remove_file_from_group(struct kobject *kobj, const struct attribute *attr, const char *group); int sysfs_merge_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_unmerge_group(struct kobject *kobj, const struct attribute_group *grp); int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name); void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name); int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name, const char *symlink_name); void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); int __must_check sysfs_init(void); static inline void sysfs_enable_ns(struct kernfs_node *kn) { return kernfs_enable_ns(kn); } int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, kgid_t kgid); int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid); int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, const char *name, kuid_t kuid, kgid_t kgid); int sysfs_groups_change_owner(struct kobject *kobj, const struct attribute_group **groups, kuid_t kuid, kgid_t kgid); int sysfs_group_change_owner(struct kobject *kobj, const struct attribute_group *groups, kuid_t kuid, kgid_t kgid); __printf(2, 3) int sysfs_emit(char *buf, const char *fmt, ...); __printf(3, 4) int sysfs_emit_at(char *buf, int at, const char *fmt, ...); ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count); #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) { return 0; } static inline void 
sysfs_remove_dir(struct kobject *kobj) { } static inline int sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name, const void *new_ns) { return 0; } static inline int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj, const void *new_ns) { return 0; } static inline int sysfs_create_mount_point(struct kobject *parent_kobj, const char *name) { return 0; } static inline void sysfs_remove_mount_point(struct kobject *parent_kobj, const char *name) { } static inline int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) { return 0; } static inline int sysfs_create_files(struct kobject *kobj, const struct attribute * const *attr) { return 0; } static inline int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode) { return 0; } static inline struct kernfs_node * sysfs_break_active_protection(struct kobject *kobj, const struct attribute *attr) { return NULL; } static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) { } static inline void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) { } static inline bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr) { return false; } static inline void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr) { } static inline int sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr) { return 0; } static inline void sysfs_remove_bin_file(struct kobject *kobj, const struct bin_attribute *attr) { } static inline int sysfs_create_link(struct kobject *kobj, struct kobject *target, const char *name) { return 0; } static inline int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, const char *name) { return 0; } static inline void sysfs_remove_link(struct kobject *kobj, const char *name) { } static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t, const char *old_name, const char *new_name, const void *ns) { return 0; } static inline void sysfs_delete_link(struct kobject *k, struct kobject *t, const char *name) { } static inline int sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline int sysfs_create_groups(struct kobject *kobj, const struct attribute_group **groups) { return 0; } static inline int sysfs_update_groups(struct kobject *kobj, const struct attribute_group **groups) { return 0; } static inline int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp) { } static inline void sysfs_remove_groups(struct kobject *kobj, const struct attribute_group **groups) { } static inline int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group) { return 0; } static inline void sysfs_remove_file_from_group(struct kobject *kobj, const struct attribute *attr, const char *group) { } static inline int sysfs_merge_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline void sysfs_unmerge_group(struct kobject *kobj, const struct attribute_group *grp) { } static inline int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name) { return 0; } static inline void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name) { } static inline int 
compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name, const char *symlink_name) { return 0; } static inline void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { } static inline int __must_check sysfs_init(void) { return 0; } static inline void sysfs_enable_ns(struct kernfs_node *kn) { } static inline int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, const char *name, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_groups_change_owner(struct kobject *kobj, const struct attribute_group **groups, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_group_change_owner(struct kobject *kobj, const struct attribute_group *groups, kuid_t kuid, kgid_t kgid) { return 0; } __printf(2, 3) static inline int sysfs_emit(char *buf, const char *fmt, ...) { return 0; } __printf(3, 4) static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...) { return 0; } static inline ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return 0; } #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, const struct attribute *attr) { return sysfs_create_file_ns(kobj, attr, NULL); } static inline void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr) { sysfs_remove_file_ns(kobj, attr, NULL); } static inline int sysfs_rename_link(struct kobject *kobj, struct kobject *target, const char *old_name, const char *new_name) { return sysfs_rename_link_ns(kobj, target, old_name, new_name, NULL); } static inline void sysfs_notify_dirent(struct kernfs_node *kn) { kernfs_notify(kn); } static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent, const char *name) { return kernfs_find_and_get(parent, name); } static inline struct kernfs_node *sysfs_get(struct kernfs_node *kn) { kernfs_get(kn); return kn; } static inline void sysfs_put(struct kernfs_node *kn) { kernfs_put(kn); } #endif /* _SYSFS_H_ */
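The macros above are easiest to see in context. The following is a minimal usage sketch, not part of sysfs.h: the attribute name, the backing variable, and the kobj_attribute wrapper from <linux/kobject.h> are illustrative assumptions, showing how __ATTR_RW() and ATTRIBUTE_GROUPS() fit together.

/*
 * Hypothetical "threshold" read-write attribute in a named group.
 * Everything here is an example; none of it comes from sysfs.h itself.
 */
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/kstrtox.h>

static int example_threshold;

static ssize_t threshold_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", example_threshold);
}

static ssize_t threshold_store(struct kobject *kobj, struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	int ret = kstrtoint(buf, 0, &example_threshold);

	return ret ? ret : count;
}

/* __ATTR_RW() wires up threshold_show/threshold_store with mode 0644. */
static struct kobj_attribute threshold_attr = __ATTR_RW(threshold);

static struct attribute *example_attrs[] = {
	&threshold_attr.attr,
	NULL,
};

/* Declares example_group and the NULL-terminated example_groups[] array. */
ATTRIBUTE_GROUPS(example);

The resulting example_groups array could then be registered against a kobject with sysfs_create_groups() and removed with sysfs_remove_groups(), both declared above.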
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar * Copyright (C) 2005-2006, Thomas Gleixner * * This file contains the IRQ-resend code * * If the interrupt is waiting to be processed, we try to re-run it. * We can't directly run it from here since the caller might be in an * interrupt-protected region. Not all irq controller chips can * retrigger interrupts at the hardware level, so in those cases * we allow the resending of IRQs via a tasklet. */ #include <linux/irq.h> #include <linux/module.h> #include <linux/random.h> #include <linux/interrupt.h> #include "internals.h" #ifdef CONFIG_HARDIRQS_SW_RESEND /* hlist_head to handle software resend of interrupts: */ static HLIST_HEAD(irq_resend_list); static DEFINE_RAW_SPINLOCK(irq_resend_lock); /* * Run software resends of IRQs */ static void resend_irqs(struct tasklet_struct *unused) { struct irq_desc *desc; raw_spin_lock_irq(&irq_resend_lock); while (!hlist_empty(&irq_resend_list)) { desc = hlist_entry(irq_resend_list.first, struct irq_desc, resend_node); hlist_del_init(&desc->resend_node); raw_spin_unlock(&irq_resend_lock); desc->handle_irq(desc); raw_spin_lock(&irq_resend_lock); } raw_spin_unlock_irq(&irq_resend_lock); } /* Tasklet to handle resend: */ static DECLARE_TASKLET(resend_tasklet, resend_irqs); static int irq_sw_resend(struct irq_desc *desc) { /* * Validate whether this interrupt can be safely injected from * non-interrupt context */ if (irqd_is_handle_enforce_irqctx(&desc->irq_data)) return -EINVAL; /* * If the interrupt is running in the thread context of the parent * irq we need to be careful, because we cannot trigger it * directly. */ if (irq_settings_is_nested_thread(desc)) { /* * If the parent_irq is valid, we retrigger the parent, * otherwise we do nothing.
*/ if (!desc->parent_irq) return -EINVAL; desc = irq_to_desc(desc->parent_irq); if (!desc) return -EINVAL; } /* Add to resend_list and activate the softirq: */ raw_spin_lock(&irq_resend_lock); if (hlist_unhashed(&desc->resend_node)) hlist_add_head(&desc->resend_node, &irq_resend_list); raw_spin_unlock(&irq_resend_lock); tasklet_schedule(&resend_tasklet); return 0; } void clear_irq_resend(struct irq_desc *desc) { raw_spin_lock(&irq_resend_lock); hlist_del_init(&desc->resend_node); raw_spin_unlock(&irq_resend_lock); } void irq_resend_init(struct irq_desc *desc) { INIT_HLIST_NODE(&desc->resend_node); } #else void clear_irq_resend(struct irq_desc *desc) {} void irq_resend_init(struct irq_desc *desc) {} static int irq_sw_resend(struct irq_desc *desc) { return -EINVAL; } #endif static int try_retrigger(struct irq_desc *desc) { if (desc->irq_data.chip->irq_retrigger) return desc->irq_data.chip->irq_retrigger(&desc->irq_data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY return irq_chip_retrigger_hierarchy(&desc->irq_data); #else return 0; #endif } /* * IRQ resend * * Is called with interrupts disabled and desc->lock held. */ int check_irq_resend(struct irq_desc *desc, bool inject) { int err = 0; /* * We do not resend level type interrupts. Level type interrupts * are resent by hardware when they are still active. Clear the * pending bit so suspend/resume does not get confused. */ if (irq_settings_is_level(desc)) { desc->istate &= ~IRQS_PENDING; return -EINVAL; } if (desc->istate & IRQS_REPLAY) return -EBUSY; if (!(desc->istate & IRQS_PENDING) && !inject) return 0; desc->istate &= ~IRQS_PENDING; if (!try_retrigger(desc)) err = irq_sw_resend(desc); /* If the retrigger was successful, mark it with the REPLAY bit */ if (!err) desc->istate |= IRQS_REPLAY; return err; } #ifdef CONFIG_GENERIC_IRQ_INJECTION /** * irq_inject_interrupt - Inject an interrupt for testing/error injection * @irq: The interrupt number * * This function must only be used for debug and testing purposes! * * Especially on x86 this can cause a premature completion of an interrupt * affinity change causing the interrupt line to become stale. Very * unlikely, but possible. * * The injection can fail for various reasons: * - Interrupt is not activated * - Interrupt is NMI type or currently replaying * - Interrupt is level type * - Interrupt does not support hardware retrigger and software resend is * either not enabled or not possible for the interrupt. */ int irq_inject_interrupt(unsigned int irq) { struct irq_desc *desc; unsigned long flags; int err; /* Try the state injection hardware interface first */ if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true)) return 0; /* That failed, try via the resend mechanism */ desc = irq_get_desc_buslock(irq, &flags, 0); if (!desc) return -EINVAL; /* * Only try to inject when the interrupt is: * - not NMI type * - activated */ if (irq_is_nmi(desc) || !irqd_is_activated(&desc->irq_data)) err = -EINVAL; else err = check_irq_resend(desc, true); irq_put_desc_busunlock(desc, flags); return err; } EXPORT_SYMBOL_GPL(irq_inject_interrupt); #endif
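As a rough illustration of the injection path above, the sketch below drives irq_inject_interrupt() from a small test module. It is an assumption-laden example: it requires CONFIG_GENERIC_IRQ_INJECTION, it takes the prototype to be reachable via <linux/interrupt.h>, and the interrupt number is a placeholder.

/*
 * Debug/test-only sketch (not part of resend.c): inject a pending state on a
 * placeholder interrupt line and report the result. Module load fails if the
 * injection is rejected, which is fine for a throwaway test.
 */
#include <linux/module.h>
#include <linux/interrupt.h>

static unsigned int test_irq = 17;	/* hypothetical interrupt number */
module_param(test_irq, uint, 0444);

static int __init irq_inject_test_init(void)
{
	int err = irq_inject_interrupt(test_irq);

	/* -EINVAL: not activated, NMI or level type; -EBUSY: already replaying */
	pr_info("irq_inject_interrupt(%u) returned %d\n", test_irq, err);
	return err;
}
module_init(irq_inject_test_init);

MODULE_LICENSE("GPL");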
// SPDX-License-Identifier: GPL-2.0 /* * Lockless hierarchical page accounting & limiting * * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner */ #include <linux/page_counter.h> #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/bug.h> #include <asm/page.h> static bool track_protection(struct page_counter *c) { return c->protection_support; } static void propagate_protected_usage(struct page_counter *c, unsigned long usage) { unsigned long protected, old_protected; long delta; if (!c->parent) return; protected = min(usage, READ_ONCE(c->min)); old_protected = atomic_long_read(&c->min_usage); if (protected != old_protected) { old_protected = atomic_long_xchg(&c->min_usage, protected); delta = protected - old_protected; if (delta) atomic_long_add(delta, &c->parent->children_min_usage); } protected = min(usage, READ_ONCE(c->low)); old_protected = atomic_long_read(&c->low_usage); if (protected != old_protected) { old_protected = atomic_long_xchg(&c->low_usage, protected); delta = protected - old_protected; if (delta) atomic_long_add(delta, &c->parent->children_low_usage); } } /** * page_counter_cancel - take pages out of the local counter * @counter: counter * @nr_pages: number of pages to cancel */ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) { long new; new = atomic_long_sub_return(nr_pages, &counter->usage); /* More uncharges than charges?
*/ if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n", new, nr_pages)) { new = 0; atomic_long_set(&counter->usage, new); } if (track_protection(counter)) propagate_protected_usage(counter, new); } /** * page_counter_charge - hierarchically charge pages * @counter: counter * @nr_pages: number of pages to charge * * NOTE: This does not consider any configured counter limits. */ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; bool protection = track_protection(counter); for (c = counter; c; c = c->parent) { long new; new = atomic_long_add_return(nr_pages, &c->usage); if (protection) propagate_protected_usage(c, new); /* * This is indeed racy, but we can live with some * inaccuracy in the watermark. * * Notably, we have two watermarks to allow for both a globally * visible peak and one that can be reset at a smaller scope. * * Since we reset both watermarks when the global reset occurs, * we can guarantee that watermark >= local_watermark, so we * don't need to do both comparisons every time. * * On systems with branch predictors, the inner condition should * be almost free. */ if (new > READ_ONCE(c->local_watermark)) { WRITE_ONCE(c->local_watermark, new); if (new > READ_ONCE(c->watermark)) WRITE_ONCE(c->watermark, new); } } } /** * page_counter_try_charge - try to hierarchically charge pages * @counter: counter * @nr_pages: number of pages to charge * @fail: points first counter to hit its limit, if any * * Returns %true on success, or %false and @fail if the counter or one * of its ancestors has hit its configured limit. */ bool page_counter_try_charge(struct page_counter *counter, unsigned long nr_pages, struct page_counter **fail) { struct page_counter *c; bool protection = track_protection(counter); bool track_failcnt = counter->track_failcnt; for (c = counter; c; c = c->parent) { long new; /* * Charge speculatively to avoid an expensive CAS. If * a bigger charge fails, it might falsely lock out a * racing smaller charge and send it into reclaim * early, but the error is limited to the difference * between the two sizes, which is less than 2M/4M in * case of a THP locking out a regular page charge. * * The atomic_long_add_return() implies a full memory * barrier between incrementing the count and reading * the limit. When racing with page_counter_set_max(), * we either see the new limit or the setter sees the * counter has changed and retries. */ new = atomic_long_add_return(nr_pages, &c->usage); if (new > c->max) { atomic_long_sub(nr_pages, &c->usage); /* * This is racy, but we can live with some * inaccuracy in the failcnt which is only used * to report stats. 
*/ if (track_failcnt) data_race(c->failcnt++); *fail = c; goto failed; } if (protection) propagate_protected_usage(c, new); /* see comment on page_counter_charge */ if (new > READ_ONCE(c->local_watermark)) { WRITE_ONCE(c->local_watermark, new); if (new > READ_ONCE(c->watermark)) WRITE_ONCE(c->watermark, new); } } return true; failed: for (c = counter; c != *fail; c = c->parent) page_counter_cancel(c, nr_pages); return false; } /** * page_counter_uncharge - hierarchically uncharge pages * @counter: counter * @nr_pages: number of pages to uncharge */ void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; for (c = counter; c; c = c->parent) page_counter_cancel(c, nr_pages); } /** * page_counter_set_max - set the maximum number of pages allowed * @counter: counter * @nr_pages: limit to set * * Returns 0 on success, -EBUSY if the current number of pages on the * counter already exceeds the specified limit. * * The caller must serialize invocations on the same counter. */ int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages) { for (;;) { unsigned long old; long usage; /* * Update the limit while making sure that it's not * below the concurrently-changing counter value. * * The xchg implies two full memory barriers before * and after, so the read-swap-read is ordered and * ensures coherency with page_counter_try_charge(): * that function modifies the count before checking * the limit, so if it sees the old limit, we see the * modified counter and retry. */ usage = page_counter_read(counter); if (usage > nr_pages) return -EBUSY; old = xchg(&counter->max, nr_pages); if (page_counter_read(counter) <= usage || nr_pages >= old) return 0; counter->max = old; cond_resched(); } } /** * page_counter_set_min - set the amount of protected memory * @counter: counter * @nr_pages: value to set * * The caller must serialize invocations on the same counter. */ void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; WRITE_ONCE(counter->min, nr_pages); for (c = counter; c; c = c->parent) propagate_protected_usage(c, atomic_long_read(&c->usage)); } /** * page_counter_set_low - set the amount of protected memory * @counter: counter * @nr_pages: value to set * * The caller must serialize invocations on the same counter. */ void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; WRITE_ONCE(counter->low, nr_pages); for (c = counter; c; c = c->parent) propagate_protected_usage(c, atomic_long_read(&c->usage)); } /** * page_counter_memparse - memparse() for page counter limits * @buf: string to parse * @max: string meaning maximum possible value * @nr_pages: returns the result in number of pages * * Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be * limited to %PAGE_COUNTER_MAX. */ int page_counter_memparse(const char *buf, const char *max, unsigned long *nr_pages) { char *end; u64 bytes; if (!strcmp(buf, max)) { *nr_pages = PAGE_COUNTER_MAX; return 0; } bytes = memparse(buf, &end); if (*end != '\0') return -EINVAL; *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX); return 0; } #if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM) /* * This function calculates an individual page counter's effective * protection which is derived from its own memory.min/low, its * parent's and siblings' settings, as well as the actual memory * distribution in the tree. 
* * The following rules apply to the effective protection values: * * 1. At the first level of reclaim, effective protection is equal to * the declared protection in memory.min and memory.low. * * 2. To enable safe delegation of the protection configuration, at * subsequent levels the effective protection is capped to the * parent's effective protection. * * 3. To make complex and dynamic subtrees easier to configure, the * user is allowed to overcommit the declared protection at a given * level. If that is the case, the parent's effective protection is * distributed to the children in proportion to how much protection * they have declared and how much of it they are utilizing. * * This makes distribution proportional, but also work-conserving: * if one counter claims much more protection than it uses memory, * the unused remainder is available to its siblings. * * 4. Conversely, when the declared protection is undercommitted at a * given level, the distribution of the larger parental protection * budget is NOT proportional. A counter's protection from a sibling * is capped to its own memory.min/low setting. * * 5. However, to allow protecting recursive subtrees from each other * without having to declare each individual counter's fixed share * of the ancestor's claim to protection, any unutilized - * "floating" - protection from up the tree is distributed in * proportion to each counter's *usage*. This makes the protection * neutral wrt sibling cgroups and lets them compete freely over * the shared parental protection budget, but it protects the * subtree as a whole from neighboring subtrees. * * Note that 4. and 5. are not in conflict: 4. is about protecting * against immediate siblings whereas 5. is about protecting against * neighboring subtrees. */ static unsigned long effective_protection(unsigned long usage, unsigned long parent_usage, unsigned long setting, unsigned long parent_effective, unsigned long siblings_protected, bool recursive_protection) { unsigned long protected; unsigned long ep; protected = min(usage, setting); /* * If all cgroups at this level combined claim and use more * protection than what the parent affords them, distribute * shares in proportion to utilization. * * We are using actual utilization rather than the statically * claimed protection in order to be work-conserving: claimed * but unused protection is available to siblings that would * otherwise get a smaller chunk than what they claimed. */ if (siblings_protected > parent_effective) return protected * parent_effective / siblings_protected; /* * Ok, utilized protection of all children is within what the * parent affords them, so we know whatever this child claims * and utilizes is effectively protected. * * If there is unprotected usage beyond this value, reclaim * will apply pressure in proportion to that amount. * * If there is unutilized protection, the cgroup will be fully * shielded from reclaim, but we do return a smaller value for * protection than what the group could enjoy in theory. This * is okay. With the overcommit distribution above, effective * protection is always dependent on how memory is actually * consumed among the siblings anyway. */ ep = protected; /* * If the children aren't claiming (all of) the protection * afforded to them by the parent, distribute the remainder in * proportion to the (unprotected) memory of each cgroup. 
That * way, cgroups that aren't explicitly prioritized wrt each * other compete freely over the allowance, but they are * collectively protected from neighboring trees. * * We're using unprotected memory for the weight so that if * some cgroups DO claim explicit protection, we don't protect * the same bytes twice. * * Check both usage and parent_usage against the respective * protected values. One should imply the other, but they * aren't read atomically - make sure the division is sane. */ if (!recursive_protection) return ep; if (parent_effective > siblings_protected && parent_usage > siblings_protected && usage > protected) { unsigned long unclaimed; unclaimed = parent_effective - siblings_protected; unclaimed *= usage - protected; unclaimed /= parent_usage - siblings_protected; ep += unclaimed; } return ep; } /** * page_counter_calculate_protection - check if memory consumption is in the normal range * @root: the top ancestor of the sub-tree being checked * @counter: the page_counter the counter to update * @recursive_protection: Whether to use memory_recursiveprot behavior. * * Calculates elow/emin thresholds for given page_counter. * * WARNING: This function is not stateless! It can only be used as part * of a top-down tree iteration, not for isolated queries. */ void page_counter_calculate_protection(struct page_counter *root, struct page_counter *counter, bool recursive_protection) { unsigned long usage, parent_usage; struct page_counter *parent = counter->parent; /* * Effective values of the reclaim targets are ignored so they * can be stale. Have a look at mem_cgroup_protection for more * details. * TODO: calculation should be more robust so that we do not need * that special casing. */ if (root == counter) return; usage = page_counter_read(counter); if (!usage) return; if (parent == root) { counter->emin = READ_ONCE(counter->min); counter->elow = READ_ONCE(counter->low); return; } parent_usage = page_counter_read(parent); WRITE_ONCE(counter->emin, effective_protection(usage, parent_usage, READ_ONCE(counter->min), READ_ONCE(parent->emin), atomic_long_read(&parent->children_min_usage), recursive_protection)); WRITE_ONCE(counter->elow, effective_protection(usage, parent_usage, READ_ONCE(counter->low), READ_ONCE(parent->elow), atomic_long_read(&parent->children_low_usage), recursive_protection)); } #endif /* CONFIG_MEMCG || CONFIG_CGROUP_DMEM */
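A caller-side sketch may help illustrate the charge/uncharge contract above. It is not part of page_counter.c; the function name is hypothetical and it assumes the counter was initialized and linked to its parent elsewhere.

/*
 * Sketch: charge a page_counter hierarchy, back off if any ancestor is at
 * its limit, and release the charge again when done.
 */
#include <linux/page_counter.h>
#include <linux/printk.h>
#include <linux/errno.h>

static int example_charge_pages(struct page_counter *counter,
				unsigned long nr_pages)
{
	struct page_counter *fail;

	if (!page_counter_try_charge(counter, nr_pages, &fail)) {
		/* @fail is the first ancestor that hit its configured max. */
		pr_debug("charge of %lu pages rejected at limit %lu\n",
			 nr_pages, fail->max);
		return -ENOMEM;
	}

	/* ... the pages are now accounted; do the actual work here ... */

	page_counter_uncharge(counter, nr_pages);
	return 0;
}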
// SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Pseudo-driver for the loopback interface. * * Version: @(#)loopback.c 1.0.4b 08/16/93 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Donald Becker, <becker@scyld.com> * * Alan Cox : Fixed oddments for NET3.014 * Alan Cox : Rejig for NET3.029 snap #3 * Alan Cox : Fixed NET3.029 bugs and sped up * Larry McVoy : Tiny tweak to double performance * Alan Cox : Backed out LMV's tweak - the linux mm * can't take it... * Michael Griffith: Don't bother computing the checksums * on packets received on the loopback * interface. * Alexey Kuznetsov: Potential hang under some extreme * cases removed. */ #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/in.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <net/sch_generic.h> #include <net/sock.h> #include <net/checksum.h> #include <linux/if_ether.h> /* For the statistics structure. */ #include <linux/if_arp.h> /* For ARPHRD_ETHER */ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/percpu.h> #include <linux/net_tstamp.h> #include <net/net_namespace.h> #include <net/netdev_lock.h> #include <linux/u64_stats_sync.h> /* blackhole_netdev - a device used for dsts that are marked expired! * This is global device (instead of per-net-ns) since it's not needed * to be per-ns and gets initialized at boot time. */ struct net_device *blackhole_netdev; EXPORT_SYMBOL(blackhole_netdev); /* The higher levels take care of making this non-reentrant (it's * called with bh's disabled). */ static netdev_tx_t loopback_xmit(struct sk_buff *skb, struct net_device *dev) { int len; skb_tx_timestamp(skb); /* do not fool net_timestamp_check() with various clock bases */ skb_clear_tstamp(skb); skb_orphan(skb); /* Before queueing this packet to __netif_rx(), * make sure dst is refcounted.
*/ skb_dst_force(skb); skb->protocol = eth_type_trans(skb, dev); len = skb->len; if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) dev_lstats_add(dev, len); return NETDEV_TX_OK; } void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes) { int i; *packets = 0; *bytes = 0; for_each_possible_cpu(i) { const struct pcpu_lstats *lb_stats; u64 tbytes, tpackets; unsigned int start; lb_stats = per_cpu_ptr(dev->lstats, i); do { start = u64_stats_fetch_begin(&lb_stats->syncp); tpackets = u64_stats_read(&lb_stats->packets); tbytes = u64_stats_read(&lb_stats->bytes); } while (u64_stats_fetch_retry(&lb_stats->syncp, start)); *bytes += tbytes; *packets += tpackets; } } EXPORT_SYMBOL(dev_lstats_read); static void loopback_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { u64 packets, bytes; dev_lstats_read(dev, &packets, &bytes); stats->rx_packets = packets; stats->tx_packets = packets; stats->rx_bytes = bytes; stats->tx_bytes = bytes; } static u32 always_on(struct net_device *dev) { return 1; } static const struct ethtool_ops loopback_ethtool_ops = { .get_link = always_on, .get_ts_info = ethtool_op_get_ts_info, }; static int loopback_dev_init(struct net_device *dev) { netdev_lockdep_set_classes(dev); return 0; } static void loopback_dev_free(struct net_device *dev) { dev_net(dev)->loopback_dev = NULL; } static const struct net_device_ops loopback_ops = { .ndo_init = loopback_dev_init, .ndo_start_xmit = loopback_xmit, .ndo_get_stats64 = loopback_get_stats64, .ndo_set_mac_address = eth_mac_addr, }; static void gen_lo_setup(struct net_device *dev, unsigned int mtu, const struct ethtool_ops *eth_ops, const struct header_ops *hdr_ops, const struct net_device_ops *dev_ops, void (*dev_destructor)(struct net_device *dev)) { dev->mtu = mtu; dev->hard_header_len = ETH_HLEN; /* 14 */ dev->min_header_len = ETH_HLEN; /* 14 */ dev->addr_len = ETH_ALEN; /* 6 */ dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ dev->flags = IFF_LOOPBACK; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->lltx = true; dev->netns_immutable = true; netif_keep_dst(dev); dev->hw_features = NETIF_F_GSO_SOFTWARE; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | NETIF_F_VLAN_CHALLENGED | NETIF_F_LOOPBACK; dev->ethtool_ops = eth_ops; dev->header_ops = hdr_ops; dev->netdev_ops = dev_ops; dev->needs_free_netdev = true; dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS; dev->priv_destructor = dev_destructor; netif_set_tso_max_size(dev, GSO_MAX_SIZE); } /* The loopback device is special. There is only one instance * per network namespace. */ static void loopback_setup(struct net_device *dev) { gen_lo_setup(dev, (64 * 1024), &loopback_ethtool_ops, &eth_header_ops, &loopback_ops, loopback_dev_free); } /* Setup and register the loopback device. 
*/ static __net_init int loopback_net_init(struct net *net) { struct net_device *dev; int err; err = -ENOMEM; dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup); if (!dev) goto out; dev_net_set(dev, net); err = register_netdev(dev); if (err) goto out_free_netdev; BUG_ON(dev->ifindex != LOOPBACK_IFINDEX); net->loopback_dev = dev; return 0; out_free_netdev: free_netdev(dev); out: if (net_eq(net, &init_net)) panic("loopback: Failed to register netdevice: %d\n", err); return err; } /* Registered in net/core/dev.c */ struct pernet_operations __net_initdata loopback_net_ops = { .init = loopback_net_init, }; /* blackhole netdevice */ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb, struct net_device *dev) { kfree_skb(skb); net_warn_ratelimited("%s(): Dropping skb.\n", __func__); return NETDEV_TX_OK; } static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb) { kfree_skb(skb); return 0; } static int blackhole_neigh_construct(struct net_device *dev, struct neighbour *n) { n->output = blackhole_neigh_output; return 0; } static const struct net_device_ops blackhole_netdev_ops = { .ndo_start_xmit = blackhole_netdev_xmit, .ndo_neigh_construct = blackhole_neigh_construct, }; /* This is a dst-dummy device used specifically for invalidated * DSTs and unlike loopback, this is not per-ns. */ static void blackhole_netdev_setup(struct net_device *dev) { gen_lo_setup(dev, ETH_MIN_MTU, NULL, NULL, &blackhole_netdev_ops, NULL); } /* Setup and register the blackhole_netdev. */ static int __init blackhole_netdev_init(void) { blackhole_netdev = alloc_netdev(0, "blackhole_dev", NET_NAME_UNKNOWN, blackhole_netdev_setup); if (!blackhole_netdev) return -ENOMEM; rtnl_net_lock(&init_net); dev_init_scheduler(blackhole_netdev); dev_activate(blackhole_netdev); rtnl_net_unlock(&init_net); blackhole_netdev->flags |= IFF_UP | IFF_RUNNING; return 0; } device_initcall(blackhole_netdev_init);
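The per-CPU lstats pattern used by loopback_xmit() generalizes to other software devices. The sketch below is hypothetical and assumes the device set dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS during setup, as gen_lo_setup() does, so that dev->lstats is allocated by the core.

/*
 * Sketch (not from loopback.c): xmit path of a hypothetical software device
 * that accounts traffic with the same dev_lstats_add() helper.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* Consume the packet, then bump this CPU's byte/packet counters. */
	dev_kfree_skb(skb);
	dev_lstats_add(dev, len);

	return NETDEV_TX_OK;
}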
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef BTRFS_SPACE_INFO_H #define BTRFS_SPACE_INFO_H #include <trace/events/btrfs.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/kobject.h> #include <linux/lockdep.h> #include <linux/wait.h> #include <linux/rwsem.h> #include "volumes.h" struct btrfs_fs_info; struct btrfs_block_group; /* * Different levels to flush space when doing space reservations. * * The higher the level, the more methods we try to reclaim space. */ enum btrfs_reserve_flush_enum { /* If we are in the transaction, we can't flush anything.*/ BTRFS_RESERVE_NO_FLUSH, /* * Flush space by: * - Running delayed inode items * - Allocating a new chunk */ BTRFS_RESERVE_FLUSH_LIMIT, /* * Flush space by: * - Running delayed inode items * - Running delayed refs * - Running delalloc and waiting for ordered extents * - Allocating a new chunk * - Committing transaction */ BTRFS_RESERVE_FLUSH_EVICT, /* * Flush space by above mentioned methods and by: * - Running delayed iputs * - Committing transaction * * Can be interrupted by a fatal signal. */ BTRFS_RESERVE_FLUSH_DATA, BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE, BTRFS_RESERVE_FLUSH_ALL, /* * Pretty much the same as FLUSH_ALL, but can also steal space from * global rsv. * * Can be interrupted by a fatal signal. */ BTRFS_RESERVE_FLUSH_ALL_STEAL, /* * This is for btrfs_use_block_rsv only. We have exhausted our block * rsv and our global block rsv. This can happen for things like * delalloc where we are overwriting a lot of extents with a single * extent and didn't reserve enough space. Alternatively it can happen * with delalloc where we reserve 1 extent's worth for a large extent but * fragmentation leads to multiple extents being created. This will * give us the reservation in the case of * * if (num_bytes < (space_info->total_bytes - * btrfs_space_info_used(space_info, false)) * * Which ignores bytes_may_use. This is potentially dangerous, but our * reservation system is generally pessimistic so is able to absorb this * style of mistake. */ BTRFS_RESERVE_FLUSH_EMERGENCY, }; /* * Please be aware that the order of enum values will be the order of the reclaim * process in btrfs_async_reclaim_metadata_space().
*/ enum btrfs_flush_state { FLUSH_DELAYED_ITEMS_NR = 1, FLUSH_DELAYED_ITEMS = 2, FLUSH_DELAYED_REFS_NR = 3, FLUSH_DELAYED_REFS = 4, FLUSH_DELALLOC = 5, FLUSH_DELALLOC_WAIT = 6, FLUSH_DELALLOC_FULL = 7, ALLOC_CHUNK = 8, ALLOC_CHUNK_FORCE = 9, RUN_DELAYED_IPUTS = 10, COMMIT_TRANS = 11, RESET_ZONES = 12, }; struct btrfs_space_info { struct btrfs_fs_info *fs_info; spinlock_t lock; u64 total_bytes; /* total bytes in the space, this doesn't take mirrors into account */ u64 bytes_used; /* total bytes used, this doesn't take mirrors into account */ u64 bytes_pinned; /* total bytes pinned, will be freed when the transaction finishes */ u64 bytes_reserved; /* total bytes the allocator has reserved for current allocations */ u64 bytes_may_use; /* number of bytes that may be used for delalloc/allocations */ u64 bytes_readonly; /* total bytes that are read only */ u64 bytes_zone_unusable; /* total bytes that are unusable until resetting the device zone */ u64 max_extent_size; /* This will hold the maximum extent size of the space info if we had an ENOSPC in the allocator. */ /* Chunk size in bytes */ u64 chunk_size; /* * Once a block group drops below this threshold (percents) we'll * schedule it for reclaim. */ int bg_reclaim_threshold; int clamp; /* Used to scale our threshold for preemptive flushing. The value is >> clamp, so turns out to be a 2^clamp divisor. */ unsigned int full:1; /* indicates that we cannot allocate any more chunks for this space */ unsigned int chunk_alloc:1; /* set if we are allocating a chunk */ unsigned int flush:1; /* set if we are trying to make space */ unsigned int force_alloc; /* set if we need to force a chunk alloc for this space */ u64 disk_used; /* total bytes used on disk */ u64 disk_total; /* total bytes on disk, takes mirrors into account */ u64 flags; struct list_head list; /* Protected by the spinlock 'lock'. */ struct list_head ro_bgs; struct list_head priority_tickets; struct list_head tickets; /* * Size of space that needs to be reclaimed in order to satisfy pending * tickets */ u64 reclaim_size; /* * tickets_id just indicates the next ticket will be handled, so note * it's not stored per ticket. */ u64 tickets_id; struct rw_semaphore groups_sem; /* for block groups in our same type */ struct list_head block_groups[BTRFS_NR_RAID_TYPES]; struct kobject kobj; struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES]; /* * Monotonically increasing counter of block group reclaim attempts * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_count */ u64 reclaim_count; /* * Monotonically increasing counter of reclaimed bytes * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_bytes */ u64 reclaim_bytes; /* * Monotonically increasing counter of reclaim errors * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_errors */ u64 reclaim_errors; /* * If true, use the dynamic relocation threshold, instead of the * fixed bg_reclaim_threshold. */ bool dynamic_reclaim; /* * Periodically check all block groups against the reclaim * threshold in the cleaner thread. */ bool periodic_reclaim; /* * Periodic reclaim should be a no-op if a space_info hasn't * freed any space since the last time we tried. */ bool periodic_reclaim_ready; /* * Net bytes freed or allocated since the last reclaim pass. 
*/ s64 reclaimable_bytes; }; struct reserve_ticket { u64 bytes; int error; bool steal; struct list_head list; wait_queue_head_t wait; }; static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info) { return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); } /* * * Declare a helper function to detect underflow of various space info members */ #define DECLARE_SPACE_INFO_UPDATE(name, trace_name) \ static inline void \ btrfs_space_info_update_##name(struct btrfs_space_info *sinfo, \ s64 bytes) \ { \ struct btrfs_fs_info *fs_info = sinfo->fs_info; \ const u64 abs_bytes = (bytes < 0) ? -bytes : bytes; \ lockdep_assert_held(&sinfo->lock); \ trace_update_##name(fs_info, sinfo, sinfo->name, bytes); \ trace_btrfs_space_reservation(fs_info, trace_name, \ sinfo->flags, abs_bytes, \ bytes > 0); \ if (bytes < 0 && sinfo->name < -bytes) { \ WARN_ON(1); \ sinfo->name = 0; \ return; \ } \ sinfo->name += bytes; \ } DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info"); DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned"); DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable"); int btrfs_init_space_info(struct btrfs_fs_info *fs_info); void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info, struct btrfs_block_group *block_group); void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info, u64 chunk_size); struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, u64 flags); u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info, bool may_use_included); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, struct btrfs_space_info *info, u64 bytes, int dump_block_groups); int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 orig_bytes, enum btrfs_reserve_flush_enum flush); void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info); int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, const struct btrfs_space_info *space_info, u64 bytes, enum btrfs_reserve_flush_enum flush); static inline void btrfs_space_info_free_bytes_may_use( struct btrfs_space_info *space_info, u64 num_bytes) { spin_lock(&space_info->lock); btrfs_space_info_update_bytes_may_use(space_info, -num_bytes); btrfs_try_granting_tickets(space_info->fs_info, space_info); spin_unlock(&space_info->lock); } int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, enum btrfs_reserve_flush_enum flush); void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info); void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info); u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes); void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready); bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info); int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info); void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info); void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len); #endif /* BTRFS_SPACE_INFO_H */
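The declarations above pair naturally: btrfs_reserve_metadata_bytes() attempts a reservation using one of the flush levels from the enum earlier in this header, and btrfs_space_info_free_bytes_may_use() returns it, taking the space_info lock, decrementing bytes_may_use and letting btrfs_try_granting_tickets() hand the space to any waiters. A minimal, hedged sketch of a hypothetical caller follows; example_reserve_and_release() and its arguments are illustrative and not a function from the btrfs sources.

/*
 * Minimal sketch of a hypothetical caller pairing a metadata reservation
 * with its release. The function name and byte count are illustrative.
 */
static int example_reserve_and_release(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *sinfo,
				       u64 num_bytes)
{
	int ret;

	/* Try to reserve, allowing the full set of reclaim steps. */
	ret = btrfs_reserve_metadata_bytes(fs_info, sinfo, num_bytes,
					   BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		return ret;

	/*
	 * ... use the reservation ...
	 *
	 * The release goes through the inline helper above, which takes
	 * sinfo->lock, decrements bytes_may_use and tries to grant pending
	 * tickets with the freed space.
	 */
	btrfs_space_info_free_bytes_may_use(sinfo, num_bytes);
	return 0;
}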
// SPDX-License-Identifier: GPL-2.0+
/*
 * PlayStation 2 Trance Vibrator driver
 *
 * Copyright (C) 2006 Sam Hocevar <sam@zoy.org>
 */

/* Standard include files */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>

#define DRIVER_AUTHOR "Sam Hocevar, sam@zoy.org"
#define DRIVER_DESC "PlayStation 2 Trance Vibrator driver"

#define TRANCEVIBRATOR_VENDOR_ID	0x0b49	/* ASCII Corporation */
#define TRANCEVIBRATOR_PRODUCT_ID	0x064f	/* Trance Vibrator */

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

/* Driver-local specific stuff */
struct trancevibrator {
	struct usb_device *udev;
	unsigned int speed;
};

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct trancevibrator *tv = usb_get_intfdata(intf);

	return sprintf(buf, "%d\n", tv->speed);
}

static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct trancevibrator *tv = usb_get_intfdata(intf);
	int temp, retval, old;

	retval = kstrtoint(buf, 10, &temp);
	if (retval)
		return retval;
	if (temp > 255)
		temp = 255;
	else if (temp < 0)
		temp = 0;
	old = tv->speed;
	tv->speed = temp;

	dev_dbg(&tv->udev->dev, "speed = %d\n", tv->speed);

	/* Set speed */
	retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
				 0x01, /* vendor request: set speed */
				 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
				 tv->speed, /* speed value */
				 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (retval) {
		tv->speed = old;
		dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
		return retval;
	}
	return count;
}
static DEVICE_ATTR_RW(speed);

static struct attribute *tv_attrs[] = {
	&dev_attr_speed.attr,
	NULL,
};
ATTRIBUTE_GROUPS(tv);

static int tv_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(interface);
	struct trancevibrator *dev;
	int retval;

	dev = kzalloc(sizeof(struct trancevibrator), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	dev->udev = usb_get_dev(udev);
	usb_set_intfdata(interface, dev);

	return 0;

error:
	kfree(dev);
	return retval;
}

static void tv_disconnect(struct usb_interface *interface)
{
	struct trancevibrator *dev;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);
	usb_put_dev(dev->udev);
	kfree(dev);
}

/* USB subsystem object */
static struct usb_driver tv_driver = {
	.name = "trancevibrator",
	.probe = tv_probe,
	.disconnect = tv_disconnect,
	.id_table = id_table,
	.dev_groups = tv_groups,
};

module_usb_driver(tv_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
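From userspace the driver is operated entirely through the read/write speed attribute created by DEVICE_ATTR_RW(speed) above: speed_store() parses a decimal value, clamps it to 0-255 and forwards it as a vendor control request. A hedged userspace sketch is shown below; the sysfs path is hypothetical and depends on where the interface is enumerated under /sys/bus/usb.

/*
 * Userspace sketch: write a speed value (0-255) to the driver's "speed"
 * attribute. The path below is hypothetical; the actual location depends
 * on the USB topology of the connected device.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/bus/usb/devices/1-1:1.0/speed"; /* hypothetical */
	int speed = (argc > 1) ? atoi(argv[1]) : 0;
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Values above 255 or below 0 are clamped by speed_store(). */
	fprintf(f, "%d\n", speed);
	fclose(f);
	return 0;
}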
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-ctrls.c - control support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/videodev2.h> #include <media/v4l2-event.h> #include <media/v4l2-common.h> #include "vivid-core.h" #include "vivid-vid-cap.h" #include "vivid-vid-out.h" #include "vivid-vid-common.h" #include "vivid-radio-common.h" #include "vivid-osd.h" #include "vivid-ctrls.h" #define VIVID_CID_CUSTOM_BASE (V4L2_CID_USER_BASE | 0xf000) #define VIVID_CID_BUTTON (VIVID_CID_CUSTOM_BASE + 0) #define VIVID_CID_BOOLEAN (VIVID_CID_CUSTOM_BASE + 1) #define VIVID_CID_INTEGER (VIVID_CID_CUSTOM_BASE + 2) #define VIVID_CID_INTEGER64 (VIVID_CID_CUSTOM_BASE + 3) #define VIVID_CID_MENU (VIVID_CID_CUSTOM_BASE + 4) #define VIVID_CID_STRING (VIVID_CID_CUSTOM_BASE + 5) #define VIVID_CID_BITMASK (VIVID_CID_CUSTOM_BASE + 6) #define VIVID_CID_INTMENU (VIVID_CID_CUSTOM_BASE + 7) #define VIVID_CID_U32_ARRAY (VIVID_CID_CUSTOM_BASE + 8) #define VIVID_CID_U16_MATRIX (VIVID_CID_CUSTOM_BASE + 9) #define VIVID_CID_U8_4D_ARRAY (VIVID_CID_CUSTOM_BASE + 10) #define VIVID_CID_AREA (VIVID_CID_CUSTOM_BASE + 11) #define VIVID_CID_RO_INTEGER (VIVID_CID_CUSTOM_BASE + 12) #define VIVID_CID_U32_DYN_ARRAY (VIVID_CID_CUSTOM_BASE + 13) #define VIVID_CID_U8_PIXEL_ARRAY (VIVID_CID_CUSTOM_BASE + 14) #define VIVID_CID_S32_ARRAY (VIVID_CID_CUSTOM_BASE + 15) #define VIVID_CID_S64_ARRAY (VIVID_CID_CUSTOM_BASE + 16) #define VIVID_CID_RECT (VIVID_CID_CUSTOM_BASE + 17) #define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000) #define VIVID_CID_VIVID_CLASS (0x00f00000 | 1) #define VIVID_CID_TEST_PATTERN (VIVID_CID_VIVID_BASE + 0) #define VIVID_CID_OSD_TEXT_MODE (VIVID_CID_VIVID_BASE + 1) #define VIVID_CID_HOR_MOVEMENT (VIVID_CID_VIVID_BASE + 2) #define VIVID_CID_VERT_MOVEMENT (VIVID_CID_VIVID_BASE + 3) #define VIVID_CID_SHOW_BORDER (VIVID_CID_VIVID_BASE + 4) #define VIVID_CID_SHOW_SQUARE (VIVID_CID_VIVID_BASE + 5) #define VIVID_CID_INSERT_SAV (VIVID_CID_VIVID_BASE + 6) #define VIVID_CID_INSERT_EAV (VIVID_CID_VIVID_BASE + 7) #define VIVID_CID_VBI_CAP_INTERLACED (VIVID_CID_VIVID_BASE + 8) #define VIVID_CID_INSERT_HDMI_VIDEO_GUARD_BAND (VIVID_CID_VIVID_BASE + 9) #define VIVID_CID_HFLIP (VIVID_CID_VIVID_BASE + 20) #define VIVID_CID_VFLIP (VIVID_CID_VIVID_BASE + 21) #define VIVID_CID_STD_ASPECT_RATIO (VIVID_CID_VIVID_BASE + 22) #define VIVID_CID_DV_TIMINGS_ASPECT_RATIO (VIVID_CID_VIVID_BASE + 23) #define VIVID_CID_TSTAMP_SRC (VIVID_CID_VIVID_BASE + 24) #define VIVID_CID_COLORSPACE (VIVID_CID_VIVID_BASE + 25) #define VIVID_CID_XFER_FUNC (VIVID_CID_VIVID_BASE + 26) #define VIVID_CID_YCBCR_ENC (VIVID_CID_VIVID_BASE + 27) #define VIVID_CID_QUANTIZATION (VIVID_CID_VIVID_BASE + 28) #define VIVID_CID_LIMITED_RGB_RANGE (VIVID_CID_VIVID_BASE + 29) #define VIVID_CID_ALPHA_MODE (VIVID_CID_VIVID_BASE + 30) #define VIVID_CID_HAS_CROP_CAP (VIVID_CID_VIVID_BASE + 31) #define VIVID_CID_HAS_COMPOSE_CAP (VIVID_CID_VIVID_BASE + 32) #define VIVID_CID_HAS_SCALER_CAP (VIVID_CID_VIVID_BASE + 33) #define VIVID_CID_HAS_CROP_OUT (VIVID_CID_VIVID_BASE + 34) #define VIVID_CID_HAS_COMPOSE_OUT (VIVID_CID_VIVID_BASE + 35) #define VIVID_CID_HAS_SCALER_OUT (VIVID_CID_VIVID_BASE + 36) #define VIVID_CID_SEQ_WRAP (VIVID_CID_VIVID_BASE + 38) #define VIVID_CID_TIME_WRAP (VIVID_CID_VIVID_BASE + 39) #define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 40) #define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 41) #define VIVID_CID_REDUCED_FPS (VIVID_CID_VIVID_BASE + 42) #define VIVID_CID_HSV_ENC (VIVID_CID_VIVID_BASE + 43) #define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60) #define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 
61) #define VIVID_CID_DV_TIMINGS_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 62) #define VIVID_CID_DV_TIMINGS (VIVID_CID_VIVID_BASE + 63) #define VIVID_CID_PERC_DROPPED (VIVID_CID_VIVID_BASE + 64) #define VIVID_CID_DISCONNECT (VIVID_CID_VIVID_BASE + 65) #define VIVID_CID_DQBUF_ERROR (VIVID_CID_VIVID_BASE + 66) #define VIVID_CID_QUEUE_SETUP_ERROR (VIVID_CID_VIVID_BASE + 67) #define VIVID_CID_BUF_PREPARE_ERROR (VIVID_CID_VIVID_BASE + 68) #define VIVID_CID_START_STR_ERROR (VIVID_CID_VIVID_BASE + 69) #define VIVID_CID_QUEUE_ERROR (VIVID_CID_VIVID_BASE + 70) #define VIVID_CID_CLEAR_FB (VIVID_CID_VIVID_BASE + 71) #define VIVID_CID_REQ_VALIDATE_ERROR (VIVID_CID_VIVID_BASE + 72) #define VIVID_CID_RADIO_SEEK_MODE (VIVID_CID_VIVID_BASE + 90) #define VIVID_CID_RADIO_SEEK_PROG_LIM (VIVID_CID_VIVID_BASE + 91) #define VIVID_CID_RADIO_RX_RDS_RBDS (VIVID_CID_VIVID_BASE + 92) #define VIVID_CID_RADIO_RX_RDS_BLOCKIO (VIVID_CID_VIVID_BASE + 93) #define VIVID_CID_RADIO_TX_RDS_BLOCKIO (VIVID_CID_VIVID_BASE + 94) #define VIVID_CID_SDR_CAP_FM_DEVIATION (VIVID_CID_VIVID_BASE + 110) #define VIVID_CID_META_CAP_GENERATE_PTS (VIVID_CID_VIVID_BASE + 111) #define VIVID_CID_META_CAP_GENERATE_SCR (VIVID_CID_VIVID_BASE + 112) /* HDMI inputs are in the range 0-14. The next available CID is VIVID_CID_VIVID_BASE + 128 */ #define VIVID_CID_HDMI_IS_CONNECTED_TO_OUTPUT(input) (VIVID_CID_VIVID_BASE + 113 + (input)) /* S-Video inputs are in the range 0-15. The next available CID is VIVID_CID_VIVID_BASE + 144 */ #define VIVID_CID_SVID_IS_CONNECTED_TO_OUTPUT(input) (VIVID_CID_VIVID_BASE + 128 + (input)) /* General User Controls */ static void vivid_unregister_dev(bool valid, struct video_device *vdev) { if (!valid) return; clear_bit(V4L2_FL_REGISTERED, &vdev->flags); v4l2_event_wake_all(vdev); } static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_gen); switch (ctrl->id) { case VIVID_CID_DISCONNECT: v4l2_info(&dev->v4l2_dev, "disconnect\n"); dev->disconnect_error = true; vivid_unregister_dev(dev->has_vid_cap, &dev->vid_cap_dev); vivid_unregister_dev(dev->has_vid_out, &dev->vid_out_dev); vivid_unregister_dev(dev->has_vbi_cap, &dev->vbi_cap_dev); vivid_unregister_dev(dev->has_vbi_out, &dev->vbi_out_dev); vivid_unregister_dev(dev->has_radio_rx, &dev->radio_rx_dev); vivid_unregister_dev(dev->has_radio_tx, &dev->radio_tx_dev); vivid_unregister_dev(dev->has_sdr_cap, &dev->sdr_cap_dev); vivid_unregister_dev(dev->has_meta_cap, &dev->meta_cap_dev); vivid_unregister_dev(dev->has_meta_out, &dev->meta_out_dev); vivid_unregister_dev(dev->has_touch_cap, &dev->touch_cap_dev); break; case VIVID_CID_BUTTON: dev->button_pressed = 30; break; } return 0; } static const struct v4l2_ctrl_ops vivid_user_gen_ctrl_ops = { .s_ctrl = vivid_user_gen_s_ctrl, }; static const struct v4l2_ctrl_config vivid_ctrl_button = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_BUTTON, .name = "Button", .type = V4L2_CTRL_TYPE_BUTTON, }; static const struct v4l2_ctrl_config vivid_ctrl_boolean = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_BOOLEAN, .name = "Boolean", .type = V4L2_CTRL_TYPE_BOOLEAN, .min = 0, .max = 1, .step = 1, .def = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_int32 = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_INTEGER, .name = "Integer 32 Bits", .type = V4L2_CTRL_TYPE_INTEGER, .min = 0xffffffff80000000ULL, .max = 0x7fffffff, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_int64 = { .ops = &vivid_user_gen_ctrl_ops, .id = 
VIVID_CID_INTEGER64, .name = "Integer 64 Bits", .type = V4L2_CTRL_TYPE_INTEGER64, .min = 0x8000000000000000ULL, .max = 0x7fffffffffffffffLL, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_u32_array = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_U32_ARRAY, .name = "U32 1 Element Array", .type = V4L2_CTRL_TYPE_U32, .def = 0x18, .min = 0x10, .max = 0x20000, .step = 1, .dims = { 1 }, }; static const struct v4l2_ctrl_config vivid_ctrl_u32_dyn_array = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_U32_DYN_ARRAY, .name = "U32 Dynamic Array", .type = V4L2_CTRL_TYPE_U32, .flags = V4L2_CTRL_FLAG_DYNAMIC_ARRAY, .def = 50, .min = 10, .max = 90, .step = 1, .dims = { 100 }, }; static const struct v4l2_ctrl_config vivid_ctrl_u16_matrix = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_U16_MATRIX, .name = "U16 8x16 Matrix", .type = V4L2_CTRL_TYPE_U16, .def = 0x18, .min = 0x10, .max = 0x2000, .step = 1, .dims = { 8, 16 }, }; static const struct v4l2_ctrl_config vivid_ctrl_u8_4d_array = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_U8_4D_ARRAY, .name = "U8 2x3x4x5 Array", .type = V4L2_CTRL_TYPE_U8, .def = 0x18, .min = 0x10, .max = 0x20, .step = 1, .dims = { 2, 3, 4, 5 }, }; static const struct v4l2_ctrl_config vivid_ctrl_u8_pixel_array = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_U8_PIXEL_ARRAY, .name = "U8 Pixel Array", .type = V4L2_CTRL_TYPE_U8, .def = 0x80, .min = 0x00, .max = 0xff, .step = 1, .dims = { 640 / PIXEL_ARRAY_DIV, 360 / PIXEL_ARRAY_DIV }, }; static const struct v4l2_ctrl_config vivid_ctrl_s32_array = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_S32_ARRAY, .name = "S32 2 Element Array", .type = V4L2_CTRL_TYPE_INTEGER, .def = 2, .min = -10, .max = 10, .step = 1, .dims = { 2 }, }; static const struct v4l2_ctrl_config vivid_ctrl_s64_array = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_S64_ARRAY, .name = "S64 5 Element Array", .type = V4L2_CTRL_TYPE_INTEGER64, .def = 4, .min = -10, .max = 10, .step = 1, .dims = { 5 }, }; static const char * const vivid_ctrl_menu_strings[] = { "Menu Item 0 (Skipped)", "Menu Item 1", "Menu Item 2 (Skipped)", "Menu Item 3", "Menu Item 4", "Menu Item 5 (Skipped)", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_menu = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_MENU, .name = "Menu", .type = V4L2_CTRL_TYPE_MENU, .min = 1, .max = 4, .def = 3, .menu_skip_mask = 0x04, .qmenu = vivid_ctrl_menu_strings, }; static const struct v4l2_ctrl_config vivid_ctrl_string = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_STRING, .name = "String", .type = V4L2_CTRL_TYPE_STRING, .min = 2, .max = 4, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_bitmask = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_BITMASK, .name = "Bitmask", .type = V4L2_CTRL_TYPE_BITMASK, .def = 0x80002000, .min = 0, .max = 0x80402010, .step = 0, }; static const s64 vivid_ctrl_int_menu_values[] = { 1, 1, 2, 3, 5, 8, 13, 21, 42, }; static const struct v4l2_ctrl_config vivid_ctrl_int_menu = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_INTMENU, .name = "Integer Menu", .type = V4L2_CTRL_TYPE_INTEGER_MENU, .min = 1, .max = 8, .def = 4, .menu_skip_mask = 0x02, .qmenu_int = vivid_ctrl_int_menu_values, }; static const struct v4l2_ctrl_config vivid_ctrl_disconnect = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_DISCONNECT, .name = "Disconnect", .type = V4L2_CTRL_TYPE_BUTTON, }; static const struct v4l2_area area = { .width = 1000, .height = 2000, }; static const struct v4l2_ctrl_config vivid_ctrl_area = { .ops = 
&vivid_user_gen_ctrl_ops, .id = VIVID_CID_AREA, .name = "Area", .type = V4L2_CTRL_TYPE_AREA, .p_def.p_const = &area, }; static const struct v4l2_ctrl_config vivid_ctrl_ro_int32 = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_RO_INTEGER, .name = "Read-Only Integer 32 Bits", .type = V4L2_CTRL_TYPE_INTEGER, .flags = V4L2_CTRL_FLAG_READ_ONLY, .min = 0, .max = 255, .step = 1, }; static const struct v4l2_rect rect_def = { .top = 100, .left = 200, .width = 300, .height = 400, }; static const struct v4l2_rect rect_min = { .top = 0, .left = 0, .width = 1, .height = 1, }; static const struct v4l2_rect rect_max = { .top = 0, .left = 0, .width = 1000, .height = 2000, }; static const struct v4l2_ctrl_config vivid_ctrl_rect = { .ops = &vivid_user_gen_ctrl_ops, .id = VIVID_CID_RECT, .name = "Rect", .type = V4L2_CTRL_TYPE_RECT, .flags = V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX, .p_def.p_const = &rect_def, .p_min.p_const = &rect_min, .p_max.p_const = &rect_max, }; /* Framebuffer Controls */ static int vivid_fb_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_fb); switch (ctrl->id) { case VIVID_CID_CLEAR_FB: vivid_fb_clear(dev); break; } return 0; } static const struct v4l2_ctrl_ops vivid_fb_ctrl_ops = { .s_ctrl = vivid_fb_s_ctrl, }; static const struct v4l2_ctrl_config vivid_ctrl_clear_fb = { .ops = &vivid_fb_ctrl_ops, .id = VIVID_CID_CLEAR_FB, .name = "Clear Framebuffer", .type = V4L2_CTRL_TYPE_BUTTON, }; /* Video User Controls */ static int vivid_user_vid_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_vid); switch (ctrl->id) { case V4L2_CID_AUTOGAIN: dev->gain->val = (jiffies_to_msecs(jiffies) / 1000) & 0xff; break; } return 0; } static int vivid_user_vid_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_vid); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: dev->input_brightness[dev->input] = ctrl->val - dev->input * 128; tpg_s_brightness(&dev->tpg, dev->input_brightness[dev->input]); break; case V4L2_CID_CONTRAST: tpg_s_contrast(&dev->tpg, ctrl->val); break; case V4L2_CID_SATURATION: tpg_s_saturation(&dev->tpg, ctrl->val); break; case V4L2_CID_HUE: tpg_s_hue(&dev->tpg, ctrl->val); break; case V4L2_CID_HFLIP: dev->hflip = ctrl->val; tpg_s_hflip(&dev->tpg, dev->sensor_hflip ^ dev->hflip); break; case V4L2_CID_VFLIP: dev->vflip = ctrl->val; tpg_s_vflip(&dev->tpg, dev->sensor_vflip ^ dev->vflip); break; case V4L2_CID_ALPHA_COMPONENT: tpg_s_alpha_component(&dev->tpg, ctrl->val); break; } return 0; } static const struct v4l2_ctrl_ops vivid_user_vid_ctrl_ops = { .g_volatile_ctrl = vivid_user_vid_g_volatile_ctrl, .s_ctrl = vivid_user_vid_s_ctrl, }; /* Video Capture Controls */ static void vivid_update_power_present(struct vivid_dev *dev) { unsigned int i, j; dev->power_present = 0; for (i = 0, j = 0; i < ARRAY_SIZE(dev->dv_timings_signal_mode); i++) { if (dev->input_type[i] != HDMI) continue; /* * If connected to TPG or HDMI output, and the signal * mode is not NO_SIGNAL, then there is power present. 
*/ if (dev->input_is_connected_to_output[i] != 1 && dev->dv_timings_signal_mode[i] != NO_SIGNAL) dev->power_present |= (1 << j); j++; } __v4l2_ctrl_s_ctrl(dev->ctrl_rx_power_present, dev->power_present); v4l2_ctrl_activate(dev->ctrl_dv_timings, dev->dv_timings_signal_mode[dev->input] == SELECTED_DV_TIMINGS); } static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl) { static const u32 colorspaces[] = { V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709, V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_OPRGB, V4L2_COLORSPACE_BT2020, V4L2_COLORSPACE_DCI_P3, V4L2_COLORSPACE_SMPTE240M, V4L2_COLORSPACE_470_SYSTEM_M, V4L2_COLORSPACE_470_SYSTEM_BG, }; struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_cap); unsigned int i; struct vivid_dev *output_inst = NULL; int index = 0; int hdmi_index, svid_index; s32 input_index = 0; switch (ctrl->id) { case VIVID_CID_TEST_PATTERN: vivid_update_quality(dev); tpg_s_pattern(&dev->tpg, ctrl->val); break; case VIVID_CID_COLORSPACE: tpg_s_colorspace(&dev->tpg, colorspaces[ctrl->val]); vivid_send_source_change(dev, TV); vivid_send_source_change(dev, SVID); vivid_send_source_change(dev, HDMI); vivid_send_source_change(dev, WEBCAM); break; case VIVID_CID_XFER_FUNC: tpg_s_xfer_func(&dev->tpg, ctrl->val); vivid_send_source_change(dev, TV); vivid_send_source_change(dev, SVID); vivid_send_source_change(dev, HDMI); vivid_send_source_change(dev, WEBCAM); break; case VIVID_CID_YCBCR_ENC: tpg_s_ycbcr_enc(&dev->tpg, ctrl->val); vivid_send_source_change(dev, TV); vivid_send_source_change(dev, SVID); vivid_send_source_change(dev, HDMI); vivid_send_source_change(dev, WEBCAM); break; case VIVID_CID_HSV_ENC: tpg_s_hsv_enc(&dev->tpg, ctrl->val ? V4L2_HSV_ENC_256 : V4L2_HSV_ENC_180); vivid_send_source_change(dev, TV); vivid_send_source_change(dev, SVID); vivid_send_source_change(dev, HDMI); vivid_send_source_change(dev, WEBCAM); break; case VIVID_CID_QUANTIZATION: tpg_s_quantization(&dev->tpg, ctrl->val); vivid_send_source_change(dev, TV); vivid_send_source_change(dev, SVID); vivid_send_source_change(dev, HDMI); vivid_send_source_change(dev, WEBCAM); break; case V4L2_CID_DV_RX_RGB_RANGE: if (!vivid_is_hdmi_cap(dev)) break; tpg_s_rgb_range(&dev->tpg, ctrl->val); break; case VIVID_CID_LIMITED_RGB_RANGE: tpg_s_real_rgb_range(&dev->tpg, ctrl->val ? 
V4L2_DV_RGB_RANGE_LIMITED : V4L2_DV_RGB_RANGE_FULL); break; case VIVID_CID_ALPHA_MODE: tpg_s_alpha_mode(&dev->tpg, ctrl->val); break; case VIVID_CID_HOR_MOVEMENT: tpg_s_mv_hor_mode(&dev->tpg, ctrl->val); break; case VIVID_CID_VERT_MOVEMENT: tpg_s_mv_vert_mode(&dev->tpg, ctrl->val); break; case VIVID_CID_OSD_TEXT_MODE: dev->osd_mode = ctrl->val; break; case VIVID_CID_PERCENTAGE_FILL: tpg_s_perc_fill(&dev->tpg, ctrl->val); for (i = 0; i < MAX_VID_CAP_BUFFERS; i++) dev->must_blank[i] = ctrl->val < 100; break; case VIVID_CID_INSERT_SAV: tpg_s_insert_sav(&dev->tpg, ctrl->val); break; case VIVID_CID_INSERT_EAV: tpg_s_insert_eav(&dev->tpg, ctrl->val); break; case VIVID_CID_INSERT_HDMI_VIDEO_GUARD_BAND: tpg_s_insert_hdmi_video_guard_band(&dev->tpg, ctrl->val); break; case VIVID_CID_HFLIP: dev->sensor_hflip = ctrl->val; tpg_s_hflip(&dev->tpg, dev->sensor_hflip ^ dev->hflip); break; case VIVID_CID_VFLIP: dev->sensor_vflip = ctrl->val; tpg_s_vflip(&dev->tpg, dev->sensor_vflip ^ dev->vflip); break; case VIVID_CID_REDUCED_FPS: dev->reduced_fps = ctrl->val; vivid_update_format_cap(dev, true); break; case VIVID_CID_HAS_CROP_CAP: dev->has_crop_cap = ctrl->val; vivid_update_format_cap(dev, true); break; case VIVID_CID_HAS_COMPOSE_CAP: dev->has_compose_cap = ctrl->val; vivid_update_format_cap(dev, true); break; case VIVID_CID_HAS_SCALER_CAP: dev->has_scaler_cap = ctrl->val; vivid_update_format_cap(dev, true); break; case VIVID_CID_SHOW_BORDER: tpg_s_show_border(&dev->tpg, ctrl->val); break; case VIVID_CID_SHOW_SQUARE: tpg_s_show_square(&dev->tpg, ctrl->val); break; case VIVID_CID_STD_ASPECT_RATIO: dev->std_aspect_ratio[dev->input] = ctrl->val; tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev)); break; case VIVID_CID_DV_TIMINGS_SIGNAL_MODE: dev->dv_timings_signal_mode[dev->input] = dev->ctrl_dv_timings_signal_mode->val; dev->query_dv_timings[dev->input] = dev->ctrl_dv_timings->val; vivid_update_power_present(dev); vivid_update_quality(dev); vivid_send_input_source_change(dev, dev->input); break; case VIVID_CID_DV_TIMINGS_ASPECT_RATIO: dev->dv_timings_aspect_ratio[dev->input] = ctrl->val; tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev)); break; case VIVID_CID_TSTAMP_SRC: dev->tstamp_src_is_soe = ctrl->val; dev->vb_vid_cap_q.timestamp_flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; if (dev->tstamp_src_is_soe) dev->vb_vid_cap_q.timestamp_flags |= V4L2_BUF_FLAG_TSTAMP_SRC_SOE; break; case VIVID_CID_MAX_EDID_BLOCKS: dev->edid_max_blocks = ctrl->val; if (dev->edid_blocks > dev->edid_max_blocks) dev->edid_blocks = dev->edid_max_blocks; break; case VIVID_CID_HDMI_IS_CONNECTED_TO_OUTPUT(0) ... 
VIVID_CID_HDMI_IS_CONNECTED_TO_OUTPUT(14): hdmi_index = ctrl->id - VIVID_CID_HDMI_IS_CONNECTED_TO_OUTPUT(0); output_inst = vivid_ctrl_hdmi_to_output_instance[ctrl->cur.val]; index = vivid_ctrl_hdmi_to_output_index[ctrl->cur.val]; input_index = dev->hdmi_index_to_input_index[hdmi_index]; dev->input_is_connected_to_output[input_index] = ctrl->val; if (output_inst) { output_inst->output_to_input_instance[index] = NULL; vivid_update_outputs(output_inst); cec_phys_addr_invalidate(output_inst->cec_tx_adap[index]); } if (ctrl->val >= FIXED_MENU_ITEMS) { output_inst = vivid_ctrl_hdmi_to_output_instance[ctrl->val]; index = vivid_ctrl_hdmi_to_output_index[ctrl->val]; output_inst->output_to_input_instance[index] = dev; output_inst->output_to_input_index[index] = dev->hdmi_index_to_input_index[hdmi_index]; } spin_lock(&hdmi_output_skip_mask_lock); hdmi_to_output_menu_skip_mask &= ~(1ULL << ctrl->cur.val); if (ctrl->val >= FIXED_MENU_ITEMS) hdmi_to_output_menu_skip_mask |= 1ULL << ctrl->val; spin_unlock(&hdmi_output_skip_mask_lock); vivid_update_power_present(dev); vivid_update_quality(dev); vivid_send_input_source_change(dev, dev->hdmi_index_to_input_index[hdmi_index]); if (ctrl->val < FIXED_MENU_ITEMS && ctrl->cur.val < FIXED_MENU_ITEMS) break; spin_lock(&hdmi_output_skip_mask_lock); hdmi_input_update_outputs_mask |= 1 << dev->inst; spin_unlock(&hdmi_output_skip_mask_lock); queue_work(update_hdmi_ctrls_workqueue, &dev->update_hdmi_ctrl_work); break; case VIVID_CID_SVID_IS_CONNECTED_TO_OUTPUT(0) ... VIVID_CID_SVID_IS_CONNECTED_TO_OUTPUT(15): svid_index = ctrl->id - VIVID_CID_SVID_IS_CONNECTED_TO_OUTPUT(0); output_inst = vivid_ctrl_svid_to_output_instance[ctrl->cur.val]; index = vivid_ctrl_svid_to_output_index[ctrl->cur.val]; input_index = dev->svid_index_to_input_index[svid_index]; dev->input_is_connected_to_output[input_index] = ctrl->val; if (output_inst) output_inst->output_to_input_instance[index] = NULL; if (ctrl->val >= FIXED_MENU_ITEMS) { output_inst = vivid_ctrl_svid_to_output_instance[ctrl->val]; index = vivid_ctrl_svid_to_output_index[ctrl->val]; output_inst->output_to_input_instance[index] = dev; output_inst->output_to_input_index[index] = dev->svid_index_to_input_index[svid_index]; } spin_lock(&svid_output_skip_mask_lock); svid_to_output_menu_skip_mask &= ~(1ULL << ctrl->cur.val); if (ctrl->val >= FIXED_MENU_ITEMS) svid_to_output_menu_skip_mask |= 1ULL << ctrl->val; spin_unlock(&svid_output_skip_mask_lock); vivid_update_quality(dev); vivid_send_input_source_change(dev, dev->svid_index_to_input_index[svid_index]); if (ctrl->val < FIXED_MENU_ITEMS && ctrl->cur.val < FIXED_MENU_ITEMS) break; queue_work(update_svid_ctrls_workqueue, &dev->update_svid_ctrl_work); break; } return 0; } static const struct v4l2_ctrl_ops vivid_vid_cap_ctrl_ops = { .s_ctrl = vivid_vid_cap_s_ctrl, }; static const char * const vivid_ctrl_hor_movement_strings[] = { "Move Left Fast", "Move Left", "Move Left Slow", "No Movement", "Move Right Slow", "Move Right", "Move Right Fast", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_hor_movement = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_HOR_MOVEMENT, .name = "Horizontal Movement", .type = V4L2_CTRL_TYPE_MENU, .max = TPG_MOVE_POS_FAST, .def = TPG_MOVE_NONE, .qmenu = vivid_ctrl_hor_movement_strings, }; static const char * const vivid_ctrl_vert_movement_strings[] = { "Move Up Fast", "Move Up", "Move Up Slow", "No Movement", "Move Down Slow", "Move Down", "Move Down Fast", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_vert_movement = { .ops = 
&vivid_vid_cap_ctrl_ops, .id = VIVID_CID_VERT_MOVEMENT, .name = "Vertical Movement", .type = V4L2_CTRL_TYPE_MENU, .max = TPG_MOVE_POS_FAST, .def = TPG_MOVE_NONE, .qmenu = vivid_ctrl_vert_movement_strings, }; static const struct v4l2_ctrl_config vivid_ctrl_show_border = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_SHOW_BORDER, .name = "Show Border", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_show_square = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_SHOW_SQUARE, .name = "Show Square", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const char * const vivid_ctrl_osd_mode_strings[] = { "All", "Counters Only", "None", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_osd_mode = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_OSD_TEXT_MODE, .name = "OSD Text Mode", .type = V4L2_CTRL_TYPE_MENU, .max = ARRAY_SIZE(vivid_ctrl_osd_mode_strings) - 2, .qmenu = vivid_ctrl_osd_mode_strings, }; static const struct v4l2_ctrl_config vivid_ctrl_perc_fill = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_PERCENTAGE_FILL, .name = "Fill Percentage of Frame", .type = V4L2_CTRL_TYPE_INTEGER, .min = 0, .max = 100, .def = 100, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_insert_sav = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_INSERT_SAV, .name = "Insert SAV Code in Image", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_insert_eav = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_INSERT_EAV, .name = "Insert EAV Code in Image", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_insert_hdmi_video_guard_band = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_INSERT_HDMI_VIDEO_GUARD_BAND, .name = "Insert Video Guard Band", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_hflip = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_HFLIP, .name = "Sensor Flipped Horizontally", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_vflip = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_VFLIP, .name = "Sensor Flipped Vertically", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_reduced_fps = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_REDUCED_FPS, .name = "Reduced Framerate", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_has_crop_cap = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_HAS_CROP_CAP, .name = "Enable Capture Cropping", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .def = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_has_compose_cap = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_HAS_COMPOSE_CAP, .name = "Enable Capture Composing", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .def = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_has_scaler_cap = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_HAS_SCALER_CAP, .name = "Enable Capture Scaler", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .def = 1, .step = 1, }; static const char * const vivid_ctrl_tstamp_src_strings[] = { "End of Frame", "Start of Exposure", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_tstamp_src = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_TSTAMP_SRC, .name = "Timestamp Source", .type = V4L2_CTRL_TYPE_MENU, .max = 
ARRAY_SIZE(vivid_ctrl_tstamp_src_strings) - 2, .qmenu = vivid_ctrl_tstamp_src_strings, }; static const struct v4l2_ctrl_config vivid_ctrl_std_aspect_ratio = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_STD_ASPECT_RATIO, .name = "Standard Aspect Ratio", .type = V4L2_CTRL_TYPE_MENU, .min = 1, .max = 4, .def = 1, .qmenu = tpg_aspect_strings, }; static const char * const vivid_ctrl_dv_timings_signal_mode_strings[] = { "Current DV Timings", "No Signal", "No Lock", "Out of Range", "Selected DV Timings", "Cycle Through All DV Timings", "Custom DV Timings", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_dv_timings_signal_mode = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_DV_TIMINGS_SIGNAL_MODE, .name = "DV Timings Signal Mode", .type = V4L2_CTRL_TYPE_MENU, .max = 5, .qmenu = vivid_ctrl_dv_timings_signal_mode_strings, }; static const struct v4l2_ctrl_config vivid_ctrl_dv_timings_aspect_ratio = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_DV_TIMINGS_ASPECT_RATIO, .name = "DV Timings Aspect Ratio", .type = V4L2_CTRL_TYPE_MENU, .max = 3, .qmenu = tpg_aspect_strings, }; static const struct v4l2_ctrl_config vivid_ctrl_max_edid_blocks = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_MAX_EDID_BLOCKS, .name = "Maximum EDID Blocks", .type = V4L2_CTRL_TYPE_INTEGER, .min = 1, .max = 256, .def = 2, .step = 1, }; static const char * const vivid_ctrl_colorspace_strings[] = { "SMPTE 170M", "Rec. 709", "sRGB", "opRGB", "BT.2020", "DCI-P3", "SMPTE 240M", "470 System M", "470 System BG", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_colorspace = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_COLORSPACE, .name = "Colorspace", .type = V4L2_CTRL_TYPE_MENU, .max = ARRAY_SIZE(vivid_ctrl_colorspace_strings) - 2, .def = 2, .qmenu = vivid_ctrl_colorspace_strings, }; static const char * const vivid_ctrl_xfer_func_strings[] = { "Default", "Rec. 709", "sRGB", "opRGB", "SMPTE 240M", "None", "DCI-P3", "SMPTE 2084", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_xfer_func = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_XFER_FUNC, .name = "Transfer Function", .type = V4L2_CTRL_TYPE_MENU, .max = ARRAY_SIZE(vivid_ctrl_xfer_func_strings) - 2, .qmenu = vivid_ctrl_xfer_func_strings, }; static const char * const vivid_ctrl_ycbcr_enc_strings[] = { "Default", "ITU-R 601", "Rec. 
709", "xvYCC 601", "xvYCC 709", "", "BT.2020", "BT.2020 Constant Luminance", "SMPTE 240M", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_ycbcr_enc = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_YCBCR_ENC, .name = "Y'CbCr Encoding", .type = V4L2_CTRL_TYPE_MENU, .menu_skip_mask = 1 << 5, .max = ARRAY_SIZE(vivid_ctrl_ycbcr_enc_strings) - 2, .qmenu = vivid_ctrl_ycbcr_enc_strings, }; static const char * const vivid_ctrl_hsv_enc_strings[] = { "Hue 0-179", "Hue 0-256", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_hsv_enc = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_HSV_ENC, .name = "HSV Encoding", .type = V4L2_CTRL_TYPE_MENU, .max = ARRAY_SIZE(vivid_ctrl_hsv_enc_strings) - 2, .qmenu = vivid_ctrl_hsv_enc_strings, }; static const char * const vivid_ctrl_quantization_strings[] = { "Default", "Full Range", "Limited Range", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_quantization = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_QUANTIZATION, .name = "Quantization", .type = V4L2_CTRL_TYPE_MENU, .max = ARRAY_SIZE(vivid_ctrl_quantization_strings) - 2, .qmenu = vivid_ctrl_quantization_strings, }; static const struct v4l2_ctrl_config vivid_ctrl_alpha_mode = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_ALPHA_MODE, .name = "Apply Alpha To Red Only", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_limited_rgb_range = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_LIMITED_RGB_RANGE, .name = "Limited RGB Range (16-235)", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; /* VBI Capture Control */ static int vivid_vbi_cap_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vbi_cap); switch (ctrl->id) { case VIVID_CID_VBI_CAP_INTERLACED: dev->vbi_cap_interlaced = ctrl->val; break; } return 0; } static const struct v4l2_ctrl_ops vivid_vbi_cap_ctrl_ops = { .s_ctrl = vivid_vbi_cap_s_ctrl, }; static const struct v4l2_ctrl_config vivid_ctrl_vbi_cap_interlaced = { .ops = &vivid_vbi_cap_ctrl_ops, .id = VIVID_CID_VBI_CAP_INTERLACED, .name = "Interlaced VBI Format", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; /* Video Output Controls */ static int vivid_vid_out_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_out); struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt; switch (ctrl->id) { case VIVID_CID_HAS_CROP_OUT: dev->has_crop_out = ctrl->val; vivid_update_format_out(dev); break; case VIVID_CID_HAS_COMPOSE_OUT: dev->has_compose_out = ctrl->val; vivid_update_format_out(dev); break; case VIVID_CID_HAS_SCALER_OUT: dev->has_scaler_out = ctrl->val; vivid_update_format_out(dev); break; case V4L2_CID_DV_TX_MODE: dev->dvi_d_out = ctrl->val == V4L2_DV_TX_MODE_DVI_D; if (!vivid_is_hdmi_out(dev)) break; if (!dev->dvi_d_out && (bt->flags & V4L2_DV_FL_IS_CE_VIDEO)) { if (bt->width == 720 && bt->height <= 576) dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M; else dev->colorspace_out = V4L2_COLORSPACE_REC709; dev->quantization_out = V4L2_QUANTIZATION_DEFAULT; } else { dev->colorspace_out = V4L2_COLORSPACE_SRGB; dev->quantization_out = dev->dvi_d_out ? 
V4L2_QUANTIZATION_LIM_RANGE : V4L2_QUANTIZATION_DEFAULT; } if (vivid_output_is_connected_to(dev)) { struct vivid_dev *dev_rx = vivid_output_is_connected_to(dev); vivid_send_source_change(dev_rx, HDMI); } break; } return 0; } static const struct v4l2_ctrl_ops vivid_vid_out_ctrl_ops = { .s_ctrl = vivid_vid_out_s_ctrl, }; static const struct v4l2_ctrl_config vivid_ctrl_has_crop_out = { .ops = &vivid_vid_out_ctrl_ops, .id = VIVID_CID_HAS_CROP_OUT, .name = "Enable Output Cropping", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .def = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_has_compose_out = { .ops = &vivid_vid_out_ctrl_ops, .id = VIVID_CID_HAS_COMPOSE_OUT, .name = "Enable Output Composing", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .def = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_has_scaler_out = { .ops = &vivid_vid_out_ctrl_ops, .id = VIVID_CID_HAS_SCALER_OUT, .name = "Enable Output Scaler", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .def = 1, .step = 1, }; /* Streaming Controls */ static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_streaming); switch (ctrl->id) { case VIVID_CID_DQBUF_ERROR: dev->dqbuf_error = true; break; case VIVID_CID_PERC_DROPPED: dev->perc_dropped_buffers = ctrl->val; break; case VIVID_CID_QUEUE_SETUP_ERROR: dev->queue_setup_error = true; break; case VIVID_CID_BUF_PREPARE_ERROR: dev->buf_prepare_error = true; break; case VIVID_CID_START_STR_ERROR: dev->start_streaming_error = true; break; case VIVID_CID_REQ_VALIDATE_ERROR: dev->req_validate_error = true; break; case VIVID_CID_QUEUE_ERROR: if (vb2_start_streaming_called(&dev->vb_vid_cap_q)) vb2_queue_error(&dev->vb_vid_cap_q); if (vb2_start_streaming_called(&dev->vb_vbi_cap_q)) vb2_queue_error(&dev->vb_vbi_cap_q); if (vb2_start_streaming_called(&dev->vb_vid_out_q)) vb2_queue_error(&dev->vb_vid_out_q); if (vb2_start_streaming_called(&dev->vb_vbi_out_q)) vb2_queue_error(&dev->vb_vbi_out_q); if (vb2_start_streaming_called(&dev->vb_sdr_cap_q)) vb2_queue_error(&dev->vb_sdr_cap_q); break; case VIVID_CID_SEQ_WRAP: dev->seq_wrap = ctrl->val; break; case VIVID_CID_TIME_WRAP: dev->time_wrap = ctrl->val; if (dev->time_wrap == 1) dev->time_wrap = (1ULL << 63) - NSEC_PER_SEC * 16ULL; else if (dev->time_wrap == 2) dev->time_wrap = ((1ULL << 31) - 16) * NSEC_PER_SEC; break; } return 0; } static const struct v4l2_ctrl_ops vivid_streaming_ctrl_ops = { .s_ctrl = vivid_streaming_s_ctrl, }; static const struct v4l2_ctrl_config vivid_ctrl_dqbuf_error = { .ops = &vivid_streaming_ctrl_ops, .id = VIVID_CID_DQBUF_ERROR, .name = "Inject V4L2_BUF_FLAG_ERROR", .type = V4L2_CTRL_TYPE_BUTTON, }; static const struct v4l2_ctrl_config vivid_ctrl_perc_dropped = { .ops = &vivid_streaming_ctrl_ops, .id = VIVID_CID_PERC_DROPPED, .name = "Percentage of Dropped Buffers", .type = V4L2_CTRL_TYPE_INTEGER, .min = 0, .max = 100, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_queue_setup_error = { .ops = &vivid_streaming_ctrl_ops, .id = VIVID_CID_QUEUE_SETUP_ERROR, .name = "Inject VIDIOC_REQBUFS Error", .type = V4L2_CTRL_TYPE_BUTTON, }; static const struct v4l2_ctrl_config vivid_ctrl_buf_prepare_error = { .ops = &vivid_streaming_ctrl_ops, .id = VIVID_CID_BUF_PREPARE_ERROR, .name = "Inject VIDIOC_QBUF Error", .type = V4L2_CTRL_TYPE_BUTTON, }; static const struct v4l2_ctrl_config vivid_ctrl_start_streaming_error = { .ops = &vivid_streaming_ctrl_ops, .id = VIVID_CID_START_STR_ERROR, .name = "Inject VIDIOC_STREAMON Error", .type 
= V4L2_CTRL_TYPE_BUTTON, }; static const struct v4l2_ctrl_config vivid_ctrl_queue_error = { .ops = &vivid_streaming_ctrl_ops, .id = VIVID_CID_QUEUE_ERROR, .name = "Inject Fatal Streaming Error", .type = V4L2_CTRL_TYPE_BUTTON, }; #ifdef CONFIG_MEDIA_CONTROLLER static const struct v4l2_ctrl_config vivid_ctrl_req_validate_error = { .ops = &vivid_streaming_ctrl_ops, .id = VIVID_CID_REQ_VALIDATE_ERROR, .name = "Inject req_validate() Error", .type = V4L2_CTRL_TYPE_BUTTON, }; #endif static const struct v4l2_ctrl_config vivid_ctrl_seq_wrap = { .ops = &vivid_streaming_ctrl_ops, .id = VIVID_CID_SEQ_WRAP, .name = "Wrap Sequence Number", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const char * const vivid_ctrl_time_wrap_strings[] = { "None", "64 Bit", "32 Bit", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_time_wrap = { .ops = &vivid_streaming_ctrl_ops, .id = VIVID_CID_TIME_WRAP, .name = "Wrap Timestamp", .type = V4L2_CTRL_TYPE_MENU, .max = ARRAY_SIZE(vivid_ctrl_time_wrap_strings) - 2, .qmenu = vivid_ctrl_time_wrap_strings, }; /* SDTV Capture Controls */ static int vivid_sdtv_cap_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_sdtv_cap); switch (ctrl->id) { case VIVID_CID_STD_SIGNAL_MODE: dev->std_signal_mode[dev->input] = dev->ctrl_std_signal_mode->val; if (dev->std_signal_mode[dev->input] == SELECTED_STD) dev->query_std[dev->input] = vivid_standard[dev->ctrl_standard->val]; v4l2_ctrl_activate(dev->ctrl_standard, dev->std_signal_mode[dev->input] == SELECTED_STD); vivid_update_quality(dev); vivid_send_source_change(dev, TV); vivid_send_source_change(dev, SVID); break; } return 0; } static const struct v4l2_ctrl_ops vivid_sdtv_cap_ctrl_ops = { .s_ctrl = vivid_sdtv_cap_s_ctrl, }; static const char * const vivid_ctrl_std_signal_mode_strings[] = { "Current Standard", "No Signal", "No Lock", "", "Selected Standard", "Cycle Through All Standards", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_std_signal_mode = { .ops = &vivid_sdtv_cap_ctrl_ops, .id = VIVID_CID_STD_SIGNAL_MODE, .name = "Standard Signal Mode", .type = V4L2_CTRL_TYPE_MENU, .max = ARRAY_SIZE(vivid_ctrl_std_signal_mode_strings) - 2, .menu_skip_mask = 1 << 3, .qmenu = vivid_ctrl_std_signal_mode_strings, }; static const struct v4l2_ctrl_config vivid_ctrl_standard = { .ops = &vivid_sdtv_cap_ctrl_ops, .id = VIVID_CID_STANDARD, .name = "Standard", .type = V4L2_CTRL_TYPE_MENU, .max = 14, .qmenu = vivid_ctrl_standard_strings, }; /* Radio Receiver Controls */ static int vivid_radio_rx_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_radio_rx); switch (ctrl->id) { case VIVID_CID_RADIO_SEEK_MODE: dev->radio_rx_hw_seek_mode = ctrl->val; break; case VIVID_CID_RADIO_SEEK_PROG_LIM: dev->radio_rx_hw_seek_prog_lim = ctrl->val; break; case VIVID_CID_RADIO_RX_RDS_RBDS: dev->rds_gen.use_rbds = ctrl->val; break; case VIVID_CID_RADIO_RX_RDS_BLOCKIO: dev->radio_rx_rds_controls = ctrl->val; dev->radio_rx_caps &= ~V4L2_CAP_READWRITE; dev->radio_rx_rds_use_alternates = false; if (!dev->radio_rx_rds_controls) { dev->radio_rx_caps |= V4L2_CAP_READWRITE; __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_pty, 0); __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ta, 0); __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_tp, 0); __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ms, 0); __v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_psname, ""); __v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_radiotext, ""); } v4l2_ctrl_activate(dev->radio_rx_rds_pty, 
dev->radio_rx_rds_controls); v4l2_ctrl_activate(dev->radio_rx_rds_psname, dev->radio_rx_rds_controls); v4l2_ctrl_activate(dev->radio_rx_rds_radiotext, dev->radio_rx_rds_controls); v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls); v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls); v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls); dev->radio_rx_dev.device_caps = dev->radio_rx_caps; break; case V4L2_CID_RDS_RECEPTION: dev->radio_rx_rds_enabled = ctrl->val; break; } return 0; } static const struct v4l2_ctrl_ops vivid_radio_rx_ctrl_ops = { .s_ctrl = vivid_radio_rx_s_ctrl, }; static const char * const vivid_ctrl_radio_rds_mode_strings[] = { "Block I/O", "Controls", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_radio_rx_rds_blockio = { .ops = &vivid_radio_rx_ctrl_ops, .id = VIVID_CID_RADIO_RX_RDS_BLOCKIO, .name = "RDS Rx I/O Mode", .type = V4L2_CTRL_TYPE_MENU, .qmenu = vivid_ctrl_radio_rds_mode_strings, .max = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_radio_rx_rds_rbds = { .ops = &vivid_radio_rx_ctrl_ops, .id = VIVID_CID_RADIO_RX_RDS_RBDS, .name = "Generate RBDS Instead of RDS", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; static const char * const vivid_ctrl_radio_hw_seek_mode_strings[] = { "Bounded", "Wrap Around", "Both", NULL, }; static const struct v4l2_ctrl_config vivid_ctrl_radio_hw_seek_mode = { .ops = &vivid_radio_rx_ctrl_ops, .id = VIVID_CID_RADIO_SEEK_MODE, .name = "Radio HW Seek Mode", .type = V4L2_CTRL_TYPE_MENU, .max = 2, .qmenu = vivid_ctrl_radio_hw_seek_mode_strings, }; static const struct v4l2_ctrl_config vivid_ctrl_radio_hw_seek_prog_lim = { .ops = &vivid_radio_rx_ctrl_ops, .id = VIVID_CID_RADIO_SEEK_PROG_LIM, .name = "Radio Programmable HW Seek", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, }; /* Radio Transmitter Controls */ static int vivid_radio_tx_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_radio_tx); switch (ctrl->id) { case VIVID_CID_RADIO_TX_RDS_BLOCKIO: dev->radio_tx_rds_controls = ctrl->val; dev->radio_tx_caps &= ~V4L2_CAP_READWRITE; if (!dev->radio_tx_rds_controls) dev->radio_tx_caps |= V4L2_CAP_READWRITE; dev->radio_tx_dev.device_caps = dev->radio_tx_caps; break; case V4L2_CID_RDS_TX_PTY: if (dev->radio_rx_rds_controls) v4l2_ctrl_s_ctrl(dev->radio_rx_rds_pty, ctrl->val); break; case V4L2_CID_RDS_TX_PS_NAME: if (dev->radio_rx_rds_controls) v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_psname, ctrl->p_new.p_char); break; case V4L2_CID_RDS_TX_RADIO_TEXT: if (dev->radio_rx_rds_controls) v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_radiotext, ctrl->p_new.p_char); break; case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: if (dev->radio_rx_rds_controls) v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ta, ctrl->val); break; case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: if (dev->radio_rx_rds_controls) v4l2_ctrl_s_ctrl(dev->radio_rx_rds_tp, ctrl->val); break; case V4L2_CID_RDS_TX_MUSIC_SPEECH: if (dev->radio_rx_rds_controls) v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ms, ctrl->val); break; } return 0; } static const struct v4l2_ctrl_ops vivid_radio_tx_ctrl_ops = { .s_ctrl = vivid_radio_tx_s_ctrl, }; static const struct v4l2_ctrl_config vivid_ctrl_radio_tx_rds_blockio = { .ops = &vivid_radio_tx_ctrl_ops, .id = VIVID_CID_RADIO_TX_RDS_BLOCKIO, .name = "RDS Tx I/O Mode", .type = V4L2_CTRL_TYPE_MENU, .qmenu = vivid_ctrl_radio_rds_mode_strings, .max = 1, .def = 1, }; /* SDR Capture Controls */ static int vivid_sdr_cap_s_ctrl(struct v4l2_ctrl *ctrl) { struct 
vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_sdr_cap); switch (ctrl->id) { case VIVID_CID_SDR_CAP_FM_DEVIATION: dev->sdr_fm_deviation = ctrl->val; break; } return 0; } static const struct v4l2_ctrl_ops vivid_sdr_cap_ctrl_ops = { .s_ctrl = vivid_sdr_cap_s_ctrl, }; static const struct v4l2_ctrl_config vivid_ctrl_sdr_cap_fm_deviation = { .ops = &vivid_sdr_cap_ctrl_ops, .id = VIVID_CID_SDR_CAP_FM_DEVIATION, .name = "FM Deviation", .type = V4L2_CTRL_TYPE_INTEGER, .min = 100, .max = 200000, .def = 75000, .step = 1, }; /* Metadata Capture Control */ static int vivid_meta_cap_s_ctrl(struct v4l2_ctrl *ctrl) { struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_meta_cap); switch (ctrl->id) { case VIVID_CID_META_CAP_GENERATE_PTS: dev->meta_pts = ctrl->val; break; case VIVID_CID_META_CAP_GENERATE_SCR: dev->meta_scr = ctrl->val; break; } return 0; } static const struct v4l2_ctrl_ops vivid_meta_cap_ctrl_ops = { .s_ctrl = vivid_meta_cap_s_ctrl, }; static const struct v4l2_ctrl_config vivid_ctrl_meta_has_pts = { .ops = &vivid_meta_cap_ctrl_ops, .id = VIVID_CID_META_CAP_GENERATE_PTS, .name = "Generate PTS", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .def = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_meta_has_src_clk = { .ops = &vivid_meta_cap_ctrl_ops, .id = VIVID_CID_META_CAP_GENERATE_SCR, .name = "Generate SCR", .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .def = 1, .step = 1, }; static const struct v4l2_ctrl_config vivid_ctrl_class = { .ops = &vivid_user_gen_ctrl_ops, .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY, .id = VIVID_CID_VIVID_CLASS, .name = "Vivid Controls", .type = V4L2_CTRL_TYPE_CTRL_CLASS, }; int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap, bool show_ccs_out, bool no_error_inj, bool has_sdtv, bool has_hdmi) { struct v4l2_ctrl_handler *hdl_user_gen = &dev->ctrl_hdl_user_gen; struct v4l2_ctrl_handler *hdl_user_vid = &dev->ctrl_hdl_user_vid; struct v4l2_ctrl_handler *hdl_user_aud = &dev->ctrl_hdl_user_aud; struct v4l2_ctrl_handler *hdl_streaming = &dev->ctrl_hdl_streaming; struct v4l2_ctrl_handler *hdl_sdtv_cap = &dev->ctrl_hdl_sdtv_cap; struct v4l2_ctrl_handler *hdl_loop_cap = &dev->ctrl_hdl_loop_cap; struct v4l2_ctrl_handler *hdl_fb = &dev->ctrl_hdl_fb; struct v4l2_ctrl_handler *hdl_vid_cap = &dev->ctrl_hdl_vid_cap; struct v4l2_ctrl_handler *hdl_vid_out = &dev->ctrl_hdl_vid_out; struct v4l2_ctrl_handler *hdl_vbi_cap = &dev->ctrl_hdl_vbi_cap; struct v4l2_ctrl_handler *hdl_vbi_out = &dev->ctrl_hdl_vbi_out; struct v4l2_ctrl_handler *hdl_radio_rx = &dev->ctrl_hdl_radio_rx; struct v4l2_ctrl_handler *hdl_radio_tx = &dev->ctrl_hdl_radio_tx; struct v4l2_ctrl_handler *hdl_sdr_cap = &dev->ctrl_hdl_sdr_cap; struct v4l2_ctrl_handler *hdl_meta_cap = &dev->ctrl_hdl_meta_cap; struct v4l2_ctrl_handler *hdl_meta_out = &dev->ctrl_hdl_meta_out; struct v4l2_ctrl_handler *hdl_tch_cap = &dev->ctrl_hdl_touch_cap; struct v4l2_ctrl_config vivid_ctrl_dv_timings = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_DV_TIMINGS, .name = "DV Timings", .type = V4L2_CTRL_TYPE_MENU, }; int i; v4l2_ctrl_handler_init(hdl_user_gen, 10); v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_user_vid, 9); v4l2_ctrl_new_custom(hdl_user_vid, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_user_aud, 2); v4l2_ctrl_new_custom(hdl_user_aud, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_streaming, 8); v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_class, NULL); 
v4l2_ctrl_handler_init(hdl_sdtv_cap, 2); v4l2_ctrl_new_custom(hdl_sdtv_cap, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_loop_cap, 1); v4l2_ctrl_new_custom(hdl_loop_cap, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_fb, 1); v4l2_ctrl_new_custom(hdl_fb, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_vid_cap, 55); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_vid_out, 26); if (!no_error_inj || dev->has_fb || dev->num_hdmi_outputs) v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_vbi_cap, 21); v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_vbi_out, 19); if (!no_error_inj) v4l2_ctrl_new_custom(hdl_vbi_out, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_radio_rx, 17); v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_radio_tx, 17); v4l2_ctrl_new_custom(hdl_radio_tx, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_sdr_cap, 19); v4l2_ctrl_new_custom(hdl_sdr_cap, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_meta_cap, 2); v4l2_ctrl_new_custom(hdl_meta_cap, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_meta_out, 2); v4l2_ctrl_new_custom(hdl_meta_out, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_tch_cap, 2); v4l2_ctrl_new_custom(hdl_tch_cap, &vivid_ctrl_class, NULL); /* User Controls */ dev->volume = v4l2_ctrl_new_std(hdl_user_aud, NULL, V4L2_CID_AUDIO_VOLUME, 0, 255, 1, 200); dev->mute = v4l2_ctrl_new_std(hdl_user_aud, NULL, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); if (dev->has_vid_cap) { dev->brightness = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); for (i = 0; i < MAX_INPUTS; i++) dev->input_brightness[i] = 128; dev->contrast = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 128); dev->saturation = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 128); dev->hue = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops, V4L2_CID_HUE, -128, 128, 1, 0); v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); dev->autogain = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); dev->gain = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops, V4L2_CID_GAIN, 0, 255, 1, 100); dev->alpha = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0); } dev->button = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_button, NULL); dev->int32 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int32, NULL); dev->int64 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int64, NULL); dev->boolean = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_boolean, NULL); dev->menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_menu, NULL); dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL); dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL); dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL); dev->ro_int32 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_ro_int32, NULL); v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_area, NULL); v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_rect, NULL); v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL); v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_dyn_array, NULL); 
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL); v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL); dev->pixel_array = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_pixel_array, NULL); v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_s32_array, NULL); v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_s64_array, NULL); if (dev->has_vid_cap) { /* Image Processing Controls */ struct v4l2_ctrl_config vivid_ctrl_test_pattern = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_TEST_PATTERN, .name = "Test Pattern", .type = V4L2_CTRL_TYPE_MENU, .max = TPG_PAT_NOISE, .qmenu = tpg_pattern_strings, }; dev->test_pattern = v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_test_pattern, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_perc_fill, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hor_movement, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_vert_movement, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_osd_mode, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_show_border, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_show_square, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hflip, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_vflip, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_sav, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_eav, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_hdmi_video_guard_band, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_reduced_fps, NULL); WARN_ON(dev->num_hdmi_inputs > MAX_HDMI_INPUTS); WARN_ON(dev->num_svid_inputs > MAX_SVID_INPUTS); for (u8 i = 0; i < dev->num_hdmi_inputs; i++) { snprintf(dev->ctrl_hdmi_to_output_names[i], sizeof(dev->ctrl_hdmi_to_output_names[i]), "HDMI %03u-%u Is Connected To", dev->inst, i); } for (u8 i = 0; i < dev->num_hdmi_inputs; i++) { struct v4l2_ctrl_config ctrl_config = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_HDMI_IS_CONNECTED_TO_OUTPUT(i), .name = dev->ctrl_hdmi_to_output_names[i], .type = V4L2_CTRL_TYPE_MENU, .max = 1, .qmenu = (const char * const *)vivid_ctrl_hdmi_to_output_strings, }; dev->ctrl_hdmi_to_output[i] = v4l2_ctrl_new_custom(hdl_vid_cap, &ctrl_config, NULL); } for (u8 i = 0; i < dev->num_svid_inputs; i++) { snprintf(dev->ctrl_svid_to_output_names[i], sizeof(dev->ctrl_svid_to_output_names[i]), "S-Video %03u-%u Is Connected To", dev->inst, i); } for (u8 i = 0; i < dev->num_svid_inputs; i++) { struct v4l2_ctrl_config ctrl_config = { .ops = &vivid_vid_cap_ctrl_ops, .id = VIVID_CID_SVID_IS_CONNECTED_TO_OUTPUT(i), .name = dev->ctrl_svid_to_output_names[i], .type = V4L2_CTRL_TYPE_MENU, .max = 1, .qmenu = (const char * const *)vivid_ctrl_svid_to_output_strings, }; dev->ctrl_svid_to_output[i] = v4l2_ctrl_new_custom(hdl_vid_cap, &ctrl_config, NULL); } if (show_ccs_cap) { dev->ctrl_has_crop_cap = v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_has_crop_cap, NULL); dev->ctrl_has_compose_cap = v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_has_compose_cap, NULL); dev->ctrl_has_scaler_cap = v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_has_scaler_cap, NULL); } v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_tstamp_src, NULL); dev->colorspace = v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_colorspace, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_xfer_func, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_ycbcr_enc, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hsv_enc, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_quantization, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, 
&vivid_ctrl_alpha_mode, NULL); } if (dev->has_vid_out && show_ccs_out) { dev->ctrl_has_crop_out = v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_has_crop_out, NULL); dev->ctrl_has_compose_out = v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_has_compose_out, NULL); dev->ctrl_has_scaler_out = v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_has_scaler_out, NULL); } /* * Testing this driver with v4l2-compliance will trigger the error * injection controls, and after that nothing will work as expected. * So we have a module option to drop these error-injecting controls, * allowing us to run v4l2-compliance again. */ if (!no_error_inj) { v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_disconnect, NULL); v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_dqbuf_error, NULL); v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_perc_dropped, NULL); v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_queue_setup_error, NULL); v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_buf_prepare_error, NULL); v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_start_streaming_error, NULL); v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_queue_error, NULL); #ifdef CONFIG_MEDIA_CONTROLLER v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_req_validate_error, NULL); #endif v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_seq_wrap, NULL); v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_time_wrap, NULL); } if (has_sdtv && (dev->has_vid_cap || dev->has_vbi_cap)) { if (dev->has_vid_cap) v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_std_aspect_ratio, NULL); dev->ctrl_std_signal_mode = v4l2_ctrl_new_custom(hdl_sdtv_cap, &vivid_ctrl_std_signal_mode, NULL); dev->ctrl_standard = v4l2_ctrl_new_custom(hdl_sdtv_cap, &vivid_ctrl_standard, NULL); if (dev->ctrl_std_signal_mode) v4l2_ctrl_cluster(2, &dev->ctrl_std_signal_mode); if (dev->has_raw_vbi_cap) v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_vbi_cap_interlaced, NULL); } if (dev->num_hdmi_inputs) { s64 hdmi_input_mask = GENMASK(dev->num_hdmi_inputs - 1, 0); dev->ctrl_dv_timings_signal_mode = v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_dv_timings_signal_mode, NULL); vivid_ctrl_dv_timings.max = dev->query_dv_timings_size - 1; vivid_ctrl_dv_timings.qmenu = (const char * const *)dev->query_dv_timings_qmenu; dev->ctrl_dv_timings = v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_dv_timings, NULL); if (dev->ctrl_dv_timings_signal_mode) v4l2_ctrl_cluster(2, &dev->ctrl_dv_timings_signal_mode); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_dv_timings_aspect_ratio, NULL); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_max_edid_blocks, NULL); dev->real_rgb_range_cap = v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_limited_rgb_range, NULL); dev->rgb_range_cap = v4l2_ctrl_new_std_menu(hdl_vid_cap, &vivid_vid_cap_ctrl_ops, V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL, 0, V4L2_DV_RGB_RANGE_AUTO); dev->ctrl_rx_power_present = v4l2_ctrl_new_std(hdl_vid_cap, NULL, V4L2_CID_DV_RX_POWER_PRESENT, 0, hdmi_input_mask, 0, hdmi_input_mask); } if (dev->num_hdmi_outputs) { s64 hdmi_output_mask = GENMASK(dev->num_hdmi_outputs - 1, 0); /* * We aren't doing anything with this at the moment, but * HDMI outputs typically have these controls.
*/ dev->ctrl_tx_rgb_range = v4l2_ctrl_new_std_menu(hdl_vid_out, NULL, V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL, 0, V4L2_DV_RGB_RANGE_AUTO); dev->ctrl_tx_mode = v4l2_ctrl_new_std_menu(hdl_vid_out, NULL, V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI, 0, V4L2_DV_TX_MODE_HDMI); dev->ctrl_tx_hotplug = v4l2_ctrl_new_std(hdl_vid_out, NULL, V4L2_CID_DV_TX_HOTPLUG, 0, hdmi_output_mask, 0, 0); dev->ctrl_tx_rxsense = v4l2_ctrl_new_std(hdl_vid_out, NULL, V4L2_CID_DV_TX_RXSENSE, 0, hdmi_output_mask, 0, 0); dev->ctrl_tx_edid_present = v4l2_ctrl_new_std(hdl_vid_out, NULL, V4L2_CID_DV_TX_EDID_PRESENT, 0, hdmi_output_mask, 0, 0); } if (dev->has_fb) v4l2_ctrl_new_custom(hdl_fb, &vivid_ctrl_clear_fb, NULL); if (dev->has_radio_rx) { v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_hw_seek_mode, NULL); v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_hw_seek_prog_lim, NULL); v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_rx_rds_blockio, NULL); v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_rx_rds_rbds, NULL); v4l2_ctrl_new_std(hdl_radio_rx, &vivid_radio_rx_ctrl_ops, V4L2_CID_RDS_RECEPTION, 0, 1, 1, 1); dev->radio_rx_rds_pty = v4l2_ctrl_new_std(hdl_radio_rx, &vivid_radio_rx_ctrl_ops, V4L2_CID_RDS_RX_PTY, 0, 31, 1, 0); dev->radio_rx_rds_psname = v4l2_ctrl_new_std(hdl_radio_rx, &vivid_radio_rx_ctrl_ops, V4L2_CID_RDS_RX_PS_NAME, 0, 8, 8, 0); dev->radio_rx_rds_radiotext = v4l2_ctrl_new_std(hdl_radio_rx, &vivid_radio_rx_ctrl_ops, V4L2_CID_RDS_RX_RADIO_TEXT, 0, 64, 64, 0); dev->radio_rx_rds_ta = v4l2_ctrl_new_std(hdl_radio_rx, &vivid_radio_rx_ctrl_ops, V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT, 0, 1, 1, 0); dev->radio_rx_rds_tp = v4l2_ctrl_new_std(hdl_radio_rx, &vivid_radio_rx_ctrl_ops, V4L2_CID_RDS_RX_TRAFFIC_PROGRAM, 0, 1, 1, 0); dev->radio_rx_rds_ms = v4l2_ctrl_new_std(hdl_radio_rx, &vivid_radio_rx_ctrl_ops, V4L2_CID_RDS_RX_MUSIC_SPEECH, 0, 1, 1, 1); } if (dev->has_radio_tx) { v4l2_ctrl_new_custom(hdl_radio_tx, &vivid_ctrl_radio_tx_rds_blockio, NULL); dev->radio_tx_rds_pi = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_PI, 0, 0xffff, 1, 0x8088); dev->radio_tx_rds_pty = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_PTY, 0, 31, 1, 3); dev->radio_tx_rds_psname = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_PS_NAME, 0, 8, 8, 0); if (dev->radio_tx_rds_psname) v4l2_ctrl_s_ctrl_string(dev->radio_tx_rds_psname, "VIVID-TX"); dev->radio_tx_rds_radiotext = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_RADIO_TEXT, 0, 64 * 2, 64, 0); if (dev->radio_tx_rds_radiotext) v4l2_ctrl_s_ctrl_string(dev->radio_tx_rds_radiotext, "This is a VIVID default Radio Text template text, change at will"); dev->radio_tx_rds_mono_stereo = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_MONO_STEREO, 0, 1, 1, 1); dev->radio_tx_rds_art_head = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_ARTIFICIAL_HEAD, 0, 1, 1, 0); dev->radio_tx_rds_compressed = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_COMPRESSED, 0, 1, 1, 0); dev->radio_tx_rds_dyn_pty = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_DYNAMIC_PTY, 0, 1, 1, 0); dev->radio_tx_rds_ta = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT, 0, 1, 1, 0); dev->radio_tx_rds_tp = v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_TRAFFIC_PROGRAM, 0, 1, 1, 1); dev->radio_tx_rds_ms = 
v4l2_ctrl_new_std(hdl_radio_tx, &vivid_radio_tx_ctrl_ops, V4L2_CID_RDS_TX_MUSIC_SPEECH, 0, 1, 1, 1); } if (dev->has_sdr_cap) { v4l2_ctrl_new_custom(hdl_sdr_cap, &vivid_ctrl_sdr_cap_fm_deviation, NULL); } if (dev->has_meta_cap) { v4l2_ctrl_new_custom(hdl_meta_cap, &vivid_ctrl_meta_has_pts, NULL); v4l2_ctrl_new_custom(hdl_meta_cap, &vivid_ctrl_meta_has_src_clk, NULL); } if (hdl_user_gen->error) return hdl_user_gen->error; if (hdl_user_vid->error) return hdl_user_vid->error; if (hdl_user_aud->error) return hdl_user_aud->error; if (hdl_streaming->error) return hdl_streaming->error; if (hdl_sdr_cap->error) return hdl_sdr_cap->error; if (hdl_loop_cap->error) return hdl_loop_cap->error; if (dev->autogain) v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true); if (dev->has_vid_cap) { v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL, false); v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL, false); v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL, false); v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL, false); v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL, false); v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL, false); if (hdl_vid_cap->error) return hdl_vid_cap->error; dev->vid_cap_dev.ctrl_handler = hdl_vid_cap; } if (dev->has_vid_out) { v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL, false); v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL, false); v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL, false); if (hdl_vid_out->error) return hdl_vid_out->error; dev->vid_out_dev.ctrl_handler = hdl_vid_out; } if (dev->has_vbi_cap) { v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL, false); v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL, false); v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_loop_cap, NULL, false); if (hdl_vbi_cap->error) return hdl_vbi_cap->error; dev->vbi_cap_dev.ctrl_handler = hdl_vbi_cap; } if (dev->has_vbi_out) { v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL, false); if (hdl_vbi_out->error) return hdl_vbi_out->error; dev->vbi_out_dev.ctrl_handler = hdl_vbi_out; } if (dev->has_radio_rx) { v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL, false); if (hdl_radio_rx->error) return hdl_radio_rx->error; dev->radio_rx_dev.ctrl_handler = hdl_radio_rx; } if (dev->has_radio_tx) { v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL, false); if (hdl_radio_tx->error) return hdl_radio_tx->error; dev->radio_tx_dev.ctrl_handler = hdl_radio_tx; } if (dev->has_sdr_cap) { v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL, false); if (hdl_sdr_cap->error) return hdl_sdr_cap->error; dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap; } if (dev->has_meta_cap) { v4l2_ctrl_add_handler(hdl_meta_cap, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_meta_cap, hdl_streaming, NULL, false); if (hdl_meta_cap->error) return hdl_meta_cap->error; dev->meta_cap_dev.ctrl_handler = hdl_meta_cap; } if (dev->has_meta_out) { v4l2_ctrl_add_handler(hdl_meta_out, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_meta_out, hdl_streaming, NULL, false); if (hdl_meta_out->error) return hdl_meta_out->error; 
dev->meta_out_dev.ctrl_handler = hdl_meta_out; } if (dev->has_touch_cap) { v4l2_ctrl_add_handler(hdl_tch_cap, hdl_user_gen, NULL, false); v4l2_ctrl_add_handler(hdl_tch_cap, hdl_streaming, NULL, false); if (hdl_tch_cap->error) return hdl_tch_cap->error; dev->touch_cap_dev.ctrl_handler = hdl_tch_cap; } return 0; } void vivid_free_controls(struct vivid_dev *dev) { v4l2_ctrl_handler_free(&dev->ctrl_hdl_vid_cap); v4l2_ctrl_handler_free(&dev->ctrl_hdl_vid_out); v4l2_ctrl_handler_free(&dev->ctrl_hdl_vbi_cap); v4l2_ctrl_handler_free(&dev->ctrl_hdl_vbi_out); v4l2_ctrl_handler_free(&dev->ctrl_hdl_radio_rx); v4l2_ctrl_handler_free(&dev->ctrl_hdl_radio_tx); v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdr_cap); v4l2_ctrl_handler_free(&dev->ctrl_hdl_user_gen); v4l2_ctrl_handler_free(&dev->ctrl_hdl_user_vid); v4l2_ctrl_handler_free(&dev->ctrl_hdl_user_aud); v4l2_ctrl_handler_free(&dev->ctrl_hdl_streaming); v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdtv_cap); v4l2_ctrl_handler_free(&dev->ctrl_hdl_loop_cap); v4l2_ctrl_handler_free(&dev->ctrl_hdl_fb); v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_cap); v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_out); v4l2_ctrl_handler_free(&dev->ctrl_hdl_touch_cap); }
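/*
 * Editorial sketch (not part of the original vivid-ctrls.c): the custom
 * controls registered above are ordinary V4L2 controls, so userspace can
 * trigger e.g. the "Inject V4L2_BUF_FLAG_ERROR" button through the standard
 * control ioctls. The userspace snippet below is a minimal sketch; the
 * device node path (/dev/video0) is an assumption for illustration only.
 */
#if 0 /* illustrative userspace sketch, not built with the driver */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int trigger_ctrl_by_name(int fd, const char *name)
{
	struct v4l2_queryctrl qc = { .id = V4L2_CTRL_FLAG_NEXT_CTRL };

	/* Walk every control exposed by the device and match on its name. */
	while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
		if (!(qc.flags & V4L2_CTRL_FLAG_DISABLED) &&
		    strcmp((const char *)qc.name, name) == 0) {
			struct v4l2_control c = { .id = qc.id, .value = 1 };

			return ioctl(fd, VIDIOC_S_CTRL, &c);
		}
		qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
	}
	return -1;
}

int main(void)
{
	int fd = open("/dev/video0", O_RDWR); /* assumed vivid video node */

	if (fd < 0 || trigger_ctrl_by_name(fd, "Inject V4L2_BUF_FLAG_ERROR"))
		perror("inject error");
	return 0;
}
#endif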
/* * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/random.h> #include <linux/export.h> #include "rds.h" /* * All of connection management is simplified by serializing it through * work queues that execute in a connection managing thread. * * TCP wants to send acks through sendpage() in response to data_ready(), * but it needs a process context to do so. * * The receive paths need to allocate but can't drop packets (!) so we have * a thread around to block allocating if the receive fast path sees an * allocation failure. */ /* Grand Unified Theory of connection life cycle: * At any point in time, the connection can be in one of these states: * DOWN, CONNECTING, UP, DISCONNECTING, ERROR * * The following transitions are possible: * ANY -> ERROR * UP -> DISCONNECTING * ERROR -> DISCONNECTING * DISCONNECTING -> DOWN * DOWN -> CONNECTING * CONNECTING -> UP * * Transition to state DISCONNECTING/DOWN: * - Inside the shutdown worker; synchronizes with xmit path * through RDS_IN_XMIT, and with connection management callbacks * via c_cm_lock.
* * For receive callbacks, we rely on the underlying transport * (TCP, IB/RDMA) to provide the necessary synchronisation. */ struct workqueue_struct *rds_wq; EXPORT_SYMBOL_GPL(rds_wq); void rds_connect_path_complete(struct rds_conn_path *cp, int curr) { if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) { printk(KERN_WARNING "%s: Cannot transition to state UP, " "current state is %d\n", __func__, atomic_read(&cp->cp_state)); rds_conn_path_drop(cp, false); return; } rdsdebug("conn %p for %pI6c to %pI6c complete\n", cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr); cp->cp_reconnect_jiffies = 0; set_bit(0, &cp->cp_conn->c_map_queued); rcu_read_lock(); if (!rds_destroy_pending(cp->cp_conn)) { queue_delayed_work(rds_wq, &cp->cp_send_w, 0); queue_delayed_work(rds_wq, &cp->cp_recv_w, 0); } rcu_read_unlock(); cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION; } EXPORT_SYMBOL_GPL(rds_connect_path_complete); void rds_connect_complete(struct rds_connection *conn) { rds_connect_path_complete(&conn->c_path[0], RDS_CONN_CONNECTING); } EXPORT_SYMBOL_GPL(rds_connect_complete); /* * This random exponential backoff is relied on to eventually resolve racing * connects. * * If connect attempts race then both parties drop both connections and come * here to wait for a random amount of time before trying again. Eventually * the backoff range will be so much greater than the time it takes to * establish a connection that one of the pair will establish the connection * before the other's random delay fires. * * Connection attempts that arrive while a connection is already established * are also considered to be racing connects. This lets a connection from * a rebooted machine replace an existing stale connection before the transport * notices that the connection has failed. * * We should *always* start with a random backoff; otherwise a broken connection * will always take several iterations to be re-established. 
*/ void rds_queue_reconnect(struct rds_conn_path *cp) { unsigned long rand; struct rds_connection *conn = cp->cp_conn; rdsdebug("conn %p for %pI6c to %pI6c reconnect jiffies %lu\n", conn, &conn->c_laddr, &conn->c_faddr, cp->cp_reconnect_jiffies); /* let peer with smaller addr initiate reconnect, to avoid duels */ if (conn->c_trans->t_type == RDS_TRANS_TCP && rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) >= 0) return; set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); if (cp->cp_reconnect_jiffies == 0) { cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies; rcu_read_lock(); if (!rds_destroy_pending(cp->cp_conn)) queue_delayed_work(rds_wq, &cp->cp_conn_w, 0); rcu_read_unlock(); return; } get_random_bytes(&rand, sizeof(rand)); rdsdebug("%lu delay %lu ceil conn %p for %pI6c -> %pI6c\n", rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies, conn, &conn->c_laddr, &conn->c_faddr); rcu_read_lock(); if (!rds_destroy_pending(cp->cp_conn)) queue_delayed_work(rds_wq, &cp->cp_conn_w, rand % cp->cp_reconnect_jiffies); rcu_read_unlock(); cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2, rds_sysctl_reconnect_max_jiffies); } void rds_connect_worker(struct work_struct *work) { struct rds_conn_path *cp = container_of(work, struct rds_conn_path, cp_conn_w.work); struct rds_connection *conn = cp->cp_conn; int ret; if (cp->cp_index > 0 && rds_addr_cmp(&cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr) >= 0) return; clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING); if (ret) { ret = conn->c_trans->conn_path_connect(cp); rdsdebug("conn %p for %pI6c to %pI6c dispatched, ret %d\n", conn, &conn->c_laddr, &conn->c_faddr, ret); if (ret) { if (rds_conn_path_transition(cp, RDS_CONN_CONNECTING, RDS_CONN_DOWN)) rds_queue_reconnect(cp); else rds_conn_path_error(cp, "connect failed\n"); } } } void rds_send_worker(struct work_struct *work) { struct rds_conn_path *cp = container_of(work, struct rds_conn_path, cp_send_w.work); int ret; if (rds_conn_path_state(cp) == RDS_CONN_UP) { clear_bit(RDS_LL_SEND_FULL, &cp->cp_flags); ret = rds_send_xmit(cp); cond_resched(); rdsdebug("conn %p ret %d\n", cp->cp_conn, ret); switch (ret) { case -EAGAIN: rds_stats_inc(s_send_immediate_retry); queue_delayed_work(rds_wq, &cp->cp_send_w, 0); break; case -ENOMEM: rds_stats_inc(s_send_delayed_retry); queue_delayed_work(rds_wq, &cp->cp_send_w, 2); break; default: break; } } } void rds_recv_worker(struct work_struct *work) { struct rds_conn_path *cp = container_of(work, struct rds_conn_path, cp_recv_w.work); int ret; if (rds_conn_path_state(cp) == RDS_CONN_UP) { ret = cp->cp_conn->c_trans->recv_path(cp); rdsdebug("conn %p ret %d\n", cp->cp_conn, ret); switch (ret) { case -EAGAIN: rds_stats_inc(s_recv_immediate_retry); queue_delayed_work(rds_wq, &cp->cp_recv_w, 0); break; case -ENOMEM: rds_stats_inc(s_recv_delayed_retry); queue_delayed_work(rds_wq, &cp->cp_recv_w, 2); break; default: break; } } } void rds_shutdown_worker(struct work_struct *work) { struct rds_conn_path *cp = container_of(work, struct rds_conn_path, cp_down_w); rds_conn_shutdown(cp); } void rds_threads_exit(void) { destroy_workqueue(rds_wq); } int rds_threads_init(void) { rds_wq = create_singlethread_workqueue("krdsd"); if (!rds_wq) return -ENOMEM; return 0; } /* Compare two IPv6 addresses. Return 0 if the two addresses are equal. * Return 1 if the first is greater. Return -1 if the second is greater. 
*/ int rds_addr_cmp(const struct in6_addr *addr1, const struct in6_addr *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const __be64 *a1, *a2; u64 x, y; a1 = (__be64 *)addr1; a2 = (__be64 *)addr2; if (*a1 != *a2) { if (be64_to_cpu(*a1) < be64_to_cpu(*a2)) return -1; else return 1; } else { x = be64_to_cpu(*++a1); y = be64_to_cpu(*++a2); if (x < y) return -1; else if (x > y) return 1; else return 0; } #else u32 a, b; int i; for (i = 0; i < 4; i++) { if (addr1->s6_addr32[i] != addr2->s6_addr32[i]) { a = ntohl(addr1->s6_addr32[i]); b = ntohl(addr2->s6_addr32[i]); if (a < b) return -1; else if (a > b) return 1; } } return 0; #endif } EXPORT_SYMBOL_GPL(rds_addr_cmp);
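/*
 * Editorial sketch (not in the original net/rds/threads.c): the reconnect
 * backoff described above draws a uniformly random delay below the current
 * ceiling and then doubles the ceiling, clamped by the reconnect min/max
 * sysctls. The toy user-space model below mirrors that schedule; the
 * min/max values (1 and 300 "jiffies") are made-up illustrative numbers.
 */
#if 0 /* illustrative user-space model, not part of net/rds */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long min_j = 1, max_j = 300;	/* assumed sysctl values */
	unsigned long ceil = min_j;

	for (int attempt = 1; attempt <= 8; attempt++) {
		/* like rand % cp_reconnect_jiffies in rds_queue_reconnect() */
		unsigned long delay = rand() % ceil;

		printf("attempt %d: delay %lu of ceiling %lu\n",
		       attempt, delay, ceil);
		/* double the ceiling, capped at the maximum */
		ceil = ceil * 2 > max_j ? max_j : ceil * 2;
	}
	return 0;
}
#endif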
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Lauro Ramos Venancio <lauro.venancio@openbossa.org> * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/rfkill.h> #include <linux/nfc.h> #include <net/genetlink.h> #include "nfc.h" #define VERSION "0.1" #define NFC_CHECK_PRES_FREQ_MS 2000 int nfc_devlist_generation; DEFINE_MUTEX(nfc_devlist_mutex); /* NFC device ID bitmap */ static DEFINE_IDA(nfc_index_ida); int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name) { int rc = 0; pr_debug("%s do firmware %s\n", dev_name(&dev->dev), firmware_name); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->dev_up) { rc = -EBUSY; goto error; } if (!dev->ops->fw_download) { rc = -EOPNOTSUPP; goto error; } dev->fw_download_in_progress = true; rc = dev->ops->fw_download(dev, firmware_name); if (rc) dev->fw_download_in_progress = false; error: device_unlock(&dev->dev); return rc; } /** * nfc_fw_download_done - inform that a firmware download was completed * * @dev: The nfc device to which firmware was downloaded * @firmware_name: The firmware filename * @result: The positive value of a standard errno value */ int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name, u32 result) { dev->fw_download_in_progress = false; return nfc_genl_fw_download_done(dev, firmware_name, result); } EXPORT_SYMBOL(nfc_fw_download_done); /** * nfc_dev_up - turn on the NFC device * * @dev: The nfc device to be turned on * * The device remains up until the nfc_dev_down function
is called. */ int nfc_dev_up(struct nfc_dev *dev) { int rc = 0; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->rfkill && rfkill_blocked(dev->rfkill)) { rc = -ERFKILL; goto error; } if (dev->fw_download_in_progress) { rc = -EBUSY; goto error; } if (dev->dev_up) { rc = -EALREADY; goto error; } if (dev->ops->dev_up) rc = dev->ops->dev_up(dev); if (!rc) dev->dev_up = true; /* We have to enable the device before discovering SEs */ if (dev->ops->discover_se && dev->ops->discover_se(dev)) pr_err("SE discovery failed\n"); error: device_unlock(&dev->dev); return rc; } /** * nfc_dev_down - turn off the NFC device * * @dev: The nfc device to be turned off */ int nfc_dev_down(struct nfc_dev *dev) { int rc = 0; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -EALREADY; goto error; } if (dev->polling || dev->active_target) { rc = -EBUSY; goto error; } if (dev->ops->dev_down) dev->ops->dev_down(dev); dev->dev_up = false; error: device_unlock(&dev->dev); return rc; } static int nfc_rfkill_set_block(void *data, bool blocked) { struct nfc_dev *dev = data; pr_debug("%s blocked %d", dev_name(&dev->dev), blocked); if (!blocked) return 0; nfc_dev_down(dev); return 0; } static const struct rfkill_ops nfc_rfkill_ops = { .set_block = nfc_rfkill_set_block, }; /** * nfc_start_poll - start polling for nfc targets * * @dev: The nfc device that must start polling * @im_protocols: bitset of nfc initiator protocols to be used for polling * @tm_protocols: bitset of nfc transport protocols to be used for polling * * The device remains polling for targets until a target is found or * the nfc_stop_poll function is called. 
*/ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols) { int rc; pr_debug("dev_name %s initiator protocols 0x%x target protocols 0x%x\n", dev_name(&dev->dev), im_protocols, tm_protocols); if (!im_protocols && !tm_protocols) return -EINVAL; device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -ENODEV; goto error; } if (dev->polling) { rc = -EBUSY; goto error; } rc = dev->ops->start_poll(dev, im_protocols, tm_protocols); if (!rc) { dev->polling = true; dev->rf_mode = NFC_RF_NONE; } error: device_unlock(&dev->dev); return rc; } /** * nfc_stop_poll - stop polling for nfc targets * * @dev: The nfc device that must stop polling */ int nfc_stop_poll(struct nfc_dev *dev) { int rc = 0; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->polling) { rc = -EINVAL; goto error; } dev->ops->stop_poll(dev); dev->polling = false; dev->rf_mode = NFC_RF_NONE; error: device_unlock(&dev->dev); return rc; } static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx) { int i; for (i = 0; i < dev->n_targets; i++) { if (dev->targets[i].idx == target_idx) return &dev->targets[i]; } return NULL; } int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) { int rc = 0; u8 *gb; size_t gb_len; struct nfc_target *target; pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode); if (!dev->ops->dep_link_up) return -EOPNOTSUPP; device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->dep_link_up == true) { rc = -EALREADY; goto error; } gb = nfc_llcp_general_bytes(dev, &gb_len); if (gb_len > NFC_MAX_GT_LEN) { rc = -EINVAL; goto error; } target = nfc_find_target(dev, target_index); if (target == NULL) { rc = -ENOTCONN; goto error; } rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len); if (!rc) { dev->active_target = target; dev->rf_mode = NFC_RF_INITIATOR; } error: device_unlock(&dev->dev); return rc; } int nfc_dep_link_down(struct nfc_dev *dev) { int rc = 0; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); if (!dev->ops->dep_link_down) return -EOPNOTSUPP; device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->dep_link_up == false) { rc = -EALREADY; goto error; } rc = dev->ops->dep_link_down(dev); if (!rc) { dev->dep_link_up = false; dev->active_target = NULL; dev->rf_mode = NFC_RF_NONE; nfc_llcp_mac_is_down(dev); nfc_genl_dep_link_down_event(dev); } error: device_unlock(&dev->dev); return rc; } int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode) { dev->dep_link_up = true; if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) { struct nfc_target *target; target = nfc_find_target(dev, target_idx); if (target == NULL) return -ENOTCONN; dev->active_target = target; } dev->polling = false; dev->rf_mode = rf_mode; nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode); return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode); } EXPORT_SYMBOL(nfc_dep_link_is_up); /** * nfc_activate_target - prepare the target for data exchange * * @dev: The nfc device that found the target * @target_idx: index of the target that must be activated * @protocol: nfc protocol that will be used for data exchange */ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) { int rc; struct nfc_target *target; pr_debug("dev_name=%s target_idx=%u protocol=%u\n", dev_name(&dev->dev), target_idx, protocol); 
device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->active_target) { rc = -EBUSY; goto error; } target = nfc_find_target(dev, target_idx); if (target == NULL) { rc = -ENOTCONN; goto error; } rc = dev->ops->activate_target(dev, target, protocol); if (!rc) { dev->active_target = target; dev->rf_mode = NFC_RF_INITIATOR; if (dev->ops->check_presence && !dev->shutting_down) mod_timer(&dev->check_pres_timer, jiffies + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); } error: device_unlock(&dev->dev); return rc; } /** * nfc_deactivate_target - deactivate a nfc target * * @dev: The nfc device that found the target * @target_idx: index of the target that must be deactivated * @mode: idle or sleep? */ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode) { int rc = 0; pr_debug("dev_name=%s target_idx=%u\n", dev_name(&dev->dev), target_idx); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->active_target == NULL) { rc = -ENOTCONN; goto error; } if (dev->active_target->idx != target_idx) { rc = -ENOTCONN; goto error; } if (dev->ops->check_presence) timer_delete_sync(&dev->check_pres_timer); dev->ops->deactivate_target(dev, dev->active_target, mode); dev->active_target = NULL; error: device_unlock(&dev->dev); return rc; } /** * nfc_data_exchange - transceive data * * @dev: The nfc device that found the target * @target_idx: index of the target * @skb: data to be sent * @cb: callback called when the response is received * @cb_context: parameter for the callback function * * The user must wait for the callback before calling this function again. */ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { int rc; pr_debug("dev_name=%s target_idx=%u skb->len=%u\n", dev_name(&dev->dev), target_idx, skb->len); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; kfree_skb(skb); goto error; } if (dev->rf_mode == NFC_RF_INITIATOR && dev->active_target != NULL) { if (dev->active_target->idx != target_idx) { rc = -EADDRNOTAVAIL; kfree_skb(skb); goto error; } if (dev->ops->check_presence) timer_delete_sync(&dev->check_pres_timer); rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb, cb_context); if (!rc && dev->ops->check_presence && !dev->shutting_down) mod_timer(&dev->check_pres_timer, jiffies + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); } else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) { rc = dev->ops->tm_send(dev, skb); } else { rc = -ENOTCONN; kfree_skb(skb); goto error; } error: device_unlock(&dev->dev); return rc; } struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx) { struct nfc_se *se; list_for_each_entry(se, &dev->secure_elements, list) if (se->idx == se_idx) return se; return NULL; } EXPORT_SYMBOL(nfc_find_se); int nfc_enable_se(struct nfc_dev *dev, u32 se_idx) { struct nfc_se *se; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -ENODEV; goto error; } if (dev->polling) { rc = -EBUSY; goto error; } if (!dev->ops->enable_se || !dev->ops->disable_se) { rc = -EOPNOTSUPP; goto error; } se = nfc_find_se(dev, se_idx); if (!se) { rc = -EINVAL; goto error; } if (se->state == NFC_SE_ENABLED) { rc = -EALREADY; goto error; } rc = dev->ops->enable_se(dev, se_idx); if (rc >= 0) se->state = NFC_SE_ENABLED; error: device_unlock(&dev->dev); return rc; } int nfc_disable_se(struct nfc_dev *dev, u32 
se_idx) { struct nfc_se *se; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -ENODEV; goto error; } if (!dev->ops->enable_se || !dev->ops->disable_se) { rc = -EOPNOTSUPP; goto error; } se = nfc_find_se(dev, se_idx); if (!se) { rc = -EINVAL; goto error; } if (se->state == NFC_SE_DISABLED) { rc = -EALREADY; goto error; } rc = dev->ops->disable_se(dev, se_idx); if (rc >= 0) se->state = NFC_SE_DISABLED; error: device_unlock(&dev->dev); return rc; } int nfc_set_remote_general_bytes(struct nfc_dev *dev, const u8 *gb, u8 gb_len) { pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len); return nfc_llcp_set_remote_gb(dev, gb, gb_len); } EXPORT_SYMBOL(nfc_set_remote_general_bytes); u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len) { pr_debug("dev_name=%s\n", dev_name(&dev->dev)); return nfc_llcp_general_bytes(dev, gb_len); } EXPORT_SYMBOL(nfc_get_local_general_bytes); int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb) { /* Only LLCP target mode for now */ if (dev->dep_link_up == false) { kfree_skb(skb); return -ENOLINK; } return nfc_llcp_data_received(dev, skb); } EXPORT_SYMBOL(nfc_tm_data_received); int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode, const u8 *gb, size_t gb_len) { int rc; device_lock(&dev->dev); dev->polling = false; if (gb != NULL) { rc = nfc_set_remote_general_bytes(dev, gb, gb_len); if (rc < 0) goto out; } dev->rf_mode = NFC_RF_TARGET; if (protocol == NFC_PROTO_NFC_DEP_MASK) nfc_dep_link_is_up(dev, 0, comm_mode, NFC_RF_TARGET); rc = nfc_genl_tm_activated(dev, protocol); out: device_unlock(&dev->dev); return rc; } EXPORT_SYMBOL(nfc_tm_activated); int nfc_tm_deactivated(struct nfc_dev *dev) { dev->dep_link_up = false; dev->rf_mode = NFC_RF_NONE; return nfc_genl_tm_deactivated(dev); } EXPORT_SYMBOL(nfc_tm_deactivated); /** * nfc_alloc_send_skb - allocate a skb for data exchange responses * * @dev: device sending the response * @sk: socket sending the response * @flags: MSG_DONTWAIT flag * @size: size to allocate * @err: pointer to memory to store the error code */ struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, unsigned int flags, unsigned int size, unsigned int *err) { struct sk_buff *skb; unsigned int total_size; total_size = size + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; skb = sock_alloc_send_skb(sk, total_size, flags & MSG_DONTWAIT, err); if (skb) skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); return skb; } /** * nfc_alloc_recv_skb - allocate a skb for data exchange responses * * @size: size to allocate * @gfp: gfp flags */ struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp) { struct sk_buff *skb; unsigned int total_size; total_size = size + 1; skb = alloc_skb(total_size, gfp); if (skb) skb_reserve(skb, 1); return skb; } EXPORT_SYMBOL(nfc_alloc_recv_skb); /** * nfc_targets_found - inform that targets were found * * @dev: The nfc device that found the targets * @targets: array of nfc targets found * @n_targets: targets array size * * The device driver must call this function when one or many nfc targets * are found. After calling this function, the device driver must stop * polling for targets. * NOTE: This function can be called with targets=NULL and n_targets=0 to * notify a driver error, meaning that the polling operation cannot complete. * IMPORTANT: this function must not be called from an atomic context. 
* In addition, it must also not be called from a context that would prevent * the NFC Core to call other nfc ops entry point concurrently. */ int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, int n_targets) { int i; pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets); for (i = 0; i < n_targets; i++) targets[i].idx = dev->target_next_idx++; device_lock(&dev->dev); if (dev->polling == false) { device_unlock(&dev->dev); return 0; } dev->polling = false; dev->targets_generation++; kfree(dev->targets); dev->targets = NULL; if (targets) { dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target), GFP_ATOMIC); if (!dev->targets) { dev->n_targets = 0; device_unlock(&dev->dev); return -ENOMEM; } } dev->n_targets = n_targets; device_unlock(&dev->dev); nfc_genl_targets_found(dev); return 0; } EXPORT_SYMBOL(nfc_targets_found); /** * nfc_target_lost - inform that an activated target went out of field * * @dev: The nfc device that had the activated target in field * @target_idx: the nfc index of the target * * The device driver must call this function when the activated target * goes out of the field. * IMPORTANT: this function must not be called from an atomic context. * In addition, it must also not be called from a context that would prevent * the NFC Core to call other nfc ops entry point concurrently. */ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx) { const struct nfc_target *tg; int i; pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx); device_lock(&dev->dev); for (i = 0; i < dev->n_targets; i++) { tg = &dev->targets[i]; if (tg->idx == target_idx) break; } if (i == dev->n_targets) { device_unlock(&dev->dev); return -EINVAL; } dev->targets_generation++; dev->n_targets--; dev->active_target = NULL; if (dev->n_targets) { memcpy(&dev->targets[i], &dev->targets[i + 1], (dev->n_targets - i) * sizeof(struct nfc_target)); } else { kfree(dev->targets); dev->targets = NULL; } device_unlock(&dev->dev); nfc_genl_target_lost(dev, target_idx); return 0; } EXPORT_SYMBOL(nfc_target_lost); inline void nfc_driver_failure(struct nfc_dev *dev, int err) { nfc_targets_found(dev, NULL, 0); } EXPORT_SYMBOL(nfc_driver_failure); int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type) { struct nfc_se *se; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); se = nfc_find_se(dev, se_idx); if (se) return -EALREADY; se = kzalloc(sizeof(struct nfc_se), GFP_KERNEL); if (!se) return -ENOMEM; se->idx = se_idx; se->type = type; se->state = NFC_SE_DISABLED; INIT_LIST_HEAD(&se->list); list_add(&se->list, &dev->secure_elements); rc = nfc_genl_se_added(dev, se_idx, type); if (rc < 0) { list_del(&se->list); kfree(se); return rc; } return 0; } EXPORT_SYMBOL(nfc_add_se); int nfc_remove_se(struct nfc_dev *dev, u32 se_idx) { struct nfc_se *se, *n; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); list_for_each_entry_safe(se, n, &dev->secure_elements, list) if (se->idx == se_idx) { rc = nfc_genl_se_removed(dev, se_idx); if (rc < 0) return rc; list_del(&se->list); kfree(se); return 0; } return -EINVAL; } EXPORT_SYMBOL(nfc_remove_se); int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx, struct nfc_evt_transaction *evt_transaction) { int rc; pr_debug("transaction: %x\n", se_idx); device_lock(&dev->dev); if (!evt_transaction) { rc = -EPROTO; goto out; } rc = nfc_genl_se_transaction(dev, se_idx, evt_transaction); out: device_unlock(&dev->dev); return rc; } EXPORT_SYMBOL(nfc_se_transaction); int nfc_se_connectivity(struct nfc_dev 
*dev, u8 se_idx) { int rc; pr_debug("connectivity: %x\n", se_idx); device_lock(&dev->dev); rc = nfc_genl_se_connectivity(dev, se_idx); device_unlock(&dev->dev); return rc; } EXPORT_SYMBOL(nfc_se_connectivity); static void nfc_release(struct device *d) { struct nfc_dev *dev = to_nfc_dev(d); struct nfc_se *se, *n; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); nfc_genl_data_exit(&dev->genl_data); kfree(dev->targets); list_for_each_entry_safe(se, n, &dev->secure_elements, list) { nfc_genl_se_removed(dev, se->idx); list_del(&se->list); kfree(se); } ida_free(&nfc_index_ida, dev->idx); kfree(dev); } static void nfc_check_pres_work(struct work_struct *work) { struct nfc_dev *dev = container_of(work, struct nfc_dev, check_pres_work); int rc; device_lock(&dev->dev); if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) { rc = dev->ops->check_presence(dev, dev->active_target); if (rc == -EOPNOTSUPP) goto exit; if (rc) { u32 active_target_idx = dev->active_target->idx; device_unlock(&dev->dev); nfc_target_lost(dev, active_target_idx); return; } if (!dev->shutting_down) mod_timer(&dev->check_pres_timer, jiffies + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); } exit: device_unlock(&dev->dev); } static void nfc_check_pres_timeout(struct timer_list *t) { struct nfc_dev *dev = from_timer(dev, t, check_pres_timer); schedule_work(&dev->check_pres_work); } const struct class nfc_class = { .name = "nfc", .dev_release = nfc_release, }; EXPORT_SYMBOL(nfc_class); static int match_idx(struct device *d, const void *data) { struct nfc_dev *dev = to_nfc_dev(d); const unsigned int *idx = data; return dev->idx == *idx; } struct nfc_dev *nfc_get_device(unsigned int idx) { struct device *d; d = class_find_device(&nfc_class, NULL, &idx, match_idx); if (!d) return NULL; return to_nfc_dev(d); } /** * nfc_allocate_device - allocate a new nfc device * * @ops: device operations * @supported_protocols: NFC protocols supported by the device * @tx_headroom: reserved space at beginning of skb * @tx_tailroom: reserved space at end of skb */ struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops, u32 supported_protocols, int tx_headroom, int tx_tailroom) { struct nfc_dev *dev; int rc; if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || !ops->deactivate_target || !ops->im_transceive) return NULL; if (!supported_protocols) return NULL; dev = kzalloc(sizeof(struct nfc_dev), GFP_KERNEL); if (!dev) return NULL; rc = ida_alloc(&nfc_index_ida, GFP_KERNEL); if (rc < 0) goto err_free_dev; dev->idx = rc; dev->dev.class = &nfc_class; dev_set_name(&dev->dev, "nfc%d", dev->idx); device_initialize(&dev->dev); dev->ops = ops; dev->supported_protocols = supported_protocols; dev->tx_headroom = tx_headroom; dev->tx_tailroom = tx_tailroom; INIT_LIST_HEAD(&dev->secure_elements); nfc_genl_data_init(&dev->genl_data); dev->rf_mode = NFC_RF_NONE; /* first generation must not be 0 */ dev->targets_generation = 1; if (ops->check_presence) { timer_setup(&dev->check_pres_timer, nfc_check_pres_timeout, 0); INIT_WORK(&dev->check_pres_work, nfc_check_pres_work); } return dev; err_free_dev: kfree(dev); return NULL; } EXPORT_SYMBOL(nfc_allocate_device); /** * nfc_register_device - register a nfc device in the nfc subsystem * * @dev: The nfc device to register */ int nfc_register_device(struct nfc_dev *dev) { int rc; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); mutex_lock(&nfc_devlist_mutex); nfc_devlist_generation++; rc = device_add(&dev->dev); mutex_unlock(&nfc_devlist_mutex); if (rc < 0) return rc; rc = 
nfc_llcp_register_device(dev); if (rc) pr_err("Could not register llcp device\n"); device_lock(&dev->dev); dev->rfkill = rfkill_alloc(dev_name(&dev->dev), &dev->dev, RFKILL_TYPE_NFC, &nfc_rfkill_ops, dev); if (dev->rfkill) { if (rfkill_register(dev->rfkill) < 0) { rfkill_destroy(dev->rfkill); dev->rfkill = NULL; } } dev->shutting_down = false; device_unlock(&dev->dev); rc = nfc_genl_device_added(dev); if (rc) pr_debug("The userspace won't be notified that the device %s was added\n", dev_name(&dev->dev)); return 0; } EXPORT_SYMBOL(nfc_register_device); /** * nfc_unregister_device - unregister a nfc device in the nfc subsystem * * @dev: The nfc device to unregister */ void nfc_unregister_device(struct nfc_dev *dev) { int rc; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); rc = nfc_genl_device_removed(dev); if (rc) pr_debug("The userspace won't be notified that the device %s " "was removed\n", dev_name(&dev->dev)); device_lock(&dev->dev); if (dev->rfkill) { rfkill_unregister(dev->rfkill); rfkill_destroy(dev->rfkill); dev->rfkill = NULL; } dev->shutting_down = true; device_unlock(&dev->dev); if (dev->ops->check_presence) { timer_delete_sync(&dev->check_pres_timer); cancel_work_sync(&dev->check_pres_work); } nfc_llcp_unregister_device(dev); mutex_lock(&nfc_devlist_mutex); nfc_devlist_generation++; device_del(&dev->dev); mutex_unlock(&nfc_devlist_mutex); } EXPORT_SYMBOL(nfc_unregister_device); static int __init nfc_init(void) { int rc; pr_info("NFC Core ver %s\n", VERSION); rc = class_register(&nfc_class); if (rc) return rc; rc = nfc_genl_init(); if (rc) goto err_genl; /* the first generation must not be 0 */ nfc_devlist_generation = 1; rc = rawsock_init(); if (rc) goto err_rawsock; rc = nfc_llcp_init(); if (rc) goto err_llcp_sock; rc = af_nfc_init(); if (rc) goto err_af_nfc; return 0; err_af_nfc: nfc_llcp_exit(); err_llcp_sock: rawsock_exit(); err_rawsock: nfc_genl_exit(); err_genl: class_unregister(&nfc_class); return rc; } static void __exit nfc_exit(void) { af_nfc_exit(); nfc_llcp_exit(); rawsock_exit(); nfc_genl_exit(); class_unregister(&nfc_class); } subsys_initcall(nfc_init); module_exit(nfc_exit); MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>"); MODULE_DESCRIPTION("NFC Core ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_NFC); MODULE_ALIAS_GENL_FAMILY(NFC_GENL_NAME);
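/*
 * Illustrative driver-side sketch (not part of net/nfc/core.c above): how a
 * driver might use the allocation/registration API shown in this file. The
 * ops table "my_nfc_ops" is hypothetical and assumed to be defined elsewhere
 * with at least start_poll, stop_poll, activate_target, deactivate_target and
 * im_transceive populated, since nfc_allocate_device() rejects it otherwise.
 * nfc_free_device() from the NFC core headers drops the device reference on
 * the error/teardown paths.
 */
#include <net/nfc/nfc.h>

extern const struct nfc_ops my_nfc_ops;	/* assumed: defined by the driver */

static struct nfc_dev *my_nfc_dev;

static int my_nfc_probe(void)		/* e.g. called from a bus probe routine */
{
	int rc;

	my_nfc_dev = nfc_allocate_device(&my_nfc_ops, NFC_PROTO_NFC_DEP_MASK,
					 4 /* tx_headroom */, 0 /* tx_tailroom */);
	if (!my_nfc_dev)
		return -ENOMEM;

	rc = nfc_register_device(my_nfc_dev);
	if (rc) {
		nfc_free_device(my_nfc_dev);
		return rc;
	}
	return 0;
}

static void my_nfc_remove(void)
{
	nfc_unregister_device(my_nfc_dev);
	nfc_free_device(my_nfc_dev);
}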
86 86 86 86 86 86 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2019 Hammerspace Inc */ #include <linux/module.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/string.h> #include <linux/nfs_fs.h> #include <linux/rcupdate.h> #include <linux/lockd/lockd.h> #include "internal.h" #include "nfs4_fs.h" #include "netns.h" #include "sysfs.h" static struct kset *nfs_kset; static void nfs_kset_release(struct kobject *kobj) { struct kset *kset = container_of(kobj, struct kset, kobj); kfree(kset); } static const struct kobj_ns_type_operations *nfs_netns_object_child_ns_type( const struct kobject *kobj) { return &net_ns_type_operations; } static struct kobj_type nfs_kset_type = { .release = nfs_kset_release, .sysfs_ops = &kobj_sysfs_ops, .child_ns_type = nfs_netns_object_child_ns_type, }; int nfs_sysfs_init(void) { int ret; nfs_kset = kzalloc(sizeof(*nfs_kset), GFP_KERNEL); if (!nfs_kset) return -ENOMEM; ret = kobject_set_name(&nfs_kset->kobj, "nfs"); if (ret) { kfree(nfs_kset); return ret; } nfs_kset->kobj.parent = fs_kobj; nfs_kset->kobj.ktype = &nfs_kset_type; nfs_kset->kobj.kset = NULL; ret = kset_register(nfs_kset); if (ret) { kfree(nfs_kset); return ret; } return 0; } void nfs_sysfs_exit(void) { kset_unregister(nfs_kset); } static ssize_t nfs_netns_identifier_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct nfs_netns_client *c = container_of(kobj, struct nfs_netns_client, kobject); ssize_t ret; rcu_read_lock(); ret = sysfs_emit(buf, "%s\n", rcu_dereference(c->identifier)); rcu_read_unlock(); return ret; } /* Strip trailing '\n' */ static size_t nfs_string_strip(const char *c, size_t len) { while (len > 0 && c[len-1] == '\n') --len; return len; } static ssize_t nfs_netns_identifier_store(struct kobject *kobj, struct kobj_attribute *attr, const 
char *buf, size_t count) { struct nfs_netns_client *c = container_of(kobj, struct nfs_netns_client, kobject); const char *old; char *p; size_t len; len = nfs_string_strip(buf, min_t(size_t, count, CONTAINER_ID_MAXLEN)); if (!len) return 0; p = kmemdup_nul(buf, len, GFP_KERNEL); if (!p) return -ENOMEM; old = rcu_dereference_protected(xchg(&c->identifier, (char __rcu *)p), 1); if (old) { synchronize_rcu(); kfree(old); } return count; } static void nfs_netns_client_release(struct kobject *kobj) { struct nfs_netns_client *c = container_of(kobj, struct nfs_netns_client, kobject); kfree(rcu_dereference_raw(c->identifier)); } static const void *nfs_netns_client_namespace(const struct kobject *kobj) { return container_of(kobj, struct nfs_netns_client, kobject)->net; } static struct kobj_attribute nfs_netns_client_id = __ATTR(identifier, 0644, nfs_netns_identifier_show, nfs_netns_identifier_store); static struct attribute *nfs_netns_client_attrs[] = { &nfs_netns_client_id.attr, NULL, }; ATTRIBUTE_GROUPS(nfs_netns_client); static struct kobj_type nfs_netns_client_type = { .release = nfs_netns_client_release, .default_groups = nfs_netns_client_groups, .sysfs_ops = &kobj_sysfs_ops, .namespace = nfs_netns_client_namespace, }; static void nfs_netns_object_release(struct kobject *kobj) { struct nfs_netns_client *c = container_of(kobj, struct nfs_netns_client, nfs_net_kobj); kfree(c); } static const void *nfs_netns_namespace(const struct kobject *kobj) { return container_of(kobj, struct nfs_netns_client, nfs_net_kobj)->net; } static struct kobj_type nfs_netns_object_type = { .release = nfs_netns_object_release, .sysfs_ops = &kobj_sysfs_ops, .namespace = nfs_netns_namespace, }; static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent, struct net *net) { struct nfs_netns_client *p; p = kzalloc(sizeof(*p), GFP_KERNEL); if (p) { p->net = net; p->kobject.kset = nfs_kset; p->nfs_net_kobj.kset = nfs_kset; if (kobject_init_and_add(&p->nfs_net_kobj, &nfs_netns_object_type, parent, "net") != 0) { kobject_put(&p->nfs_net_kobj); return NULL; } if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type, &p->nfs_net_kobj, "nfs_client") == 0) return p; kobject_put(&p->kobject); } return NULL; } void nfs_netns_sysfs_setup(struct nfs_net *netns, struct net *net) { struct nfs_netns_client *clp; clp = nfs_netns_client_alloc(&nfs_kset->kobj, net); if (clp) { netns->nfs_client = clp; kobject_uevent(&clp->kobject, KOBJ_ADD); } } void nfs_netns_sysfs_destroy(struct nfs_net *netns) { struct nfs_netns_client *clp = netns->nfs_client; if (clp) { kobject_uevent(&clp->kobject, KOBJ_REMOVE); kobject_del(&clp->kobject); kobject_put(&clp->kobject); kobject_del(&clp->nfs_net_kobj); kobject_put(&clp->nfs_net_kobj); netns->nfs_client = NULL; } } static bool shutdown_match_client(const struct rpc_task *task, const void *data) { return true; } static void shutdown_client(struct rpc_clnt *clnt) { clnt->cl_shutdown = 1; rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL); } /* * Shut down the nfs_client only once all the superblocks * have been shut down. 
*/ static void shutdown_nfs_client(struct nfs_client *clp) { struct nfs_server *server; rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { if (!(server->flags & NFS_MOUNT_SHUTDOWN)) { rcu_read_unlock(); return; } } rcu_read_unlock(); nfs_mark_client_ready(clp, -EIO); shutdown_client(clp->cl_rpcclient); } static ssize_t shutdown_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct nfs_server *server = container_of(kobj, struct nfs_server, kobj); bool shutdown = server->flags & NFS_MOUNT_SHUTDOWN; return sysfs_emit(buf, "%d\n", shutdown); } static ssize_t shutdown_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct nfs_server *server; int ret, val; server = container_of(kobj, struct nfs_server, kobj); ret = kstrtoint(buf, 0, &val); if (ret) return ret; if (val != 1) return -EINVAL; /* already shut down? */ if (server->flags & NFS_MOUNT_SHUTDOWN) goto out; server->flags |= NFS_MOUNT_SHUTDOWN; shutdown_client(server->client); if (!IS_ERR(server->client_acl)) shutdown_client(server->client_acl); if (server->nlm_host) shutdown_client(server->nlm_host->h_rpcclnt); out: shutdown_nfs_client(server->nfs_client); return count; } static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown); #if IS_ENABLED(CONFIG_NFS_V4_1) static ssize_t implid_domain_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct nfs_server *server = container_of(kobj, struct nfs_server, kobj); struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid; if (!impl_id || strlen(impl_id->domain) == 0) return 0; //sysfs_emit(buf, ""); return sysfs_emit(buf, "%s\n", impl_id->domain); } static struct kobj_attribute nfs_sysfs_attr_implid_domain = __ATTR_RO(implid_domain); static ssize_t implid_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct nfs_server *server = container_of(kobj, struct nfs_server, kobj); struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid; if (!impl_id || strlen(impl_id->name) == 0) return 0; //sysfs_emit(buf, ""); return sysfs_emit(buf, "%s\n", impl_id->name); } static struct kobj_attribute nfs_sysfs_attr_implid_name = __ATTR_RO(implid_name); #endif /* IS_ENABLED(CONFIG_NFS_V4_1) */ #define RPC_CLIENT_NAME_SIZE 64 void nfs_sysfs_link_rpc_client(struct nfs_server *server, struct rpc_clnt *clnt, const char *uniq) { char name[RPC_CLIENT_NAME_SIZE]; int ret; strscpy(name, clnt->cl_program->name, sizeof(name)); strncat(name, uniq ? uniq : "", sizeof(name) - strlen(name) - 1); strncat(name, "_client", sizeof(name) - strlen(name) - 1); ret = sysfs_create_link_nowarn(&server->kobj, &clnt->cl_sysfs->kobject, name); if (ret < 0) pr_warn("NFS: can't create link to %s in sysfs (%d)\n", name, ret); } EXPORT_SYMBOL_GPL(nfs_sysfs_link_rpc_client); static void nfs_sysfs_sb_release(struct kobject *kobj) { /* no-op: why? 
see lib/kobject.c kobject_cleanup() */ } static const void *nfs_netns_server_namespace(const struct kobject *kobj) { return container_of(kobj, struct nfs_server, kobj)->nfs_client->cl_net; } static struct kobj_type nfs_sb_ktype = { .release = nfs_sysfs_sb_release, .sysfs_ops = &kobj_sysfs_ops, .namespace = nfs_netns_server_namespace, .child_ns_type = nfs_netns_object_child_ns_type, }; #if IS_ENABLED(CONFIG_NFS_V4_1) static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server) { int ret; if (!server->nfs_client->cl_implid) return; ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_domain.attr, nfs_netns_server_namespace(&server->kobj)); if (ret < 0) pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", server->s_sysfs_id, ret); ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_name.attr, nfs_netns_server_namespace(&server->kobj)); if (ret < 0) pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", server->s_sysfs_id, ret); } #else /* CONFIG_NFS_V4_1 */ static inline void nfs_sysfs_add_nfsv41_server(struct nfs_server *server) { } #endif /* CONFIG_NFS_V4_1 */ void nfs_sysfs_add_server(struct nfs_server *server) { int ret; ret = kobject_init_and_add(&server->kobj, &nfs_sb_ktype, &nfs_kset->kobj, "server-%d", server->s_sysfs_id); if (ret < 0) { pr_warn("NFS: nfs sysfs add server-%d failed (%d)\n", server->s_sysfs_id, ret); return; } ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_shutdown.attr, nfs_netns_server_namespace(&server->kobj)); if (ret < 0) pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n", server->s_sysfs_id, ret); nfs_sysfs_add_nfsv41_server(server); } EXPORT_SYMBOL_GPL(nfs_sysfs_add_server); void nfs_sysfs_move_server_to_sb(struct super_block *s) { struct nfs_server *server = s->s_fs_info; int ret; ret = kobject_rename(&server->kobj, s->s_id); if (ret < 0) pr_warn("NFS: rename sysfs %s failed (%d)\n", server->kobj.name, ret); } void nfs_sysfs_move_sb_to_server(struct nfs_server *server) { const char *s; int ret = -ENOMEM; s = kasprintf(GFP_KERNEL, "server-%d", server->s_sysfs_id); if (s) { ret = kobject_rename(&server->kobj, s); kfree(s); } if (ret < 0) pr_warn("NFS: rename sysfs %s failed (%d)\n", server->kobj.name, ret); } /* unlink, not dec-ref */ void nfs_sysfs_remove_server(struct nfs_server *server) { kobject_del(&server->kobj); }
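/*
 * Self-contained sketch (not part of fs/nfs/sysfs.c above): the same
 * kobj_attribute show/store pattern used by the "identifier" and "shutdown"
 * attributes, reduced to a minimal example. All names here are hypothetical;
 * the attribute would still have to be attached to a kobject with
 * sysfs_create_file(kobj, &example_attr.attr).
 */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static int example_value;

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	/* sysfs_emit() bounds the output to the single PAGE_SIZE sysfs buffer */
	return sysfs_emit(buf, "%d\n", example_value);
}

static ssize_t example_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	int ret, val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;
	example_value = val;
	return count;	/* consume the whole write on success */
}

static struct kobj_attribute example_attr = __ATTR_RW(example);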

907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 // SPDX-License-Identifier: GPL-2.0 /* * Assorted bcachefs debug code * * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> * Copyright 2012 Google, Inc. */ #include "bcachefs.h" #include "alloc_foreground.h" #include "bkey_methods.h" #include "btree_cache.h" #include "btree_io.h" #include "btree_iter.h" #include "btree_locking.h" #include "btree_update.h" #include "btree_update_interior.h" #include "buckets.h" #include "debug.h" #include "error.h" #include "extents.h" #include "fsck.h" #include "inode.h" #include "journal_reclaim.h" #include "super.h" #include <linux/console.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/random.h> #include <linux/seq_file.h> static struct dentry *bch_debug; static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b, struct extent_ptr_decoded pick) { struct btree *v = c->verify_data; struct btree_node *n_ondisk = c->verify_ondisk; struct btree_node *n_sorted = c->verify_data->data; struct bset *sorted, *inmemory = &b->data->keys; struct bio *bio; bool failed = false, saw_error = false; struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ); if (!ca) return false; bio = bio_alloc_bioset(ca->disk_sb.bdev, buf_pages(n_sorted, btree_buf_bytes(b)), REQ_OP_READ|REQ_META, GFP_NOFS, &c->btree_bio); bio->bi_iter.bi_sector = pick.ptr.offset; bch2_bio_map(bio, n_sorted, btree_buf_bytes(b)); submit_bio_wait(bio); bio_put(bio); percpu_ref_put(&ca->io_ref[READ]); memcpy(n_ondisk, n_sorted, btree_buf_bytes(b)); v->written = 0; if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error) return false; n_sorted = c->verify_data->data; sorted = &n_sorted->keys; if (inmemory->u64s != sorted->u64s || memcmp(inmemory->start, sorted->start, vstruct_end(inmemory) - (void *) inmemory->start)) { unsigned offset = 0, sectors; struct bset *i; unsigned j; console_lock(); printk(KERN_ERR "*** in memory:\n"); bch2_dump_bset(c, b, inmemory, 0); printk(KERN_ERR "*** read back in:\n"); bch2_dump_bset(c, v, sorted, 0); while (offset < v->written) { if (!offset) { i = &n_ondisk->keys; sectors = vstruct_blocks(n_ondisk, c->block_bits) << c->block_bits; } else { struct btree_node_entry *bne = (void *) n_ondisk + (offset << 9); i = &bne->keys; sectors = vstruct_blocks(bne, c->block_bits) << c->block_bits; } printk(KERN_ERR "*** on disk block %u:\n", offset); bch2_dump_bset(c, b, i, offset); offset += sectors; } for (j = 0; j < le16_to_cpu(inmemory->u64s); j++) if (inmemory->_data[j] != sorted->_data[j]) break; console_unlock(); bch_err(c, "verify failed at key %u", j); failed = true; } if (v->written != b->written) { bch_err(c, "written wrong: expected %u, got %u", b->written, v->written); failed = true; } return failed; } void __bch2_btree_verify(struct bch_fs *c, struct btree *b) { struct bkey_ptrs_c ptrs; struct extent_ptr_decoded p; const union bch_extent_entry *entry; struct btree *v; struct bset *inmemory = &b->data->keys; struct bkey_packed *k; bool failed = false; if (c->opts.nochanges) return; bch2_btree_node_io_lock(b); mutex_lock(&c->verify_lock); if (!c->verify_ondisk) { c->verify_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL); if (!c->verify_ondisk) goto out; } if (!c->verify_data) { c->verify_data = 
__bch2_btree_node_mem_alloc(c); if (!c->verify_data) goto out; list_del_init(&c->verify_data->list); } BUG_ON(b->nsets != 1); for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k)) if (k->type == KEY_TYPE_btree_ptr_v2) ((struct bch_btree_ptr_v2 *) bkeyp_val(&b->format, k))->mem_ptr = 0; v = c->verify_data; bkey_copy(&v->key, &b->key); v->c.level = b->c.level; v->c.btree_id = b->c.btree_id; bch2_btree_keys_init(v); ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key)); bkey_for_each_ptr_decode(&b->key.k, ptrs, p, entry) failed |= bch2_btree_verify_replica(c, b, p); if (failed) { struct printbuf buf = PRINTBUF; bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); bch2_fs_fatal_error(c, ": btree node verify failed for: %s\n", buf.buf); printbuf_exit(&buf); } out: mutex_unlock(&c->verify_lock); bch2_btree_node_io_unlock(b); } void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) { struct btree_node *n_ondisk = NULL; struct extent_ptr_decoded pick; struct bch_dev *ca; struct bio *bio = NULL; unsigned offset = 0; int ret; if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), NULL, &pick, -1) <= 0) { prt_printf(out, "error getting device to read from: invalid device\n"); return; } ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ); if (!ca) { prt_printf(out, "error getting device to read from: not online\n"); return; } n_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL); if (!n_ondisk) { prt_printf(out, "memory allocation failure\n"); goto out; } bio = bio_alloc_bioset(ca->disk_sb.bdev, buf_pages(n_ondisk, btree_buf_bytes(b)), REQ_OP_READ|REQ_META, GFP_NOFS, &c->btree_bio); bio->bi_iter.bi_sector = pick.ptr.offset; bch2_bio_map(bio, n_ondisk, btree_buf_bytes(b)); ret = submit_bio_wait(bio); if (ret) { prt_printf(out, "IO error reading btree node: %s\n", bch2_err_str(ret)); goto out; } while (offset < btree_sectors(c)) { struct bset *i; struct nonce nonce; struct bch_csum csum; struct bkey_packed *k; unsigned sectors; if (!offset) { i = &n_ondisk->keys; if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) { prt_printf(out, "unknown checksum type at offset %u: %llu\n", offset, BSET_CSUM_TYPE(i)); goto out; } nonce = btree_nonce(i, offset << 9); csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, n_ondisk); if (bch2_crc_cmp(csum, n_ondisk->csum)) { prt_printf(out, "invalid checksum\n"); goto out; } bset_encrypt(c, i, offset << 9); sectors = vstruct_sectors(n_ondisk, c->block_bits); } else { struct btree_node_entry *bne = (void *) n_ondisk + (offset << 9); i = &bne->keys; if (i->seq != n_ondisk->keys.seq) break; if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) { prt_printf(out, "unknown checksum type at offset %u: %llu\n", offset, BSET_CSUM_TYPE(i)); goto out; } nonce = btree_nonce(i, offset << 9); csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne); if (bch2_crc_cmp(csum, bne->csum)) { prt_printf(out, "invalid checksum"); goto out; } bset_encrypt(c, i, offset << 9); sectors = vstruct_sectors(bne, c->block_bits); } prt_printf(out, " offset %u version %u, journal seq %llu\n", offset, le16_to_cpu(i->version), le64_to_cpu(i->journal_seq)); offset += sectors; printbuf_indent_add(out, 4); for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) { struct bkey u; bch2_bkey_val_to_text(out, c, bkey_disassemble(b, k, &u)); prt_newline(out); } printbuf_indent_sub(out, 4); } out: if (bio) bio_put(bio); kvfree(n_ondisk); percpu_ref_put(&ca->io_ref[READ]); } #ifdef CONFIG_DEBUG_FS /* XXX: bch_fs refcounting */ struct dump_iter { struct bch_fs 
*c; enum btree_id id; struct bpos from; struct bpos prev_node; u64 iter; struct printbuf buf; char __user *ubuf; /* destination user buffer */ size_t size; /* size of requested read */ ssize_t ret; /* bytes read so far */ }; static ssize_t flush_buf(struct dump_iter *i) { if (i->buf.pos) { size_t bytes = min_t(size_t, i->buf.pos, i->size); int copied = bytes - copy_to_user(i->ubuf, i->buf.buf, bytes); i->ret += copied; i->ubuf += copied; i->size -= copied; i->buf.pos -= copied; memmove(i->buf.buf, i->buf.buf + copied, i->buf.pos); if (copied != bytes) return -EFAULT; } return i->size ? 0 : i->ret; } static int bch2_dump_open(struct inode *inode, struct file *file) { struct btree_debug *bd = inode->i_private; struct dump_iter *i; i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL); if (!i) return -ENOMEM; file->private_data = i; i->from = POS_MIN; i->iter = 0; i->c = container_of(bd, struct bch_fs, btree_debug[bd->id]); i->id = bd->id; i->buf = PRINTBUF; return 0; } static int bch2_dump_release(struct inode *inode, struct file *file) { struct dump_iter *i = file->private_data; printbuf_exit(&i->buf); kfree(i); return 0; } static ssize_t bch2_read_btree(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; i->ubuf = buf; i->size = size; i->ret = 0; return flush_buf(i) ?: bch2_trans_run(i->c, for_each_btree_key(trans, iter, i->id, i->from, BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, ({ bch2_bkey_val_to_text(&i->buf, i->c, k); prt_newline(&i->buf); bch2_trans_unlock(trans); i->from = bpos_successor(iter.pos); flush_buf(i); }))) ?: i->ret; } static const struct file_operations btree_debug_ops = { .owner = THIS_MODULE, .open = bch2_dump_open, .release = bch2_dump_release, .read = bch2_read_btree, }; static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; i->ubuf = buf; i->size = size; i->ret = 0; ssize_t ret = flush_buf(i); if (ret) return ret; if (bpos_eq(SPOS_MAX, i->from)) return i->ret; return bch2_trans_run(i->c, for_each_btree_node(trans, iter, i->id, i->from, 0, b, ({ bch2_btree_node_to_text(&i->buf, i->c, b); i->from = !bpos_eq(SPOS_MAX, b->key.k.p) ? 
bpos_successor(b->key.k.p) : b->key.k.p; drop_locks_do(trans, flush_buf(i)); }))) ?: i->ret; } static const struct file_operations btree_format_debug_ops = { .owner = THIS_MODULE, .open = bch2_dump_open, .release = bch2_dump_release, .read = bch2_read_btree_formats, }; static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; i->ubuf = buf; i->size = size; i->ret = 0; return flush_buf(i) ?: bch2_trans_run(i->c, for_each_btree_key(trans, iter, i->id, i->from, BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, ({ struct btree_path_level *l = &btree_iter_path(trans, &iter)->l[0]; struct bkey_packed *_k = bch2_btree_node_iter_peek(&l->iter, l->b); if (bpos_gt(l->b->key.k.p, i->prev_node)) { bch2_btree_node_to_text(&i->buf, i->c, l->b); i->prev_node = l->b->key.k.p; } bch2_bfloat_to_text(&i->buf, l->b, _k); bch2_trans_unlock(trans); i->from = bpos_successor(iter.pos); flush_buf(i); }))) ?: i->ret; } static const struct file_operations bfloat_failed_debug_ops = { .owner = THIS_MODULE, .open = bch2_dump_open, .release = bch2_dump_release, .read = bch2_read_bfloat_failed, }; static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *c, struct btree *b) { if (!out->nr_tabstops) printbuf_tabstop_push(out, 32); prt_printf(out, "%px ", b); bch2_btree_id_level_to_text(out, b->c.btree_id, b->c.level); prt_printf(out, "\n"); printbuf_indent_add(out, 2); bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key)); prt_newline(out); prt_printf(out, "flags:\t"); prt_bitflags(out, bch2_btree_node_flags, b->flags); prt_newline(out); prt_printf(out, "pcpu read locks:\t%u\n", b->c.lock.readers != NULL); prt_printf(out, "written:\t%u\n", b->written); prt_printf(out, "writes blocked:\t%u\n", !list_empty_careful(&b->write_blocked)); prt_printf(out, "will make reachable:\t%lx\n", b->will_make_reachable); prt_printf(out, "journal pin %px:\t%llu\n", &b->writes[0].journal, b->writes[0].journal.seq); prt_printf(out, "journal pin %px:\t%llu\n", &b->writes[1].journal, b->writes[1].journal.seq); printbuf_indent_sub(out, 2); } static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; struct bch_fs *c = i->c; bool done = false; ssize_t ret = 0; i->ubuf = buf; i->size = size; i->ret = 0; do { struct bucket_table *tbl; struct rhash_head *pos; struct btree *b; ret = flush_buf(i); if (ret) return ret; rcu_read_lock(); i->buf.atomic++; tbl = rht_dereference_rcu(c->btree_cache.table.tbl, &c->btree_cache.table); if (i->iter < tbl->size) { rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash) bch2_cached_btree_node_to_text(&i->buf, c, b); i->iter++; } else { done = true; } --i->buf.atomic; rcu_read_unlock(); } while (!done); if (i->buf.allocation_failure) ret = -ENOMEM; if (!ret) ret = flush_buf(i); return ret ?: i->ret; } static const struct file_operations cached_btree_nodes_ops = { .owner = THIS_MODULE, .open = bch2_dump_open, .release = bch2_dump_release, .read = bch2_cached_btree_nodes_read, }; typedef int (*list_cmp_fn)(const struct list_head *l, const struct list_head *r); static void list_sort(struct list_head *head, list_cmp_fn cmp) { struct list_head *pos; list_for_each(pos, head) while (!list_is_last(pos, head) && cmp(pos, pos->next) > 0) { struct list_head *pos2, *next = pos->next; list_del(next); list_for_each(pos2, head) if (cmp(next, pos2) < 0) goto pos_found; BUG(); pos_found: list_add_tail(next, pos2); } } static int 
list_ptr_order_cmp(const struct list_head *l, const struct list_head *r) { return cmp_int(l, r); } static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; struct bch_fs *c = i->c; struct btree_trans *trans; ssize_t ret = 0; i->ubuf = buf; i->size = size; i->ret = 0; restart: seqmutex_lock(&c->btree_trans_lock); list_sort(&c->btree_trans_list, list_ptr_order_cmp); list_for_each_entry(trans, &c->btree_trans_list, list) { if ((ulong) trans <= i->iter) continue; i->iter = (ulong) trans; if (!closure_get_not_zero(&trans->ref)) continue; u32 seq = seqmutex_unlock(&c->btree_trans_lock); bch2_btree_trans_to_text(&i->buf, trans); prt_printf(&i->buf, "backtrace:\n"); printbuf_indent_add(&i->buf, 2); bch2_prt_task_backtrace(&i->buf, trans->locking_wait.task, 0, GFP_KERNEL); printbuf_indent_sub(&i->buf, 2); prt_newline(&i->buf); closure_put(&trans->ref); ret = flush_buf(i); if (ret) goto unlocked; if (!seqmutex_relock(&c->btree_trans_lock, seq)) goto restart; } seqmutex_unlock(&c->btree_trans_lock); unlocked: if (i->buf.allocation_failure) ret = -ENOMEM; if (!ret) ret = flush_buf(i); return ret ?: i->ret; } static const struct file_operations btree_transactions_ops = { .owner = THIS_MODULE, .open = bch2_dump_open, .release = bch2_dump_release, .read = bch2_btree_transactions_read, }; static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; struct bch_fs *c = i->c; bool done = false; int err; i->ubuf = buf; i->size = size; i->ret = 0; while (1) { err = flush_buf(i); if (err) return err; if (!i->size) break; if (done) break; done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter); i->iter++; } if (i->buf.allocation_failure) return -ENOMEM; return i->ret; } static const struct file_operations journal_pins_ops = { .owner = THIS_MODULE, .open = bch2_dump_open, .release = bch2_dump_release, .read = bch2_journal_pins_read, }; static ssize_t bch2_btree_updates_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; struct bch_fs *c = i->c; int err; i->ubuf = buf; i->size = size; i->ret = 0; if (!i->iter) { bch2_btree_updates_to_text(&i->buf, c); i->iter++; } err = flush_buf(i); if (err) return err; if (i->buf.allocation_failure) return -ENOMEM; return i->ret; } static const struct file_operations btree_updates_ops = { .owner = THIS_MODULE, .open = bch2_dump_open, .release = bch2_dump_release, .read = bch2_btree_updates_read, }; static int btree_transaction_stats_open(struct inode *inode, struct file *file) { struct bch_fs *c = inode->i_private; struct dump_iter *i; i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL); if (!i) return -ENOMEM; i->iter = 1; i->c = c; i->buf = PRINTBUF; file->private_data = i; return 0; } static int btree_transaction_stats_release(struct inode *inode, struct file *file) { struct dump_iter *i = file->private_data; printbuf_exit(&i->buf); kfree(i); return 0; } static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; struct bch_fs *c = i->c; int err; i->ubuf = buf; i->size = size; i->ret = 0; while (1) { struct btree_transaction_stats *s = &c->btree_transaction_stats[i->iter]; err = flush_buf(i); if (err) return err; if (!i->size) break; if (i->iter == ARRAY_SIZE(bch2_btree_transaction_fns) || !bch2_btree_transaction_fns[i->iter]) break; 
prt_printf(&i->buf, "%s:\n", bch2_btree_transaction_fns[i->iter]); printbuf_indent_add(&i->buf, 2); mutex_lock(&s->lock); prt_printf(&i->buf, "Max mem used: %u\n", s->max_mem); prt_printf(&i->buf, "Transaction duration:\n"); printbuf_indent_add(&i->buf, 2); bch2_time_stats_to_text(&i->buf, &s->duration); printbuf_indent_sub(&i->buf, 2); if (IS_ENABLED(CONFIG_BCACHEFS_LOCK_TIME_STATS)) { prt_printf(&i->buf, "Lock hold times:\n"); printbuf_indent_add(&i->buf, 2); bch2_time_stats_to_text(&i->buf, &s->lock_hold_times); printbuf_indent_sub(&i->buf, 2); } if (s->max_paths_text) { prt_printf(&i->buf, "Maximum allocated btree paths (%u):\n", s->nr_max_paths); printbuf_indent_add(&i->buf, 2); prt_str_indented(&i->buf, s->max_paths_text); printbuf_indent_sub(&i->buf, 2); } mutex_unlock(&s->lock); printbuf_indent_sub(&i->buf, 2); prt_newline(&i->buf); i->iter++; } if (i->buf.allocation_failure) return -ENOMEM; return i->ret; } static const struct file_operations btree_transaction_stats_op = { .owner = THIS_MODULE, .open = btree_transaction_stats_open, .release = btree_transaction_stats_release, .read = btree_transaction_stats_read, }; /* walk btree transactions until we find a deadlock and print it */ static void btree_deadlock_to_text(struct printbuf *out, struct bch_fs *c) { struct btree_trans *trans; ulong iter = 0; restart: seqmutex_lock(&c->btree_trans_lock); list_sort(&c->btree_trans_list, list_ptr_order_cmp); list_for_each_entry(trans, &c->btree_trans_list, list) { if ((ulong) trans <= iter) continue; iter = (ulong) trans; if (!closure_get_not_zero(&trans->ref)) continue; u32 seq = seqmutex_unlock(&c->btree_trans_lock); bool found = bch2_check_for_deadlock(trans, out) != 0; closure_put(&trans->ref); if (found) return; if (!seqmutex_relock(&c->btree_trans_lock, seq)) goto restart; } seqmutex_unlock(&c->btree_trans_lock); } typedef void (*fs_to_text_fn)(struct printbuf *, struct bch_fs *); static ssize_t bch2_simple_print(struct file *file, char __user *buf, size_t size, loff_t *ppos, fs_to_text_fn fn) { struct dump_iter *i = file->private_data; struct bch_fs *c = i->c; ssize_t ret = 0; i->ubuf = buf; i->size = size; i->ret = 0; if (!i->iter) { fn(&i->buf, c); i->iter++; } if (i->buf.allocation_failure) ret = -ENOMEM; if (!ret) ret = flush_buf(i); return ret ?: i->ret; } static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { return bch2_simple_print(file, buf, size, ppos, btree_deadlock_to_text); } static const struct file_operations btree_deadlock_ops = { .owner = THIS_MODULE, .open = bch2_dump_open, .release = bch2_dump_release, .read = bch2_btree_deadlock_read, }; static ssize_t bch2_write_points_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { return bch2_simple_print(file, buf, size, ppos, bch2_write_points_to_text); } static const struct file_operations write_points_ops = { .owner = THIS_MODULE, .open = bch2_dump_open, .release = bch2_dump_release, .read = bch2_write_points_read, }; void bch2_fs_debug_exit(struct bch_fs *c) { if (!IS_ERR_OR_NULL(c->fs_debug_dir)) debugfs_remove_recursive(c->fs_debug_dir); } static void bch2_fs_debug_btree_init(struct bch_fs *c, struct btree_debug *bd) { struct dentry *d; d = debugfs_create_dir(bch2_btree_id_str(bd->id), c->btree_debug_dir); debugfs_create_file("keys", 0400, d, bd, &btree_debug_ops); debugfs_create_file("formats", 0400, d, bd, &btree_format_debug_ops); debugfs_create_file("bfloat-failed", 0400, d, bd, &bfloat_failed_debug_ops); } void bch2_fs_debug_init(struct bch_fs 
*c) { struct btree_debug *bd; char name[100]; if (IS_ERR_OR_NULL(bch_debug)) return; snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b); c->fs_debug_dir = debugfs_create_dir(name, bch_debug); if (IS_ERR_OR_NULL(c->fs_debug_dir)) return; debugfs_create_file("cached_btree_nodes", 0400, c->fs_debug_dir, c->btree_debug, &cached_btree_nodes_ops); debugfs_create_file("btree_transactions", 0400, c->fs_debug_dir, c->btree_debug, &btree_transactions_ops); debugfs_create_file("journal_pins", 0400, c->fs_debug_dir, c->btree_debug, &journal_pins_ops); debugfs_create_file("btree_updates", 0400, c->fs_debug_dir, c->btree_debug, &btree_updates_ops); debugfs_create_file("btree_transaction_stats", 0400, c->fs_debug_dir, c, &btree_transaction_stats_op); debugfs_create_file("btree_deadlock", 0400, c->fs_debug_dir, c->btree_debug, &btree_deadlock_ops); debugfs_create_file("write_points", 0400, c->fs_debug_dir, c->btree_debug, &write_points_ops); c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir); if (IS_ERR_OR_NULL(c->btree_debug_dir)) return; for (bd = c->btree_debug; bd < c->btree_debug + ARRAY_SIZE(c->btree_debug); bd++) { bd->id = bd - c->btree_debug; bch2_fs_debug_btree_init(c, bd); } } #endif void bch2_debug_exit(void) { if (!IS_ERR_OR_NULL(bch_debug)) debugfs_remove_recursive(bch_debug); } int __init bch2_debug_init(void) { bch_debug = debugfs_create_dir("bcachefs", NULL); return 0; }
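/*
 * Hedged, self-contained sketch (not bcachefs code): the minimal shape of a
 * read-only debugfs file, analogous to the dump_iter/flush_buf machinery
 * above but serving a preformatted buffer with simple_read_from_buffer()
 * instead of flushing a printbuf incrementally. All names are hypothetical.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static char example_msg[] = "hello from debugfs\n";
static struct dentry *example_dir;

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t size, loff_t *ppos)
{
	/* copies at most @size bytes starting at *ppos and advances *ppos */
	return simple_read_from_buffer(buf, size, ppos, example_msg,
				       sizeof(example_msg) - 1);
}

static const struct file_operations example_ops = {
	.owner	= THIS_MODULE,
	.read	= example_read,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	debugfs_create_file("message", 0400, example_dir, NULL, &example_ops);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");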
819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/mempool.h> #include <linux/gfs2_ondisk.h> #include <linux/bio.h> #include <linux/fs.h> #include <linux/list_sort.h> #include <linux/blkdev.h> #include "bmap.h" #include "dir.h" #include "gfs2.h" #include "incore.h" #include "inode.h" #include "glock.h" #include "glops.h" #include "log.h" #include "lops.h" #include "meta_io.h" #include "recovery.h" #include "rgrp.h" #include "trans.h" #include "util.h" #include "trace_gfs2.h" /** * gfs2_pin - Pin a buffer in memory * @sdp: The superblock * @bh: The buffer to be pinned * * The log lock must be held when calling this function */ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh) { struct gfs2_bufdata *bd; BUG_ON(!current->journal_info); clear_buffer_dirty(bh); if (test_set_buffer_pinned(bh)) gfs2_assert_withdraw(sdp, 0); if (!buffer_uptodate(bh)) gfs2_io_error_bh_wd(sdp, bh); bd = bh->b_private; /* If this buffer is in the AIL and it has already been written * to in-place disk block, remove it from the AIL. 
*/ spin_lock(&sdp->sd_ail_lock); if (bd->bd_tr) list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list); spin_unlock(&sdp->sd_ail_lock); get_bh(bh); atomic_inc(&sdp->sd_log_pinned); trace_gfs2_pin(bd, 1); } static bool buffer_is_rgrp(const struct gfs2_bufdata *bd) { return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP; } static void maybe_release_space(struct gfs2_bufdata *bd) { struct gfs2_glock *gl = bd->bd_gl; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; struct gfs2_bitmap *bi = rgd->rd_bits + index; rgrp_lock_local(rgd); if (bi->bi_clone == NULL) goto out; if (sdp->sd_args.ar_discard) gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL); memcpy(bi->bi_clone + bi->bi_offset, bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes); clear_bit(GBF_FULL, &bi->bi_flags); rgd->rd_free_clone = rgd->rd_free; BUG_ON(rgd->rd_free_clone < rgd->rd_reserved); rgd->rd_extfail_pt = rgd->rd_free; out: rgrp_unlock_local(rgd); } /** * gfs2_unpin - Unpin a buffer * @sdp: the filesystem the buffer belongs to * @bh: The buffer to unpin * @tr: The system transaction being flushed */ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh, struct gfs2_trans *tr) { struct gfs2_bufdata *bd = bh->b_private; BUG_ON(!buffer_uptodate(bh)); BUG_ON(!buffer_pinned(bh)); lock_buffer(bh); mark_buffer_dirty(bh); clear_buffer_pinned(bh); if (buffer_is_rgrp(bd)) maybe_release_space(bd); spin_lock(&sdp->sd_ail_lock); if (bd->bd_tr) { list_del(&bd->bd_ail_st_list); brelse(bh); } else { struct gfs2_glock *gl = bd->bd_gl; list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list); atomic_inc(&gl->gl_ail_count); } bd->bd_tr = tr; list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list); spin_unlock(&sdp->sd_ail_lock); clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); trace_gfs2_pin(bd, 0); unlock_buffer(bh); atomic_dec(&sdp->sd_log_pinned); } void gfs2_log_incr_head(struct gfs2_sbd *sdp) { BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) && (sdp->sd_log_flush_head != sdp->sd_log_head)); if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) sdp->sd_log_flush_head = 0; } u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock) { struct gfs2_journal_extent *je; list_for_each_entry(je, &jd->extent_list, list) { if (lblock >= je->lblock && lblock < je->lblock + je->blocks) return je->dblock + lblock - je->lblock; } return -1; } /** * gfs2_end_log_write_bh - end log write of pagecache data with buffers * @sdp: The superblock * @folio: The folio * @offset: The first byte within the folio that completed * @size: The number of bytes that completed * @error: The i/o status * * This finds the relevant buffers and unlocks them and sets the * error flag according to the status of the i/o request. This is * used when the log is writing data which has an in-place version * that is pinned in the pagecache. */ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct folio *folio, size_t offset, size_t size, blk_status_t error) { struct buffer_head *bh, *next; bh = folio_buffers(folio); while (bh_offset(bh) < offset) bh = bh->b_this_page; do { if (error) mark_buffer_write_io_error(bh); unlock_buffer(bh); next = bh->b_this_page; size -= bh->b_size; brelse(bh); bh = next; } while (bh && size); } /** * gfs2_end_log_write - end of i/o to the log * @bio: The bio * * Each bio_vec contains either data from the pagecache or data * relating to the log itself. Here we iterate over the bio_vec * array, processing both kinds of data. 
* */ static void gfs2_end_log_write(struct bio *bio) { struct gfs2_sbd *sdp = bio->bi_private; struct bio_vec *bvec; struct bvec_iter_all iter_all; if (bio->bi_status) { if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status)) fs_err(sdp, "Error %d writing to journal, jid=%u\n", bio->bi_status, sdp->sd_jdesc->jd_jid); gfs2_withdraw_delayed(sdp); /* prevent more writes to the journal */ clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); wake_up(&sdp->sd_logd_waitq); } bio_for_each_segment_all(bvec, bio, iter_all) { struct page *page = bvec->bv_page; struct folio *folio = page_folio(page); if (folio && folio_buffers(folio)) gfs2_end_log_write_bh(sdp, folio, bvec->bv_offset, bvec->bv_len, bio->bi_status); else mempool_free(page, gfs2_page_pool); } bio_put(bio); if (atomic_dec_and_test(&sdp->sd_log_in_flight)) wake_up(&sdp->sd_log_flush_wait); } /** * gfs2_log_submit_bio - Submit any pending log bio * @biop: Address of the bio pointer * @opf: REQ_OP | op_flags * * Submit any pending part-built or full bio to the block device. If * there is no pending bio, then this is a no-op. */ void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf) { struct bio *bio = *biop; if (bio) { struct gfs2_sbd *sdp = bio->bi_private; atomic_inc(&sdp->sd_log_in_flight); bio->bi_opf = opf; submit_bio(bio); *biop = NULL; } } /** * gfs2_log_alloc_bio - Allocate a bio * @sdp: The super block * @blkno: The device block number we want to write to * @end_io: The bi_end_io callback * * Allocate a new bio, initialize it with the given parameters and return it. * * Returns: The newly allocated bio */ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, bio_end_io_t *end_io) { struct super_block *sb = sdp->sd_vfs; struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO); bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift; bio->bi_end_io = end_io; bio->bi_private = sdp; return bio; } /** * gfs2_log_get_bio - Get cached log bio, or allocate a new one * @sdp: The super block * @blkno: The device block number we want to write to * @biop: The bio to get or allocate * @op: REQ_OP * @end_io: The bi_end_io callback * @flush: Always flush the current bio and allocate a new one? * * If there is a cached bio, then if the next block number is sequential * with the previous one, return it, otherwise flush the bio to the * device. If there is no cached bio, or we just flushed it, then * allocate a new one. * * Returns: The bio to use for log writes */ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, struct bio **biop, enum req_op op, bio_end_io_t *end_io, bool flush) { struct bio *bio = *biop; if (bio) { u64 nblk; nblk = bio_end_sector(bio); nblk >>= sdp->sd_fsb2bb_shift; if (blkno == nblk && !flush) return bio; gfs2_log_submit_bio(biop, op); } *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); return *biop; } /** * gfs2_log_write - write to log * @sdp: the filesystem * @jd: The journal descriptor * @page: the page to write * @size: the size of the data to write * @offset: the offset within the page * @blkno: block number of the log entry * * Try and add the page segment to the current bio. If that fails, * submit the current bio to the device and create a new one, and * then add the page segment to that. 
*/ void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, struct page *page, unsigned size, unsigned offset, u64 blkno) { struct bio *bio; int ret; bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE, gfs2_end_log_write, false); ret = bio_add_page(bio, page, size, offset); if (ret == 0) { bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE, gfs2_end_log_write, true); ret = bio_add_page(bio, page, size, offset); WARN_ON(ret == 0); } } /** * gfs2_log_write_bh - write a buffer's content to the log * @sdp: The super block * @bh: The buffer pointing to the in-place location * * This writes the content of the buffer to the next available location * in the log. The buffer will be unlocked once the i/o to the log has * completed. */ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh) { u64 dblock; dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head); gfs2_log_incr_head(sdp); gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0), bh->b_size, bh_offset(bh), dblock); } /** * gfs2_log_write_page - write one block stored in a page, into the log * @sdp: The superblock * @page: The struct page * * This writes the first block-sized part of the page into the log. Note * that the page must have been allocated from the gfs2_page_pool mempool * and that after this has been called, ownership has been transferred and * the page may be freed at any time. */ static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page) { struct super_block *sb = sdp->sd_vfs; u64 dblock; dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head); gfs2_log_incr_head(sdp); gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock); } /** * gfs2_end_log_read - end I/O callback for reads from the log * @bio: The bio * * Simply unlock the pages in the bio. The main thread will wait on them and * process them in order as necessary. */ static void gfs2_end_log_read(struct bio *bio) { int error = blk_status_to_errno(bio->bi_status); struct folio_iter fi; bio_for_each_folio_all(fi, bio) { /* We're abusing wb_err to get the error to gfs2_find_jhead */ filemap_set_wb_err(fi.folio->mapping, error); folio_end_read(fi.folio, !error); } bio_put(bio); } /** * gfs2_jhead_folio_search - Look for the journal head in a given page. * @jd: The journal descriptor * @head: The journal head to start from * @folio: The folio to look in * * Returns: 1 if found, 0 otherwise. */ static bool gfs2_jhead_folio_search(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, struct folio *folio) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_log_header_host lh; void *kaddr; unsigned int offset; bool ret = false; VM_BUG_ON_FOLIO(folio_test_large(folio), folio); kaddr = kmap_local_folio(folio, 0); for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) { if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) { if (lh.lh_sequence >= head->lh_sequence) *head = lh; else { ret = true; break; } } } kunmap_local(kaddr); return ret; } /** * gfs2_jhead_process_page - Search/cleanup a page * @jd: The journal descriptor * @index: Index of the page to look into * @head: The journal head to start from * @done: If set, perform only cleanup, else search and set if found. * * Find the folio with 'index' in the journal's mapping. Search the folio for * the journal head if requested (cleanup == false). Release refs on the * folio so the page cache can reclaim it. 
We grabbed a * reference on this folio twice, first when we did a grab_cache_page() * to obtain the folio to add it to the bio and second when we do a * filemap_get_folio() here to get the folio to wait on while I/O on it is being * completed. * This function is also used to free up a folio we might've grabbed but not * used. Maybe we added it to a bio, but not submitted it for I/O. Or we * submitted the I/O, but we already found the jhead so we only need to drop * our references to the folio. */ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, struct gfs2_log_header_host *head, bool *done) { struct folio *folio; folio = filemap_get_folio(jd->jd_inode->i_mapping, index); folio_wait_locked(folio); if (!folio_test_uptodate(folio)) *done = true; if (!*done) *done = gfs2_jhead_folio_search(jd, head, folio); /* filemap_get_folio() and the earlier grab_cache_page() */ folio_put_refs(folio, 2); } static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs) { struct bio *new; new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO); bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); bio_chain(new, prev); submit_bio(prev); return new; } /** * gfs2_find_jhead - find the head of a log * @jd: The journal descriptor * @head: The log descriptor for the head of the log is returned here * @keep_cache: If set inode pages will not be truncated * * Do a search of a journal by reading it in large chunks using bios and find * the valid log entry with the highest sequence number. (i.e. the log head) * * Returns: 0 on success, errno otherwise */ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, bool keep_cache) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct address_space *mapping = jd->jd_inode->i_mapping; unsigned int block = 0, blocks_submitted = 0, blocks_read = 0; unsigned int bsize = sdp->sd_sb.sb_bsize, off; unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; unsigned int shift = PAGE_SHIFT - bsize_shift; unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift; struct gfs2_journal_extent *je; int ret = 0; struct bio *bio = NULL; struct folio *folio = NULL; bool done = false; errseq_t since; memset(head, 0, sizeof(*head)); if (list_empty(&jd->extent_list)) gfs2_map_journal_extents(sdp, jd); since = filemap_sample_wb_err(mapping); list_for_each_entry(je, &jd->extent_list, list) { u64 dblock = je->dblock; for (; block < je->lblock + je->blocks; block++, dblock++) { if (!folio) { folio = filemap_grab_folio(mapping, block >> shift); if (IS_ERR(folio)) { ret = PTR_ERR(folio); done = true; goto out; } off = 0; } if (bio && (off || block < blocks_submitted + max_blocks)) { sector_t sector = dblock << sdp->sd_fsb2bb_shift; if (bio_end_sector(bio) == sector) { if (bio_add_folio(bio, folio, bsize, off)) goto block_added; } if (off) { unsigned int blocks = (PAGE_SIZE - off) >> bsize_shift; bio = gfs2_chain_bio(bio, blocks); goto add_block_to_new_bio; } } if (bio) { blocks_submitted = block; submit_bio(bio); } bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read); bio->bi_opf = REQ_OP_READ; add_block_to_new_bio: if (!bio_add_folio(bio, folio, bsize, off)) BUG(); block_added: off += bsize; if (off == folio_size(folio)) folio = NULL; if (blocks_submitted <= blocks_read + max_blocks) { /* Keep at least one bio in flight */ continue; } gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done); blocks_read += PAGE_SIZE >> bsize_shift; if (done) goto out; /* found */ } } out: if (bio) 
submit_bio(bio); while (blocks_read < block) { gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done); blocks_read += PAGE_SIZE >> bsize_shift; } if (!ret) ret = filemap_check_wb_err(mapping, since); if (!keep_cache) truncate_inode_pages(mapping, 0); return ret; } static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, u32 ld_length, u32 ld_data1) { struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); struct gfs2_log_descriptor *ld = page_address(page); clear_page(ld); ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC); ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD); ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD); ld->ld_type = cpu_to_be32(ld_type); ld->ld_length = cpu_to_be32(ld_length); ld->ld_data1 = cpu_to_be32(ld_data1); ld->ld_data2 = 0; return page; } static void gfs2_check_magic(struct buffer_head *bh) { __be32 *ptr; clear_buffer_escaped(bh); ptr = kmap_local_folio(bh->b_folio, bh_offset(bh)); if (*ptr == cpu_to_be32(GFS2_MAGIC)) set_buffer_escaped(bh); kunmap_local(ptr); } static int blocknr_cmp(void *priv, const struct list_head *a, const struct list_head *b) { struct gfs2_bufdata *bda, *bdb; bda = list_entry(a, struct gfs2_bufdata, bd_list); bdb = list_entry(b, struct gfs2_bufdata, bd_list); if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr) return -1; if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr) return 1; return 0; } static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit, unsigned int total, struct list_head *blist, bool is_databuf) { struct gfs2_log_descriptor *ld; struct gfs2_bufdata *bd1 = NULL, *bd2; struct page *page; unsigned int num; unsigned n; __be64 *ptr; gfs2_log_lock(sdp); list_sort(NULL, blist, blocknr_cmp); bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list); while(total) { num = total; if (total > limit) num = limit; gfs2_log_unlock(sdp); page = gfs2_get_log_desc(sdp, is_databuf ? GFS2_LOG_DESC_JDATA : GFS2_LOG_DESC_METADATA, num + 1, num); ld = page_address(page); gfs2_log_lock(sdp); ptr = (__be64 *)(ld + 1); n = 0; list_for_each_entry_continue(bd1, blist, bd_list) { *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr); if (is_databuf) { gfs2_check_magic(bd1->bd_bh); *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 
1 : 0); } if (++n >= num) break; } gfs2_log_unlock(sdp); gfs2_log_write_page(sdp, page); gfs2_log_lock(sdp); n = 0; list_for_each_entry_continue(bd2, blist, bd_list) { get_bh(bd2->bd_bh); gfs2_log_unlock(sdp); lock_buffer(bd2->bd_bh); if (buffer_escaped(bd2->bd_bh)) { void *p; page = mempool_alloc(gfs2_page_pool, GFP_NOIO); p = page_address(page); memcpy_from_folio(p, bd2->bd_bh->b_folio, bh_offset(bd2->bd_bh), bd2->bd_bh->b_size); *(__be32 *)p = 0; clear_buffer_escaped(bd2->bd_bh); unlock_buffer(bd2->bd_bh); brelse(bd2->bd_bh); gfs2_log_write_page(sdp, page); } else { gfs2_log_write_bh(sdp, bd2->bd_bh); } gfs2_log_lock(sdp); if (++n >= num) break; } BUG_ON(total < num); total -= num; } gfs2_log_unlock(sdp); } static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */ unsigned int nbuf; if (tr == NULL) return; nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm; gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0); } static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { struct list_head *head; struct gfs2_bufdata *bd; if (tr == NULL) return; head = &tr->tr_buf; while (!list_empty(head)) { bd = list_first_entry(head, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); gfs2_unpin(sdp, bd->bd_bh, tr); } } static void buf_lo_before_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, int pass) { if (pass != 0) return; jd->jd_found_blocks = 0; jd->jd_replayed_blocks = 0; } #define obsolete_rgrp_replay \ "Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n" #define obsolete_rgrp_replay2 \ "busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n" static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log, u64 blkno) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_rgrpd *rgd; struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data; rgd = gfs2_blk2rgrpd(sdp, blkno, false); if (rgd && rgd->rd_addr == blkno && rgd->rd_bits && rgd->rd_bits->bi_bh) { fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno, jd->jd_jid, bh_log->b_blocknr); fs_info(sdp, obsolete_rgrp_replay2, buffer_busy(rgd->rd_bits->bi_bh) ?
1 : 0, buffer_pinned(rgd->rd_bits->bi_bh), rgd->rd_igeneration, be64_to_cpu(jrgd->rg_igeneration)); gfs2_dump_glock(NULL, rgd->rd_gl, true); } } static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_glock *gl = ip->i_gl; unsigned int blks = be32_to_cpu(ld->ld_data1); struct buffer_head *bh_log, *bh_ip; u64 blkno; int error = 0; if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA) return 0; gfs2_replay_incr_blk(jd, &start); for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) { blkno = be64_to_cpu(*ptr++); jd->jd_found_blocks++; if (gfs2_revoke_check(jd, blkno, start)) continue; error = gfs2_replay_read_block(jd, start, &bh_log); if (error) return error; bh_ip = gfs2_meta_new(gl, blkno); memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size); if (gfs2_meta_check(sdp, bh_ip)) error = -EIO; else { struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh_ip->b_data; if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) obsolete_rgrp(jd, bh_log, blkno); mark_buffer_dirty(bh_ip); } brelse(bh_log); brelse(bh_ip); if (error) break; jd->jd_replayed_blocks++; } return error; } static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { gfs2_inode_metasync(ip->i_gl); return; } if (pass != 1) return; gfs2_inode_metasync(ip->i_gl); fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n", jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks); } static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { struct gfs2_meta_header *mh; unsigned int offset; struct list_head *head = &sdp->sd_log_revokes; struct gfs2_bufdata *bd; struct page *page; unsigned int length; gfs2_flush_revokes(sdp); if (!sdp->sd_log_num_revoke) return; length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke); page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke); offset = sizeof(struct gfs2_log_descriptor); list_for_each_entry(bd, head, bd_list) { sdp->sd_log_num_revoke--; if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) { gfs2_log_write_page(sdp, page); page = mempool_alloc(gfs2_page_pool, GFP_NOIO); mh = page_address(page); clear_page(mh); mh->mh_magic = cpu_to_be32(GFS2_MAGIC); mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB); mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB); offset = sizeof(struct gfs2_meta_header); } *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno); offset += sizeof(u64); } gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke); gfs2_log_write_page(sdp, page); } void gfs2_drain_revokes(struct gfs2_sbd *sdp) { struct list_head *head = &sdp->sd_log_revokes; struct gfs2_bufdata *bd; struct gfs2_glock *gl; while (!list_empty(head)) { bd = list_first_entry(head, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); gl = bd->bd_gl; gfs2_glock_remove_revoke(gl); kmem_cache_free(gfs2_bufdata_cachep, bd); } } static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { gfs2_drain_revokes(sdp); } static void revoke_lo_before_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, int pass) { if (pass != 0) return; jd->jd_found_revokes = 0; jd->jd_replay_tail = head->lh_tail; } static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass) { struct gfs2_sbd *sdp = 
GFS2_SB(jd->jd_inode); unsigned int blks = be32_to_cpu(ld->ld_length); unsigned int revokes = be32_to_cpu(ld->ld_data1); struct buffer_head *bh; unsigned int offset; u64 blkno; int first = 1; int error; if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE) return 0; offset = sizeof(struct gfs2_log_descriptor); for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) { error = gfs2_replay_read_block(jd, start, &bh); if (error) return error; if (!first) gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB); while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) { blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset)); error = gfs2_revoke_add(jd, blkno, start); if (error < 0) { brelse(bh); return error; } else if (error) jd->jd_found_revokes++; if (!--revokes) break; offset += sizeof(u64); } brelse(bh); offset = sizeof(struct gfs2_meta_header); first = 0; } return 0; } static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { gfs2_revoke_clean(jd); return; } if (pass != 1) return; fs_info(sdp, "jid=%u: Found %u revoke tags\n", jd->jd_jid, jd->jd_found_revokes); gfs2_revoke_clean(jd); } /** * databuf_lo_before_commit - Scan the data buffers, writing as we go * @sdp: The filesystem * @tr: The system transaction being flushed */ static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { unsigned int limit = databuf_limit(sdp); unsigned int nbuf; if (tr == NULL) return; nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm; gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1); } static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_glock *gl = ip->i_gl; unsigned int blks = be32_to_cpu(ld->ld_data1); struct buffer_head *bh_log, *bh_ip; u64 blkno; u64 esc; int error = 0; if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA) return 0; gfs2_replay_incr_blk(jd, &start); for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) { blkno = be64_to_cpu(*ptr++); esc = be64_to_cpu(*ptr++); jd->jd_found_blocks++; if (gfs2_revoke_check(jd, blkno, start)) continue; error = gfs2_replay_read_block(jd, start, &bh_log); if (error) return error; bh_ip = gfs2_meta_new(gl, blkno); memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size); /* Unescape */ if (esc) { __be32 *eptr = (__be32 *)bh_ip->b_data; *eptr = cpu_to_be32(GFS2_MAGIC); } mark_buffer_dirty(bh_ip); brelse(bh_log); brelse(bh_ip); jd->jd_replayed_blocks++; } return error; } /* FIXME: sort out accounting for log blocks etc. */ static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { gfs2_inode_metasync(ip->i_gl); return; } if (pass != 1) return; /* data sync? 
*/ gfs2_inode_metasync(ip->i_gl); fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n", jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks); } static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { struct list_head *head; struct gfs2_bufdata *bd; if (tr == NULL) return; head = &tr->tr_databuf; while (!list_empty(head)) { bd = list_first_entry(head, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); gfs2_unpin(sdp, bd->bd_bh, tr); } } static const struct gfs2_log_operations gfs2_buf_lops = { .lo_before_commit = buf_lo_before_commit, .lo_after_commit = buf_lo_after_commit, .lo_before_scan = buf_lo_before_scan, .lo_scan_elements = buf_lo_scan_elements, .lo_after_scan = buf_lo_after_scan, .lo_name = "buf", }; static const struct gfs2_log_operations gfs2_revoke_lops = { .lo_before_commit = revoke_lo_before_commit, .lo_after_commit = revoke_lo_after_commit, .lo_before_scan = revoke_lo_before_scan, .lo_scan_elements = revoke_lo_scan_elements, .lo_after_scan = revoke_lo_after_scan, .lo_name = "revoke", }; static const struct gfs2_log_operations gfs2_databuf_lops = { .lo_before_commit = databuf_lo_before_commit, .lo_after_commit = databuf_lo_after_commit, .lo_scan_elements = databuf_lo_scan_elements, .lo_after_scan = databuf_lo_after_scan, .lo_name = "databuf", }; const struct gfs2_log_operations *gfs2_log_ops[] = { &gfs2_databuf_lops, &gfs2_buf_lops, &gfs2_revoke_lops, NULL, };
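The escape/unescape rule implemented above by gfs2_check_magic(), gfs2_before_commit() and databuf_lo_scan_elements() can be shown in isolation. The following userspace sketch is illustrative only: the helper names are made up, and EXAMPLE_MAGIC is assumed to mirror the on-disk GFS2 magic value. A journaled data block that happens to begin with the magic number gets those four bytes zeroed in the copy written to the log, the fact is recorded as an escape flag in the log descriptor, and replay writes the magic back before the block is marked dirty.

/*
 * Userspace sketch of magic escaping for journaled data blocks.
 * Names and the magic value are assumptions for illustration.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl(), standing in for cpu_to_be32() */

#define EXAMPLE_MAGIC 0x01161970u

/* Log side: copy the block, zero a leading magic, report the escape flag. */
int escape_for_log(const uint8_t *data, uint8_t *log_copy, size_t bsize)
{
	uint32_t be_magic = htonl(EXAMPLE_MAGIC);
	int escaped = 0;

	memcpy(log_copy, data, bsize);
	if (memcmp(log_copy, &be_magic, sizeof(be_magic)) == 0) {
		memset(log_copy, 0, sizeof(be_magic));
		escaped = 1;	/* recorded next to the block number in the descriptor */
	}
	return escaped;
}

/* Replay side: restore the magic when the descriptor marked the block escaped. */
void unescape_on_replay(uint8_t *block, int escaped)
{
	uint32_t be_magic = htonl(EXAMPLE_MAGIC);

	if (escaped)
		memcpy(block, &be_magic, sizeof(be_magic));
}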
// SPDX-License-Identifier: GPL-2.0-only #include "netlink.h" #include "common.h" #include "bitset.h" struct eee_req_info { struct ethnl_req_info base; }; struct eee_reply_data { struct ethnl_reply_data base; struct ethtool_keee eee; }; #define EEE_REPDATA(__reply_base) \ container_of(__reply_base, struct eee_reply_data, base) const struct nla_policy ethnl_eee_get_policy[] = { [ETHTOOL_A_EEE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), }; static int eee_prepare_data(const struct ethnl_req_info *req_base, struct ethnl_reply_data *reply_base, const struct genl_info *info) { struct eee_reply_data *data = EEE_REPDATA(reply_base); struct net_device *dev = reply_base->dev; struct ethtool_keee *eee = &data->eee; int ret; if (!dev->ethtool_ops->get_eee) return -EOPNOTSUPP; ret = ethnl_ops_begin(dev); if (ret < 0) return ret; ret = dev->ethtool_ops->get_eee(dev, eee); ethnl_ops_complete(dev); return ret; } static int eee_reply_size(const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; const struct eee_reply_data *data = EEE_REPDATA(reply_base); const struct ethtool_keee *eee = &data->eee; int len = 0; int ret; /* MODES_OURS */ ret = ethnl_bitset_size(eee->advertised, eee->supported, __ETHTOOL_LINK_MODE_MASK_NBITS, link_mode_names, compact); if (ret < 0) return ret; len += ret; /* MODES_PEERS */ ret = ethnl_bitset_size(eee->lp_advertised, NULL, __ETHTOOL_LINK_MODE_MASK_NBITS, link_mode_names, compact); if (ret < 0) return ret; len += ret; len += nla_total_size(sizeof(u8)) + /* _EEE_ACTIVE */ nla_total_size(sizeof(u8)) + /* _EEE_ENABLED */ nla_total_size(sizeof(u8)) + /* _EEE_TX_LPI_ENABLED */ nla_total_size(sizeof(u32)); /* _EEE_TX_LPI_TIMER */ return len; } static int eee_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; const struct eee_reply_data *data = EEE_REPDATA(reply_base); const struct ethtool_keee *eee = &data->eee; int ret; ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_OURS, eee->advertised, eee->supported, __ETHTOOL_LINK_MODE_MASK_NBITS, link_mode_names, compact); if (ret < 0) return ret; ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_PEER, eee->lp_advertised, NULL, __ETHTOOL_LINK_MODE_MASK_NBITS, link_mode_names, compact); if (ret < 0) return ret; if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, eee->eee_active) || nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, eee->eee_enabled) || nla_put_u8(skb, ETHTOOL_A_EEE_TX_LPI_ENABLED, eee->tx_lpi_enabled) || nla_put_u32(skb, ETHTOOL_A_EEE_TX_LPI_TIMER, eee->tx_lpi_timer)) return -EMSGSIZE; return 0; } /* EEE_SET */ const struct nla_policy ethnl_eee_set_policy[] = { [ETHTOOL_A_EEE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), [ETHTOOL_A_EEE_MODES_OURS] = { .type = NLA_NESTED }, [ETHTOOL_A_EEE_ENABLED] = { .type = NLA_U8 }, [ETHTOOL_A_EEE_TX_LPI_ENABLED]
= { .type = NLA_U8 }, [ETHTOOL_A_EEE_TX_LPI_TIMER] = { .type = NLA_U32 }, }; static int ethnl_set_eee_validate(struct ethnl_req_info *req_info, struct genl_info *info) { const struct ethtool_ops *ops = req_info->dev->ethtool_ops; return ops->get_eee && ops->set_eee ? 1 : -EOPNOTSUPP; } static int ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info) { struct net_device *dev = req_info->dev; struct nlattr **tb = info->attrs; struct ethtool_keee eee = {}; bool mod = false; int ret; ret = dev->ethtool_ops->get_eee(dev, &eee); if (ret < 0) return ret; ret = ethnl_update_bitset(eee.advertised, __ETHTOOL_LINK_MODE_MASK_NBITS, tb[ETHTOOL_A_EEE_MODES_OURS], link_mode_names, info->extack, &mod); if (ret < 0) return ret; ethnl_update_bool(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod); ethnl_update_bool(&eee.tx_lpi_enabled, tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod); ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER], &mod); if (!mod) return 0; ret = dev->ethtool_ops->set_eee(dev, &eee); return ret < 0 ? ret : 1; } const struct ethnl_request_ops ethnl_eee_request_ops = { .request_cmd = ETHTOOL_MSG_EEE_GET, .reply_cmd = ETHTOOL_MSG_EEE_GET_REPLY, .hdr_attr = ETHTOOL_A_EEE_HEADER, .req_info_size = sizeof(struct eee_req_info), .reply_data_size = sizeof(struct eee_reply_data), .prepare_data = eee_prepare_data, .reply_size = eee_reply_size, .fill_reply = eee_fill_reply, .set_validate = ethnl_set_eee_validate, .set = ethnl_set_eee, .set_ntf_cmd = ETHTOOL_MSG_EEE_NTF, };
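For context, the same EEE state that eee_fill_reply() exports over netlink has long been visible through the older ETHTOOL_GEEE ioctl, which ends up in the same driver get_eee() callback. The userspace sketch below is not the ETHTOOL_MSG_EEE_GET netlink flow implemented above; it is a minimal query via the legacy interface, with a placeholder interface name and abbreviated error handling.

/* Minimal legacy-ioctl EEE query; "eth0" is a placeholder interface name. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eee;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("EEE enabled: %u, active: %u, tx LPI timer: %u\n",
		       eee.eee_enabled, eee.eee_active, eee.tx_lpi_timer);
	close(fd);
	return 0;
}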
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2020 Google LLC. */ #ifndef _LINUX_BPF_LSM_H #define _LINUX_BPF_LSM_H #include <linux/sched.h> #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/lsm_hooks.h> #ifdef CONFIG_BPF_LSM #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ RET bpf_lsm_##NAME(__VA_ARGS__); #include <linux/lsm_hook_defs.h> #undef LSM_HOOK struct bpf_storage_blob { struct bpf_local_storage __rcu *storage; }; extern struct lsm_blob_sizes bpf_lsm_blob_sizes; int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, const struct bpf_prog *prog); bool bpf_lsm_is_sleepable_hook(u32 btf_id); bool bpf_lsm_is_trusted(const struct bpf_prog *prog); static inline struct bpf_storage_blob *bpf_inode( const struct inode *inode) { if (unlikely(!inode->i_security)) return NULL; return inode->i_security + bpf_lsm_blob_sizes.lbs_inode; } extern const struct bpf_func_proto bpf_inode_storage_get_proto; extern const struct bpf_func_proto bpf_inode_storage_delete_proto; void bpf_inode_storage_free(struct inode *inode); void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func); int bpf_lsm_get_retval_range(const struct bpf_prog *prog, struct bpf_retval_range *range); int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str, const struct bpf_dynptr *value_p, int flags); int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str); bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog); #else /* !CONFIG_BPF_LSM */ static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id) { return false; } static inline bool bpf_lsm_is_trusted(const struct bpf_prog *prog) { return false; } static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, const struct bpf_prog *prog) { return -EOPNOTSUPP; } static inline struct bpf_storage_blob *bpf_inode( const struct inode *inode) { return NULL; } static inline void bpf_inode_storage_free(struct inode *inode) { } static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func) { } static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog, struct bpf_retval_range *range) { return -EOPNOTSUPP; } static inline int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str, const struct bpf_dynptr *value_p, int flags) { return -EOPNOTSUPP; } static inline int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str) { return -EOPNOTSUPP; } static inline bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog) { return false; } #endif /* CONFIG_BPF_LSM */ #endif /* _LINUX_BPF_LSM_H */
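The LSM_HOOK expansion at the top of this header declares one bpf_lsm_<hook>() attachment point per LSM hook when CONFIG_BPF_LSM is enabled. On the consumer side, a BPF LSM program attaches to such a point by name. The sketch below is a minimal example of that shape only: the hook choice, program name and (empty) policy are illustrative, and it assumes a libbpf-style build with a generated vmlinux.h and clang -target bpf.

// SPDX-License-Identifier: GPL-2.0
/* Minimal BPF LSM program sketch; names and policy are illustrative. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* Attaches to the bpf_lsm_inode_unlink() point declared via LSM_HOOK above.
 * A real policy would inspect @dir and @victim; this example allows everything. */
SEC("lsm/inode_unlink")
int BPF_PROG(example_inode_unlink, struct inode *dir, struct dentry *victim)
{
	return 0;	/* 0 allows; a negative errno would deny the operation */
}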
/* * Non-physical true random number generator based on timing jitter -- * Linux Kernel Crypto API specific code * * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2023 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, and the entire permission notice in its entirety, * including the disclaimer of warranties. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * ALTERNATIVELY, this product may be distributed under the terms of * the GNU General Public License, in which case the provisions of the GPL2 are * required INSTEAD OF the above restrictions. (This clause is * necessary due to a potential bad interaction between the GPL and * the restrictions contained in a BSD-style copyright.) * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE.
*/ #include <crypto/hash.h> #include <crypto/sha3.h> #include <linux/fips.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/time.h> #include <crypto/internal/rng.h> #include "jitterentropy.h" #define JENT_CONDITIONING_HASH "sha3-256-generic" /*************************************************************************** * Helper function ***************************************************************************/ void *jent_kvzalloc(unsigned int len) { return kvzalloc(len, GFP_KERNEL); } void jent_kvzfree(void *ptr, unsigned int len) { kvfree_sensitive(ptr, len); } void *jent_zalloc(unsigned int len) { return kzalloc(len, GFP_KERNEL); } void jent_zfree(void *ptr) { kfree_sensitive(ptr); } /* * Obtain a high-resolution time stamp value. The time stamp is used to measure * the execution time of a given code path and its variations. Hence, the time * stamp must have a sufficiently high resolution. * * Note, if the function returns zero because a given architecture does not * implement a high-resolution time stamp, the RNG code's runtime test * will detect it and will not produce output. */ void jent_get_nstime(__u64 *out) { __u64 tmp = 0; tmp = random_get_entropy(); /* * If random_get_entropy does not return a value, i.e. it is not * implemented for a given architecture, use a clock source. * hoping that there are timers we can work with. */ if (tmp == 0) tmp = ktime_get_ns(); *out = tmp; jent_raw_hires_entropy_store(tmp); } int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, unsigned int addtl_len, __u64 hash_loop_cnt, unsigned int stuck) { struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state; SHASH_DESC_ON_STACK(desc, hash_state_desc->tfm); u8 intermediary[SHA3_256_DIGEST_SIZE]; __u64 j = 0; int ret; desc->tfm = hash_state_desc->tfm; if (sizeof(intermediary) != crypto_shash_digestsize(desc->tfm)) { pr_warn_ratelimited("Unexpected digest size\n"); return -EINVAL; } /* * This loop fills a buffer which is injected into the entropy pool. * The main reason for this loop is to execute something over which we * can perform a timing measurement. The injection of the resulting * data into the pool is performed to ensure the result is used and * the compiler cannot optimize the loop away in case the result is not * used at all. Yet that data is considered "additional information" * considering the terminology from SP800-90A without any entropy. * * Note, it does not matter which or how much data you inject, we are * interested in one Keccack1600 compression operation performed with * the crypto_shash_final. */ for (j = 0; j < hash_loop_cnt; j++) { ret = crypto_shash_init(desc) ?: crypto_shash_update(desc, intermediary, sizeof(intermediary)) ?: crypto_shash_finup(desc, addtl, addtl_len, intermediary); if (ret) goto err; } /* * Inject the data from the previous loop into the pool. This data is * not considered to contain any entropy, but it stirs the pool a bit. */ ret = crypto_shash_update(desc, intermediary, sizeof(intermediary)); if (ret) goto err; /* * Insert the time stamp into the hash context representing the pool. * * If the time stamp is stuck, do not finally insert the value into the * entropy pool. Although this operation should not do any harm even * when the time stamp has no entropy, SP800-90B requires that any * conditioning operation to have an identical amount of input data * according to section 3.1.5. 
*/ if (!stuck) { ret = crypto_shash_update(hash_state_desc, (u8 *)&time, sizeof(__u64)); } err: shash_desc_zero(desc); memzero_explicit(intermediary, sizeof(intermediary)); return ret; } int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len) { struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state; u8 jent_block[SHA3_256_DIGEST_SIZE]; /* Obtain data from entropy pool and re-initialize it */ int ret = crypto_shash_final(hash_state_desc, jent_block) ?: crypto_shash_init(hash_state_desc) ?: crypto_shash_update(hash_state_desc, jent_block, sizeof(jent_block)); if (!ret && dst_len) memcpy(dst, jent_block, dst_len); memzero_explicit(jent_block, sizeof(jent_block)); return ret; } /*************************************************************************** * Kernel crypto API interface ***************************************************************************/ struct jitterentropy { spinlock_t jent_lock; struct rand_data *entropy_collector; struct crypto_shash *tfm; struct shash_desc *sdesc; }; static void jent_kcapi_cleanup(struct crypto_tfm *tfm) { struct jitterentropy *rng = crypto_tfm_ctx(tfm); spin_lock(&rng->jent_lock); if (rng->sdesc) { shash_desc_zero(rng->sdesc); kfree(rng->sdesc); } rng->sdesc = NULL; if (rng->tfm) crypto_free_shash(rng->tfm); rng->tfm = NULL; if (rng->entropy_collector) jent_entropy_collector_free(rng->entropy_collector); rng->entropy_collector = NULL; spin_unlock(&rng->jent_lock); } static int jent_kcapi_init(struct crypto_tfm *tfm) { struct jitterentropy *rng = crypto_tfm_ctx(tfm); struct crypto_shash *hash; struct shash_desc *sdesc; int size, ret = 0; spin_lock_init(&rng->jent_lock); /* * Use SHA3-256 as conditioner. We allocate only the generic * implementation as we are not interested in high-performance. The * execution time of the SHA3 operation is measured and adds to the * Jitter RNG's unpredictable behavior. If we have a slower hash * implementation, the execution timing variations are larger. When * using a fast implementation, we would need to call it more often * as its variations are lower. */ hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0); if (IS_ERR(hash)) { pr_err("Cannot allocate conditioning digest\n"); return PTR_ERR(hash); } rng->tfm = hash; size = sizeof(struct shash_desc) + crypto_shash_descsize(hash); sdesc = kmalloc(size, GFP_KERNEL); if (!sdesc) { ret = -ENOMEM; goto err; } sdesc->tfm = hash; crypto_shash_init(sdesc); rng->sdesc = sdesc; rng->entropy_collector = jent_entropy_collector_alloc(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, sdesc); if (!rng->entropy_collector) { ret = -ENOMEM; goto err; } spin_lock_init(&rng->jent_lock); return 0; err: jent_kcapi_cleanup(tfm); return ret; } static int jent_kcapi_random(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *rdata, unsigned int dlen) { struct jitterentropy *rng = crypto_rng_ctx(tfm); int ret = 0; spin_lock(&rng->jent_lock); ret = jent_read_entropy(rng->entropy_collector, rdata, dlen); if (ret == -3) { /* Handle permanent health test error */ /* * If the kernel was booted with fips=1, it implies that * the entire kernel acts as a FIPS 140 module. In this case * an SP800-90B permanent health test error is treated as * a FIPS module error. 
*/ if (fips_enabled) panic("Jitter RNG permanent health test failure\n"); pr_err("Jitter RNG permanent health test failure\n"); ret = -EFAULT; } else if (ret == -2) { /* Handle intermittent health test error */ pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n"); ret = -EAGAIN; } else if (ret == -1) { /* Handle other errors */ ret = -EINVAL; } spin_unlock(&rng->jent_lock); return ret; } static int jent_kcapi_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { return 0; } static struct rng_alg jent_alg = { .generate = jent_kcapi_random, .seed = jent_kcapi_reset, .seedsize = 0, .base = { .cra_name = "jitterentropy_rng", .cra_driver_name = "jitterentropy_rng", .cra_priority = 100, .cra_ctxsize = sizeof(struct jitterentropy), .cra_module = THIS_MODULE, .cra_init = jent_kcapi_init, .cra_exit = jent_kcapi_cleanup, } }; static int __init jent_mod_init(void) { SHASH_DESC_ON_STACK(desc, tfm); struct crypto_shash *tfm; int ret = 0; jent_testing_init(); tfm = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0); if (IS_ERR(tfm)) { jent_testing_exit(); return PTR_ERR(tfm); } desc->tfm = tfm; crypto_shash_init(desc); ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc, NULL); shash_desc_zero(desc); crypto_free_shash(tfm); if (ret) { /* Handle permanent health test error */ if (fips_enabled) panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret); jent_testing_exit(); pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret); return -EFAULT; } return crypto_register_rng(&jent_alg); } static void __exit jent_mod_exit(void) { jent_testing_exit(); crypto_unregister_rng(&jent_alg); } module_init(jent_mod_init); module_exit(jent_mod_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); MODULE_DESCRIPTION("Non-physical True Random Number Generator based on CPU Jitter"); MODULE_ALIAS_CRYPTO("jitterentropy_rng");
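Once crypto_register_rng() in jent_mod_init() has succeeded, other kernel code can consume this RNG through the generic crypto RNG API. The snippet below is a minimal sketch of such a caller, not part of this file; the function name is hypothetical and error handling is trimmed to the essentials.

/* Hypothetical in-kernel consumer of the "jitterentropy_rng" algorithm. */
#include <linux/err.h>
#include <linux/types.h>
#include <crypto/rng.h>

static int example_get_jitter_bytes(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("jitterentropy_rng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* May return -EAGAIN on an intermittent health test failure,
	 * see jent_kcapi_random() above. */
	ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}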
// SPDX-License-Identifier: GPL-2.0-only /* * linux/net/sunrpc/sched.c * * Scheduling for synchronous and asynchronous RPC requests.
* * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de> * * TCP NFS related read + write fixes * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/mempool.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/freezer.h> #include <linux/sched/mm.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/metrics.h> #include "sunrpc.h" #define CREATE_TRACE_POINTS #include <trace/events/sunrpc.h> /* * RPC slabs and memory pools */ #define RPC_BUFFER_MAXSIZE (2048) #define RPC_BUFFER_POOLSIZE (8) #define RPC_TASK_POOLSIZE (8) static struct kmem_cache *rpc_task_slabp __read_mostly; static struct kmem_cache *rpc_buffer_slabp __read_mostly; static mempool_t *rpc_task_mempool __read_mostly; static mempool_t *rpc_buffer_mempool __read_mostly; static void rpc_async_schedule(struct work_struct *); static void rpc_release_task(struct rpc_task *task); static void __rpc_queue_timer_fn(struct work_struct *); /* * RPC tasks sit here while waiting for conditions to improve. */ static struct rpc_wait_queue delay_queue; /* * rpciod-related stuff */ struct workqueue_struct *rpciod_workqueue __read_mostly; struct workqueue_struct *xprtiod_workqueue __read_mostly; EXPORT_SYMBOL_GPL(xprtiod_workqueue); gfp_t rpc_task_gfp_mask(void) { if (current->flags & PF_WQ_WORKER) return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; return GFP_KERNEL; } EXPORT_SYMBOL_GPL(rpc_task_gfp_mask); bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status) { if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0) return true; return false; } unsigned long rpc_task_timeout(const struct rpc_task *task) { unsigned long timeout = READ_ONCE(task->tk_timeout); if (timeout != 0) { unsigned long now = jiffies; if (time_before(now, timeout)) return timeout - now; } return 0; } EXPORT_SYMBOL_GPL(rpc_task_timeout); /* * Disable the timer for a given RPC task. Should be called with * queue->lock and bh_disabled in order to avoid races within * rpc_run_timer(). */ static void __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) { if (list_empty(&task->u.tk_wait.timer_list)) return; task->tk_timeout = 0; list_del(&task->u.tk_wait.timer_list); if (list_empty(&queue->timer_list.list)) cancel_delayed_work(&queue->timer_list.dwork); } static void rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) { unsigned long now = jiffies; queue->timer_list.expires = expires; if (time_before_eq(expires, now)) expires = 0; else expires -= now; mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires); } /* * Set up a timer for the current task. 
*/ static void __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, unsigned long timeout) { task->tk_timeout = timeout; if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires)) rpc_set_queue_timer(queue, timeout); list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); } static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) { if (queue->priority != priority) { queue->priority = priority; queue->nr = 1U << priority; } } static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) { rpc_set_waitqueue_priority(queue, queue->maxpriority); } /* * Add a request to a queue list */ static void __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task) { struct rpc_task *t; list_for_each_entry(t, q, u.tk_wait.list) { if (t->tk_owner == task->tk_owner) { list_add_tail(&task->u.tk_wait.links, &t->u.tk_wait.links); /* Cache the queue head in task->u.tk_wait.list */ task->u.tk_wait.list.next = q; task->u.tk_wait.list.prev = NULL; return; } } INIT_LIST_HEAD(&task->u.tk_wait.links); list_add_tail(&task->u.tk_wait.list, q); } /* * Remove request from a queue list */ static void __rpc_list_dequeue_task(struct rpc_task *task) { struct list_head *q; struct rpc_task *t; if (task->u.tk_wait.list.prev == NULL) { list_del(&task->u.tk_wait.links); return; } if (!list_empty(&task->u.tk_wait.links)) { t = list_first_entry(&task->u.tk_wait.links, struct rpc_task, u.tk_wait.links); /* Assume __rpc_list_enqueue_task() cached the queue head */ q = t->u.tk_wait.list.next; list_add_tail(&t->u.tk_wait.list, q); list_del(&task->u.tk_wait.links); } list_del(&task->u.tk_wait.list); } /* * Add new request to a priority queue. */ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task, unsigned char queue_priority) { if (unlikely(queue_priority > queue->maxpriority)) queue_priority = queue->maxpriority; __rpc_list_enqueue_task(&queue->tasks[queue_priority], task); } /* * Add new request to wait queue. */ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task, unsigned char queue_priority) { INIT_LIST_HEAD(&task->u.tk_wait.timer_list); if (RPC_IS_PRIORITY(queue)) __rpc_add_wait_queue_priority(queue, task, queue_priority); else list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); task->tk_waitqueue = queue; queue->qlen++; /* barrier matches the read in rpc_wake_up_task_queue_locked() */ smp_wmb(); rpc_set_queued(task); } /* * Remove request from a priority queue. */ static void __rpc_remove_wait_queue_priority(struct rpc_task *task) { __rpc_list_dequeue_task(task); } /* * Remove request from queue. * Note: must be called with spin lock held. 
*/ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) { __rpc_disable_timer(queue, task); if (RPC_IS_PRIORITY(queue)) __rpc_remove_wait_queue_priority(task); else list_del(&task->u.tk_wait.list); queue->qlen--; } static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues) { int i; spin_lock_init(&queue->lock); for (i = 0; i < ARRAY_SIZE(queue->tasks); i++) INIT_LIST_HEAD(&queue->tasks[i]); queue->maxpriority = nr_queues - 1; rpc_reset_waitqueue_priority(queue); queue->qlen = 0; queue->timer_list.expires = 0; INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn); INIT_LIST_HEAD(&queue->timer_list.list); rpc_assign_waitqueue_name(queue, qname); } void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname) { __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY); } EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue); void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) { __rpc_init_priority_wait_queue(queue, qname, 1); } EXPORT_SYMBOL_GPL(rpc_init_wait_queue); void rpc_destroy_wait_queue(struct rpc_wait_queue *queue) { cancel_delayed_work_sync(&queue->timer_list.dwork); } EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) { if (unlikely(current->flags & PF_EXITING)) return -EINTR; schedule(); if (signal_pending_state(mode, current)) return -ERESTARTSYS; return 0; } #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) static void rpc_task_set_debuginfo(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; /* Might be a task carrying a reverse-direction operation */ if (!clnt) { static atomic_t rpc_pid; task->tk_pid = atomic_inc_return(&rpc_pid); return; } task->tk_pid = atomic_inc_return(&clnt->cl_pid); } #else static inline void rpc_task_set_debuginfo(struct rpc_task *task) { } #endif static void rpc_set_active(struct rpc_task *task) { rpc_task_set_debuginfo(task); set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); trace_rpc_task_begin(task, NULL); } /* * Mark an RPC call as having completed by clearing the 'active' bit * and then waking up all tasks that were sleeping. */ static int rpc_complete_task(struct rpc_task *task) { void *m = &task->tk_runstate; wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE); unsigned long flags; int ret; trace_rpc_task_complete(task, NULL); spin_lock_irqsave(&wq->lock, flags); clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); ret = atomic_dec_and_test(&task->tk_count); if (waitqueue_active(wq)) __wake_up_locked_key(wq, TASK_NORMAL, &k); spin_unlock_irqrestore(&wq->lock, flags); return ret; } /* * Allow callers to wait for completion of an RPC call * * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit() * to enforce taking of the wq->lock and hence avoid races with * rpc_complete_task(). */ int rpc_wait_for_completion_task(struct rpc_task *task) { return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, rpc_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); } EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task); /* * Make an RPC task runnable. * * Note: If the task is ASYNC, and is being made runnable after sitting on an * rpc_wait_queue, this must be called with the queue spinlock held to protect * the wait queue operation. 
* Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(), * which is needed to ensure that __rpc_execute() doesn't loop (due to the * lockless RPC_IS_QUEUED() test) before we've had a chance to test * the RPC_TASK_RUNNING flag. */ static void rpc_make_runnable(struct workqueue_struct *wq, struct rpc_task *task) { bool need_wakeup = !rpc_test_and_set_running(task); rpc_clear_queued(task); if (!need_wakeup) return; if (RPC_IS_ASYNC(task)) { INIT_WORK(&task->u.tk_work, rpc_async_schedule); queue_work(wq, &task->u.tk_work); } else { smp_mb__after_atomic(); wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); } } /* * Prepare for sleeping on a wait queue. * By always appending tasks to the list we ensure FIFO behavior. * NB: An RPC task will only receive interrupt-driven events as long * as it's on a wait queue. */ static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, unsigned char queue_priority) { trace_rpc_task_sleep(task, q); __rpc_add_wait_queue(q, task, queue_priority); } static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, unsigned char queue_priority) { if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) return; __rpc_do_sleep_on_priority(q, task, queue_priority); } static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q, struct rpc_task *task, unsigned long timeout, unsigned char queue_priority) { if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) return; if (time_is_after_jiffies(timeout)) { __rpc_do_sleep_on_priority(q, task, queue_priority); __rpc_add_timer(q, task, timeout); } else task->tk_status = -ETIMEDOUT; } static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action) { if (action && !WARN_ON_ONCE(task->tk_callback != NULL)) task->tk_callback = action; } static bool rpc_sleep_check_activated(struct rpc_task *task) { /* We shouldn't ever put an inactive task to sleep */ if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) { task->tk_status = -EIO; rpc_put_task_async(task); return false; } return true; } void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task, rpc_action action, unsigned long timeout) { if (!rpc_sleep_check_activated(task)) return; rpc_set_tk_callback(task, action); /* * Protect the queue operations. */ spin_lock(&q->lock); __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority); spin_unlock(&q->lock); } EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout); void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, rpc_action action) { if (!rpc_sleep_check_activated(task)) return; rpc_set_tk_callback(task, action); WARN_ON_ONCE(task->tk_timeout != 0); /* * Protect the queue operations. */ spin_lock(&q->lock); __rpc_sleep_on_priority(q, task, task->tk_priority); spin_unlock(&q->lock); } EXPORT_SYMBOL_GPL(rpc_sleep_on); void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q, struct rpc_task *task, unsigned long timeout, int priority) { if (!rpc_sleep_check_activated(task)) return; priority -= RPC_PRIORITY_LOW; /* * Protect the queue operations. */ spin_lock(&q->lock); __rpc_sleep_on_priority_timeout(q, task, timeout, priority); spin_unlock(&q->lock); } EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout); void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, int priority) { if (!rpc_sleep_check_activated(task)) return; WARN_ON_ONCE(task->tk_timeout != 0); priority -= RPC_PRIORITY_LOW; /* * Protect the queue operations. 
*/ spin_lock(&q->lock); __rpc_sleep_on_priority(q, task, priority); spin_unlock(&q->lock); } EXPORT_SYMBOL_GPL(rpc_sleep_on_priority); /** * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task * @wq: workqueue on which to run task * @queue: wait queue * @task: task to be woken up * * Caller must hold queue->lock, and have cleared the task queued flag. */ static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq, struct rpc_wait_queue *queue, struct rpc_task *task) { /* Has the task been executed yet? If not, we cannot wake it up! */ if (!RPC_IS_ACTIVATED(task)) { printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); return; } trace_rpc_task_wakeup(task, queue); __rpc_remove_wait_queue(queue, task); rpc_make_runnable(wq, task); } /* * Wake up a queued task while the queue lock is being held */ static struct rpc_task * rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq, struct rpc_wait_queue *queue, struct rpc_task *task, bool (*action)(struct rpc_task *, void *), void *data) { if (RPC_IS_QUEUED(task)) { smp_rmb(); if (task->tk_waitqueue == queue) { if (action == NULL || action(task, data)) { __rpc_do_wake_up_task_on_wq(wq, queue, task); return task; } } } return NULL; } /* * Wake up a queued task while the queue lock is being held */ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) { rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue, task, NULL, NULL); } /* * Wake up a task on a specific queue */ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) { if (!RPC_IS_QUEUED(task)) return; spin_lock(&queue->lock); rpc_wake_up_task_queue_locked(queue, task); spin_unlock(&queue->lock); } EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task); static bool rpc_task_action_set_status(struct rpc_task *task, void *status) { task->tk_status = *(int *)status; return true; } static void rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue, struct rpc_task *task, int status) { rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue, task, rpc_task_action_set_status, &status); } /** * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status * @queue: pointer to rpc_wait_queue * @task: pointer to rpc_task * @status: integer error value * * If @task is queued on @queue, then it is woken up, and @task->tk_status is * set to the value of @status. */ void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue, struct rpc_task *task, int status) { if (!RPC_IS_QUEUED(task)) return; spin_lock(&queue->lock); rpc_wake_up_task_queue_set_status_locked(queue, task, status); spin_unlock(&queue->lock); } /* * Wake up the next task on a priority queue. */ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue) { struct list_head *q; struct rpc_task *task; /* * Service the privileged queue. */ q = &queue->tasks[RPC_NR_PRIORITY - 1]; if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) { task = list_first_entry(q, struct rpc_task, u.tk_wait.list); goto out; } /* * Service a batch of tasks from a single owner. */ q = &queue->tasks[queue->priority]; if (!list_empty(q) && queue->nr) { queue->nr--; task = list_first_entry(q, struct rpc_task, u.tk_wait.list); goto out; } /* * Service the next queue. 
*/ do { if (q == &queue->tasks[0]) q = &queue->tasks[queue->maxpriority]; else q = q - 1; if (!list_empty(q)) { task = list_first_entry(q, struct rpc_task, u.tk_wait.list); goto new_queue; } } while (q != &queue->tasks[queue->priority]); rpc_reset_waitqueue_priority(queue); return NULL; new_queue: rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); out: return task; } static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue) { if (RPC_IS_PRIORITY(queue)) return __rpc_find_next_queued_priority(queue); if (!list_empty(&queue->tasks[0])) return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list); return NULL; } /* * Wake up the first task on the wait queue. */ struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq, struct rpc_wait_queue *queue, bool (*func)(struct rpc_task *, void *), void *data) { struct rpc_task *task = NULL; spin_lock(&queue->lock); task = __rpc_find_next_queued(queue); if (task != NULL) task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, func, data); spin_unlock(&queue->lock); return task; } /* * Wake up the first task on the wait queue. */ struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue, bool (*func)(struct rpc_task *, void *), void *data) { return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data); } EXPORT_SYMBOL_GPL(rpc_wake_up_first); static bool rpc_wake_up_next_func(struct rpc_task *task, void *data) { return true; } /* * Wake up the next task on the wait queue. */ struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue) { return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL); } EXPORT_SYMBOL_GPL(rpc_wake_up_next); /** * rpc_wake_up_locked - wake up all rpc_tasks * @queue: rpc_wait_queue on which the tasks are sleeping * */ static void rpc_wake_up_locked(struct rpc_wait_queue *queue) { struct rpc_task *task; for (;;) { task = __rpc_find_next_queued(queue); if (task == NULL) break; rpc_wake_up_task_queue_locked(queue, task); } } /** * rpc_wake_up - wake up all rpc_tasks * @queue: rpc_wait_queue on which the tasks are sleeping * * Grabs queue->lock */ void rpc_wake_up(struct rpc_wait_queue *queue) { spin_lock(&queue->lock); rpc_wake_up_locked(queue); spin_unlock(&queue->lock); } EXPORT_SYMBOL_GPL(rpc_wake_up); /** * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value. * @queue: rpc_wait_queue on which the tasks are sleeping * @status: status value to set */ static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status) { struct rpc_task *task; for (;;) { task = __rpc_find_next_queued(queue); if (task == NULL) break; rpc_wake_up_task_queue_set_status_locked(queue, task, status); } } /** * rpc_wake_up_status - wake up all rpc_tasks and set their status value. 
* @queue: rpc_wait_queue on which the tasks are sleeping * @status: status value to set * * Grabs queue->lock */ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) { spin_lock(&queue->lock); rpc_wake_up_status_locked(queue, status); spin_unlock(&queue->lock); } EXPORT_SYMBOL_GPL(rpc_wake_up_status); static void __rpc_queue_timer_fn(struct work_struct *work) { struct rpc_wait_queue *queue = container_of(work, struct rpc_wait_queue, timer_list.dwork.work); struct rpc_task *task, *n; unsigned long expires, now, timeo; spin_lock(&queue->lock); expires = now = jiffies; list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { timeo = task->tk_timeout; if (time_after_eq(now, timeo)) { trace_rpc_task_timeout(task, task->tk_action); task->tk_status = -ETIMEDOUT; rpc_wake_up_task_queue_locked(queue, task); continue; } if (expires == now || time_after(expires, timeo)) expires = timeo; } if (!list_empty(&queue->timer_list.list)) rpc_set_queue_timer(queue, expires); spin_unlock(&queue->lock); } static void __rpc_atrun(struct rpc_task *task) { if (task->tk_status == -ETIMEDOUT) task->tk_status = 0; } /* * Run a task at a later time */ void rpc_delay(struct rpc_task *task, unsigned long delay) { rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay); } EXPORT_SYMBOL_GPL(rpc_delay); /* * Helper to call task->tk_ops->rpc_call_prepare */ void rpc_prepare_task(struct rpc_task *task) { task->tk_ops->rpc_call_prepare(task, task->tk_calldata); } static void rpc_init_task_statistics(struct rpc_task *task) { /* Initialize retry counters */ task->tk_garb_retry = 2; task->tk_cred_retry = 2; /* starting timestamp */ task->tk_start = ktime_get(); } static void rpc_reset_task_statistics(struct rpc_task *task) { task->tk_timeouts = 0; task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT); rpc_init_task_statistics(task); } /* * Helper that calls task->tk_ops->rpc_call_done if it exists */ void rpc_exit_task(struct rpc_task *task) { trace_rpc_task_end(task, task->tk_action); task->tk_action = NULL; if (task->tk_ops->rpc_count_stats) task->tk_ops->rpc_count_stats(task, task->tk_calldata); else if (task->tk_client) rpc_count_iostats(task, task->tk_client->cl_metrics); if (task->tk_ops->rpc_call_done != NULL) { trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done); task->tk_ops->rpc_call_done(task, task->tk_calldata); if (task->tk_action != NULL) { /* Always release the RPC slot and buffer memory */ xprt_release(task); rpc_reset_task_statistics(task); } } } void rpc_signal_task(struct rpc_task *task) { struct rpc_wait_queue *queue; if (!RPC_IS_ACTIVATED(task)) return; if (!rpc_task_set_rpc_status(task, -ERESTARTSYS)) return; trace_rpc_task_signalled(task, task->tk_action); queue = READ_ONCE(task->tk_waitqueue); if (queue) rpc_wake_up_queued_task(queue, task); } void rpc_task_try_cancel(struct rpc_task *task, int error) { struct rpc_wait_queue *queue; if (!rpc_task_set_rpc_status(task, error)) return; queue = READ_ONCE(task->tk_waitqueue); if (queue) rpc_wake_up_queued_task(queue, task); } void rpc_exit(struct rpc_task *task, int status) { task->tk_status = status; task->tk_action = rpc_exit_task; rpc_wake_up_queued_task(task->tk_waitqueue, task); } EXPORT_SYMBOL_GPL(rpc_exit); void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) { if (ops->rpc_release != NULL) ops->rpc_release(calldata); } static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk) { if (!xprt) return false; if (!atomic_read(&xprt->swapper)) return false; 
return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk; } /* * This is the RPC `scheduler' (or rather, the finite state machine). */ static void __rpc_execute(struct rpc_task *task) { struct rpc_wait_queue *queue; int task_is_async = RPC_IS_ASYNC(task); int status = 0; unsigned long pflags = current->flags; WARN_ON_ONCE(RPC_IS_QUEUED(task)); if (RPC_IS_QUEUED(task)) return; for (;;) { void (*do_action)(struct rpc_task *); /* * Perform the next FSM step or a pending callback. * * tk_action may be NULL if the task has been killed. */ do_action = task->tk_action; /* Tasks with an RPC error status should exit */ if (do_action && do_action != rpc_exit_task && (status = READ_ONCE(task->tk_rpc_status)) != 0) { task->tk_status = status; do_action = rpc_exit_task; } /* Callbacks override all actions */ if (task->tk_callback) { do_action = task->tk_callback; task->tk_callback = NULL; } if (!do_action) break; if (RPC_IS_SWAPPER(task) || xprt_needs_memalloc(task->tk_xprt, task)) current->flags |= PF_MEMALLOC; trace_rpc_task_run_action(task, do_action); do_action(task); /* * Lockless check for whether task is sleeping or not. */ if (!RPC_IS_QUEUED(task)) { cond_resched(); continue; } /* * The queue->lock protects against races with * rpc_make_runnable(). * * Note that once we clear RPC_TASK_RUNNING on an asynchronous * rpc_task, rpc_make_runnable() can assign it to a * different workqueue. We therefore cannot assume that the * rpc_task pointer may still be dereferenced. */ queue = task->tk_waitqueue; spin_lock(&queue->lock); if (!RPC_IS_QUEUED(task)) { spin_unlock(&queue->lock); continue; } /* Wake up any task that has an exit status */ if (READ_ONCE(task->tk_rpc_status) != 0) { rpc_wake_up_task_queue_locked(queue, task); spin_unlock(&queue->lock); continue; } rpc_clear_running(task); spin_unlock(&queue->lock); if (task_is_async) goto out; /* sync task: sleep here */ trace_rpc_task_sync_sleep(task, task->tk_action); status = out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_QUEUED, rpc_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE); if (status < 0) { /* * When a sync task receives a signal, it exits with * -ERESTARTSYS. In order to catch any callbacks that * clean up after sleeping on some queue, we don't * break the loop here, but go around once more. */ rpc_signal_task(task); } trace_rpc_task_sync_wake(task, task->tk_action); } /* Release all resources associated with the task */ rpc_release_task(task); out: current_restore_flags(pflags, PF_MEMALLOC); } /* * User-visible entry point to the scheduler. * * This may be called recursively if e.g. an async NFS task updates * the attributes and finds that dirty pages must be flushed. * NOTE: Upon exit of this function the task is guaranteed to be * released. In particular note that tk_release() will have * been called, so your task memory may have been freed. */ void rpc_execute(struct rpc_task *task) { bool is_async = RPC_IS_ASYNC(task); rpc_set_active(task); rpc_make_runnable(rpciod_workqueue, task); if (!is_async) { unsigned int pflags = memalloc_nofs_save(); __rpc_execute(task); memalloc_nofs_restore(pflags); } } static void rpc_async_schedule(struct work_struct *work) { unsigned int pflags = memalloc_nofs_save(); __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); memalloc_nofs_restore(pflags); } /** * rpc_malloc - allocate RPC buffer resources * @task: RPC task * * A single memory region is allocated, which is split between the * RPC call and RPC reply that this task is being used for. 
When * this RPC is retired, the memory is released by calling rpc_free. * * To prevent rpciod from hanging, this allocator never sleeps, * returning -ENOMEM and suppressing warning if the request cannot * be serviced immediately. The caller can arrange to sleep in a * way that is safe for rpciod. * * Most requests are 'small' (under 2KiB) and can be serviced from a * mempool, ensuring that NFS reads and writes can always proceed, * and that there is good locality of reference for these buffers. */ int rpc_malloc(struct rpc_task *task) { struct rpc_rqst *rqst = task->tk_rqstp; size_t size = rqst->rq_callsize + rqst->rq_rcvsize; struct rpc_buffer *buf; gfp_t gfp = rpc_task_gfp_mask(); size += sizeof(struct rpc_buffer); if (size <= RPC_BUFFER_MAXSIZE) { buf = kmem_cache_alloc(rpc_buffer_slabp, gfp); /* Reach for the mempool if dynamic allocation fails */ if (!buf && RPC_IS_ASYNC(task)) buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT); } else buf = kmalloc(size, gfp); if (!buf) return -ENOMEM; buf->len = size; rqst->rq_buffer = buf->data; rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; return 0; } EXPORT_SYMBOL_GPL(rpc_malloc); /** * rpc_free - free RPC buffer resources allocated via rpc_malloc * @task: RPC task * */ void rpc_free(struct rpc_task *task) { void *buffer = task->tk_rqstp->rq_buffer; size_t size; struct rpc_buffer *buf; buf = container_of(buffer, struct rpc_buffer, data); size = buf->len; if (size <= RPC_BUFFER_MAXSIZE) mempool_free(buf, rpc_buffer_mempool); else kfree(buf); } EXPORT_SYMBOL_GPL(rpc_free); /* * Creation and deletion of RPC task structures */ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) { memset(task, 0, sizeof(*task)); atomic_set(&task->tk_count, 1); task->tk_flags = task_setup_data->flags; task->tk_ops = task_setup_data->callback_ops; task->tk_calldata = task_setup_data->callback_data; INIT_LIST_HEAD(&task->tk_task); task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; task->tk_owner = current->tgid; /* Initialize workqueue for async tasks */ task->tk_workqueue = task_setup_data->workqueue; task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client, xprt_get(task_setup_data->rpc_xprt)); task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred); if (task->tk_ops->rpc_call_prepare != NULL) task->tk_action = rpc_prepare_task; rpc_init_task_statistics(task); } static struct rpc_task *rpc_alloc_task(void) { struct rpc_task *task; task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask()); if (task) return task; return mempool_alloc(rpc_task_mempool, GFP_NOWAIT); } /* * Create a new task for the specified client. */ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) { struct rpc_task *task = setup_data->task; unsigned short flags = 0; if (task == NULL) { task = rpc_alloc_task(); if (task == NULL) { rpc_release_calldata(setup_data->callback_ops, setup_data->callback_data); return ERR_PTR(-ENOMEM); } flags = RPC_TASK_DYNAMIC; } rpc_init_task(task, setup_data); task->tk_flags |= flags; return task; } /* * rpc_free_task - release rpc task and perform cleanups * * Note that we free up the rpc_task _after_ rpc_release_calldata() * in order to work around a workqueue dependency issue. * * Tejun Heo states: * "Workqueue currently considers two work items to be the same if they're * on the same address and won't execute them concurrently - ie. it * makes a work item which is queued again while being executed wait * for the previous execution to complete. 
* * If a work function frees the work item, and then waits for an event * which should be performed by another work item and *that* work item * recycles the freed work item, it can create a false dependency loop. * There really is no reliable way to detect this short of verifying * every memory free." * */ static void rpc_free_task(struct rpc_task *task) { unsigned short tk_flags = task->tk_flags; put_rpccred(task->tk_op_cred); rpc_release_calldata(task->tk_ops, task->tk_calldata); if (tk_flags & RPC_TASK_DYNAMIC) mempool_free(task, rpc_task_mempool); } static void rpc_async_release(struct work_struct *work) { unsigned int pflags = memalloc_nofs_save(); rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); memalloc_nofs_restore(pflags); } static void rpc_release_resources_task(struct rpc_task *task) { xprt_release(task); if (task->tk_msg.rpc_cred) { if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) put_cred(task->tk_msg.rpc_cred); task->tk_msg.rpc_cred = NULL; } rpc_task_release_client(task); } static void rpc_final_put_task(struct rpc_task *task, struct workqueue_struct *q) { if (q != NULL) { INIT_WORK(&task->u.tk_work, rpc_async_release); queue_work(q, &task->u.tk_work); } else rpc_free_task(task); } static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) { if (atomic_dec_and_test(&task->tk_count)) { rpc_release_resources_task(task); rpc_final_put_task(task, q); } } void rpc_put_task(struct rpc_task *task) { rpc_do_put_task(task, NULL); } EXPORT_SYMBOL_GPL(rpc_put_task); void rpc_put_task_async(struct rpc_task *task) { rpc_do_put_task(task, task->tk_workqueue); } EXPORT_SYMBOL_GPL(rpc_put_task_async); static void rpc_release_task(struct rpc_task *task) { WARN_ON_ONCE(RPC_IS_QUEUED(task)); rpc_release_resources_task(task); /* * Note: at this point we have been removed from rpc_clnt->cl_tasks, * so it should be safe to use task->tk_count as a test for whether * or not any other processes still hold references to our rpc_task. */ if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { /* Wake up anyone who may be waiting for task completion */ if (!rpc_complete_task(task)) return; } else { if (!atomic_dec_and_test(&task->tk_count)) return; } rpc_final_put_task(task, task->tk_workqueue); } int rpciod_up(void) { return try_module_get(THIS_MODULE) ? 0 : -EINVAL; } void rpciod_down(void) { module_put(THIS_MODULE); } /* * Start up the rpciod workqueue. */ static int rpciod_start(void) { struct workqueue_struct *wq; /* * Create the rpciod thread and wait for it to start. 
*/ wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!wq) goto out_failed; rpciod_workqueue = wq; wq = alloc_workqueue("xprtiod", WQ_UNBOUND | WQ_MEM_RECLAIM, 0); if (!wq) goto free_rpciod; xprtiod_workqueue = wq; return 1; free_rpciod: wq = rpciod_workqueue; rpciod_workqueue = NULL; destroy_workqueue(wq); out_failed: return 0; } static void rpciod_stop(void) { struct workqueue_struct *wq = NULL; if (rpciod_workqueue == NULL) return; wq = rpciod_workqueue; rpciod_workqueue = NULL; destroy_workqueue(wq); wq = xprtiod_workqueue; xprtiod_workqueue = NULL; destroy_workqueue(wq); } void rpc_destroy_mempool(void) { rpciod_stop(); mempool_destroy(rpc_buffer_mempool); mempool_destroy(rpc_task_mempool); kmem_cache_destroy(rpc_task_slabp); kmem_cache_destroy(rpc_buffer_slabp); rpc_destroy_wait_queue(&delay_queue); } int rpc_init_mempool(void) { /* * The following is not strictly a mempool initialisation, * but there is no harm in doing it here */ rpc_init_wait_queue(&delay_queue, "delayq"); if (!rpciod_start()) goto err_nomem; rpc_task_slabp = kmem_cache_create("rpc_tasks", sizeof(struct rpc_task), 0, SLAB_HWCACHE_ALIGN, NULL); if (!rpc_task_slabp) goto err_nomem; rpc_buffer_slabp = kmem_cache_create("rpc_buffers", RPC_BUFFER_MAXSIZE, 0, SLAB_HWCACHE_ALIGN, NULL); if (!rpc_buffer_slabp) goto err_nomem; rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, rpc_task_slabp); if (!rpc_task_mempool) goto err_nomem; rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, rpc_buffer_slabp); if (!rpc_buffer_mempool) goto err_nomem; return 0; err_nomem: rpc_destroy_mempool(); return -ENOMEM; }
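/*
 * Illustrative sketch (not part of the sched.c sources above): a userspace
 * analogue of the rpc_malloc()/rpc_free() buffer layout, in which a single
 * region holds a length header followed by the call area and the reply area,
 * and the reply pointer is derived by offsetting past the call size.  The
 * names demo_buffer/demo_alloc/demo_free are hypothetical and exist only for
 * this sketch; the kernel code additionally picks between a slab cache, a
 * mempool and kmalloc(), which is omitted here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>

struct demo_buffer {
	size_t len;	/* total size, header included */
	char data[];	/* call area followed by reply area */
};

/* Allocate one region covering both the call and the reply buffers. */
static void *demo_alloc(size_t callsize, size_t rcvsize, void **reply)
{
	size_t size = sizeof(struct demo_buffer) + callsize + rcvsize;
	struct demo_buffer *buf = malloc(size);

	if (!buf)
		return NULL;
	buf->len = size;
	*reply = buf->data + callsize;	/* reply area starts after the call area */
	return buf->data;
}

/* Recover the header from the data pointer, mirroring container_of(). */
static void demo_free(void *data)
{
	struct demo_buffer *buf = (struct demo_buffer *)
		((char *)data - offsetof(struct demo_buffer, data));

	free(buf);
}

int main(void)
{
	void *reply;
	void *call = demo_alloc(128, 256, &reply);

	if (!call)
		return 1;
	memset(call, 0, 128);
	memset(reply, 0, 256);
	printf("call=%p reply=%p\n", call, reply);
	demo_free(call);
	return 0;
}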
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/dccp/minisocks.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> */ #include <linux/dccp.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/timer.h> #include <net/sock.h> #include <net/xfrm.h> #include <net/inet_timewait_sock.h> #include <net/rstreason.h> #include "ackvec.h" #include "ccid.h" #include "dccp.h" #include "feat.h" struct inet_timewait_death_row dccp_death_row = { .tw_refcount = REFCOUNT_INIT(1), .sysctl_max_tw_buckets = NR_FILE * 2, .hashinfo = &dccp_hashinfo, }; EXPORT_SYMBOL_GPL(dccp_death_row); void dccp_time_wait(struct sock *sk, int state, int timeo) { struct inet_timewait_sock *tw; tw = inet_twsk_alloc(sk, &dccp_death_row, state); if (tw != NULL) { const struct inet_connection_sock *icsk = inet_csk(sk); const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); #if IS_ENABLED(CONFIG_IPV6) if (tw->tw_family == PF_INET6) { tw->tw_v6_daddr = sk->sk_v6_daddr; tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr; tw->tw_ipv6only = sk->sk_ipv6only; } #endif /* Get the TIME_WAIT timeout firing. */ if (timeo < rto) timeo = rto; if (state == DCCP_TIME_WAIT) timeo = DCCP_TIMEWAIT_LEN; /* Linkage updates. * Note that access to tw after this point is illegal. */ inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo); } else { /* Sorry, if we're out of memory, just CLOSE this * socket up. We've got bigger problems than * non-graceful socket closings.
*/ DCCP_WARN("time wait bucket table overflow\n"); } dccp_done(sk); } struct sock *dccp_create_openreq_child(const struct sock *sk, const struct request_sock *req, const struct sk_buff *skb) { /* * Step 3: Process LISTEN state * * (* Generate a new socket and switch to that socket *) * Set S := new socket for this port pair */ struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); if (newsk != NULL) { struct dccp_request_sock *dreq = dccp_rsk(req); struct inet_connection_sock *newicsk = inet_csk(newsk); struct dccp_sock *newdp = dccp_sk(newsk); newdp->dccps_role = DCCP_ROLE_SERVER; newdp->dccps_hc_rx_ackvec = NULL; newdp->dccps_service_list = NULL; newdp->dccps_hc_rx_ccid = NULL; newdp->dccps_hc_tx_ccid = NULL; newdp->dccps_service = dreq->dreq_service; newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo; newdp->dccps_timestamp_time = dreq->dreq_timestamp_time; newicsk->icsk_rto = DCCP_TIMEOUT_INIT; INIT_LIST_HEAD(&newdp->dccps_featneg); /* * Step 3: Process LISTEN state * * Choose S.ISS (initial seqno) or set from Init Cookies * Initialize S.GAR := S.ISS * Set S.ISR, S.GSR from packet (or Init Cookies) * * Setting AWL/AWH and SWL/SWH happens as part of the feature * activation below, as these windows all depend on the local * and remote Sequence Window feature values (7.5.2). */ newdp->dccps_iss = dreq->dreq_iss; newdp->dccps_gss = dreq->dreq_gss; newdp->dccps_gar = newdp->dccps_iss; newdp->dccps_isr = dreq->dreq_isr; newdp->dccps_gsr = dreq->dreq_gsr; /* * Activate features: initialise CCIDs, sequence windows etc. */ if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) { sk_free_unlock_clone(newsk); return NULL; } dccp_init_xmit_timers(newsk); __DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS); } return newsk; } EXPORT_SYMBOL_GPL(dccp_create_openreq_child); /* * Process an incoming packet for RESPOND sockets represented * as an request_sock. */ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { struct sock *child = NULL; struct dccp_request_sock *dreq = dccp_rsk(req); bool own_req; /* TCP/DCCP listeners became lockless. * DCCP stores complex state in its request_sock, so we need * a protection for them, now this code runs without being protected * by the parent (listener) lock. */ spin_lock_bh(&dreq->dreq_lock); /* Check for retransmitted REQUEST */ if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) { dccp_pr_debug("Retransmitted REQUEST\n"); dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq; /* * Send another RESPONSE packet * To protect against Request floods, increment retrans * counter (backoff, monitored by dccp_response_timer). 
*/ inet_rtx_syn_ack(sk, req); } /* Network Duplicate, discard packet */ goto out; } DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK && dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK) goto drop; /* Invalid ACK */ if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, dreq->dreq_iss, dreq->dreq_gss)) { dccp_pr_debug("Invalid ACK number: ack_seq=%llu, " "dreq_iss=%llu, dreq_gss=%llu\n", (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq, (unsigned long long) dreq->dreq_iss, (unsigned long long) dreq->dreq_gss); goto drop; } if (dccp_parse_options(sk, dreq, skb)) goto drop; child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, req, &own_req); if (child) { child = inet_csk_complete_hashdance(sk, child, req, own_req); goto out; } DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; drop: if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED); inet_csk_reqsk_queue_drop(sk, req); out: spin_unlock_bh(&dreq->dreq_lock); return child; } EXPORT_SYMBOL_GPL(dccp_check_req); /* * Queue segment on the new socket if the new socket is active, * otherwise we just shortcircuit this and continue with * the new socket. */ int dccp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb) __releases(child) { int ret = 0; const int state = child->sk_state; if (!sock_owned_by_user(child)) { ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb), skb->len); /* Wakeup parent, send SIGIO */ if (state == DCCP_RESPOND && child->sk_state != state) parent->sk_data_ready(parent); } else { /* Alas, it is possible again, because we do lookup * in main socket hash table and lock on listening * socket does not protect us more. */ __sk_add_backlog(child, skb); } bh_unlock_sock(child); sock_put(child); return ret; } EXPORT_SYMBOL_GPL(dccp_child_process); void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *rsk) { DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state"); } EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack); int dccp_reqsk_init(struct request_sock *req, struct dccp_sock const *dp, struct sk_buff const *skb) { struct dccp_request_sock *dreq = dccp_rsk(req); spin_lock_init(&dreq->dreq_lock); inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); inet_rsk(req)->acked = 0; dreq->dreq_timestamp_echo = 0; /* inherit feature negotiation options from listening socket */ return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg); } EXPORT_SYMBOL_GPL(dccp_reqsk_init);
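/*
 * Illustrative sketch (not part of the DCCP sources above): the timewait
 * timeout selection in dccp_time_wait() reduces to "use at least 3.5 * RTO,
 * computed as (rto << 2) - (rto >> 1), and a fixed length for the
 * DCCP_TIME_WAIT state proper".  demo_timewait_timeout() and the constant
 * below are hypothetical stand-ins (the kernel expresses these in jiffies).
 */
#include <stdio.h>

#define DEMO_TIMEWAIT_LEN	(60 * 1000)	/* fixed TIME_WAIT length, here in ms */

static long demo_timewait_timeout(long timeo, long rto_ms, int is_time_wait)
{
	long rto = (rto_ms << 2) - (rto_ms >> 1);	/* 4*RTO - RTO/2 == 3.5*RTO */

	if (timeo < rto)
		timeo = rto;		/* never shorter than 3.5 * RTO */
	if (is_time_wait)
		timeo = DEMO_TIMEWAIT_LEN;	/* TIME_WAIT uses the fixed length */
	return timeo;
}

int main(void)
{
	printf("%ld\n", demo_timewait_timeout(100, 200, 0));	/* -> 700 (3.5 * 200) */
	printf("%ld\n", demo_timewait_timeout(100, 200, 1));	/* -> 60000 */
	return 0;
}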
// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * * This is an implementation of the BLAKE2s hash and PRF functions. * * Information: https://blake2.net/ * */ #include <crypto/internal/blake2s.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/bug.h> static inline void blake2s_set_lastblock(struct blake2s_state *state) { state->f[0] = -1; } void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) { const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; if (unlikely(!inlen)) return; if (inlen > fill) { memcpy(state->buf + state->buflen, in, fill); blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); state->buflen = 0; in += fill; inlen -= fill; } if (inlen > BLAKE2S_BLOCK_SIZE) { const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); } memcpy(state->buf + state->buflen, in, inlen); state->buflen += inlen; } EXPORT_SYMBOL(blake2s_update); void blake2s_final(struct blake2s_state *state, u8 *out) { WARN_ON(IS_ENABLED(DEBUG) && !out); blake2s_set_lastblock(state); memset(state->buf + state->buflen, 0, BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ blake2s_compress(state, state->buf, 1, state->buflen); cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); memcpy(out, state->h, state->outlen); memzero_explicit(state, sizeof(*state)); } EXPORT_SYMBOL(blake2s_final); static int __init blake2s_mod_init(void) { if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && WARN_ON(!blake2s_selftest())) return -ENODEV; return 0; } module_init(blake2s_mod_init); MODULE_DESCRIPTION("BLAKE2s hash function"); MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
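/*
 * Minimal in-kernel usage sketch (not part of lib/crypto/blake2s.c above):
 * hashing a buffer with the incremental API exported by this file.  It
 * assumes the declarations in <crypto/blake2s.h> (blake2s_init(),
 * blake2s_update(), blake2s_final(), BLAKE2S_HASH_SIZE); the demo function
 * name is made up for this sketch.
 */
#include <crypto/blake2s.h>

static void demo_blake2s_digest(const u8 *msg, size_t len,
				u8 out[BLAKE2S_HASH_SIZE])
{
	struct blake2s_state state;

	blake2s_init(&state, BLAKE2S_HASH_SIZE);	/* unkeyed, 32-byte digest */
	blake2s_update(&state, msg, len);		/* may be called repeatedly */
	blake2s_final(&state, out);			/* writes digest, wipes state */
}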
// SPDX-License-Identifier: GPL-2.0-only /* * GENEVE: Generic Network Virtualization Encapsulation * * Copyright (c) 2015 Red Hat, Inc.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/hash.h> #include <net/ipv6_stubs.h> #include <net/dst_metadata.h> #include <net/gro_cells.h> #include <net/rtnetlink.h> #include <net/geneve.h> #include <net/gro.h> #include <net/netdev_lock.h> #include <net/protocol.h> #define GENEVE_NETDEV_VER "0.6" #define GENEVE_N_VID (1u << 24) #define GENEVE_VID_MASK (GENEVE_N_VID - 1) #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); #define GENEVE_VER 0 #define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr)) #define GENEVE_IPV4_HLEN (ETH_HLEN + sizeof(struct iphdr) + GENEVE_BASE_HLEN) #define GENEVE_IPV6_HLEN (ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN) /* per-network namespace private data for this module */ struct geneve_net { struct list_head geneve_list; struct list_head sock_list; }; static unsigned int geneve_net_id; struct geneve_dev_node { struct hlist_node hlist; struct geneve_dev *geneve; }; struct geneve_config { struct ip_tunnel_info info; bool collect_md; bool use_udp6_rx_checksums; bool ttl_inherit; enum ifla_geneve_df df; bool inner_proto_inherit; u16 port_min; u16 port_max; }; /* Pseudo network device */ struct geneve_dev { struct geneve_dev_node hlist4; /* vni hash table for IPv4 socket */ #if IS_ENABLED(CONFIG_IPV6) struct geneve_dev_node hlist6; /* vni hash table for IPv6 socket */ #endif struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for geneve tunnel */ struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */ #if IS_ENABLED(CONFIG_IPV6) struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */ #endif struct list_head next; /* geneve's per namespace list */ struct gro_cells gro_cells; struct geneve_config cfg; }; struct geneve_sock { bool collect_md; struct list_head list; struct socket *sock; struct rcu_head rcu; int refcnt; struct hlist_head vni_list[VNI_HASH_SIZE]; }; static inline __u32 geneve_net_vni_hash(u8 vni[3]) { __u32 vnid; vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2]; return hash_32(vnid, VNI_HASH_BITS); } static __be64 vni_to_tunnel_id(const __u8 *vni) { #ifdef __BIG_ENDIAN return (vni[0] << 16) | (vni[1] << 8) | vni[2]; #else return (__force __be64)(((__force u64)vni[0] << 40) | ((__force u64)vni[1] << 48) | ((__force u64)vni[2] << 56)); #endif } /* Convert 64 bit tunnel ID to 24 bit VNI. 
*/ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) { #ifdef __BIG_ENDIAN vni[0] = (__force __u8)(tun_id >> 16); vni[1] = (__force __u8)(tun_id >> 8); vni[2] = (__force __u8)tun_id; #else vni[0] = (__force __u8)((__force u64)tun_id >> 40); vni[1] = (__force __u8)((__force u64)tun_id >> 48); vni[2] = (__force __u8)((__force u64)tun_id >> 56); #endif } static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) { return !memcmp(vni, &tun_id[5], 3); } static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) { return gs->sock->sk->sk_family; } static struct geneve_dev *geneve_lookup(struct geneve_sock *gs, __be32 addr, u8 vni[]) { struct hlist_head *vni_list_head; struct geneve_dev_node *node; __u32 hash; /* Find the device for this VNI */ hash = geneve_net_vni_hash(vni); vni_list_head = &gs->vni_list[hash]; hlist_for_each_entry_rcu(node, vni_list_head, hlist) { if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) && addr == node->geneve->cfg.info.key.u.ipv4.dst) return node->geneve; } return NULL; } #if IS_ENABLED(CONFIG_IPV6) static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs, struct in6_addr addr6, u8 vni[]) { struct hlist_head *vni_list_head; struct geneve_dev_node *node; __u32 hash; /* Find the device for this VNI */ hash = geneve_net_vni_hash(vni); vni_list_head = &gs->vni_list[hash]; hlist_for_each_entry_rcu(node, vni_list_head, hlist) { if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) && ipv6_addr_equal(&addr6, &node->geneve->cfg.info.key.u.ipv6.dst)) return node->geneve; } return NULL; } #endif static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb) { return (struct genevehdr *)(udp_hdr(skb) + 1); } static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs, struct sk_buff *skb) { static u8 zero_vni[3]; u8 *vni; if (geneve_get_sk_family(gs) == AF_INET) { struct iphdr *iph; __be32 addr; iph = ip_hdr(skb); /* outer IP header... */ if (gs->collect_md) { vni = zero_vni; addr = 0; } else { vni = geneve_hdr(skb)->vni; addr = iph->saddr; } return geneve_lookup(gs, addr, vni); #if IS_ENABLED(CONFIG_IPV6) } else if (geneve_get_sk_family(gs) == AF_INET6) { static struct in6_addr zero_addr6; struct ipv6hdr *ip6h; struct in6_addr addr6; ip6h = ipv6_hdr(skb); /* outer IPv6 header... */ if (gs->collect_md) { vni = zero_vni; addr6 = zero_addr6; } else { vni = geneve_hdr(skb)->vni; addr6 = ip6h->saddr; } return geneve6_lookup(gs, addr6, vni); #endif } return NULL; } /* geneve receive/decap routine */ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, struct sk_buff *skb) { struct genevehdr *gnvh = geneve_hdr(skb); struct metadata_dst *tun_dst = NULL; unsigned int len; int nh, err = 0; void *oiph; if (ip_tunnel_collect_metadata() || gs->collect_md) { IP_TUNNEL_DECLARE_FLAGS(flags) = { }; __set_bit(IP_TUNNEL_KEY_BIT, flags); __assign_bit(IP_TUNNEL_OAM_BIT, flags, gnvh->oam); __assign_bit(IP_TUNNEL_CRIT_OPT_BIT, flags, gnvh->critical); tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, vni_to_tunnel_id(gnvh->vni), gnvh->opt_len * 4); if (!tun_dst) { dev_dstats_rx_dropped(geneve->dev); goto drop; } /* Update tunnel dst according to Geneve options. */ ip_tunnel_flags_zero(flags); __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags); ip_tunnel_info_opts_set(&tun_dst->u.tun_info, gnvh->options, gnvh->opt_len * 4, flags); } else { /* Drop packets w/ critical options, * since we don't support any... 
*/ if (gnvh->critical) { DEV_STATS_INC(geneve->dev, rx_frame_errors); DEV_STATS_INC(geneve->dev, rx_errors); goto drop; } } if (tun_dst) skb_dst_set(skb, &tun_dst->dst); if (gnvh->proto_type == htons(ETH_P_TEB)) { skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, geneve->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); /* Ignore packet loops (and multicast echo) */ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) { DEV_STATS_INC(geneve->dev, rx_errors); goto drop; } } else { skb_reset_mac_header(skb); skb->dev = geneve->dev; skb->pkt_type = PACKET_HOST; } /* Save offset of outer header relative to skb->head, * because we are going to reset the network header to the inner header * and might change skb->head. */ nh = skb_network_header(skb) - skb->head; skb_reset_network_header(skb); if (!pskb_inet_may_pull(skb)) { DEV_STATS_INC(geneve->dev, rx_length_errors); DEV_STATS_INC(geneve->dev, rx_errors); goto drop; } /* Get the outer header. */ oiph = skb->head + nh; if (geneve_get_sk_family(gs) == AF_INET) err = IP_ECN_decapsulate(oiph, skb); #if IS_ENABLED(CONFIG_IPV6) else err = IP6_ECN_decapsulate(oiph, skb); #endif if (unlikely(err)) { if (log_ecn_error) { if (geneve_get_sk_family(gs) == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", &((struct iphdr *)oiph)->saddr, ((struct iphdr *)oiph)->tos); #if IS_ENABLED(CONFIG_IPV6) else net_info_ratelimited("non-ECT from %pI6\n", &((struct ipv6hdr *)oiph)->saddr); #endif } if (err > 1) { DEV_STATS_INC(geneve->dev, rx_frame_errors); DEV_STATS_INC(geneve->dev, rx_errors); goto drop; } } len = skb->len; err = gro_cells_receive(&geneve->gro_cells, skb); if (likely(err == NET_RX_SUCCESS)) dev_dstats_rx_add(geneve->dev, len); return; drop: /* Consume bad packet */ kfree_skb(skb); } /* Setup stats when device is created */ static int geneve_init(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); int err; err = gro_cells_init(&geneve->gro_cells, dev); if (err) return err; err = dst_cache_init(&geneve->cfg.info.dst_cache, GFP_KERNEL); if (err) { gro_cells_destroy(&geneve->gro_cells); return err; } netdev_lockdep_set_classes(dev); return 0; } static void geneve_uninit(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); dst_cache_destroy(&geneve->cfg.info.dst_cache); gro_cells_destroy(&geneve->gro_cells); } /* Callback from net/ipv4/udp.c to receive packets */ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct genevehdr *geneveh; struct geneve_dev *geneve; struct geneve_sock *gs; __be16 inner_proto; int opts_len; /* Need UDP and Geneve header to be present */ if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN))) goto drop; /* Return packets with reserved bits set */ geneveh = geneve_hdr(skb); if (unlikely(geneveh->ver != GENEVE_VER)) goto drop; gs = rcu_dereference_sk_user_data(sk); if (!gs) goto drop; geneve = geneve_lookup_skb(gs, skb); if (!geneve) goto drop; inner_proto = geneveh->proto_type; if (unlikely((!geneve->cfg.inner_proto_inherit && inner_proto != htons(ETH_P_TEB)))) { dev_dstats_rx_dropped(geneve->dev); goto drop; } opts_len = geneveh->opt_len * 4; if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto, !net_eq(geneve->net, dev_net(geneve->dev)))) { dev_dstats_rx_dropped(geneve->dev); goto drop; } geneve_rx(geneve, gs, skb); return 0; drop: /* Consume bad packet */ kfree_skb(skb); return 0; } /* Callback from net/ipv{4,6}/udp.c to check that we have a tunnel for errors */ static int 
geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb) { struct genevehdr *geneveh; struct geneve_sock *gs; u8 zero_vni[3] = { 0 }; u8 *vni = zero_vni; if (!pskb_may_pull(skb, skb_transport_offset(skb) + GENEVE_BASE_HLEN)) return -EINVAL; geneveh = geneve_hdr(skb); if (geneveh->ver != GENEVE_VER) return -EINVAL; if (geneveh->proto_type != htons(ETH_P_TEB)) return -EINVAL; gs = rcu_dereference_sk_user_data(sk); if (!gs) return -ENOENT; if (geneve_get_sk_family(gs) == AF_INET) { struct iphdr *iph = ip_hdr(skb); __be32 addr4 = 0; if (!gs->collect_md) { vni = geneve_hdr(skb)->vni; addr4 = iph->daddr; } return geneve_lookup(gs, addr4, vni) ? 0 : -ENOENT; } #if IS_ENABLED(CONFIG_IPV6) if (geneve_get_sk_family(gs) == AF_INET6) { struct ipv6hdr *ip6h = ipv6_hdr(skb); struct in6_addr addr6; memset(&addr6, 0, sizeof(struct in6_addr)); if (!gs->collect_md) { vni = geneve_hdr(skb)->vni; addr6 = ip6h->daddr; } return geneve6_lookup(gs, addr6, vni) ? 0 : -ENOENT; } #endif return -EPFNOSUPPORT; } static struct socket *geneve_create_sock(struct net *net, bool ipv6, __be16 port, bool ipv6_rx_csum) { struct socket *sock; struct udp_port_cfg udp_conf; int err; memset(&udp_conf, 0, sizeof(udp_conf)); if (ipv6) { udp_conf.family = AF_INET6; udp_conf.ipv6_v6only = 1; udp_conf.use_udp6_rx_checksums = ipv6_rx_csum; } else { udp_conf.family = AF_INET; udp_conf.local_ip.s_addr = htonl(INADDR_ANY); } udp_conf.local_udp_port = port; /* Open UDP socket */ err = udp_sock_create(net, &udp_conf, &sock); if (err < 0) return ERR_PTR(err); udp_allow_gso(sock->sk); return sock; } static int geneve_hlen(struct genevehdr *gh) { return sizeof(*gh) + gh->opt_len * 4; } static struct sk_buff *geneve_gro_receive(struct sock *sk, struct list_head *head, struct sk_buff *skb) { struct sk_buff *pp = NULL; struct sk_buff *p; struct genevehdr *gh, *gh2; unsigned int hlen, gh_len, off_gnv; const struct packet_offload *ptype; __be16 type; int flush = 1; off_gnv = skb_gro_offset(skb); hlen = off_gnv + sizeof(*gh); gh = skb_gro_header(skb, hlen, off_gnv); if (unlikely(!gh)) goto out; if (gh->ver != GENEVE_VER || gh->oam) goto out; gh_len = geneve_hlen(gh); hlen = off_gnv + gh_len; if (!skb_gro_may_pull(skb, hlen)) { gh = skb_gro_header_slow(skb, hlen, off_gnv); if (unlikely(!gh)) goto out; } list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; gh2 = (struct genevehdr *)(p->data + off_gnv); if (gh->opt_len != gh2->opt_len || memcmp(gh, gh2, gh_len)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } skb_gro_pull(skb, gh_len); skb_gro_postpull_rcsum(skb, gh, gh_len); type = gh->proto_type; if (likely(type == htons(ETH_P_TEB))) return call_gro_receive(eth_gro_receive, head, skb); ptype = gro_find_receive_by_type(type); if (!ptype) goto out; pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0; out: skb_gro_flush_final(skb, pp, flush); return pp; } static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) { struct genevehdr *gh; struct packet_offload *ptype; __be16 type; int gh_len; int err = -ENOSYS; gh = (struct genevehdr *)(skb->data + nhoff); gh_len = geneve_hlen(gh); type = gh->proto_type; /* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */ if (likely(type == htons(ETH_P_TEB))) return eth_gro_complete(skb, nhoff + gh_len); ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); skb_set_inner_mac_header(skb, nhoff + gh_len); return err; } /* Create new listen socket if needed */ 
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, bool ipv6, bool ipv6_rx_csum) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; struct socket *sock; struct udp_tunnel_sock_cfg tunnel_cfg; int h; gs = kzalloc(sizeof(*gs), GFP_KERNEL); if (!gs) return ERR_PTR(-ENOMEM); sock = geneve_create_sock(net, ipv6, port, ipv6_rx_csum); if (IS_ERR(sock)) { kfree(gs); return ERR_CAST(sock); } gs->sock = sock; gs->refcnt = 1; for (h = 0; h < VNI_HASH_SIZE; ++h) INIT_HLIST_HEAD(&gs->vni_list[h]); /* Initialize the geneve udp offloads structure */ udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); /* Mark socket as an encapsulation socket */ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); tunnel_cfg.sk_user_data = gs; tunnel_cfg.encap_type = 1; tunnel_cfg.gro_receive = geneve_gro_receive; tunnel_cfg.gro_complete = geneve_gro_complete; tunnel_cfg.encap_rcv = geneve_udp_encap_recv; tunnel_cfg.encap_err_lookup = geneve_udp_encap_err_lookup; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, sock, &tunnel_cfg); list_add(&gs->list, &gn->sock_list); return gs; } static void __geneve_sock_release(struct geneve_sock *gs) { if (!gs || --gs->refcnt) return; list_del(&gs->list); udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); udp_tunnel_sock_release(gs->sock); kfree_rcu(gs, rcu); } static void geneve_sock_release(struct geneve_dev *geneve) { struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4); #if IS_ENABLED(CONFIG_IPV6) struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6); rcu_assign_pointer(geneve->sock6, NULL); #endif rcu_assign_pointer(geneve->sock4, NULL); synchronize_net(); __geneve_sock_release(gs4); #if IS_ENABLED(CONFIG_IPV6) __geneve_sock_release(gs6); #endif } static struct geneve_sock *geneve_find_sock(struct geneve_net *gn, sa_family_t family, __be16 dst_port) { struct geneve_sock *gs; list_for_each_entry(gs, &gn->sock_list, list) { if (inet_sk(gs->sock->sk)->inet_sport == dst_port && geneve_get_sk_family(gs) == family) { return gs; } } return NULL; } static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) { struct net *net = geneve->net; struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev_node *node; struct geneve_sock *gs; __u8 vni[3]; __u32 hash; gs = geneve_find_sock(gn, ipv6 ? 
AF_INET6 : AF_INET, geneve->cfg.info.key.tp_dst); if (gs) { gs->refcnt++; goto out; } gs = geneve_socket_create(net, geneve->cfg.info.key.tp_dst, ipv6, geneve->cfg.use_udp6_rx_checksums); if (IS_ERR(gs)) return PTR_ERR(gs); out: gs->collect_md = geneve->cfg.collect_md; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) { rcu_assign_pointer(geneve->sock6, gs); node = &geneve->hlist6; } else #endif { rcu_assign_pointer(geneve->sock4, gs); node = &geneve->hlist4; } node->geneve = geneve; tunnel_id_to_vni(geneve->cfg.info.key.tun_id, vni); hash = geneve_net_vni_hash(vni); hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]); return 0; } static int geneve_open(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); bool metadata = geneve->cfg.collect_md; bool ipv4, ipv6; int ret = 0; ipv6 = geneve->cfg.info.mode & IP_TUNNEL_INFO_IPV6 || metadata; ipv4 = !ipv6 || metadata; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) { ret = geneve_sock_add(geneve, true); if (ret < 0 && ret != -EAFNOSUPPORT) ipv4 = false; } #endif if (ipv4) ret = geneve_sock_add(geneve, false); if (ret < 0) geneve_sock_release(geneve); return ret; } static int geneve_stop(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); hlist_del_init_rcu(&geneve->hlist4.hlist); #if IS_ENABLED(CONFIG_IPV6) hlist_del_init_rcu(&geneve->hlist6.hlist); #endif geneve_sock_release(geneve); return 0; } static void geneve_build_header(struct genevehdr *geneveh, const struct ip_tunnel_info *info, __be16 inner_proto) { geneveh->ver = GENEVE_VER; geneveh->opt_len = info->options_len / 4; geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, info->key.tun_flags); geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT, info->key.tun_flags); geneveh->rsvd1 = 0; tunnel_id_to_vni(info->key.tun_id, geneveh->vni); geneveh->proto_type = inner_proto; geneveh->rsvd2 = 0; if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) ip_tunnel_info_opts_get(geneveh->options, info); } static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, const struct ip_tunnel_info *info, bool xnet, int ip_hdr_len, bool inner_proto_inherit) { bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); struct genevehdr *gnvh; __be16 inner_proto; int min_headroom; int err; skb_reset_mac_header(skb); skb_scrub_packet(skb, xnet); min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + GENEVE_BASE_HLEN + info->options_len + ip_hdr_len; err = skb_cow_head(skb, min_headroom); if (unlikely(err)) goto free_dst; err = udp_tunnel_handle_offloads(skb, udp_sum); if (err) goto free_dst; gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len); inner_proto = inner_proto_inherit ? 
skb->protocol : htons(ETH_P_TEB); geneve_build_header(gnvh, info, inner_proto); skb_set_inner_protocol(skb, inner_proto); return 0; free_dst: dst_release(dst); return err; } static u8 geneve_get_dsfield(struct sk_buff *skb, struct net_device *dev, const struct ip_tunnel_info *info, bool *use_cache) { struct geneve_dev *geneve = netdev_priv(dev); u8 dsfield; dsfield = info->key.tos; if (dsfield == 1 && !geneve->cfg.collect_md) { dsfield = ip_tunnel_get_dsfield(ip_hdr(skb), skb); *use_cache = false; } return dsfield; } static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { bool inner_proto_inherit = geneve->cfg.inner_proto_inherit; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); const struct ip_tunnel_key *key = &info->key; struct rtable *rt; bool use_cache; __u8 tos, ttl; __be16 df = 0; __be32 saddr; __be16 sport; int err; if (skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs4) return -EIO; use_cache = ip_tunnel_dst_cache_usable(skb, info); tos = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, geneve->cfg.port_min, geneve->cfg.port_max, true); rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, &info->key, sport, geneve->cfg.info.key.tp_dst, tos, use_cache ? (struct dst_cache *)&info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); err = skb_tunnel_check_pmtu(skb, &rt->dst, GENEVE_IPV4_HLEN + info->options_len, netif_is_any_bridge_port(dev)); if (err < 0) { dst_release(&rt->dst); return err; } else if (err) { struct ip_tunnel_info *info; info = skb_tunnel_info(skb); if (info) { struct ip_tunnel_info *unclone; unclone = skb_tunnel_info_unclone(skb); if (unlikely(!unclone)) { dst_release(&rt->dst); return -ENOMEM; } unclone->key.u.ipv4.dst = saddr; unclone->key.u.ipv4.src = info->key.u.ipv4.dst; } if (!pskb_may_pull(skb, ETH_HLEN)) { dst_release(&rt->dst); return -EINVAL; } skb->protocol = eth_type_trans(skb, geneve->dev); __netif_rx(skb); dst_release(&rt->dst); return -EMSGSIZE; } tos = ip_tunnel_ecn_encap(tos, ip_hdr(skb), skb); if (geneve->cfg.collect_md) { ttl = key->ttl; df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ? htons(IP_DF) : 0; } else { if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else ttl = key->ttl; ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); if (geneve->cfg.df == GENEVE_DF_SET) { df = htons(IP_DF); } else if (geneve->cfg.df == GENEVE_DF_INHERIT) { struct ethhdr *eth = skb_eth_hdr(skb); if (ntohs(eth->h_proto) == ETH_P_IPV6) { df = htons(IP_DF); } else if (ntohs(eth->h_proto) == ETH_P_IP) { struct iphdr *iph = ip_hdr(skb); if (iph->frag_off & htons(IP_DF)) df = htons(IP_DF); } } } err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr), inner_proto_inherit); if (unlikely(err)) return err; udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst, tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, !net_eq(geneve->net, dev_net(geneve->dev)), !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags)); return 0; } #if IS_ENABLED(CONFIG_IPV6) static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { bool inner_proto_inherit = geneve->cfg.inner_proto_inherit; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); const struct ip_tunnel_key *key = &info->key; struct dst_entry *dst = NULL; struct in6_addr saddr; bool use_cache; __u8 prio, ttl; __be16 sport; int err; if (skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs6) return -EIO; use_cache = ip_tunnel_dst_cache_usable(skb, info); prio = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, geneve->cfg.port_min, geneve->cfg.port_max, true); dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0, &saddr, key, sport, geneve->cfg.info.key.tp_dst, prio, use_cache ? (struct dst_cache *)&info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); err = skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len, netif_is_any_bridge_port(dev)); if (err < 0) { dst_release(dst); return err; } else if (err) { struct ip_tunnel_info *info = skb_tunnel_info(skb); if (info) { struct ip_tunnel_info *unclone; unclone = skb_tunnel_info_unclone(skb); if (unlikely(!unclone)) { dst_release(dst); return -ENOMEM; } unclone->key.u.ipv6.dst = saddr; unclone->key.u.ipv6.src = info->key.u.ipv6.dst; } if (!pskb_may_pull(skb, ETH_HLEN)) { dst_release(dst); return -EINVAL; } skb->protocol = eth_type_trans(skb, geneve->dev); __netif_rx(skb); dst_release(dst); return -EMSGSIZE; } prio = ip_tunnel_ecn_encap(prio, ip_hdr(skb), skb); if (geneve->cfg.collect_md) { ttl = key->ttl; } else { if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else ttl = key->ttl; ttl = ttl ? 
: ip6_dst_hoplimit(dst); } err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr), inner_proto_inherit); if (unlikely(err)) return err; udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, &saddr, &key->u.ipv6.dst, prio, ttl, info->key.label, sport, geneve->cfg.info.key.tp_dst, !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags)); return 0; } #endif static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct ip_tunnel_info *info = NULL; int err; if (geneve->cfg.collect_md) { info = skb_tunnel_info(skb); if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { netdev_dbg(dev, "no tunnel metadata\n"); dev_kfree_skb(skb); dev_dstats_tx_dropped(dev); return NETDEV_TX_OK; } } else { info = &geneve->cfg.info; } rcu_read_lock(); #if IS_ENABLED(CONFIG_IPV6) if (info->mode & IP_TUNNEL_INFO_IPV6) err = geneve6_xmit_skb(skb, dev, geneve, info); else #endif err = geneve_xmit_skb(skb, dev, geneve, info); rcu_read_unlock(); if (likely(!err)) return NETDEV_TX_OK; if (err != -EMSGSIZE) dev_kfree_skb(skb); if (err == -ELOOP) DEV_STATS_INC(dev, collisions); else if (err == -ENETUNREACH) DEV_STATS_INC(dev, tx_carrier_errors); DEV_STATS_INC(dev, tx_errors); return NETDEV_TX_OK; } static int geneve_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu > dev->max_mtu) new_mtu = dev->max_mtu; else if (new_mtu < dev->min_mtu) new_mtu = dev->min_mtu; WRITE_ONCE(dev->mtu, new_mtu); return 0; } static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); struct geneve_dev *geneve = netdev_priv(dev); __be16 sport; if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); bool use_cache; __be32 saddr; u8 tos; if (!gs4) return -EIO; use_cache = ip_tunnel_dst_cache_usable(skb, info); tos = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, geneve->cfg.port_min, geneve->cfg.port_max, true); rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, &info->key, sport, geneve->cfg.info.key.tp_dst, tos, use_cache ? &info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); info->key.u.ipv4.src = saddr; #if IS_ENABLED(CONFIG_IPV6) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); struct in6_addr saddr; bool use_cache; u8 prio; if (!gs6) return -EIO; use_cache = ip_tunnel_dst_cache_usable(skb, info); prio = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, geneve->cfg.port_min, geneve->cfg.port_max, true); dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0, &saddr, &info->key, sport, geneve->cfg.info.key.tp_dst, prio, use_cache ? 
&info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); dst_release(dst); info->key.u.ipv6.src = saddr; #endif } else { return -EINVAL; } info->key.tp_src = sport; info->key.tp_dst = geneve->cfg.info.key.tp_dst; return 0; } static const struct net_device_ops geneve_netdev_ops = { .ndo_init = geneve_init, .ndo_uninit = geneve_uninit, .ndo_open = geneve_open, .ndo_stop = geneve_stop, .ndo_start_xmit = geneve_xmit, .ndo_change_mtu = geneve_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_fill_metadata_dst = geneve_fill_metadata_dst, }; static void geneve_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strscpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version)); strscpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver)); } static const struct ethtool_ops geneve_ethtool_ops = { .get_drvinfo = geneve_get_drvinfo, .get_link = ethtool_op_get_link, }; /* Info for udev, that this is a virtual tunnel endpoint */ static const struct device_type geneve_type = { .name = "geneve", }; /* Calls the ndo_udp_tunnel_add of the caller in order to * supply the listening GENEVE udp ports. Callers are expected * to implement the ndo_udp_tunnel_add. */ static void geneve_offload_rx_ports(struct net_device *dev, bool push) { struct net *net = dev_net(dev); struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; rcu_read_lock(); list_for_each_entry_rcu(gs, &gn->sock_list, list) { if (push) { udp_tunnel_push_rx_port(dev, gs->sock, UDP_TUNNEL_TYPE_GENEVE); } else { udp_tunnel_drop_rx_port(dev, gs->sock, UDP_TUNNEL_TYPE_GENEVE); } } rcu_read_unlock(); } /* Initialize the device structure. */ static void geneve_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &geneve_netdev_ops; dev->ethtool_ops = &geneve_ethtool_ops; dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &geneve_type); dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->hw_features |= NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; /* MTU range: 68 - (something less than 65535) */ dev->min_mtu = ETH_MIN_MTU; /* The max_mtu calculation does not take account of GENEVE * options, to avoid excluding potentially valid * configurations. This will be further reduced by IPvX hdr size. 
*/ dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len; netif_keep_dst(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->lltx = true; eth_hw_addr_random(dev); } static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_UNSPEC] = { .strict_start_type = IFLA_GENEVE_INNER_PROTO_INHERIT }, [IFLA_GENEVE_ID] = { .type = NLA_U32 }, [IFLA_GENEVE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) }, [IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) }, [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, [IFLA_GENEVE_TOS] = { .type = NLA_U8 }, [IFLA_GENEVE_LABEL] = { .type = NLA_U32 }, [IFLA_GENEVE_PORT] = { .type = NLA_U16 }, [IFLA_GENEVE_COLLECT_METADATA] = { .type = NLA_FLAG }, [IFLA_GENEVE_UDP_CSUM] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, [IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 }, [IFLA_GENEVE_DF] = { .type = NLA_U8 }, [IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG }, [IFLA_GENEVE_PORT_RANGE] = NLA_POLICY_EXACT_LEN(sizeof(struct ifla_geneve_port_range)), }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], "Provided link layer address is not Ethernet"); return -EINVAL; } if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], "Provided Ethernet address is not unicast"); return -EADDRNOTAVAIL; } } if (!data) { NL_SET_ERR_MSG(extack, "Not enough attributes provided to perform the operation"); return -EINVAL; } if (data[IFLA_GENEVE_ID]) { __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); if (vni >= GENEVE_N_VID) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_ID], "Geneve ID must be lower than 16777216"); return -ERANGE; } } if (data[IFLA_GENEVE_DF]) { enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]); if (df < 0 || df > GENEVE_DF_MAX) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_DF], "Invalid DF attribute"); return -EINVAL; } } if (data[IFLA_GENEVE_PORT_RANGE]) { const struct ifla_geneve_port_range *p; p = nla_data(data[IFLA_GENEVE_PORT_RANGE]); if (ntohs(p->high) < ntohs(p->low)) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_PORT_RANGE], "Invalid source port range"); return -EINVAL; } } return 0; } static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, const struct ip_tunnel_info *info, bool *tun_on_same_port, bool *tun_collect_md) { struct geneve_dev *geneve, *t = NULL; *tun_on_same_port = false; *tun_collect_md = false; list_for_each_entry(geneve, &gn->geneve_list, next) { if (info->key.tp_dst == geneve->cfg.info.key.tp_dst) { *tun_collect_md = geneve->cfg.collect_md; *tun_on_same_port = true; } if (info->key.tun_id == geneve->cfg.info.key.tun_id && info->key.tp_dst == geneve->cfg.info.key.tp_dst && !memcmp(&info->key.u, &geneve->cfg.info.key.u, sizeof(info->key.u))) t = geneve; } return t; } static bool is_tnl_info_zero(const struct ip_tunnel_info *info) { return !(info->key.tun_id || info->key.tos || !ip_tunnel_flags_empty(info->key.tun_flags) || info->key.ttl || info->key.label || info->key.tp_src || memchr_inv(&info->key.u, 0, sizeof(info->key.u))); } static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, struct ip_tunnel_info *b) { if (ip_tunnel_info_af(a) == AF_INET) return a->key.u.ipv4.dst == b->key.u.ipv4.dst; else return ipv6_addr_equal(&a->key.u.ipv6.dst, 
&b->key.u.ipv6.dst); } static int geneve_configure(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack, const struct geneve_config *cfg) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); const struct ip_tunnel_info *info = &cfg->info; bool tun_collect_md, tun_on_same_port; int err, encap_len; if (cfg->collect_md && !is_tnl_info_zero(info)) { NL_SET_ERR_MSG(extack, "Device is externally controlled, so attributes (VNI, Port, and so on) must not be specified"); return -EINVAL; } geneve->net = net; geneve->dev = dev; t = geneve_find_dev(gn, info, &tun_on_same_port, &tun_collect_md); if (t) return -EBUSY; /* make enough headroom for basic scenario */ encap_len = GENEVE_BASE_HLEN + ETH_HLEN; if (!cfg->collect_md && ip_tunnel_info_af(info) == AF_INET) { encap_len += sizeof(struct iphdr); dev->max_mtu -= sizeof(struct iphdr); } else { encap_len += sizeof(struct ipv6hdr); dev->max_mtu -= sizeof(struct ipv6hdr); } dev->needed_headroom = encap_len + ETH_HLEN; if (cfg->collect_md) { if (tun_on_same_port) { NL_SET_ERR_MSG(extack, "There can be only one externally controlled device on a destination port"); return -EPERM; } } else { if (tun_collect_md) { NL_SET_ERR_MSG(extack, "There already exists an externally controlled device on this destination port"); return -EPERM; } } dst_cache_reset(&geneve->cfg.info.dst_cache); memcpy(&geneve->cfg, cfg, sizeof(*cfg)); if (geneve->cfg.inner_proto_inherit) { dev->header_ops = NULL; dev->type = ARPHRD_NONE; dev->hard_header_len = 0; dev->addr_len = 0; dev->flags = IFF_POINTOPOINT | IFF_NOARP; } err = register_netdevice(dev); if (err) return err; list_add(&geneve->next, &gn->geneve_list); return 0; } static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port) { memset(info, 0, sizeof(*info)); info->key.tp_dst = htons(dst_port); } static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack, struct geneve_config *cfg, bool changelink) { struct ip_tunnel_info *info = &cfg->info; int attrtype; if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) { NL_SET_ERR_MSG(extack, "Cannot specify both IPv4 and IPv6 Remote addresses"); return -EINVAL; } if (data[IFLA_GENEVE_REMOTE]) { if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) { attrtype = IFLA_GENEVE_REMOTE; goto change_notsup; } info->key.u.ipv4.dst = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); if (ipv4_is_multicast(info->key.u.ipv4.dst)) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE], "Remote IPv4 address cannot be Multicast"); return -EINVAL; } } if (data[IFLA_GENEVE_REMOTE6]) { #if IS_ENABLED(CONFIG_IPV6) if (changelink && (ip_tunnel_info_af(info) == AF_INET)) { attrtype = IFLA_GENEVE_REMOTE6; goto change_notsup; } info->mode = IP_TUNNEL_INFO_IPV6; info->key.u.ipv6.dst = nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]); if (ipv6_addr_type(&info->key.u.ipv6.dst) & IPV6_ADDR_LINKLOCAL) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], "Remote IPv6 address cannot be link-local"); return -EINVAL; } if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], "Remote IPv6 address cannot be Multicast"); return -EINVAL; } __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); cfg->use_udp6_rx_checksums = true; #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; #endif } if (data[IFLA_GENEVE_ID]) { __u32 vni; __u8 tvni[3]; __be64 tunid; vni = 
nla_get_u32(data[IFLA_GENEVE_ID]); tvni[0] = (vni & 0x00ff0000) >> 16; tvni[1] = (vni & 0x0000ff00) >> 8; tvni[2] = vni & 0x000000ff; tunid = vni_to_tunnel_id(tvni); if (changelink && (tunid != info->key.tun_id)) { attrtype = IFLA_GENEVE_ID; goto change_notsup; } info->key.tun_id = tunid; } if (data[IFLA_GENEVE_TTL_INHERIT]) { if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT])) cfg->ttl_inherit = true; else cfg->ttl_inherit = false; } else if (data[IFLA_GENEVE_TTL]) { info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); cfg->ttl_inherit = false; } if (data[IFLA_GENEVE_TOS]) info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); if (data[IFLA_GENEVE_DF]) cfg->df = nla_get_u8(data[IFLA_GENEVE_DF]); if (data[IFLA_GENEVE_LABEL]) { info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & IPV6_FLOWLABEL_MASK; if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_LABEL], "Label attribute only applies for IPv6 Geneve devices"); return -EINVAL; } } if (data[IFLA_GENEVE_PORT]) { if (changelink) { attrtype = IFLA_GENEVE_PORT; goto change_notsup; } info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); } if (data[IFLA_GENEVE_PORT_RANGE]) { const struct ifla_geneve_port_range *p; if (changelink) { attrtype = IFLA_GENEVE_PORT_RANGE; goto change_notsup; } p = nla_data(data[IFLA_GENEVE_PORT_RANGE]); cfg->port_min = ntohs(p->low); cfg->port_max = ntohs(p->high); } if (data[IFLA_GENEVE_COLLECT_METADATA]) { if (changelink) { attrtype = IFLA_GENEVE_COLLECT_METADATA; goto change_notsup; } cfg->collect_md = true; } if (data[IFLA_GENEVE_UDP_CSUM]) { if (changelink) { attrtype = IFLA_GENEVE_UDP_CSUM; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { #if IS_ENABLED(CONFIG_IPV6) if (changelink) { attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) __clear_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX], "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; #endif } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) { #if IS_ENABLED(CONFIG_IPV6) if (changelink) { attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) cfg->use_udp6_rx_checksums = false; #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX], "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; #endif } if (data[IFLA_GENEVE_INNER_PROTO_INHERIT]) { if (changelink) { attrtype = IFLA_GENEVE_INNER_PROTO_INHERIT; goto change_notsup; } cfg->inner_proto_inherit = true; } return 0; change_notsup: NL_SET_ERR_MSG_ATTR(extack, data[attrtype], "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, and UDP checksum attributes are not supported"); return -EOPNOTSUPP; } static void geneve_link_config(struct net_device *dev, struct ip_tunnel_info *info, struct nlattr *tb[]) { struct geneve_dev *geneve = netdev_priv(dev); int ldev_mtu = 0; if (tb[IFLA_MTU]) { geneve_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); return; } switch (ip_tunnel_info_af(info)) { case AF_INET: { struct flowi4 fl4 = { .daddr = info->key.u.ipv4.dst }; struct rtable *rt = ip_route_output_key(geneve->net, &fl4); if (!IS_ERR(rt) && rt->dst.dev) { ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV4_HLEN; ip_rt_put(rt); } break; } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { struct rt6_info *rt; if 
(!__in6_dev_get(dev)) break; rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, NULL, 0); if (rt && rt->dst.dev) ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; ip6_rt_put(rt); break; } #endif } if (ldev_mtu <= 0) return; geneve_change_mtu(dev, ldev_mtu - info->options_len); } static int geneve_newlink(struct net_device *dev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct net *link_net = rtnl_newlink_link_net(params); struct nlattr **data = params->data; struct nlattr **tb = params->tb; struct geneve_config cfg = { .df = GENEVE_DF_UNSET, .use_udp6_rx_checksums = false, .ttl_inherit = false, .collect_md = false, .port_min = 1, .port_max = USHRT_MAX, }; int err; init_tnl_info(&cfg.info, GENEVE_UDP_PORT); err = geneve_nl2info(tb, data, extack, &cfg, false); if (err) return err; err = geneve_configure(link_net, dev, extack, &cfg); if (err) return err; geneve_link_config(dev, &cfg.info, tb); return 0; } /* Quiesces the geneve device data path for both TX and RX. * * On transmit geneve checks for non-NULL geneve_sock before it proceeds. * So, if we set that socket to NULL under RCU and wait for synchronize_net() * to complete for the existing set of in-flight packets to be transmitted, * then we would have quiesced the transmit data path. All the future packets * will get dropped until we unquiesce the data path. * * On receive geneve dereference the geneve_sock stashed in the socket. So, * if we set that to NULL under RCU and wait for synchronize_net() to * complete, then we would have quiesced the receive data path. */ static void geneve_quiesce(struct geneve_dev *geneve, struct geneve_sock **gs4, struct geneve_sock **gs6) { *gs4 = rtnl_dereference(geneve->sock4); rcu_assign_pointer(geneve->sock4, NULL); if (*gs4) rcu_assign_sk_user_data((*gs4)->sock->sk, NULL); #if IS_ENABLED(CONFIG_IPV6) *gs6 = rtnl_dereference(geneve->sock6); rcu_assign_pointer(geneve->sock6, NULL); if (*gs6) rcu_assign_sk_user_data((*gs6)->sock->sk, NULL); #else *gs6 = NULL; #endif synchronize_net(); } /* Resumes the geneve device data path for both TX and RX. */ static void geneve_unquiesce(struct geneve_dev *geneve, struct geneve_sock *gs4, struct geneve_sock __maybe_unused *gs6) { rcu_assign_pointer(geneve->sock4, gs4); if (gs4) rcu_assign_sk_user_data(gs4->sock->sk, gs4); #if IS_ENABLED(CONFIG_IPV6) rcu_assign_pointer(geneve->sock6, gs6); if (gs6) rcu_assign_sk_user_data(gs6->sock->sk, gs6); #endif synchronize_net(); } static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs4, *gs6; struct geneve_config cfg; int err; /* If the geneve device is configured for metadata (or externally * controlled, for example, OVS), then nothing can be changed. */ if (geneve->cfg.collect_md) return -EOPNOTSUPP; /* Start with the existing info. 
*/ memcpy(&cfg, &geneve->cfg, sizeof(cfg)); err = geneve_nl2info(tb, data, extack, &cfg, true); if (err) return err; if (!geneve_dst_addr_equal(&geneve->cfg.info, &cfg.info)) { dst_cache_reset(&cfg.info.dst_cache); geneve_link_config(dev, &cfg.info, tb); } geneve_quiesce(geneve, &gs4, &gs6); memcpy(&geneve->cfg, &cfg, sizeof(cfg)); geneve_unquiesce(geneve, gs4, gs6); return 0; } static void geneve_dellink(struct net_device *dev, struct list_head *head) { struct geneve_dev *geneve = netdev_priv(dev); list_del(&geneve->next); unregister_netdevice_queue(dev, head); } static size_t geneve_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_DF */ nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */ nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */ nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */ nla_total_size(sizeof(struct ifla_geneve_port_range)) + /* IFLA_GENEVE_PORT_RANGE */ 0; } static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct ip_tunnel_info *info = &geneve->cfg.info; bool ttl_inherit = geneve->cfg.ttl_inherit; bool metadata = geneve->cfg.collect_md; struct ifla_geneve_port_range ports = { .low = htons(geneve->cfg.port_min), .high = htons(geneve->cfg.port_max), }; __u8 tmp_vni[3]; __u32 vni; tunnel_id_to_vni(info->key.tun_id, tmp_vni); vni = (tmp_vni[0] << 16) | (tmp_vni[1] << 8) | tmp_vni[2]; if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) goto nla_put_failure; if (!metadata && ip_tunnel_info_af(info) == AF_INET) { if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, info->key.u.ipv4.dst)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags))) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) } else if (!metadata) { if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, &info->key.u.ipv6.dst)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags))) goto nla_put_failure; #endif } if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->cfg.df)) goto nla_put_failure; if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst)) goto nla_put_failure; if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, !geneve->cfg.use_udp6_rx_checksums)) goto nla_put_failure; #endif if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit)) goto nla_put_failure; if (geneve->cfg.inner_proto_inherit && nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT)) goto nla_put_failure; if (nla_put(skb, IFLA_GENEVE_PORT_RANGE, sizeof(ports), &ports)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops geneve_link_ops 
__read_mostly = { .kind = "geneve", .maxtype = IFLA_GENEVE_MAX, .policy = geneve_policy, .priv_size = sizeof(struct geneve_dev), .setup = geneve_setup, .validate = geneve_validate, .newlink = geneve_newlink, .changelink = geneve_changelink, .dellink = geneve_dellink, .get_size = geneve_get_size, .fill_info = geneve_fill_info, }; struct net_device *geneve_dev_create_fb(struct net *net, const char *name, u8 name_assign_type, u16 dst_port) { struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; LIST_HEAD(list_kill); int err; struct geneve_config cfg = { .df = GENEVE_DF_UNSET, .use_udp6_rx_checksums = true, .ttl_inherit = false, .collect_md = true, .port_min = 1, .port_max = USHRT_MAX, }; memset(tb, 0, sizeof(tb)); dev = rtnl_create_link(net, name, name_assign_type, &geneve_link_ops, tb, NULL); if (IS_ERR(dev)) return dev; init_tnl_info(&cfg.info, dst_port); err = geneve_configure(net, dev, NULL, &cfg); if (err) { free_netdev(dev); return ERR_PTR(err); } /* openvswitch users expect packet sizes to be unrestricted, * so set the largest MTU we can. */ err = geneve_change_mtu(dev, IP_MAX_MTU); if (err) goto err; err = rtnl_configure_link(dev, NULL, 0, NULL); if (err < 0) goto err; return dev; err: geneve_dellink(dev, &list_kill); unregister_netdevice_many(&list_kill); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(geneve_dev_create_fb); static int geneve_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) geneve_offload_rx_ports(dev, true); else if (event == NETDEV_UDP_TUNNEL_DROP_INFO) geneve_offload_rx_ports(dev, false); return NOTIFY_DONE; } static struct notifier_block geneve_notifier_block __read_mostly = { .notifier_call = geneve_netdevice_event, }; static __net_init int geneve_init_net(struct net *net) { struct geneve_net *gn = net_generic(net, geneve_net_id); INIT_LIST_HEAD(&gn->geneve_list); INIT_LIST_HEAD(&gn->sock_list); return 0; } static void geneve_destroy_tunnels(struct net *net, struct list_head *head) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *geneve, *next; list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) geneve_dellink(geneve->dev, head); } static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list, struct list_head *dev_to_kill) { struct net *net; list_for_each_entry(net, net_list, exit_list) geneve_destroy_tunnels(net, dev_to_kill); } static void __net_exit geneve_exit_net(struct net *net) { const struct geneve_net *gn = net_generic(net, geneve_net_id); WARN_ON_ONCE(!list_empty(&gn->sock_list)); } static struct pernet_operations geneve_net_ops = { .init = geneve_init_net, .exit_batch_rtnl = geneve_exit_batch_rtnl, .exit = geneve_exit_net, .id = &geneve_net_id, .size = sizeof(struct geneve_net), }; static int __init geneve_init_module(void) { int rc; rc = register_pernet_subsys(&geneve_net_ops); if (rc) goto out1; rc = register_netdevice_notifier(&geneve_notifier_block); if (rc) goto out2; rc = rtnl_link_register(&geneve_link_ops); if (rc) goto out3; return 0; out3: unregister_netdevice_notifier(&geneve_notifier_block); out2: unregister_pernet_subsys(&geneve_net_ops); out1: return rc; } late_initcall(geneve_init_module); static void __exit geneve_cleanup_module(void) { rtnl_link_unregister(&geneve_link_ops); unregister_netdevice_notifier(&geneve_notifier_block); unregister_pernet_subsys(&geneve_net_ops); } module_exit(geneve_cleanup_module); MODULE_LICENSE("GPL"); 
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");
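/*
 * Illustrative sketch (not part of the driver above): geneve_nl2info() packs
 * the 24-bit VNI from IFLA_GENEVE_ID into a 3-byte, network-order array, and
 * geneve_fill_info() reassembles it the same way when dumping the link. The
 * standalone helpers below, pack_vni()/unpack_vni(), are hypothetical names
 * used only to demonstrate that shift/mask arithmetic; the driver itself then
 * converts the 3-byte form to and from the __be64 tunnel id with
 * vni_to_tunnel_id()/tunnel_id_to_vni(), which is not reproduced here.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void pack_vni(uint32_t vni, uint8_t out[3])
{
	out[0] = (vni & 0x00ff0000) >> 16;
	out[1] = (vni & 0x0000ff00) >> 8;
	out[2] = vni & 0x000000ff;
}

static uint32_t unpack_vni(const uint8_t in[3])
{
	return ((uint32_t)in[0] << 16) | ((uint32_t)in[1] << 8) | in[2];
}

int main(void)
{
	uint8_t bytes[3];
	uint32_t vni = 0x123456;	/* must stay below GENEVE_N_VID (2^24) */

	pack_vni(vni, bytes);
	assert(unpack_vni(bytes) == vni);
	printf("vni %06x -> %02x %02x %02x\n", vni, bytes[0], bytes[1], bytes[2]);
	return 0;
}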
/* * Copyright
(c) 2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/random.h> #include <rdma/ib_cache.h> #include "sa.h" static int mcast_add_one(struct ib_device *device); static void mcast_remove_one(struct ib_device *device, void *client_data); static struct ib_client mcast_client = { .name = "ib_multicast", .add = mcast_add_one, .remove = mcast_remove_one }; static struct ib_sa_client sa_client; static struct workqueue_struct *mcast_wq; static union ib_gid mgid0; struct mcast_device; struct mcast_port { struct mcast_device *dev; spinlock_t lock; struct rb_root table; refcount_t refcount; struct completion comp; u32 port_num; }; struct mcast_device { struct ib_device *device; struct ib_event_handler event_handler; int start_port; int end_port; struct mcast_port port[]; }; enum mcast_state { MCAST_JOINING, MCAST_MEMBER, MCAST_ERROR, }; enum mcast_group_state { MCAST_IDLE, MCAST_BUSY, MCAST_GROUP_ERROR, MCAST_PKEY_EVENT }; enum { MCAST_INVALID_PKEY_INDEX = 0xFFFF }; struct mcast_member; struct mcast_group { struct ib_sa_mcmember_rec rec; struct rb_node node; struct mcast_port *port; spinlock_t lock; struct work_struct work; struct list_head pending_list; struct list_head active_list; struct mcast_member *last_join; int members[NUM_JOIN_MEMBERSHIP_TYPES]; atomic_t refcount; enum mcast_group_state state; struct ib_sa_query *query; u16 pkey_index; u8 leave_state; int retries; }; struct mcast_member { struct ib_sa_multicast multicast; struct ib_sa_client *client; struct mcast_group *group; struct list_head list; enum mcast_state state; refcount_t refcount; struct completion comp; }; static void join_handler(int status, struct ib_sa_mcmember_rec *rec, void *context); static void leave_handler(int status, struct ib_sa_mcmember_rec *rec, void *context); static struct mcast_group *mcast_find(struct mcast_port *port, union ib_gid *mgid) { struct rb_node *node = port->table.rb_node; struct mcast_group *group; int ret; while (node) { group = rb_entry(node, struct mcast_group, node); ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); if (!ret) return group; 
if (ret < 0) node = node->rb_left; else node = node->rb_right; } return NULL; } static struct mcast_group *mcast_insert(struct mcast_port *port, struct mcast_group *group, int allow_duplicates) { struct rb_node **link = &port->table.rb_node; struct rb_node *parent = NULL; struct mcast_group *cur_group; int ret; while (*link) { parent = *link; cur_group = rb_entry(parent, struct mcast_group, node); ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, sizeof group->rec.mgid); if (ret < 0) link = &(*link)->rb_left; else if (ret > 0) link = &(*link)->rb_right; else if (allow_duplicates) link = &(*link)->rb_left; else return cur_group; } rb_link_node(&group->node, parent, link); rb_insert_color(&group->node, &port->table); return NULL; } static void deref_port(struct mcast_port *port) { if (refcount_dec_and_test(&port->refcount)) complete(&port->comp); } static void release_group(struct mcast_group *group) { struct mcast_port *port = group->port; unsigned long flags; spin_lock_irqsave(&port->lock, flags); if (atomic_dec_and_test(&group->refcount)) { rb_erase(&group->node, &port->table); spin_unlock_irqrestore(&port->lock, flags); kfree(group); deref_port(port); } else spin_unlock_irqrestore(&port->lock, flags); } static void deref_member(struct mcast_member *member) { if (refcount_dec_and_test(&member->refcount)) complete(&member->comp); } static void queue_join(struct mcast_member *member) { struct mcast_group *group = member->group; unsigned long flags; spin_lock_irqsave(&group->lock, flags); list_add_tail(&member->list, &group->pending_list); if (group->state == MCAST_IDLE) { group->state = MCAST_BUSY; atomic_inc(&group->refcount); queue_work(mcast_wq, &group->work); } spin_unlock_irqrestore(&group->lock, flags); } /* * A multicast group has four types of members: full member, non member, * sendonly non member and sendonly full member. * We need to keep track of the number of members of each * type based on their join state. Adjust the number of members the belong to * the specified join states. */ static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) { int i; for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1) if (join_state & 0x1) group->members[i] += inc; } /* * If a multicast group has zero members left for a particular join state, but * the group is still a member with the SA, we need to leave that join state. * Determine which join states we still belong to, but that do not have any * active members. 
*/ static u8 get_leave_state(struct mcast_group *group) { u8 leave_state = 0; int i; for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++) if (!group->members[i]) leave_state |= (0x1 << i); return leave_state & group->rec.join_state; } static int check_selector(ib_sa_comp_mask comp_mask, ib_sa_comp_mask selector_mask, ib_sa_comp_mask value_mask, u8 selector, u8 src_value, u8 dst_value) { int err; if (!(comp_mask & selector_mask) || !(comp_mask & value_mask)) return 0; switch (selector) { case IB_SA_GT: err = (src_value <= dst_value); break; case IB_SA_LT: err = (src_value >= dst_value); break; case IB_SA_EQ: err = (src_value != dst_value); break; default: err = 0; break; } return err; } static int cmp_rec(struct ib_sa_mcmember_rec *src, struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask) { /* MGID must already match */ if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID && memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid)) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid) return -EINVAL; if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR, IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector, src->mtu, dst->mtu)) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS && src->traffic_class != dst->traffic_class) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey) return -EINVAL; if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR, IB_SA_MCMEMBER_REC_RATE, dst->rate_selector, src->rate, dst->rate)) return -EINVAL; if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR, IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME, dst->packet_life_time_selector, src->packet_life_time, dst->packet_life_time)) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL && src->flow_label != dst->flow_label) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT && src->hop_limit != dst->hop_limit) return -EINVAL; if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope) return -EINVAL; /* join_state checked separately, proxy_join ignored */ return 0; } static int send_join(struct mcast_group *group, struct mcast_member *member) { struct mcast_port *port = group->port; int ret; group->last_join = member; ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, port->port_num, IB_MGMT_METHOD_SET, &member->multicast.rec, member->multicast.comp_mask, 3000, GFP_KERNEL, join_handler, group, &group->query); return (ret > 0) ? 0 : ret; } static int send_leave(struct mcast_group *group, u8 leave_state) { struct mcast_port *port = group->port; struct ib_sa_mcmember_rec rec; int ret; rec = group->rec; rec.join_state = leave_state; group->leave_state = leave_state; ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, port->port_num, IB_SA_METHOD_DELETE, &rec, IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE, 3000, GFP_KERNEL, leave_handler, group, &group->query); return (ret > 0) ? 
0 : ret; } static void join_group(struct mcast_group *group, struct mcast_member *member, u8 join_state) { member->state = MCAST_MEMBER; adjust_membership(group, join_state, 1); group->rec.join_state |= join_state; member->multicast.rec = group->rec; member->multicast.rec.join_state = join_state; list_move(&member->list, &group->active_list); } static int fail_join(struct mcast_group *group, struct mcast_member *member, int status) { spin_lock_irq(&group->lock); list_del_init(&member->list); spin_unlock_irq(&group->lock); return member->multicast.callback(status, &member->multicast); } static void process_group_error(struct mcast_group *group) { struct mcast_member *member; int ret = 0; u16 pkey_index; if (group->state == MCAST_PKEY_EVENT) ret = ib_find_pkey(group->port->dev->device, group->port->port_num, be16_to_cpu(group->rec.pkey), &pkey_index); spin_lock_irq(&group->lock); if (group->state == MCAST_PKEY_EVENT && !ret && group->pkey_index == pkey_index) goto out; while (!list_empty(&group->active_list)) { member = list_entry(group->active_list.next, struct mcast_member, list); refcount_inc(&member->refcount); list_del_init(&member->list); adjust_membership(group, member->multicast.rec.join_state, -1); member->state = MCAST_ERROR; spin_unlock_irq(&group->lock); ret = member->multicast.callback(-ENETRESET, &member->multicast); deref_member(member); if (ret) ib_sa_free_multicast(&member->multicast); spin_lock_irq(&group->lock); } group->rec.join_state = 0; out: group->state = MCAST_BUSY; spin_unlock_irq(&group->lock); } static void mcast_work_handler(struct work_struct *work) { struct mcast_group *group; struct mcast_member *member; struct ib_sa_multicast *multicast; int status, ret; u8 join_state; group = container_of(work, typeof(*group), work); retest: spin_lock_irq(&group->lock); while (!list_empty(&group->pending_list) || (group->state != MCAST_BUSY)) { if (group->state != MCAST_BUSY) { spin_unlock_irq(&group->lock); process_group_error(group); goto retest; } member = list_entry(group->pending_list.next, struct mcast_member, list); multicast = &member->multicast; join_state = multicast->rec.join_state; refcount_inc(&member->refcount); if (join_state == (group->rec.join_state & join_state)) { status = cmp_rec(&group->rec, &multicast->rec, multicast->comp_mask); if (!status) join_group(group, member, join_state); else list_del_init(&member->list); spin_unlock_irq(&group->lock); ret = multicast->callback(status, multicast); } else { spin_unlock_irq(&group->lock); status = send_join(group, member); if (!status) { deref_member(member); return; } ret = fail_join(group, member, status); } deref_member(member); if (ret) ib_sa_free_multicast(&member->multicast); spin_lock_irq(&group->lock); } join_state = get_leave_state(group); if (join_state) { group->rec.join_state &= ~join_state; spin_unlock_irq(&group->lock); if (send_leave(group, join_state)) goto retest; } else { group->state = MCAST_IDLE; spin_unlock_irq(&group->lock); release_group(group); } } /* * Fail a join request if it is still active - at the head of the pending queue. 
*/ static void process_join_error(struct mcast_group *group, int status) { struct mcast_member *member; int ret; spin_lock_irq(&group->lock); member = list_entry(group->pending_list.next, struct mcast_member, list); if (group->last_join == member) { refcount_inc(&member->refcount); list_del_init(&member->list); spin_unlock_irq(&group->lock); ret = member->multicast.callback(status, &member->multicast); deref_member(member); if (ret) ib_sa_free_multicast(&member->multicast); } else spin_unlock_irq(&group->lock); } static void join_handler(int status, struct ib_sa_mcmember_rec *rec, void *context) { struct mcast_group *group = context; u16 pkey_index = MCAST_INVALID_PKEY_INDEX; if (status) process_join_error(group, status); else { int mgids_changed, is_mgid0; if (ib_find_pkey(group->port->dev->device, group->port->port_num, be16_to_cpu(rec->pkey), &pkey_index)) pkey_index = MCAST_INVALID_PKEY_INDEX; spin_lock_irq(&group->port->lock); if (group->state == MCAST_BUSY && group->pkey_index == MCAST_INVALID_PKEY_INDEX) group->pkey_index = pkey_index; mgids_changed = memcmp(&rec->mgid, &group->rec.mgid, sizeof(group->rec.mgid)); group->rec = *rec; if (mgids_changed) { rb_erase(&group->node, &group->port->table); is_mgid0 = !memcmp(&mgid0, &group->rec.mgid, sizeof(mgid0)); mcast_insert(group->port, group, is_mgid0); } spin_unlock_irq(&group->port->lock); } mcast_work_handler(&group->work); } static void leave_handler(int status, struct ib_sa_mcmember_rec *rec, void *context) { struct mcast_group *group = context; if (status && group->retries > 0 && !send_leave(group, group->leave_state)) group->retries--; else mcast_work_handler(&group->work); } static struct mcast_group *acquire_group(struct mcast_port *port, union ib_gid *mgid, gfp_t gfp_mask) { struct mcast_group *group, *cur_group; unsigned long flags; int is_mgid0; is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0); if (!is_mgid0) { spin_lock_irqsave(&port->lock, flags); group = mcast_find(port, mgid); if (group) goto found; spin_unlock_irqrestore(&port->lock, flags); } group = kzalloc(sizeof *group, gfp_mask); if (!group) return NULL; group->retries = 3; group->port = port; group->rec.mgid = *mgid; group->pkey_index = MCAST_INVALID_PKEY_INDEX; INIT_LIST_HEAD(&group->pending_list); INIT_LIST_HEAD(&group->active_list); INIT_WORK(&group->work, mcast_work_handler); spin_lock_init(&group->lock); spin_lock_irqsave(&port->lock, flags); cur_group = mcast_insert(port, group, is_mgid0); if (cur_group) { kfree(group); group = cur_group; } else refcount_inc(&port->refcount); found: atomic_inc(&group->refcount); spin_unlock_irqrestore(&port->lock, flags); return group; } /* * We serialize all join requests to a single group to make our lives much * easier. Otherwise, two users could try to join the same group * simultaneously, with different configurations, one could leave while the * join is in progress, etc., which makes locking around error recovery * difficult. 
*/ struct ib_sa_multicast * ib_sa_join_multicast(struct ib_sa_client *client, struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, gfp_t gfp_mask, int (*callback)(int status, struct ib_sa_multicast *multicast), void *context) { struct mcast_device *dev; struct mcast_member *member; struct ib_sa_multicast *multicast; int ret; dev = ib_get_client_data(device, &mcast_client); if (!dev) return ERR_PTR(-ENODEV); member = kmalloc(sizeof *member, gfp_mask); if (!member) return ERR_PTR(-ENOMEM); ib_sa_client_get(client); member->client = client; member->multicast.rec = *rec; member->multicast.comp_mask = comp_mask; member->multicast.callback = callback; member->multicast.context = context; init_completion(&member->comp); refcount_set(&member->refcount, 1); member->state = MCAST_JOINING; member->group = acquire_group(&dev->port[port_num - dev->start_port], &rec->mgid, gfp_mask); if (!member->group) { ret = -ENOMEM; goto err; } /* * The user will get the multicast structure in their callback. They * could then free the multicast structure before we can return from * this routine. So we save the pointer to return before queuing * any callback. */ multicast = &member->multicast; queue_join(member); return multicast; err: ib_sa_client_put(client); kfree(member); return ERR_PTR(ret); } EXPORT_SYMBOL(ib_sa_join_multicast); void ib_sa_free_multicast(struct ib_sa_multicast *multicast) { struct mcast_member *member; struct mcast_group *group; member = container_of(multicast, struct mcast_member, multicast); group = member->group; spin_lock_irq(&group->lock); if (member->state == MCAST_MEMBER) adjust_membership(group, multicast->rec.join_state, -1); list_del_init(&member->list); if (group->state == MCAST_IDLE) { group->state = MCAST_BUSY; spin_unlock_irq(&group->lock); /* Continue to hold reference on group until callback */ queue_work(mcast_wq, &group->work); } else { spin_unlock_irq(&group->lock); release_group(group); } deref_member(member); wait_for_completion(&member->comp); ib_sa_client_put(member->client); kfree(member); } EXPORT_SYMBOL(ib_sa_free_multicast); int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num, union ib_gid *mgid, struct ib_sa_mcmember_rec *rec) { struct mcast_device *dev; struct mcast_port *port; struct mcast_group *group; unsigned long flags; int ret = 0; dev = ib_get_client_data(device, &mcast_client); if (!dev) return -ENODEV; port = &dev->port[port_num - dev->start_port]; spin_lock_irqsave(&port->lock, flags); group = mcast_find(port, mgid); if (group) *rec = group->rec; else ret = -EADDRNOTAVAIL; spin_unlock_irqrestore(&port->lock, flags); return ret; } EXPORT_SYMBOL(ib_sa_get_mcmember_rec); /** * ib_init_ah_from_mcmember - Initialize AH attribute from multicast * member record and gid of the device. * @device: RDMA device * @port_num: Port of the rdma device to consider * @rec: Multicast member record to use * @ndev: Optional netdevice, applicable only for RoCE * @gid_type: GID type to consider * @ah_attr: AH attribute to fillup on successful completion * * ib_init_ah_from_mcmember() initializes AH attribute based on multicast * member record and other device properties. On success the caller is * responsible to call rdma_destroy_ah_attr on the ah_attr. Returns 0 on * success or appropriate error code. 
* */ int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, struct net_device *ndev, enum ib_gid_type gid_type, struct rdma_ah_attr *ah_attr) { const struct ib_gid_attr *sgid_attr; /* GID table is not based on the netdevice for IB link layer, * so ignore ndev during search. */ if (rdma_protocol_ib(device, port_num)) ndev = NULL; else if (!rdma_protocol_roce(device, port_num)) return -EINVAL; sgid_attr = rdma_find_gid_by_port(device, &rec->port_gid, gid_type, port_num, ndev); if (IS_ERR(sgid_attr)) return PTR_ERR(sgid_attr); memset(ah_attr, 0, sizeof(*ah_attr)); ah_attr->type = rdma_ah_find_type(device, port_num); rdma_ah_set_dlid(ah_attr, be16_to_cpu(rec->mlid)); rdma_ah_set_sl(ah_attr, rec->sl); rdma_ah_set_port_num(ah_attr, port_num); rdma_ah_set_static_rate(ah_attr, rec->rate); rdma_move_grh_sgid_attr(ah_attr, &rec->mgid, be32_to_cpu(rec->flow_label), rec->hop_limit, rec->traffic_class, sgid_attr); return 0; } EXPORT_SYMBOL(ib_init_ah_from_mcmember); static void mcast_groups_event(struct mcast_port *port, enum mcast_group_state state) { struct mcast_group *group; struct rb_node *node; unsigned long flags; spin_lock_irqsave(&port->lock, flags); for (node = rb_first(&port->table); node; node = rb_next(node)) { group = rb_entry(node, struct mcast_group, node); spin_lock(&group->lock); if (group->state == MCAST_IDLE) { atomic_inc(&group->refcount); queue_work(mcast_wq, &group->work); } if (group->state != MCAST_GROUP_ERROR) group->state = state; spin_unlock(&group->lock); } spin_unlock_irqrestore(&port->lock, flags); } static void mcast_event_handler(struct ib_event_handler *handler, struct ib_event *event) { struct mcast_device *dev; int index; dev = container_of(handler, struct mcast_device, event_handler); if (!rdma_cap_ib_mcast(dev->device, event->element.port_num)) return; index = event->element.port_num - dev->start_port; switch (event->event) { case IB_EVENT_PORT_ERR: case IB_EVENT_LID_CHANGE: case IB_EVENT_CLIENT_REREGISTER: mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR); break; case IB_EVENT_PKEY_CHANGE: mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT); break; default: break; } } static int mcast_add_one(struct ib_device *device) { struct mcast_device *dev; struct mcast_port *port; int i; int count = 0; dev = kmalloc(struct_size(dev, port, device->phys_port_cnt), GFP_KERNEL); if (!dev) return -ENOMEM; dev->start_port = rdma_start_port(device); dev->end_port = rdma_end_port(device); for (i = 0; i <= dev->end_port - dev->start_port; i++) { if (!rdma_cap_ib_mcast(device, dev->start_port + i)) continue; port = &dev->port[i]; port->dev = dev; port->port_num = dev->start_port + i; spin_lock_init(&port->lock); port->table = RB_ROOT; init_completion(&port->comp); refcount_set(&port->refcount, 1); ++count; } if (!count) { kfree(dev); return -EOPNOTSUPP; } dev->device = device; ib_set_client_data(device, &mcast_client, dev); INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler); ib_register_event_handler(&dev->event_handler); return 0; } static void mcast_remove_one(struct ib_device *device, void *client_data) { struct mcast_device *dev = client_data; struct mcast_port *port; int i; ib_unregister_event_handler(&dev->event_handler); flush_workqueue(mcast_wq); for (i = 0; i <= dev->end_port - dev->start_port; i++) { if (rdma_cap_ib_mcast(device, dev->start_port + i)) { port = &dev->port[i]; deref_port(port); wait_for_completion(&port->comp); } } kfree(dev); } int mcast_init(void) { int ret; mcast_wq = 
alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}
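/*
 * Illustrative sketch (not part of ib_multicast above): adjust_membership()
 * and get_leave_state() treat the IB join state as a small bitmask with one
 * bit per membership type and keep a per-type member count, leaving a join
 * state with the SA only once its count drops to zero. The standalone code
 * below mirrors that arithmetic under assumed names; NUM_TYPES and struct
 * group_counts are stand-ins for NUM_JOIN_MEMBERSHIP_TYPES and the relevant
 * fields of struct mcast_group, not the kernel definitions.
 */
#include <assert.h>
#include <stdint.h>

#define NUM_TYPES 4			/* stand-in for NUM_JOIN_MEMBERSHIP_TYPES */

struct group_counts {
	int members[NUM_TYPES];		/* members per join-state bit */
	uint8_t sa_join_state;		/* join states currently held with the SA */
};

/* Mirrors adjust_membership(): bump the counter of every set bit by +/-1. */
static void adjust(struct group_counts *g, uint8_t join_state, int inc)
{
	for (int i = 0; i < NUM_TYPES; i++, join_state >>= 1)
		if (join_state & 0x1)
			g->members[i] += inc;
}

/* Mirrors get_leave_state(): join states still held with the SA but with no
 * remaining local members, i.e. the states a leave must be sent for. */
static uint8_t leave_state(const struct group_counts *g)
{
	uint8_t state = 0;

	for (int i = 0; i < NUM_TYPES; i++)
		if (!g->members[i])
			state |= 1 << i;
	return state & g->sa_join_state;
}

int main(void)
{
	struct group_counts g = { .sa_join_state = 0x3 };	/* full + non member */

	adjust(&g, 0x1, 1);			/* a full member joins */
	adjust(&g, 0x2, 1);			/* a non member joins */
	assert(leave_state(&g) == 0);

	adjust(&g, 0x2, -1);			/* the last non member leaves */
	assert(leave_state(&g) == 0x2);		/* that state must now be left */
	return 0;
}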
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm4_output.c - Common IPsec encapsulation code for IPv4.
 * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>

static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
	struct xfrm_state *x = skb_dst(skb)->xfrm;

	if (!x) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif

	return xfrm_output(sk, skb);
}

int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, skb->dev, skb_dst(skb)->dev,
			    __xfrm4_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
{
	struct iphdr *hdr;

	hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
		       inet_sk(skb->sk)->inet_dport, mtu);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/wait.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @word: word holding free bits
	 */
	unsigned long word;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;

	/**
	 * @swap_lock: serializes simultaneous updates of ->word and ->cleared
	 */
	raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong.
This * trades off higher memory usage for better scalability. */ struct sbitmap { /** * @depth: Number of bits used in the whole bitmap. */ unsigned int depth; /** * @shift: log2(number of bits used per word) */ unsigned int shift; /** * @map_nr: Number of words (cachelines) being used for the bitmap. */ unsigned int map_nr; /** * @round_robin: Allocate bits in strict round-robin order. */ bool round_robin; /** * @map: Allocated bitmap. */ struct sbitmap_word *map; /* * @alloc_hint: Cache of last successfully allocated or freed bit. * * This is per-cpu, which allows multiple users to stick to different * cachelines until the map is exhausted. */ unsigned int __percpu *alloc_hint; }; #define SBQ_WAIT_QUEUES 8 #define SBQ_WAKE_BATCH 8 /** * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue. */ struct sbq_wait_state { /** * @wait: Wait queue. */ wait_queue_head_t wait; } ____cacheline_aligned_in_smp; /** * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free * bits. * * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to * avoid contention on the wait queue spinlock. This ensures that we don't hit a * scalability wall when we run out of free bits and have to start putting tasks * to sleep. */ struct sbitmap_queue { /** * @sb: Scalable bitmap. */ struct sbitmap sb; /** * @wake_batch: Number of bits which must be freed before we wake up any * waiters. */ unsigned int wake_batch; /** * @wake_index: Next wait queue in @ws to wake up. */ atomic_t wake_index; /** * @ws: Wait queues. */ struct sbq_wait_state *ws; /* * @ws_active: count of currently active ws waitqueues */ atomic_t ws_active; /** * @min_shallow_depth: The minimum shallow depth which may be passed to * sbitmap_queue_get_shallow() */ unsigned int min_shallow_depth; /** * @completion_cnt: Number of bits cleared passed to the * wakeup function. */ atomic_t completion_cnt; /** * @wakeup_cnt: Number of thread wake ups issued. */ atomic_t wakeup_cnt; }; /** * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node. * @sb: Bitmap to initialize. * @depth: Number of bits to allocate. * @shift: Use 2^@shift bits per word in the bitmap; if a negative number if * given, a good default is chosen. * @flags: Allocation flags. * @node: Memory node to allocate on. * @round_robin: If true, be stricter about allocation order; always allocate * starting from the last allocated bit. This is less efficient * than the default behavior (false). * @alloc_hint: If true, apply percpu hint for where to start searching for * a free bit. * * Return: Zero on success or negative errno on failure. */ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, gfp_t flags, int node, bool round_robin, bool alloc_hint); /* sbitmap internal helper */ static inline unsigned int __map_depth(const struct sbitmap *sb, int index) { if (index == sb->map_nr - 1) return sb->depth - (index << sb->shift); return 1U << sb->shift; } /** * sbitmap_free() - Free memory used by a &struct sbitmap. * @sb: Bitmap to free. */ static inline void sbitmap_free(struct sbitmap *sb) { free_percpu(sb->alloc_hint); kvfree(sb->map); sb->map = NULL; } /** * sbitmap_resize() - Resize a &struct sbitmap. * @sb: Bitmap to resize. * @depth: New number of bits to resize to. * * Doesn't reallocate anything. It's up to the caller to ensure that the new * depth doesn't exceed the depth that the sb was initialized with. 
*/ void sbitmap_resize(struct sbitmap *sb, unsigned int depth); /** * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap. * @sb: Bitmap to allocate from. * * This operation provides acquire barrier semantics if it succeeds. * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ int sbitmap_get(struct sbitmap *sb); /** * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap, * limiting the depth used from each word. * @sb: Bitmap to allocate from. * @shallow_depth: The maximum number of bits to allocate from a single word. * * This rather specific operation allows for having multiple users with * different allocation limits. E.g., there can be a high-priority class that * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow() * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority * class can only allocate half of the total bits in the bitmap, preventing it * from starving out the high-priority class. * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth); /** * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap. * @sb: Bitmap to check. * * Return: true if any bit in the bitmap is set, false otherwise. */ bool sbitmap_any_bit_set(const struct sbitmap *sb); #define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift) #define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U)) typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); /** * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap. * @start: Where to start the iteration. * @sb: Bitmap to iterate over. * @fn: Callback. Should return true to continue or false to break early. * @data: Pointer to pass to callback. * * This is inline even though it's non-trivial so that the function calls to the * callback will hopefully get optimized away. */ static inline void __sbitmap_for_each_set(struct sbitmap *sb, unsigned int start, sb_for_each_fn fn, void *data) { unsigned int index; unsigned int nr; unsigned int scanned = 0; if (start >= sb->depth) start = 0; index = SB_NR_TO_INDEX(sb, start); nr = SB_NR_TO_BIT(sb, start); while (scanned < sb->depth) { unsigned long word; unsigned int depth = min_t(unsigned int, __map_depth(sb, index) - nr, sb->depth - scanned); scanned += depth; word = sb->map[index].word & ~sb->map[index].cleared; if (!word) goto next; /* * On the first iteration of the outer loop, we need to add the * bit offset back to the size of the word for find_next_bit(). * On all other iterations, nr is zero, so this is a noop. */ depth += nr; while (1) { nr = find_next_bit(&word, depth, nr); if (nr >= depth) break; if (!fn(sb, (index << sb->shift) + nr, data)) return; nr++; } next: nr = 0; if (++index >= sb->map_nr) index = 0; } } /** * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap. * @sb: Bitmap to iterate over. * @fn: Callback. Should return true to continue or false to break early. * @data: Pointer to pass to callback. 
*/ static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn, void *data) { __sbitmap_for_each_set(sb, 0, fn, data); } static inline unsigned long *__sbitmap_word(struct sbitmap *sb, unsigned int bitnr) { return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word; } /* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */ static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr) { set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr) { clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } /* * This one is special, since it doesn't actually clear the bit, rather it * sets the corresponding bit in the ->cleared mask instead. Paired with * the caller doing sbitmap_deferred_clear() if a given index is full, which * will clear the previously freed entries in the corresponding ->word. */ static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr) { unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared; set_bit(SB_NR_TO_BIT(sb, bitnr), addr); } /* * Pair of sbitmap_get, and this one applies both cleared bit and * allocation hint. */ static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr) { sbitmap_deferred_clear_bit(sb, bitnr); if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth)) *raw_cpu_ptr(sb->alloc_hint) = bitnr; } static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) { return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } static inline int sbitmap_calculate_shift(unsigned int depth) { int shift = ilog2(BITS_PER_LONG); /* * If the bitmap is small, shrink the number of bits per word so * we spread over a few cachelines, at least. If less than 4 * bits, just forget about it, it's not going to work optimally * anyway. */ if (depth >= 4) { while ((4U << shift) > depth) shift--; } return shift; } /** * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file. * @sb: Bitmap to show. * @m: struct seq_file to write to. * * This is intended for debugging. The format may change at any time. */ void sbitmap_show(struct sbitmap *sb, struct seq_file *m); /** * sbitmap_weight() - Return how many set and not cleared bits in a &struct * sbitmap. * @sb: Bitmap to check. * * Return: How many set and not cleared bits set */ unsigned int sbitmap_weight(const struct sbitmap *sb); /** * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct * seq_file. * @sb: Bitmap to show. * @m: struct seq_file to write to. * * This is intended for debugging. The output isn't guaranteed to be internally * consistent. */ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m); /** * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific * memory node. * @sbq: Bitmap queue to initialize. * @depth: See sbitmap_init_node(). * @shift: See sbitmap_init_node(). * @round_robin: See sbitmap_get(). * @flags: Allocation flags. * @node: Memory node to allocate on. * * Return: Zero on success or negative errno on failure. */ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, int shift, bool round_robin, gfp_t flags, int node); /** * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue. * * @sbq: Bitmap queue to free. 
*/ static inline void sbitmap_queue_free(struct sbitmap_queue *sbq) { kfree(sbq->ws); sbitmap_free(&sbq->sb); } /** * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch * @sbq: Bitmap queue to recalculate wake batch. * @users: Number of shares. * * Like sbitmap_queue_update_wake_batch(), this will calculate wake batch * by depth. This interface is for HCTX shared tags or queue shared tags. */ void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq, unsigned int users); /** * sbitmap_queue_resize() - Resize a &struct sbitmap_queue. * @sbq: Bitmap queue to resize. * @depth: New number of bits to resize to. * * Like sbitmap_resize(), this doesn't reallocate anything. It has to do * some extra work on the &struct sbitmap_queue, so it's not safe to just * resize the underlying &struct sbitmap. */ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth); /** * __sbitmap_queue_get() - Try to allocate a free bit from a &struct * sbitmap_queue with preemption already disabled. * @sbq: Bitmap queue to allocate from. * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ int __sbitmap_queue_get(struct sbitmap_queue *sbq); /** * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits * @sbq: Bitmap queue to allocate from. * @nr_tags: number of tags requested * @offset: offset to add to returned bits * * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is * a bit in the mask returned, and the caller must add @offset to the value to * get the absolute tag value. */ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, unsigned int *offset); /** * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct * sbitmap_queue, limiting the depth used from each word, with preemption * already disabled. * @sbq: Bitmap queue to allocate from. * @shallow_depth: The maximum number of bits to allocate from a single word. * See sbitmap_get_shallow(). * * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after * initializing @sbq. * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, unsigned int shallow_depth); /** * sbitmap_queue_get() - Try to allocate a free bit from a &struct * sbitmap_queue. * @sbq: Bitmap queue to allocate from. * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to * sbitmap_queue_clear()). * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, unsigned int *cpu) { int nr; *cpu = get_cpu(); nr = __sbitmap_queue_get(sbq); put_cpu(); return nr; } /** * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the * minimum shallow depth that will be used. * @sbq: Bitmap queue in question. * @min_shallow_depth: The minimum shallow depth that will be passed to * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow(). * * sbitmap_queue_clear() batches wakeups as an optimization. The batch size * depends on the depth of the bitmap. Since the shallow allocation functions * effectively operate with a different depth, the shallow depth must be taken * into account when calculating the batch size. This function must be called * with the minimum shallow depth that will be used. Failure to do so can result * in missed wakeups. 
*/ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, unsigned int min_shallow_depth); /** * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a * &struct sbitmap_queue. * @sbq: Bitmap to free from. * @nr: Bit number to free. * @cpu: CPU the bit was allocated on. */ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, unsigned int cpu); /** * sbitmap_queue_clear_batch() - Free a batch of allocated bits * &struct sbitmap_queue. * @sbq: Bitmap to free from. * @offset: offset for each tag in array * @tags: array of tags * @nr_tags: number of tags in array */ void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset, int *tags, int nr_tags); static inline int sbq_index_inc(int index) { return (index + 1) & (SBQ_WAIT_QUEUES - 1); } static inline void sbq_index_atomic_inc(atomic_t *index) { int old = atomic_read(index); int new = sbq_index_inc(old); atomic_cmpxchg(index, old, new); } /** * sbq_wait_ptr() - Get the next wait queue to use for a &struct * sbitmap_queue. * @sbq: Bitmap queue to wait on. * @wait_index: A counter per "user" of @sbq. */ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq, atomic_t *wait_index) { struct sbq_wait_state *ws; ws = &sbq->ws[atomic_read(wait_index)]; sbq_index_atomic_inc(wait_index); return ws; } /** * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct * sbitmap_queue. * @sbq: Bitmap queue to wake up. */ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq); /** * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue * on a &struct sbitmap_queue. * @sbq: Bitmap queue to wake up. * @nr: Number of bits cleared. */ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr); /** * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct * seq_file. * @sbq: Bitmap queue to show. * @m: struct seq_file to write to. * * This is intended for debugging. The format may change at any time. */ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m); struct sbq_wait { struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */ struct wait_queue_entry wait; }; #define DEFINE_SBQ_WAIT(name) \ struct sbq_wait name = { \ .sbq = NULL, \ .wait = { \ .private = current, \ .func = autoremove_wake_function, \ .entry = LIST_HEAD_INIT((name).wait.entry), \ } \ } /* * Wrapper around prepare_to_wait_exclusive(), which maintains some extra * internal state. */ void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, struct sbq_wait *sbq_wait, int state); /* * Must be paired with sbitmap_prepare_to_wait(). */ void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, struct sbq_wait *sbq_wait); /* * Wrapper around add_wait_queue(), which maintains some extra internal state */ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, struct sbq_wait *sbq_wait); /* * Must be paired with sbitmap_add_wait_queue() */ void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait); #endif /* __LINUX_SCALE_BITMAP_H */
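A minimal usage sketch of the sbitmap_queue API declared above, written as a tiny tag pool (illustrative only; my_tags, my_tags_init(), my_tag_get(), my_tag_put() and my_tags_exit() are hypothetical names, and error handling is reduced to the essentials). Callers that must block on an exhausted pool would additionally pair the failed get with DEFINE_SBQ_WAIT(), sbitmap_prepare_to_wait() and sbitmap_finish_wait(), roughly as the block layer's tag allocation does.

/* Illustrative sketch only: a tiny tag pool built on struct sbitmap_queue. */
#include <linux/sbitmap.h>

static struct sbitmap_queue my_tags;

static int my_tags_init(unsigned int nr_tags, int node)
{
	/* shift = -1 lets sbitmap pick a sensible bits-per-word value. */
	return sbitmap_queue_init_node(&my_tags, nr_tags, -1, false,
				       GFP_KERNEL, node);
}

/* Returns an allocated tag, or -1 if the pool is currently exhausted. */
static int my_tag_get(unsigned int *cpu)
{
	return sbitmap_queue_get(&my_tags, cpu);
}

/* Returns the tag and wakes a batch of waiters once enough bits are freed. */
static void my_tag_put(int tag, unsigned int cpu)
{
	sbitmap_queue_clear(&my_tags, tag, cpu);
}

static void my_tags_exit(void)
{
	sbitmap_queue_free(&my_tags);
}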
// SPDX-License-Identifier: GPL-2.0+
//
// em28xx-cards.c - driver for Empia EM2800/EM2820/2840 USB
//		    video capture devices
//
// Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
//		      Markus Rechberger <mrechberger@gmail.com>
//		      Mauro Carvalho Chehab <mchehab@kernel.org>
//		      Sascha Sommer <saschasommer@freenet.de>
// Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>

#include "em28xx.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/usb.h>
#include <media/tuner.h>
#include <media/drv-intf/msp3400.h>
#include <media/i2c/saa7115.h>
#include <dt-bindings/media/tvp5150.h>
#include <media/i2c/tvaudio.h>
#include <media/tveeprom.h>
#include <media/v4l2-common.h>
#include <sound/ac97_codec.h>

#define DRIVER_NAME "em28xx"

static int tuner = -1;
module_param(tuner, int, 0444);
MODULE_PARM_DESC(tuner, "tuner type"); static unsigned int disable_ir; module_param(disable_ir, int, 0444); MODULE_PARM_DESC(disable_ir, "disable infrared remote support"); static unsigned int disable_usb_speed_check; module_param(disable_usb_speed_check, int, 0444); MODULE_PARM_DESC(disable_usb_speed_check, "override min bandwidth requirement of 480M bps"); static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U }; module_param_array(card, int, NULL, 0444); MODULE_PARM_DESC(card, "card type"); static int usb_xfer_mode = -1; module_param(usb_xfer_mode, int, 0444); MODULE_PARM_DESC(usb_xfer_mode, "USB transfer mode for frame data (-1 = auto, 0 = prefer isoc, 1 = prefer bulk)"); /* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */ static DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS); struct em28xx_hash_table { unsigned long hash; unsigned int model; unsigned int tuner; }; static void em28xx_pre_card_setup(struct em28xx *dev); /* * Reset sequences for analog/digital modes */ /* Reset for the most [analog] boards */ static const struct em28xx_reg_seq default_analog[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, { -1, -1, -1, -1}, }; /* Reset for the most [digital] boards */ static const struct em28xx_reg_seq default_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, { -1, -1, -1, -1}, }; /* Board :Zolid Hybrid Tv Stick */ static struct em28xx_reg_seq zolid_tuner[] = { {EM2820_R08_GPIO_CTRL, 0xfd, 0xff, 100}, {EM2820_R08_GPIO_CTRL, 0xfe, 0xff, 100}, { -1, -1, -1, -1}, }; static struct em28xx_reg_seq zolid_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6a, 0xff, 100}, {EM2820_R08_GPIO_CTRL, 0x7a, 0xff, 100}, {EM2880_R04_GPO, 0x04, 0xff, 100}, {EM2880_R04_GPO, 0x0c, 0xff, 100}, { -1, -1, -1, -1}, }; /* Board Hauppauge WinTV HVR 900 analog */ static const struct em28xx_reg_seq hauppauge_wintv_hvr_900_analog[] = { {EM2820_R08_GPIO_CTRL, 0x2d, ~EM_GPIO_4, 10}, { 0x05, 0xff, 0x10, 10}, { -1, -1, -1, -1}, }; /* Board Hauppauge WinTV HVR 900 digital */ static const struct em28xx_reg_seq hauppauge_wintv_hvr_900_digital[] = { {EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x04, 0x0f, 10}, {EM2880_R04_GPO, 0x0c, 0x0f, 10}, { -1, -1, -1, -1}, }; /* Board Hauppauge WinTV HVR 900 (R2) digital */ static const struct em28xx_reg_seq hauppauge_wintv_hvr_900R2_digital[] = { {EM2820_R08_GPIO_CTRL, 0x2e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x0c, 0x0f, 10}, { -1, -1, -1, -1}, }; /* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */ static const struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = { {EM2820_R08_GPIO_CTRL, 0x69, ~EM_GPIO_4, 10}, { -1, -1, -1, -1}, }; /* Board - EM2882 Kworld 315U digital */ static const struct em28xx_reg_seq em2882_kworld_315u_digital[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xfe, 0xff, 10}, {EM2880_R04_GPO, 0x04, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x7e, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = { {EM2880_R04_GPO, 0x08, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq kworld_330u_analog[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x00, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq kworld_330u_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, { -1, -1, -1, -1}, }; /* * Evga inDtube * 
GPIO0 - Enable digital power (s5h1409) - low to enable * GPIO1 - Enable analog power (tvp5150/emp202) - low to enable * GPIO4 - xc3028 reset * GOP3 - s5h1409 reset */ static const struct em28xx_reg_seq evga_indtube_analog[] = { {EM2820_R08_GPIO_CTRL, 0x79, 0xff, 60}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq evga_indtube_digital[] = { {EM2820_R08_GPIO_CTRL, 0x7a, 0xff, 1}, {EM2880_R04_GPO, 0x04, 0xff, 10}, {EM2880_R04_GPO, 0x0c, 0xff, 1}, { -1, -1, -1, -1}, }; /* * KWorld PlusTV 340U, UB435-Q and UB435-Q V2 (ATSC) GPIOs map: * EM_GPIO_0 - currently unknown * EM_GPIO_1 - LED disable/enable (1 = off, 0 = on) * EM_GPIO_2 - currently unknown * EM_GPIO_3 - currently unknown * EM_GPIO_4 - TDA18271HD/C1 tuner (1 = active, 0 = in reset) * EM_GPIO_5 - LGDT3304 ATSC/QAM demod (1 = active, 0 = in reset) * EM_GPIO_6 - currently unknown * EM_GPIO_7 - currently unknown */ static const struct em28xx_reg_seq kworld_a340_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq kworld_ub435q_v3_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100}, { -1, -1, -1, -1}, }; /* Pinnacle Hybrid Pro eb1a:2881 */ static const struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = { {EM2820_R08_GPIO_CTRL, 0xfd, ~EM_GPIO_4, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x04, 0xff, 100},/* zl10353 reset */ {EM2880_R04_GPO, 0x0c, 0xff, 1}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_analog[] = { {EM2820_R08_GPIO_CTRL, 0x6d, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x00, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, { -1, -1, -1, -1}, }; /* * PCTV HD Mini (80e) GPIOs * 0-5: not used * 6: demod reset, active low * 7: LED on, active high */ static const struct em28xx_reg_seq em2874_pctv_80e_digital[] = { {EM28XX_R06_I2C_CLK, 0x45, 0xff, 10}, /*400 KHz*/ {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 100},/*Demod reset*/ {EM2874_R80_GPIO_P0_CTRL, 0x40, 0xff, 10}, { -1, -1, -1, -1}, }; /* * eb1a:2868 Reddo DVB-C USB TV Box * GPIO4 - CU1216L NIM * Other GPIOs seems to be don't care. 
*/ static const struct em28xx_reg_seq reddo_dvb_c_usb_box[] = { {EM2820_R08_GPIO_CTRL, 0xfe, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xde, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xfe, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x7f, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x6f, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, { -1, -1, -1, -1}, }; /* Callback for the most boards */ static const struct em28xx_reg_seq default_tuner_gpio[] = { {EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10}, {EM2820_R08_GPIO_CTRL, 0, EM_GPIO_4, 10}, {EM2820_R08_GPIO_CTRL, EM_GPIO_4, EM_GPIO_4, 10}, { -1, -1, -1, -1}, }; /* Mute/unmute */ static const struct em28xx_reg_seq compro_unmute_tv_gpio[] = { {EM2820_R08_GPIO_CTRL, 5, 7, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq compro_unmute_svid_gpio[] = { {EM2820_R08_GPIO_CTRL, 4, 7, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq compro_mute_gpio[] = { {EM2820_R08_GPIO_CTRL, 6, 7, 10}, { -1, -1, -1, -1}, }; /* Terratec AV350 */ static const struct em28xx_reg_seq terratec_av350_mute_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0x7f, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_av350_unmute_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq silvercrest_reg_seq[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, {EM2820_R08_GPIO_CTRL, 0x01, 0xf7, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq vc211a_enable[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0x07, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0x0f, 10}, {EM2820_R08_GPIO_CTRL, 0xff, 0x0b, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq dikom_dk300_digital[] = { {EM2820_R08_GPIO_CTRL, 0x6e, ~EM_GPIO_4, 10}, {EM2880_R04_GPO, 0x08, 0xff, 10}, { -1, -1, -1, -1}, }; /* Reset for the most [digital] boards */ static const struct em28xx_reg_seq leadership_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0x70, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq leadership_reset[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xb0, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xff, 10}, { -1, -1, -1, -1}, }; /* * 2013:024f PCTV nanoStick T2 290e * GPIO_6 - demod reset * GPIO_7 - LED */ static const struct em28xx_reg_seq pctv_290e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 80}, {EM2874_R80_GPIO_P0_CTRL, 0x40, 0xff, 80}, /* GPIO_6 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0xc0, 0xff, 80}, /* GPIO_7 = 1 */ { -1, -1, -1, -1}, }; #if 0 static const struct em28xx_reg_seq terratec_h5_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_h5_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10}, { -1, -1, -1, -1}, }; #endif /* * 2013:024f PCTV DVB-S2 Stick 460e * GPIO_0 - POWER_ON * GPIO_1 - BOOST * GPIO_2 - VUV_LNB (red LED) * GPIO_3 - EXT_12V * GPIO_4 - INT_DEM (DEMOD GPIO_0) * GPIO_5 - INT_LNB * GPIO_6 - RESET_DEM * GPIO_7 - LED (green LED) */ static const struct em28xx_reg_seq pctv_460e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x01, 0xff, 50}, { 0x0d, 0xff, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0x41, 0xff, 50}, /* GPIO_6=1 */ { 0x0d, 0x42, 0xff, 50}, {EM2874_R80_GPIO_P0_CTRL, 0x61, 0xff, 50}, /* GPIO_5=1 */ { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq c3tech_digital_duo_digital[] = { 
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 10}, /* xc5000 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xf9, 0xff, 35}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 20}, { -1, -1, -1, -1}, }; /* * 2013:0258 PCTV DVB-S2 Stick (461e) * GPIO 0 = POWER_ON * GPIO 1 = BOOST * GPIO 2 = VUV_LNB (red LED) * GPIO 3 = #EXT_12V * GPIO 4 = INT_DEM * GPIO 5 = INT_LNB * GPIO 6 = #RESET_DEM * GPIO 7 = P07_LED (green LED) */ static const struct em28xx_reg_seq pctv_461e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x7f, 0xff, 0}, {0x0d, 0xff, 0xff, 0}, {EM2874_R80_GPIO_P0_CTRL, 0x3f, 0xff, 100}, /* reset demod */ {EM2874_R80_GPIO_P0_CTRL, 0x7f, 0xff, 200}, /* reset demod */ {0x0d, 0x42, 0xff, 0}, {EM2874_R80_GPIO_P0_CTRL, 0xeb, 0xff, 0}, {EM2874_R5F_TS_ENABLE, 0x84, 0x84, 0}, /* parallel? | null discard */ { -1, -1, -1, -1}, }; #if 0 static const struct em28xx_reg_seq hauppauge_930c_gpio[] = { {EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, /* xc5000 reset */ {EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0x4f, 0xff, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq hauppauge_930c_digital[] = { {EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 10}, { -1, -1, -1, -1}, }; #endif /* * 1b80:e425 MaxMedia UB425-TC * 1b80:e1cc Delock 61959 * GPIO_6 - demod reset, 0=active * GPIO_7 - LED, 0=active */ static const struct em28xx_reg_seq maxmedia_ub425_tc[] = { {EM2874_R80_GPIO_P0_CTRL, 0x83, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xc3, 0xff, 100}, /* GPIO_6 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0x43, 0xff, 000}, /* GPIO_7 = 0 */ { -1, -1, -1, -1}, }; /* * 2304:0242 PCTV QuatroStick (510e) * GPIO_2: decoder reset, 0=active * GPIO_4: decoder suspend, 0=active * GPIO_6: demod reset, 0=active * GPIO_7: LED, 1=active */ static const struct em28xx_reg_seq pctv_510e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ { -1, -1, -1, -1}, }; /* * 2013:0251 PCTV QuatroStick nano (520e) * GPIO_2: decoder reset, 0=active * GPIO_4: decoder suspend, 0=active * GPIO_6: demod reset, 0=active * GPIO_7: LED, 1=active */ static const struct em28xx_reg_seq pctv_520e[] = { {EM2874_R80_GPIO_P0_CTRL, 0x10, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0x14, 0xff, 100}, /* GPIO_2 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0x54, 0xff, 050}, /* GPIO_6 = 1 */ {EM2874_R80_GPIO_P0_CTRL, 0xd4, 0xff, 000}, /* GPIO_7 = 1 */ { -1, -1, -1, -1}, }; /* * 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam * reg 0x80/0x84: * GPIO_0: capturing LED, 0=on, 1=off * GPIO_2: AV mute button, 0=pressed, 1=unpressed * GPIO 3: illumination button, 0=pressed, 1=unpressed * GPIO_6: illumination/flash LED, 0=on, 1=off * reg 0x81/0x85: * GPIO_7: snapshot button, 0=pressed, 1=unpressed */ static const struct em28xx_reg_seq speedlink_vad_laplace_reg_seq[] = { {EM2820_R08_GPIO_CTRL, 0xf7, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xb2, 10}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq pctv_292e[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 950}, {EM2874_R80_GPIO_P0_CTRL, 0xbd, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 410}, {EM2874_R80_GPIO_P0_CTRL, 0x7d, 0xff, 300}, {EM2874_R80_GPIO_P0_CTRL, 0x7c, 0xff, 
60}, {0x0d, 0x42, 0xff, 50}, {EM2874_R5F_TS_ENABLE, 0x85, 0xff, 0}, {-1, -1, -1, -1}, }; static const struct em28xx_reg_seq terratec_t2_stick_hd[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 600}, {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xbc, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 300}, {EM2874_R80_GPIO_P0_CTRL, 0xf8, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 300}, {0x0d, 0x42, 0xff, 1000}, {EM2874_R5F_TS_ENABLE, 0x85, 0xff, 0}, {-1, -1, -1, -1}, }; static const struct em28xx_reg_seq plex_px_bcud[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 0}, {EM2874_R50_IR_CONFIG, 0x01, 0xff, 0}, {EM28XX_R06_I2C_CLK, 0x40, 0xff, 0}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 100}, {EM28XX_R12_VINENABLE, 0x20, 0x20, 0}, {0x0d, 0x42, 0xff, 1000}, {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 10}, {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 10}, {0x73, 0xfd, 0xff, 100}, {-1, -1, -1, -1}, }; /* * 2040:0265 Hauppauge WinTV-dualHD DVB Isoc * 2040:8265 Hauppauge WinTV-dualHD DVB Bulk * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM Isoc * 2040:826d Hauppauge WinTV-dualHD ATSC/QAM Bulk * reg 0x80/0x84: * GPIO_0: Yellow LED tuner 1, 0=on, 1=off * GPIO_1: Green LED tuner 1, 0=on, 1=off * GPIO_2: Yellow LED tuner 2, 0=on, 1=off * GPIO_3: Green LED tuner 2, 0=on, 1=off * GPIO_5: Reset #2, 0=active * GPIO_6: Reset #1, 0=active */ static const struct em28xx_reg_seq hauppauge_dualhd_dvb[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 200}, {0x50, 0x04, 0xff, 300}, {EM2874_R80_GPIO_P0_CTRL, 0xbf, 0xff, 100}, /* demod 1 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xdf, 0xff, 100}, /* demod 2 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100}, {EM2874_R5F_TS_ENABLE, 0x00, 0xff, 50}, /* disable TS filters */ {EM2874_R5D_TS1_PKT_SIZE, 0x05, 0xff, 50}, {EM2874_R5E_TS2_PKT_SIZE, 0x05, 0xff, 50}, {-1, -1, -1, -1}, }; /* Hauppauge USB QuadHD */ static struct em28xx_reg_seq hauppauge_usb_quadhd_atsc_reg_seq[] = { {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0}, {0x0d, 0xff, 0xff, 200}, {0x50, 0x04, 0xff, 300}, {EM2874_R80_GPIO_P0_CTRL, 0xb0, 0xf0, 100}, /* demod 1 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xf0, 100}, {EM2874_R80_GPIO_P0_CTRL, 0xd0, 0xf0, 100}, /* demod 2 reset */ {EM2874_R80_GPIO_P0_CTRL, 0xf0, 0xf0, 100}, {EM2874_R5F_TS_ENABLE, 0x44, 0xff, 50}, {EM2874_R5D_TS1_PKT_SIZE, 0x05, 0xff, 50}, {EM2874_R5E_TS2_PKT_SIZE, 0x05, 0xff, 50}, {-1, -1, -1, -1}, }; /* * MyGica USB TV Box * GPIO_1,0: 00=Composite audio * 01=Tuner audio * 10=Mute audio * 11=FM radio? (if equipped) * GPIO_2-6: Unused * GPIO_7: ?? 
*/ static const struct em28xx_reg_seq mygica_utv3_composite_audio_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xfc, 0xff, 0}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq mygica_utv3_tuner_audio_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xfd, 0xff, 0}, { -1, -1, -1, -1}, }; static const struct em28xx_reg_seq mygica_utv3_suspend_gpio[] = { {EM2820_R08_GPIO_CTRL, 0xfe, 0xff, 0}, { -1, -1, -1, -1}, }; /* * Button definitions */ static const struct em28xx_button std_snapshot_button[] = { { .role = EM28XX_BUTTON_SNAPSHOT, .reg_r = EM28XX_R0C_USBSUSP, .reg_clearing = EM28XX_R0C_USBSUSP, .mask = EM28XX_R0C_USBSUSP_SNAPSHOT, .inverted = 0, }, {-1, 0, 0, 0, 0}, }; static const struct em28xx_button speedlink_vad_laplace_buttons[] = { { .role = EM28XX_BUTTON_SNAPSHOT, .reg_r = EM2874_R85_GPIO_P1_STATE, .mask = 0x80, .inverted = 1, }, { .role = EM28XX_BUTTON_ILLUMINATION, .reg_r = EM2874_R84_GPIO_P0_STATE, .mask = 0x08, .inverted = 1, }, {-1, 0, 0, 0, 0}, }; /* * LED definitions */ static struct em28xx_led speedlink_vad_laplace_leds[] = { { .role = EM28XX_LED_ANALOG_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = 0x01, .inverted = 1, }, { .role = EM28XX_LED_ILLUMINATION, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = 0x40, .inverted = 1, }, {-1, 0, 0, 0}, }; static struct em28xx_led kworld_ub435q_v3_leds[] = { { .role = EM28XX_LED_DIGITAL_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = 0x80, .inverted = 1, }, {-1, 0, 0, 0}, }; static struct em28xx_led pctv_80e_leds[] = { { .role = EM28XX_LED_DIGITAL_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = 0x80, .inverted = 0, }, {-1, 0, 0, 0}, }; static struct em28xx_led terratec_grabby_leds[] = { { .role = EM28XX_LED_ANALOG_CAPTURING, .gpio_reg = EM2820_R08_GPIO_CTRL, .gpio_mask = EM_GPIO_3, .inverted = 1, }, {-1, 0, 0, 0}, }; static struct em28xx_led hauppauge_dualhd_leds[] = { { .role = EM28XX_LED_DIGITAL_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = EM_GPIO_1, .inverted = 1, }, { .role = EM28XX_LED_DIGITAL_CAPTURING_TS2, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = EM_GPIO_3, .inverted = 1, }, {-1, 0, 0, 0}, }; static struct em28xx_led hauppauge_usb_quadhd_leds[] = { { .role = EM28XX_LED_DIGITAL_CAPTURING, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = EM_GPIO_2, .inverted = 1, }, { .role = EM28XX_LED_DIGITAL_CAPTURING_TS2, .gpio_reg = EM2874_R80_GPIO_P0_CTRL, .gpio_mask = EM_GPIO_0, .inverted = 1, }, {-1, 0, 0, 0}, }; /* * Board definitions */ const struct em28xx_board em28xx_boards[] = { [EM2750_BOARD_UNKNOWN] = { .name = "EM2710/EM2750/EM2751 webcam grabber", .xclk = EM28XX_XCLK_FREQUENCY_20MHZ, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, .gpio = silvercrest_reg_seq, } }, }, [EM2800_BOARD_UNKNOWN] = { .name = "Unknown EM2800 video grabber", .is_em2800 = 1, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_UNKNOWN] = { .name = "Unknown EM2750/28xx video grabber", .tuner_type = TUNER_ABSENT, .is_webcam = 1, /* To enable sensor probe */ }, [EM2882_BOARD_ZOLID_HYBRID_TV_STICK] = { .name = ":ZOLID HYBRID TV STICK", .tuner_type = TUNER_XC2028, .tuner_gpio = zolid_tuner, .decoder = EM28XX_TVP5150, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = 
zolid_digital, }, [EM2750_BOARD_DLCW_130] = { /* Beijing Huaqi Information Digital Technology Co., Ltd */ .name = "Huaqi DLCW-130", .valid = EM28XX_BOARD_NOT_VALIDATED, .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, } }, }, [EM2820_BOARD_KWORLD_PVRTV2800RF] = { .name = "Kworld PVR TV 2800 RF", .tuner_type = TUNER_TEMIC_PAL, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_GADMEI_TVR200] = { .name = "Gadmei TVR200", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_TERRATEC_CINERGY_250] = { .name = "Terratec Cinergy 250 USB", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .has_ir_i2c = 1, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PINNACLE_USB_2] = { .name = "Pinnacle PCTV USB 2", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .has_ir_i2c = 1, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_HAUPPAUGE_WINTV_USB_2] = { .name = "Hauppauge WinTV USB 2", .tuner_type = TUNER_PHILIPS_FM1236_MK3, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE, .decoder = EM28XX_TVP5150, .has_msp34xx = 1, .has_ir_i2c = 1, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = MSP_INPUT_DEFAULT, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, MSP_DSP_IN_SCART, MSP_DSP_IN_SCART), } }, }, [EM2820_BOARD_DLINK_USB_TV] = { .name = "D-Link DUB-T210 TV Tuner", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_HERCULES_SMART_TV_USB2] = { .name = "Hercules Smart TV USB 2.0", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, 
.amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PINNACLE_USB_2_FM1216ME] = { .name = "Pinnacle PCTV USB 2 (Philips FM1216ME)", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_GADMEI_UTV310] = { .name = "Gadmei UTV310", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_TNF_5335MF, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE] = { .name = "Leadtek Winfast USB II Deluxe", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .has_ir_i2c = 1, .tvaudio_addr = 0x58, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT2_ACTIVE | TDA9887_QSS, .decoder = EM28XX_SAA711X, .adecoder = EM28XX_TVAUDIO, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE4, .amux = EM28XX_AMUX_AUX, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE5, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, .radio = { .type = EM28XX_RADIO, .amux = EM28XX_AMUX_AUX, } }, [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = { .name = "Videology 20K14XUSB USB2.0", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, } }, }, [EM2820_BOARD_SILVERCREST_WEBCAM] = { .name = "Silvercrest Webcam 1.3mpix", .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, .gpio = silvercrest_reg_seq, } }, }, [EM2821_BOARD_SUPERCOMP_USB_2] = { .name = "Supercomp USB 2.0 TV", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_PHILIPS_FM1236_MK3, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2821_BOARD_USBGEAR_VD204] = { .name = "Usbgear VD204v9", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_NETGMBH_CAM] = { /* Beijing Huaqi Information Digital Technology Co., Ltd */ .name = "NetGMBH Cam", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, } }, }, [EM2860_BOARD_TYPHOON_DVD_MAKER] = { .name = "Typhoon DVD Maker", .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, /* Capture only device */ .input = { 
{ .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_GADMEI_UTV330] = { .name = "Gadmei UTV330", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_TNF_5335MF, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2861_BOARD_GADMEI_UTV330PLUS] = { .name = "Gadmei UTV330+", .tuner_type = TUNER_TNF_5335MF, .tda9887_conf = TDA9887_PRESENT, .ir_codes = RC_MAP_GADMEI_RM008Z, .decoder = EM28XX_SAA711X, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_TERRATEC_HYBRID_XS] = { .name = "Terratec Cinergy A Hybrid XS", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2861_BOARD_KWORLD_PVRTV_300U] = { .name = "KWorld PVRTV 300U", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2861_BOARD_YAKUMO_MOVIE_MIXER] = { .name = "Yakumo MovieMixer", .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_TVP5150_REFERENCE_DESIGN] = { .name = "EM2860/TVP5150 Reference Design", .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2861_BOARD_PLEXTOR_PX_TV100U] = { .name = "Plextor ConvertX PX-TV100U", .tuner_type = TUNER_TNF_5335MF, .xclk = EM28XX_XCLK_I2S_MSB_TIMING | EM28XX_XCLK_FREQUENCY_12MHZ, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_TVP5150, .has_msp34xx = 1, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = 
pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, } }, }, /* Those boards with em2870 are DVB Only*/ [EM2870_BOARD_TERRATEC_XS] = { .name = "Terratec Cinergy T XS", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, }, [EM2870_BOARD_TERRATEC_XS_MT2060] = { .name = "Terratec Cinergy T XS (MT2060)", .xclk = EM28XX_XCLK_IR_RC5_MODE | EM28XX_XCLK_FREQUENCY_12MHZ, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE, .tuner_type = TUNER_ABSENT, /* MT2060 */ .has_dvb = 1, .tuner_gpio = default_tuner_gpio, }, [EM2870_BOARD_KWORLD_350U] = { .name = "Kworld 350 U DVB-T", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, }, [EM2870_BOARD_KWORLD_355U] = { .name = "Kworld 355 U DVB-T", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, .tuner_gpio = default_tuner_gpio, .has_dvb = 1, .dvb_gpio = default_digital, }, [EM2870_BOARD_PINNACLE_PCTV_DVB] = { .name = "Pinnacle PCTV DVB-T", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, /* MT2060 */ /* djh - I have serious doubts this is right... */ .xclk = EM28XX_XCLK_IR_RC5_MODE | EM28XX_XCLK_FREQUENCY_10MHZ, }, [EM2870_BOARD_COMPRO_VIDEOMATE] = { .name = "Compro, VideoMate U3", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_ABSENT, /* MT2060 */ }, [EM2880_BOARD_TERRATEC_HYBRID_XS_FR] = { .name = "Terratec Hybrid XS Secam", .has_msp34xx = 1, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .has_dvb = 1, .dvb_gpio = terratec_cinergy_USB_XS_FR_digital, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = terratec_cinergy_USB_XS_FR_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_cinergy_USB_XS_FR_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_cinergy_USB_XS_FR_analog, } }, }, [EM2884_BOARD_TERRATEC_H5] = { .name = "Terratec Cinergy H5", .has_dvb = 1, #if 0 .tuner_type = TUNER_PHILIPS_TDA8290, .tuner_addr = 0x41, .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */ .tuner_gpio = terratec_h5_gpio, #else .tuner_type = TUNER_ABSENT, #endif .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_TERRATEC_H6] = { .name = "Terratec Cinergy H6 rev. 
2", .has_dvb = 1, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, #if 0 .tuner_type = TUNER_PHILIPS_TDA8290, .tuner_addr = 0x41, .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */ .tuner_gpio = terratec_h5_gpio, #else .tuner_type = TUNER_ABSENT, #endif .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C] = { .name = "Hauppauge WinTV HVR 930C", .has_dvb = 1, #if 0 /* FIXME: Add analog support */ .tuner_type = TUNER_XC5000, .tuner_addr = 0x41, .dvb_gpio = hauppauge_930c_digital, .tuner_gpio = hauppauge_930c_gpio, #else .tuner_type = TUNER_ABSENT, #endif .ir_codes = RC_MAP_HAUPPAUGE, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_C3TECH_DIGITAL_DUO] = { .name = "C3 Tech Digital Duo HDTV/SDTV USB", .has_dvb = 1, /* FIXME: Add analog support - need a saa7136 driver */ .tuner_type = TUNER_ABSENT, /* Digital-only TDA18271HD */ .ir_codes = RC_MAP_EMPTY, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE, .dvb_gpio = c3tech_digital_duo_digital, }, [EM2884_BOARD_CINERGY_HTC_STICK] = { .name = "Terratec Cinergy HTC Stick", .has_dvb = 1, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, .tuner_type = TUNER_ABSENT, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_ELGATO_EYETV_HYBRID_2008] = { .name = "Elgato EyeTV Hybrid 2008 INT", .has_dvb = 1, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, .tuner_type = TUNER_ABSENT, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = { .name = "Hauppauge WinTV HVR 900", .tda9887_conf = TDA9887_PRESENT, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_HAUPPAUGE, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2] = { .name = "Hauppauge WinTV HVR 900 (R2)", .tda9887_conf = TDA9887_PRESENT, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900R2_digital, .ir_codes = RC_MAP_HAUPPAUGE, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850] = { .name = "Hauppauge WinTV HVR 850", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_HAUPPAUGE, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, 
.gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950] = { .name = "Hauppauge WinTV HVR 950", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_HAUPPAUGE, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2880_BOARD_PINNACLE_PCTV_HD_PRO] = { .name = "Pinnacle PCTV HD Pro Stick", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600] = { .name = "AMD ATI TV Wonder HD 600", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_ATI_TV_WONDER_HD_600, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2880_BOARD_TERRATEC_HYBRID_XS] = { .name = "Terratec Hybrid XS", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .has_dvb = 1, .dvb_gpio = default_digital, .ir_codes = RC_MAP_TERRATEC_CINERGY_XS, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */ .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, } }, }, /* * maybe there's a reason behind it why Terratec sells the Hybrid XS * as Prodigy XS with a different PID, let's keep it separated for now * maybe we'll need it later on */ [EM2880_BOARD_TERRATEC_PRODIGY_XS] = { .name = "Terratec Prodigy XS", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, 
.amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2820_BOARD_MSI_VOX_USB_2] = { .name = "MSI VOX USB 2.0", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE, .max_range_640_480 = 1, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE4, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_TERRATEC_CINERGY_200] = { .name = "Terratec Cinergy 200 USB", .is_em2800 = 1, .has_ir_i2c = 1, .tuner_type = TUNER_LG_TALN, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_GRABBEEX_USB2800] = { .name = "eMPIA Technology, Inc. GrabBeeX+ Video Encoder", .is_em2800 = 1, .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, /* capture only board */ .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_VC211A] = { .name = "Actionmaster/LinXcel/Digitus VC211A", .is_em2800 = 1, .tuner_type = TUNER_ABSENT, /* Capture-only board */ .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, .gpio = vc211a_enable, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, .gpio = vc211a_enable, } }, }, [EM2800_BOARD_LEADTEK_WINFAST_USBII] = { .name = "Leadtek Winfast USB II", .is_em2800 = 1, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_KWORLD_USB2800] = { .name = "Kworld USB2800", .is_em2800 = 1, .tuner_type = TUNER_PHILIPS_FCV1236D, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PINNACLE_DVC_90] = { .name = "Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker / Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U", .tuner_type = TUNER_ABSENT, /* capture only board */ .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2800_BOARD_VGEAR_POCKETTV] = { .name = "V-Gear PocketTV", .is_em2800 = 1, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, }, { .type = 
EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2] = { .name = "Pixelview PlayTV Box 4 USB 2.0", .tda9887_conf = TDA9887_PRESENT, .tuner_type = TUNER_YMEC_TVF_5533MF, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, .aout = EM28XX_AOUT_MONO | /* I2S */ EM28XX_AOUT_MASTER, /* Line out pin */ }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_PROLINK_PLAYTV_USB2] = { .name = "SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0", .buttons = std_snapshot_button, .tda9887_conf = TDA9887_PRESENT, .tuner_type = TUNER_YMEC_TVF_5533MF, .tuner_addr = 0x60, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, .aout = EM28XX_AOUT_MONO | /* I2S */ EM28XX_AOUT_MASTER, /* Line out pin */ }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_SAA711X_REFERENCE_DESIGN] = { .name = "EM2860/SAA711X Reference Design", .buttons = std_snapshot_button, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, } }, }, [EM2874_BOARD_LEADERSHIP_ISDBT] = { .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .xclk = EM28XX_XCLK_FREQUENCY_10MHZ, .name = "EM2874 Leadership ISDBT", .tuner_type = TUNER_ABSENT, .tuner_gpio = leadership_reset, .dvb_gpio = leadership_digital, .has_dvb = 1, }, [EM2880_BOARD_MSI_DIGIVOX_AD] = { .name = "MSI DigiVox A/D", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = em2880_msi_digivox_ad_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, } }, }, [EM2880_BOARD_MSI_DIGIVOX_AD_II] = { .name = "MSI DigiVox A/D II", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = em2880_msi_digivox_ad_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2880_msi_digivox_ad_analog, } }, }, [EM2880_BOARD_KWORLD_DVB_305U] = { .name = "KWorld DVB-T 305U", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = 
EM28XX_AMUX_LINE_IN, } }, }, [EM2880_BOARD_KWORLD_DVB_310U] = { .name = "KWorld DVB-T 310U", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .has_dvb = 1, .dvb_gpio = default_digital, .mts_firmware = 1, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, }, { /* S-video has not been tested yet */ .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, } }, }, [EM2882_BOARD_KWORLD_ATSC_315U] = { .name = "KWorld ATSC 315U HDTV TV Box", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_THOMSON_DTT761X, .tuner_gpio = em2882_kworld_315u_tuner_gpio, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_SAA711X, .has_dvb = 1, .dvb_gpio = em2882_kworld_315u_digital, .ir_codes = RC_MAP_KWORLD_315U, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE, #if 0 /* FIXME: Analog mode - still not ready */ .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, .gpio = em2882_kworld_315u_analog, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2882_kworld_315u_analog1, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, .gpio = em2882_kworld_315u_analog1, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, } }, #endif }, [EM2880_BOARD_EMPIRE_DUAL_TV] = { .name = "Empire dual TV", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .has_dvb = 1, .dvb_gpio = default_digital, .mts_firmware = 1, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, } }, }, [EM2881_BOARD_DNT_DA2_HYBRID] = { .name = "DNT DA2 Hybrid", .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = default_analog, } }, }, [EM2881_BOARD_PINNACLE_HYBRID_PRO] = { .name = "Pinnacle Hybrid Pro", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .has_dvb = 1, .dvb_gpio = pinnacle_hybrid_pro_digital, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = pinnacle_hybrid_pro_analog, } }, }, [EM2882_BOARD_PINNACLE_HYBRID_PRO_330E] = { .name = "Pinnacle Hybrid Pro (330e)", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 
1, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900R2_digital, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2882_BOARD_KWORLD_VS_DVBT] = { .name = "Kworld VS-DVB-T 323UR", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = kworld_330u_digital, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */ .ir_codes = RC_MAP_KWORLD_315U, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2882_BOARD_TERRATEC_HYBRID_XS] = { .name = "Terratec Cinergy Hybrid T USB XS (em2882)", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .mts_firmware = 1, .decoder = EM28XX_TVP5150, .has_dvb = 1, .dvb_gpio = hauppauge_wintv_hvr_900_digital, .ir_codes = RC_MAP_TERRATEC_CINERGY_XS, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = hauppauge_wintv_hvr_900_analog, } }, }, [EM2882_BOARD_DIKOM_DK300] = { .name = "Dikom DK300", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = dikom_dk300_digital, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = default_analog, } }, }, [EM2883_BOARD_KWORLD_HYBRID_330U] = { .name = "Kworld PlusTV HD Hybrid 330", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = kworld_330u_digital, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_EEPROM_ON_BOARD | EM28XX_I2C_EEPROM_KEY_VALID, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = kworld_330u_analog, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = kworld_330u_analog, .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = kworld_330u_analog, } }, }, [EM2820_BOARD_COMPRO_VIDEOMATE_FORYOU] = { .name = "Compro VideoMate ForYou/Stereo", .tuner_type = TUNER_LG_PAL_NEW_TAPC, .tvaudio_addr = 0xb0, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_TVP5150, .adecoder = EM28XX_TVAUDIO, .mute_gpio = compro_mute_gpio, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = compro_unmute_tv_gpio, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = 
compro_unmute_svid_gpio, } }, }, [EM2860_BOARD_KAIOMY_TVNPC_U2] = { .name = "Kaiomy TVnPC U2", .vchannels = 3, .tuner_type = TUNER_XC2028, .tuner_addr = 0x61, .mts_firmware = 1, .decoder = EM28XX_TVP5150, .tuner_gpio = default_tuner_gpio, .ir_codes = RC_MAP_KAIOMY, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, .radio = { .type = EM28XX_RADIO, .amux = EM28XX_AMUX_LINE_IN, } }, [EM2860_BOARD_EASYCAP] = { .name = "Easy Cap Capture DC-60", .vchannels = 2, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2820_BOARD_IODATA_GVMVP_SZ] = { .name = "IO-DATA GV-MVP/SZ", .tuner_type = TUNER_PHILIPS_FM1236_MK3, .tuner_gpio = default_tuner_gpio, .tda9887_conf = TDA9887_PRESENT, .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, }, { /* Composite has not been tested yet */ .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_VIDEO, }, { /* S-video has not been tested yet */ .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_VIDEO, } }, }, [EM2860_BOARD_TERRATEC_GRABBY] = { .name = "Terratec Grabby", .vchannels = 2, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, .buttons = std_snapshot_button, .leds = terratec_grabby_leds, }, [EM2860_BOARD_TERRATEC_AV350] = { .name = "Terratec AV350", .vchannels = 2, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_TVP5150, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, .mute_gpio = terratec_av350_mute_gpio, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_av350_unmute_gpio, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_av350_unmute_gpio, } }, }, [EM2860_BOARD_ELGATO_VIDEO_CAPTURE] = { .name = "Elgato Video Capture", .decoder = EM28XX_SAA711X, .tuner_type = TUNER_ABSENT, /* Capture only device */ .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2882_BOARD_EVGA_INDTUBE] = { .name = "Evga inDtube", .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */ .mts_firmware = 1, .has_dvb = 1, .dvb_gpio = evga_indtube_digital, .ir_codes = RC_MAP_EVGA_INDTUBE, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = evga_indtube_analog, }, { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, .gpio = evga_indtube_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, .gpio = evga_indtube_analog, } }, }, /* * eb1a:2868 Empia EM2870 + Philips CU1216L NIM * (Philips TDA10023 + Infineon TUA6034) */ [EM2870_BOARD_REDDO_DVB_C_USB_BOX] = 
{ .name = "Reddo DVB-C USB TV Box", .tuner_type = TUNER_ABSENT, .tuner_gpio = reddo_dvb_c_usb_box, .has_dvb = 1, }, /* * 1b80:a340 - Empia EM2870, NXP TDA18271HD and LG DT3304, sold * initially as the KWorld PlusTV 340U, then as the UB435-Q. * Early variants have a TDA18271HD/C1, later ones a TDA18271HD/C2 */ [EM2870_BOARD_KWORLD_A340] = { .name = "KWorld PlusTV 340U or UB435-Q (ATSC)", .tuner_type = TUNER_ABSENT, /* Digital-only TDA18271HD */ .has_dvb = 1, .dvb_gpio = kworld_a340_digital, .tuner_gpio = default_tuner_gpio, }, /* * 2013:024f PCTV nanoStick T2 290e. * Empia EM28174, Sony CXD2820R and NXP TDA18271HD/C2 */ [EM28174_BOARD_PCTV_290E] = { .name = "PCTV nanoStick T2 290e", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_290e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, /* * 2013:024f PCTV DVB-S2 Stick 460e * Empia EM28174, NXP TDA10071, Conexant CX24118A and Allegro A8293 */ [EM28174_BOARD_PCTV_460E] = { .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .name = "PCTV DVB-S2 Stick (460e)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_460e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, /* * eb1a:5006 Honestech VIDBOX NW03 * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner */ [EM2860_BOARD_HT_VIDBOX_NW03] = { .name = "Honestech Vidbox NW03", .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, /* S-VIDEO needs check */ .amux = EM28XX_AMUX_LINE_IN, } }, }, /* * 1b80:e425 MaxMedia UB425-TC * Empia EM2874B + Micronas DRX 3913KA2 + NXP TDA18271HDC2 */ [EM2874_BOARD_MAXMEDIA_UB425_TC] = { .name = "MaxMedia UB425-TC", .tuner_type = TUNER_ABSENT, .tuner_gpio = maxmedia_ub425_tc, .has_dvb = 1, .ir_codes = RC_MAP_REDDO, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, /* * 2304:0242 PCTV QuatroStick (510e) * Empia EM2884 + Micronas DRX 3926K + NXP TDA18271HDC2 */ [EM2884_BOARD_PCTV_510E] = { .name = "PCTV QuatroStick (510e)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_510e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, /* * 2013:0251 PCTV QuatroStick nano (520e) * Empia EM2884 + Micronas DRX 3926K + NXP TDA18271HDC2 */ [EM2884_BOARD_PCTV_520E] = { .name = "PCTV QuatroStick nano (520e)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_520e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, [EM2884_BOARD_TERRATEC_HTC_USB_XS] = { .name = "Terratec Cinergy HTC USB XS", .has_dvb = 1, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, .tuner_type = TUNER_ABSENT, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, /* * 1b80:e1cc Delock 61959 * Empia EM2874B + Micronas DRX 3913KA2 + NXP TDA18271HDC2 * mostly the same as MaxMedia UB-425-TC but different remote */ [EM2874_BOARD_DELOCK_61959] = { .name = "Delock 61959", .tuner_type = TUNER_ABSENT, .tuner_gpio = maxmedia_ub425_tc, .has_dvb = 1, .ir_codes = RC_MAP_DELOCK_61959, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, /* * 1b80:e346 KWorld USB ATSC TV Stick UB435-Q V2 * Empia EM2874B + LG DT3305 + NXP TDA18271HDC2 */ [EM2874_BOARD_KWORLD_UB435Q_V2] = { .name = 
"KWorld USB ATSC TV Stick UB435-Q V2", .tuner_type = TUNER_ABSENT, .has_dvb = 1, .dvb_gpio = kworld_a340_digital, .tuner_gpio = default_tuner_gpio, .def_i2c_bus = 1, }, /* * 1b80:e34c KWorld USB ATSC TV Stick UB435-Q V3 * Empia EM2874B + LG DT3305 + NXP TDA18271HDC2 */ [EM2874_BOARD_KWORLD_UB435Q_V3] = { .name = "KWorld USB ATSC TV Stick UB435-Q V3", .tuner_type = TUNER_ABSENT, .has_dvb = 1, .tuner_gpio = kworld_ub435q_v3_digital, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .leds = kworld_ub435q_v3_leds, }, [EM2874_BOARD_PCTV_HD_MINI_80E] = { .name = "Pinnacle PCTV HD Mini", .tuner_type = TUNER_ABSENT, .has_dvb = 1, .dvb_gpio = em2874_pctv_80e_digital, .decoder = EM28XX_NODECODER, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, .leds = pctv_80e_leds, }, /* * 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam * Empia EM2765 + OmniVision OV2640 */ [EM2765_BOARD_SPEEDLINK_VAD_LAPLACE] = { .name = "SpeedLink Vicious And Devine Laplace webcam", .xclk = EM28XX_XCLK_FREQUENCY_24MHZ, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .def_i2c_bus = 1, .tuner_type = TUNER_ABSENT, .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE, .amux = EM28XX_AMUX_VIDEO, .gpio = speedlink_vad_laplace_reg_seq, } }, .buttons = speedlink_vad_laplace_buttons, .leds = speedlink_vad_laplace_leds, }, /* * 2013:0258 PCTV DVB-S2 Stick (461e) * Empia EM28178, Montage M88DS3103, Montage M88TS2022, Allegro A8293 */ [EM28178_BOARD_PCTV_461E] = { .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .name = "PCTV DVB-S2 Stick (461e)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_461e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, /* * 2013:0259 PCTV DVB-S2 Stick (461e_v2) * Empia EM28178, Montage M88DS3103b, Montage M88TS2022, Allegro A8293 */ [EM28178_BOARD_PCTV_461E_V2] = { .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .name = "PCTV DVB-S2 Stick (461e v2)", .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_461e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, /* * 2013:025f PCTV tripleStick (292e). * Empia EM28178, Silicon Labs Si2168, Silicon Labs Si2157 */ [EM28178_BOARD_PCTV_292E] = { .name = "PCTV tripleStick (292e)", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = pctv_292e, .has_dvb = 1, .ir_codes = RC_MAP_PINNACLE_PCTV_HD, }, [EM2861_BOARD_LEADTEK_VC100] = { .name = "Leadtek VC100", .tuner_type = TUNER_ABSENT, /* Capture only device */ .decoder = EM28XX_TVP5150, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, /* * eb1a:8179 Terratec Cinergy T2 Stick HD. * Empia EM28178, Silicon Labs Si2168, Silicon Labs Si2146 */ [EM28178_BOARD_TERRATEC_T2_STICK_HD] = { .name = "Terratec Cinergy T2 Stick HD", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = terratec_t2_stick_hd, .has_dvb = 1, .ir_codes = RC_MAP_TERRATEC_SLIM_2, }, /* * 3275:0085 PLEX PX-BCUD. 
* Empia EM28178, TOSHIBA TC90532XBG, Sharp QM1D1C0042 */ [EM28178_BOARD_PLEX_PX_BCUD] = { .name = "PLEX PX-BCUD", .xclk = EM28XX_XCLK_FREQUENCY_4_3MHZ, .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE, .tuner_type = TUNER_ABSENT, .tuner_gpio = plex_px_bcud, .has_dvb = 1, }, /* * 2040:0265 Hauppauge WinTV-dualHD (DVB version) Isoc. * 2040:8265 Hauppauge WinTV-dualHD (DVB version) Bulk. * Empia EM28274, 2x Silicon Labs Si2168, 2x Silicon Labs Si2157 */ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB] = { .name = "Hauppauge WinTV-dualHD DVB", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .tuner_type = TUNER_SI2157, .tuner_gpio = hauppauge_dualhd_dvb, .has_dvb = 1, .has_dual_ts = 1, .ir_codes = RC_MAP_HAUPPAUGE, .leds = hauppauge_dualhd_leds, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, } }, }, /* * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc. * 2040:826d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Bulk. * Empia EM28274, 2x LG LGDT3306A, 2x Silicon Labs Si2157 */ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595] = { .name = "Hauppauge WinTV-dualHD 01595 ATSC/QAM", .def_i2c_bus = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = hauppauge_dualhd_dvb, .has_dvb = 1, .has_dual_ts = 1, .ir_codes = RC_MAP_HAUPPAUGE, .leds = hauppauge_dualhd_leds, }, /* * 1b80:e349 Magix USB Videowandler-2 * (same chips as Honestech VIDBOX NW03) * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner */ [EM2861_BOARD_MAGIX_VIDEOWANDLER2] = { .name = "Magix USB Videowandler-2", .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, } }, }, /* * 1f4d:1abe MyGica iGrabber * (same as several other EM2860 devices) * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner */ [EM2860_BOARD_MYGICA_IGRABBER] = { .name = "MyGica iGrabber", .vchannels = 2, .tuner_type = TUNER_ABSENT, .decoder = EM28XX_SAA711X, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, .amux = EM28XX_AMUX_LINE_IN, } }, }, /* 2040:826d Hauppauge USB QuadHD * Empia 28274, Max Linear 692 ATSC combo demod/tuner */ [EM2874_BOARD_HAUPPAUGE_USB_QUADHD] = { .name = "Hauppauge USB QuadHD ATSC", .def_i2c_bus = 1, .has_dual_ts = 1, .has_dvb = 1, .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, .tuner_type = TUNER_ABSENT, .tuner_gpio = hauppauge_usb_quadhd_atsc_reg_seq, .leds = hauppauge_usb_quadhd_leds, }, /* * eb1a:2860 MyGica UTV3 Analog USB2.0 TV Box * Empia EM2860, Philips SAA7113, NXP TDA9801T demod, * Tena TNF931D-DFDR1 tuner (contains NXP TDA6509A), * ST HCF4052 demux (switches audio to line out), * no audio over USB */ [EM2860_BOARD_MYGICA_UTV3] = { .name = "MyGica UTV3 Analog USB2.0 TV Box", .xclk = EM28XX_XCLK_IR_RC5_MODE | EM28XX_XCLK_FREQUENCY_12MHZ, .tuner_type = TUNER_TENA_TNF_931D_DFDR1, .ir_codes = RC_MAP_MYGICA_UTV3, .decoder = EM28XX_SAA711X, .suspend_gpio = mygica_utv3_suspend_gpio, .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = SAA7115_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, .gpio = mygica_utv3_composite_audio_gpio, }, { .type = EM28XX_VMUX_TELEVISION, .vmux = SAA7115_COMPOSITE2, .amux = EM28XX_AMUX_VIDEO, .gpio = mygica_utv3_tuner_audio_gpio, } }, }, }; 
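/*
 * Illustrative sketch (disabled, not part of the driver): shows how an entry
 * of em28xx_boards[] could be consulted once a model index is known. The
 * helper name and the pr_info() output are assumptions for illustration only;
 * the driver itself copies the selected entry into dev->board (see
 * em28xx_set_model() below) instead of dumping it.
 */
#if 0
static void em28xx_show_board_info(unsigned int model)
{
	const struct em28xx_board *board;

	if (model >= ARRAY_SIZE(em28xx_boards))
		return;
	board = &em28xx_boards[model];

	pr_info("em28xx board %u: %s (%s, %s)\n", model, board->name,
		board->tuner_type == TUNER_ABSENT ? "no tuner" : "tuner present",
		board->has_dvb ? "DVB capable" : "analog only");
}
#endif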
EXPORT_SYMBOL_GPL(em28xx_boards); static const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); /* table of devices that work with this driver */ struct usb_device_id em28xx_id_table[] = { { USB_DEVICE(0xeb1a, 0x2750), .driver_info = EM2750_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2751), .driver_info = EM2750_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2800), .driver_info = EM2800_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2710), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2820), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2821), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2860), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2861), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2862), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2863), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2870), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2881), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2883), /* used by :Zolid Hybrid Tv Stick */ .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2868), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2875), .driver_info = EM2820_BOARD_UNKNOWN }, { USB_DEVICE(0xeb1a, 0x2885), /* MSI Digivox Trio */ .driver_info = EM2884_BOARD_TERRATEC_H5 }, { USB_DEVICE(0xeb1a, 0xe300), .driver_info = EM2861_BOARD_KWORLD_PVRTV_300U }, { USB_DEVICE(0xeb1a, 0xe303), .driver_info = EM2860_BOARD_KAIOMY_TVNPC_U2 }, { USB_DEVICE(0xeb1a, 0xe305), .driver_info = EM2880_BOARD_KWORLD_DVB_305U }, { USB_DEVICE(0xeb1a, 0xe310), .driver_info = EM2880_BOARD_MSI_DIGIVOX_AD }, { USB_DEVICE(0xeb1a, 0xa313), .driver_info = EM2882_BOARD_KWORLD_ATSC_315U }, { USB_DEVICE(0xeb1a, 0xa316), .driver_info = EM2883_BOARD_KWORLD_HYBRID_330U }, { USB_DEVICE(0xeb1a, 0xe320), .driver_info = EM2880_BOARD_MSI_DIGIVOX_AD_II }, { USB_DEVICE(0xeb1a, 0xe323), .driver_info = EM2882_BOARD_KWORLD_VS_DVBT }, { USB_DEVICE(0xeb1a, 0xe350), .driver_info = EM2870_BOARD_KWORLD_350U }, { USB_DEVICE(0xeb1a, 0xe355), .driver_info = EM2870_BOARD_KWORLD_355U }, { USB_DEVICE(0xeb1a, 0x2801), .driver_info = EM2800_BOARD_GRABBEEX_USB2800 }, { USB_DEVICE(0xeb1a, 0xe357), .driver_info = EM2870_BOARD_KWORLD_355U }, { USB_DEVICE(0xeb1a, 0xe359), .driver_info = EM2870_BOARD_KWORLD_355U }, { USB_DEVICE(0x1b80, 0xe302), /* Kaiser Baas Video to DVD maker */ .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x1b80, 0xe304), /* Kworld DVD Maker 2 */ .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x0ccd, 0x0036), .driver_info = EM2820_BOARD_TERRATEC_CINERGY_250 }, { USB_DEVICE(0x0ccd, 0x004c), .driver_info = EM2880_BOARD_TERRATEC_HYBRID_XS_FR }, { USB_DEVICE(0x0ccd, 0x004f), .driver_info = EM2860_BOARD_TERRATEC_HYBRID_XS }, { USB_DEVICE(0x0ccd, 0x005e), .driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS }, { USB_DEVICE(0x0ccd, 0x0042), .driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS }, { USB_DEVICE(0x0ccd, 0x0043), .driver_info = EM2870_BOARD_TERRATEC_XS_MT2060 }, { USB_DEVICE(0x0ccd, 0x008e), /* Cinergy HTC USB XS Rev. 1 */ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS }, { USB_DEVICE(0x0ccd, 0x00ac), /* Cinergy HTC USB XS Rev. 2 */ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS }, { USB_DEVICE(0x0ccd, 0x10a2), /* H5 Rev. 1 */ .driver_info = EM2884_BOARD_TERRATEC_H5 }, { USB_DEVICE(0x0ccd, 0x10ad), /* H5 Rev. 2 */ .driver_info = EM2884_BOARD_TERRATEC_H5 }, { USB_DEVICE(0x0ccd, 0x10b6), /* H5 Rev. 
3 */ .driver_info = EM2884_BOARD_TERRATEC_H5 }, { USB_DEVICE(0x0ccd, 0x10b2), /* H6 */ .driver_info = EM2884_BOARD_TERRATEC_H6 }, { USB_DEVICE(0x0ccd, 0x0084), .driver_info = EM2860_BOARD_TERRATEC_AV350 }, { USB_DEVICE(0x0ccd, 0x0096), .driver_info = EM2860_BOARD_TERRATEC_GRABBY }, { USB_DEVICE(0x0ccd, 0x10AF), .driver_info = EM2860_BOARD_TERRATEC_GRABBY }, { USB_DEVICE(0x0ccd, 0x00b2), .driver_info = EM2884_BOARD_CINERGY_HTC_STICK }, { USB_DEVICE(0x0fd9, 0x0018), .driver_info = EM2884_BOARD_ELGATO_EYETV_HYBRID_2008 }, { USB_DEVICE(0x0fd9, 0x0033), .driver_info = EM2860_BOARD_ELGATO_VIDEO_CAPTURE }, { USB_DEVICE(0x185b, 0x2870), .driver_info = EM2870_BOARD_COMPRO_VIDEOMATE }, { USB_DEVICE(0x185b, 0x2041), .driver_info = EM2820_BOARD_COMPRO_VIDEOMATE_FORYOU }, { USB_DEVICE(0x2040, 0x4200), .driver_info = EM2820_BOARD_HAUPPAUGE_WINTV_USB_2 }, { USB_DEVICE(0x2040, 0x4201), .driver_info = EM2820_BOARD_HAUPPAUGE_WINTV_USB_2 }, { USB_DEVICE(0x2040, 0x6500), .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900 }, { USB_DEVICE(0x2040, 0x6502), .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2 }, { USB_DEVICE(0x2040, 0x6513), /* HCW HVR-980 */ .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 }, { USB_DEVICE(0x2040, 0x6517), /* HP HVR-950 */ .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 }, { USB_DEVICE(0x2040, 0x651b), /* RP HVR-950 */ .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 }, { USB_DEVICE(0x2040, 0x651f), .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 }, { USB_DEVICE(0x2040, 0x0265), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB }, { USB_DEVICE(0x2040, 0x8265), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB }, { USB_DEVICE(0x2040, 0x026d), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 }, { USB_DEVICE(0x2040, 0x826d), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 }, { USB_DEVICE(0x2040, 0x846d), .driver_info = EM2874_BOARD_HAUPPAUGE_USB_QUADHD }, { USB_DEVICE(0x0438, 0xb002), .driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 }, { USB_DEVICE(0x2001, 0xf112), .driver_info = EM2820_BOARD_DLINK_USB_TV }, { USB_DEVICE(0x2304, 0x0207), .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x2304, 0x0208), .driver_info = EM2820_BOARD_PINNACLE_USB_2 }, { USB_DEVICE(0x2304, 0x021a), .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x2304, 0x0226), .driver_info = EM2882_BOARD_PINNACLE_HYBRID_PRO_330E }, { USB_DEVICE(0x2304, 0x0227), .driver_info = EM2880_BOARD_PINNACLE_PCTV_HD_PRO }, { USB_DEVICE(0x2304, 0x023f), .driver_info = EM2874_BOARD_PCTV_HD_MINI_80E }, { USB_DEVICE(0x0413, 0x6023), .driver_info = EM2800_BOARD_LEADTEK_WINFAST_USBII }, { USB_DEVICE(0x093b, 0xa003), .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, { USB_DEVICE(0x093b, 0xa005), .driver_info = EM2861_BOARD_PLEXTOR_PX_TV100U }, { USB_DEVICE(0x04bb, 0x0515), .driver_info = EM2820_BOARD_IODATA_GVMVP_SZ }, { USB_DEVICE(0xeb1a, 0x50a6), .driver_info = EM2860_BOARD_GADMEI_UTV330 }, { USB_DEVICE(0x1b80, 0xa340), .driver_info = EM2870_BOARD_KWORLD_A340 }, { USB_DEVICE(0x1b80, 0xe346), .driver_info = EM2874_BOARD_KWORLD_UB435Q_V2 }, { USB_DEVICE(0x1b80, 0xe34c), .driver_info = EM2874_BOARD_KWORLD_UB435Q_V3 }, { USB_DEVICE(0x2013, 0x024f), .driver_info = EM28174_BOARD_PCTV_290E }, { USB_DEVICE(0x2013, 0x024c), .driver_info = EM28174_BOARD_PCTV_460E }, { USB_DEVICE(0x2040, 0x1605), .driver_info = EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C }, { USB_DEVICE(0x1b80, 0xe755), .driver_info = EM2884_BOARD_C3TECH_DIGITAL_DUO }, { USB_DEVICE(0xeb1a, 0x5006), .driver_info = 
EM2860_BOARD_HT_VIDBOX_NW03 }, { USB_DEVICE(0x1b80, 0xe309), /* Sveon STV40 */ .driver_info = EM2860_BOARD_EASYCAP }, { USB_DEVICE(0x1b80, 0xe425), .driver_info = EM2874_BOARD_MAXMEDIA_UB425_TC }, { USB_DEVICE(0x1f4d, 0x1abe), .driver_info = EM2860_BOARD_MYGICA_IGRABBER }, { USB_DEVICE(0x2304, 0x0242), .driver_info = EM2884_BOARD_PCTV_510E }, { USB_DEVICE(0x2013, 0x0251), .driver_info = EM2884_BOARD_PCTV_520E }, { USB_DEVICE(0x1b80, 0xe1cc), .driver_info = EM2874_BOARD_DELOCK_61959 }, { USB_DEVICE(0x1ae7, 0x9003), .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE }, { USB_DEVICE(0x1ae7, 0x9004), .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE }, { USB_DEVICE(0x2013, 0x0258), .driver_info = EM28178_BOARD_PCTV_461E }, { USB_DEVICE(0x2013, 0x8258), /* Bulk transport 461e */ .driver_info = EM28178_BOARD_PCTV_461E }, { USB_DEVICE(0x2013, 0x0461), .driver_info = EM28178_BOARD_PCTV_461E_V2 }, { USB_DEVICE(0x2013, 0x8461), /* Bulk transport 461e v2 */ .driver_info = EM28178_BOARD_PCTV_461E_V2 }, { USB_DEVICE(0x2013, 0x0259), .driver_info = EM28178_BOARD_PCTV_461E_V2 }, { USB_DEVICE(0x2013, 0x025f), .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x2013, 0x0264), /* Hauppauge WinTV-soloHD 292e SE */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD Isoc */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x2040, 0x8264), /* Hauppauge OEM Generic WinTV-soloHD Bulk */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x2040, 0x8268), /* Hauppauge Retail WinTV-soloHD Bulk */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x0413, 0x6f07), .driver_info = EM2861_BOARD_LEADTEK_VC100 }, { USB_DEVICE(0xeb1a, 0x8179), .driver_info = EM28178_BOARD_TERRATEC_T2_STICK_HD }, { USB_DEVICE(0x3275, 0x0085), .driver_info = EM28178_BOARD_PLEX_PX_BCUD }, { USB_DEVICE(0xeb1a, 0x5051), /* Ion Video 2 PC MKII / Startech svid2usb23 / Raygo R12-41373 */ .driver_info = EM2860_BOARD_TVP5150_REFERENCE_DESIGN }, { USB_DEVICE(0x1b80, 0xe349), /* Magix USB Videowandler-2 */ .driver_info = EM2861_BOARD_MAGIX_VIDEOWANDLER2 }, { }, }; MODULE_DEVICE_TABLE(usb, em28xx_id_table); /* * EEPROM hash table for devices with generic USB IDs */ static const struct em28xx_hash_table em28xx_eeprom_hash[] = { /* P/N: SA 60002070465 Tuner: TVF7533-MF */ {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF}, {0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF}, {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, {0x166a0441, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028}, {0xb8846b20, EM2881_BOARD_PINNACLE_HYBRID_PRO, TUNER_XC2028}, {0x63f653bd, EM2870_BOARD_REDDO_DVB_C_USB_BOX, TUNER_ABSENT}, {0x4e913442, EM2882_BOARD_DIKOM_DK300, TUNER_XC2028}, {0x85dd871e, EM2882_BOARD_ZOLID_HYBRID_TV_STICK, TUNER_XC2028}, {0x8f597549, EM2860_BOARD_MYGICA_UTV3, TUNER_TENA_TNF_931D_DFDR1}, }; /* I2C devicelist hash table for devices with generic USB IDs */ static const struct em28xx_hash_table em28xx_i2c_hash[] = { {0xb06a32c3, EM2800_BOARD_TERRATEC_CINERGY_200, TUNER_LG_PAL_NEW_TAPC}, {0xf51200e3, EM2800_BOARD_VGEAR_POCKETTV, TUNER_LG_PAL_NEW_TAPC}, {0x1ba50080, EM2860_BOARD_SAA711X_REFERENCE_DESIGN, TUNER_ABSENT}, {0x77800080, EM2860_BOARD_TVP5150_REFERENCE_DESIGN, TUNER_ABSENT}, {0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC}, {0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF}, {0x6b800080, EM2874_BOARD_LEADERSHIP_ISDBT, TUNER_ABSENT}, {0x27e10080, 
EM2882_BOARD_ZOLID_HYBRID_TV_STICK, TUNER_XC2028}, {0x840d0484, EM2860_BOARD_MYGICA_UTV3, TUNER_TENA_TNF_931D_DFDR1}, }; /* NOTE: introduce a separate hash table for devices with 16 bit eeproms */ int em28xx_tuner_callback(void *ptr, int component, int command, int arg) { struct em28xx_i2c_bus *i2c_bus = ptr; struct em28xx *dev = i2c_bus->dev; int rc = 0; if (dev->tuner_type != TUNER_XC2028 && dev->tuner_type != TUNER_XC5000) return 0; if (command != XC2028_TUNER_RESET && command != XC5000_TUNER_RESET) return 0; rc = em28xx_gpio_set(dev, dev->board.tuner_gpio); return rc; } EXPORT_SYMBOL_GPL(em28xx_tuner_callback); static inline void em28xx_set_xclk_i2c_speed(struct em28xx *dev) { const struct em28xx_board *board = &em28xx_boards[dev->model]; u8 xclk = board->xclk, i2c_speed = board->i2c_speed; /* * Those are the default values for the majority of boards * Use those values if not specified otherwise at boards entry */ if (!xclk) xclk = EM28XX_XCLK_IR_RC5_MODE | EM28XX_XCLK_FREQUENCY_12MHZ; em28xx_write_reg(dev, EM28XX_R0F_XCLK, xclk); if (!i2c_speed) i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ; dev->i2c_speed = i2c_speed & 0x03; if (!dev->board.is_em2800) em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, i2c_speed); msleep(50); } static inline void em28xx_set_model(struct em28xx *dev) { dev->board = em28xx_boards[dev->model]; dev->has_msp34xx = dev->board.has_msp34xx; dev->is_webcam = dev->board.is_webcam; em28xx_set_xclk_i2c_speed(dev); /* Should be initialized early, for I2C to work */ dev->def_i2c_bus = dev->board.def_i2c_bus; } /* * Wait until AC97_RESET reports the expected value reliably before proceeding. * We also check that two unrelated registers accesses don't return the same * value to avoid premature return. * This procedure helps ensuring AC97 register accesses are reliable. */ static int em28xx_wait_until_ac97_features_equals(struct em28xx *dev, int expected_feat) { unsigned long timeout = jiffies + msecs_to_jiffies(2000); int feat, powerdown; while (time_is_after_jiffies(timeout)) { feat = em28xx_read_ac97(dev, AC97_RESET); if (feat < 0) return feat; powerdown = em28xx_read_ac97(dev, AC97_POWERDOWN); if (powerdown < 0) return powerdown; if (feat == expected_feat && feat != powerdown) return 0; msleep(50); } dev_warn(&dev->intf->dev, "AC97 registers access is not reliable !\n"); return -ETIMEDOUT; } /* * Since em28xx_pre_card_setup() requires a proper dev->model, * this won't work for boards with generic PCI IDs */ static void em28xx_pre_card_setup(struct em28xx *dev) { /* * Set the initial XCLK and I2C clock values based on the board * definition */ em28xx_set_xclk_i2c_speed(dev); /* request some modules */ switch (dev->model) { case EM2861_BOARD_PLEXTOR_PX_TV100U: /* Sets the msp34xx I2S speed */ dev->i2s_speed = 2048000; break; case EM2861_BOARD_KWORLD_PVRTV_300U: case EM2880_BOARD_KWORLD_DVB_305U: em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0x6d); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0x7d); usleep_range(10000, 11000); break; case EM2870_BOARD_COMPRO_VIDEOMATE: /* * TODO: someone can do some cleanup here... 
* not everything's needed */ em28xx_write_reg(dev, EM2880_R04_GPO, 0x00); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2880_R04_GPO, 0x01); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xdc); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc); msleep(70); break; case EM2870_BOARD_TERRATEC_XS_MT2060: /* * this device needs some gpio writes to get the DVB-T * demod work */ em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); break; case EM2870_BOARD_PINNACLE_PCTV_DVB: /* * this device needs some gpio writes to get the * DVB-T demod work */ em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); break; case EM2820_BOARD_GADMEI_UTV310: case EM2820_BOARD_MSI_VOX_USB_2: /* enables audio for that devices */ em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd); break; case EM2882_BOARD_KWORLD_ATSC_315U: em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2880_R04_GPO, 0x00); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2880_R04_GPO, 0x08); usleep_range(10000, 11000); break; case EM2860_BOARD_KAIOMY_TVNPC_U2: em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x07", 1); em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1); em28xx_write_regs(dev, 0x0d, "\x42", 1); em28xx_write_regs(dev, 0x08, "\xfd", 1); usleep_range(10000, 11000); em28xx_write_regs(dev, 0x08, "\xff", 1); usleep_range(10000, 11000); em28xx_write_regs(dev, 0x08, "\x7f", 1); usleep_range(10000, 11000); em28xx_write_regs(dev, 0x08, "\x6b", 1); break; case EM2860_BOARD_EASYCAP: em28xx_write_regs(dev, 0x08, "\xf8", 1); break; case EM2820_BOARD_IODATA_GVMVP_SZ: em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe); msleep(70); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd); msleep(70); break; case EM2860_BOARD_TERRATEC_GRABBY: /* * HACK?: Ensure AC97 register reading is reliable before * proceeding. In practice, this will wait about 1.6 seconds. */ em28xx_wait_until_ac97_features_equals(dev, 0x6a90); break; } em28xx_gpio_set(dev, dev->board.tuner_gpio); em28xx_set_mode(dev, EM28XX_ANALOG_MODE); /* Unlock device */ em28xx_set_mode(dev, EM28XX_SUSPEND); } static int em28xx_hint_board(struct em28xx *dev) { int i; if (dev->is_webcam) { if (dev->em28xx_sensor == EM28XX_MT9V011) { dev->model = EM2820_BOARD_SILVERCREST_WEBCAM; } else if (dev->em28xx_sensor == EM28XX_MT9M001 || dev->em28xx_sensor == EM28XX_MT9M111) { dev->model = EM2750_BOARD_UNKNOWN; } /* FIXME: IMPROVE ! */ return 0; } /* * HINT method: EEPROM * * This method works only for boards with eeprom. * Uses a hash of all eeprom bytes. The hash should be * unique for a vendor/tuner pair. * There are a high chance that tuners for different * video standards produce different hashes. 
*/ for (i = 0; i < ARRAY_SIZE(em28xx_eeprom_hash); i++) { if (dev->hash == em28xx_eeprom_hash[i].hash) { dev->model = em28xx_eeprom_hash[i].model; dev->tuner_type = em28xx_eeprom_hash[i].tuner; dev_err(&dev->intf->dev, "Your board has no unique USB ID.\n" "A hint were successfully done, based on eeprom hash.\n" "This method is not 100%% failproof.\n" "If the board were misdetected, please email this log to:\n" "\tV4L Mailing List <linux-media@vger.kernel.org>\n" "Board detected as %s\n", em28xx_boards[dev->model].name); return 0; } } /* * HINT method: I2C attached devices * * This method works for all boards. * Uses a hash of i2c scanned devices. * Devices with the same i2c attached chips will * be considered equal. * This method is less precise than the eeprom one. */ /* user did not request i2c scanning => do it now */ if (!dev->i2c_hash) em28xx_do_i2c_scan(dev, dev->def_i2c_bus); for (i = 0; i < ARRAY_SIZE(em28xx_i2c_hash); i++) { if (dev->i2c_hash == em28xx_i2c_hash[i].hash) { dev->model = em28xx_i2c_hash[i].model; dev->tuner_type = em28xx_i2c_hash[i].tuner; dev_err(&dev->intf->dev, "Your board has no unique USB ID.\n" "A hint were successfully done, based on i2c devicelist hash.\n" "This method is not 100%% failproof.\n" "If the board were misdetected, please email this log to:\n" "\tV4L Mailing List <linux-media@vger.kernel.org>\n" "Board detected as %s\n", em28xx_boards[dev->model].name); return 0; } } dev_err(&dev->intf->dev, "Your board has no unique USB ID and thus need a hint to be detected.\n" "You may try to use card=<n> insmod option to workaround that.\n" "Please send an email with this log to:\n" "\tV4L Mailing List <linux-media@vger.kernel.org>\n" "Board eeprom hash is 0x%08lx\n" "Board i2c devicelist hash is 0x%08lx\n", dev->hash, dev->i2c_hash); dev_err(&dev->intf->dev, "Here is a list of valid choices for the card=<n> insmod option:\n"); for (i = 0; i < em28xx_bcount; i++) { dev_err(&dev->intf->dev, " card=%d -> %s\n", i, em28xx_boards[i].name); } return -1; } static void em28xx_card_setup(struct em28xx *dev) { int i, j, idx; bool duplicate_entry; /* * If the device can be a webcam, seek for a sensor. * If sensor is not found, then it isn't a webcam. */ if (dev->is_webcam) { em28xx_detect_sensor(dev); if (dev->em28xx_sensor == EM28XX_NOSENSOR) /* NOTE: error/unknown sensor/no sensor */ dev->is_webcam = 0; } switch (dev->model) { case EM2750_BOARD_UNKNOWN: case EM2820_BOARD_UNKNOWN: case EM2800_BOARD_UNKNOWN: /* * The K-WORLD DVB-T 310U is detected as an MSI Digivox AD. * * This occurs because they share identical USB vendor and * product IDs. * * What we do here is look up the EEPROM hash of the K-WORLD * and if it is found then we decide that we do not have * a DIGIVOX and reset the device to the K-WORLD instead. * * This solution is only valid if they do not share eeprom * hash identities which has not been determined as yet. 
*/ if (em28xx_hint_board(dev) < 0) { dev_err(&dev->intf->dev, "Board not discovered\n"); } else { em28xx_set_model(dev); em28xx_pre_card_setup(dev); } break; default: em28xx_set_model(dev); } dev_info(&dev->intf->dev, "Identified as %s (card=%d)\n", dev->board.name, dev->model); dev->tuner_type = em28xx_boards[dev->model].tuner_type; /* request some modules */ switch (dev->model) { case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2: case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850: case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C: case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB: case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595: { struct tveeprom tv; if (!dev->eedata) break; #if defined(CONFIG_MODULES) && defined(MODULE) request_module("tveeprom"); #endif /* Call first TVeeprom */ tveeprom_hauppauge_analog(&tv, dev->eedata); dev->tuner_type = tv.tuner_type; if (tv.audio_processor == TVEEPROM_AUDPROC_MSP) { dev->i2s_speed = 2048000; dev->has_msp34xx = 1; } break; } case EM2882_BOARD_KWORLD_ATSC_315U: em28xx_write_reg(dev, 0x0d, 0x42); usleep_range(10000, 11000); em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd); usleep_range(10000, 11000); break; case EM2820_BOARD_KWORLD_PVRTV2800RF: /* GPIO enables sound on KWORLD PVR TV 2800RF */ em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf9); break; case EM2820_BOARD_UNKNOWN: case EM2800_BOARD_UNKNOWN: /* * The K-WORLD DVB-T 310U is detected as an MSI Digivox AD. * * This occurs because they share identical USB vendor and * product IDs. * * What we do here is look up the EEPROM hash of the K-WORLD * and if it is found then we decide that we do not have * a DIGIVOX and reset the device to the K-WORLD instead. * * This solution is only valid if they do not share eeprom * hash identities which has not been determined as yet. */ case EM2880_BOARD_MSI_DIGIVOX_AD: if (!em28xx_hint_board(dev)) em28xx_set_model(dev); /* * In cases where we had to use a board hint, the call to * em28xx_set_mode() in em28xx_pre_card_setup() was a no-op, * so make the call now so the analog GPIOs are set properly * before probing the i2c bus. */ em28xx_gpio_set(dev, dev->board.tuner_gpio); em28xx_set_mode(dev, EM28XX_ANALOG_MODE); break; /* * The Dikom DK300 is detected as an Kworld VS-DVB-T 323UR. * * This occurs because they share identical USB vendor and * product IDs. * * What we do here is look up the EEPROM hash of the Dikom * and if it is found then we decide that we do not have * a Kworld and reset the device to the Dikom instead. * * This solution is only valid if they do not share eeprom * hash identities which has not been determined as yet. */ case EM2882_BOARD_KWORLD_VS_DVBT: if (!em28xx_hint_board(dev)) em28xx_set_model(dev); /* * In cases where we had to use a board hint, the call to * em28xx_set_mode() in em28xx_pre_card_setup() was a no-op, * so make the call now so the analog GPIOs are set properly * before probing the i2c bus. 
*/ em28xx_gpio_set(dev, dev->board.tuner_gpio); em28xx_set_mode(dev, EM28XX_ANALOG_MODE); break; } if (dev->board.valid == EM28XX_BOARD_NOT_VALIDATED) { dev_err(&dev->intf->dev, "\n\n" "The support for this board weren't valid yet.\n" "Please send a report of having this working\n" "not to V4L mailing list (and/or to other addresses)\n\n"); } /* Free eeprom data memory */ kfree(dev->eedata); dev->eedata = NULL; /* Allow override tuner type by a module parameter */ if (tuner >= 0) dev->tuner_type = tuner; /* * Dynamically generate a list of valid audio inputs for this * specific board, mapping them via enum em28xx_amux. */ idx = 0; for (i = 0; i < MAX_EM28XX_INPUT; i++) { if (!INPUT(i)->type) continue; /* Skip already mapped audio inputs */ duplicate_entry = false; for (j = 0; j < idx; j++) { if (INPUT(i)->amux == dev->amux_map[j]) { duplicate_entry = true; break; } } if (duplicate_entry) continue; dev->amux_map[idx++] = INPUT(i)->amux; } for (; idx < MAX_EM28XX_INPUT; idx++) dev->amux_map[idx] = EM28XX_AMUX_UNUSED; } void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) { memset(ctl, 0, sizeof(*ctl)); ctl->fname = XC2028_DEFAULT_FIRMWARE; ctl->max_len = 64; ctl->mts = em28xx_boards[dev->model].mts_firmware; switch (dev->model) { case EM2880_BOARD_EMPIRE_DUAL_TV: case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: case EM2882_BOARD_TERRATEC_HYBRID_XS: case EM2880_BOARD_TERRATEC_HYBRID_XS: case EM2880_BOARD_TERRATEC_HYBRID_XS_FR: case EM2881_BOARD_PINNACLE_HYBRID_PRO: case EM2882_BOARD_ZOLID_HYBRID_TV_STICK: ctl->demod = XC3028_FE_ZARLINK456; break; case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E: ctl->demod = XC3028_FE_DEFAULT; break; case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600: ctl->demod = XC3028_FE_DEFAULT; ctl->fname = XC3028L_DEFAULT_FIRMWARE; break; case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850: case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: case EM2880_BOARD_PINNACLE_PCTV_HD_PRO: /* FIXME: Better to specify the needed IF */ ctl->demod = XC3028_FE_DEFAULT; break; case EM2883_BOARD_KWORLD_HYBRID_330U: case EM2882_BOARD_DIKOM_DK300: case EM2882_BOARD_KWORLD_VS_DVBT: ctl->demod = XC3028_FE_CHINA; ctl->fname = XC2028_DEFAULT_FIRMWARE; break; case EM2882_BOARD_EVGA_INDTUBE: ctl->demod = XC3028_FE_CHINA; ctl->fname = XC3028L_DEFAULT_FIRMWARE; break; default: ctl->demod = XC3028_FE_OREN538; } } EXPORT_SYMBOL_GPL(em28xx_setup_xc3028); static void request_module_async(struct work_struct *work) { struct em28xx *dev = container_of(work, struct em28xx, request_module_wk); /* * The em28xx extensions can be modules or builtin. If the * modules are already loaded or are built in, those extensions * can be initialised right now. Otherwise, the module init * code will do it. */ /* * Devices with an audio-only intf also have a V4L/DVB/RC * intf. Don't register extensions twice on those devices. 
*/ if (dev->is_audio_only) { #if defined(CONFIG_MODULES) && defined(MODULE) request_module("em28xx-alsa"); #endif return; } em28xx_init_extension(dev); #if defined(CONFIG_MODULES) && defined(MODULE) if (dev->has_video) request_module("em28xx-v4l"); if (dev->usb_audio_type == EM28XX_USB_AUDIO_CLASS) request_module("snd-usb-audio"); else if (dev->usb_audio_type == EM28XX_USB_AUDIO_VENDOR) request_module("em28xx-alsa"); if (dev->board.has_dvb) request_module("em28xx-dvb"); if (dev->board.buttons || ((dev->board.ir_codes || dev->board.has_ir_i2c) && !disable_ir)) request_module("em28xx-rc"); #endif /* CONFIG_MODULES */ } static void request_modules(struct em28xx *dev) { INIT_WORK(&dev->request_module_wk, request_module_async); schedule_work(&dev->request_module_wk); } static void flush_request_modules(struct em28xx *dev) { flush_work(&dev->request_module_wk); } static int em28xx_media_device_init(struct em28xx *dev, struct usb_device *udev) { #ifdef CONFIG_MEDIA_CONTROLLER struct media_device *mdev; mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return -ENOMEM; if (udev->product) media_device_usb_init(mdev, udev, udev->product); else if (udev->manufacturer) media_device_usb_init(mdev, udev, udev->manufacturer); else media_device_usb_init(mdev, udev, dev_name(&dev->intf->dev)); dev->media_dev = mdev; #endif return 0; } static void em28xx_unregister_media_device(struct em28xx *dev) { #ifdef CONFIG_MEDIA_CONTROLLER if (dev->media_dev) { media_device_unregister(dev->media_dev); media_device_cleanup(dev->media_dev); kfree(dev->media_dev); dev->media_dev = NULL; } #endif } /* * em28xx_release_resources() * unregisters the v4l2,i2c and usb devices * called when the device gets disconnected or at module unload */ static void em28xx_release_resources(struct em28xx *dev) { struct usb_device *udev = interface_to_usbdev(dev->intf); /*FIXME: I2C IR should be disconnected */ mutex_lock(&dev->lock); em28xx_unregister_media_device(dev); if (dev->def_i2c_bus) em28xx_i2c_unregister(dev, 1); em28xx_i2c_unregister(dev, 0); if (dev->ts == PRIMARY_TS) usb_put_dev(udev); /* Mark device as unused */ clear_bit(dev->devno, em28xx_devused); mutex_unlock(&dev->lock); }; /** * em28xx_free_device() - Free em28xx device * * @ref: struct kref for em28xx device * * This is called when all extensions and em28xx core unregisters a device */ void em28xx_free_device(struct kref *ref) { struct em28xx *dev = kref_to_dev(ref); dev_info(&dev->intf->dev, "Freeing device\n"); if (!dev->disconnected) em28xx_release_resources(dev); if (dev->ts == PRIMARY_TS) kfree(dev->alt_max_pkt_size_isoc); kfree(dev); } EXPORT_SYMBOL_GPL(em28xx_free_device); /* * em28xx_init_dev() * allocates and inits the device structs, registers i2c bus and v4l device */ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, struct usb_interface *intf, int minor) { int retval; const char *chip_name = NULL; dev->intf = intf; mutex_init(&dev->ctrl_urb_lock); spin_lock_init(&dev->slock); dev->em28xx_write_regs = em28xx_write_regs; dev->em28xx_read_reg = em28xx_read_reg; dev->em28xx_read_reg_req_len = em28xx_read_reg_req_len; dev->em28xx_write_regs_req = em28xx_write_regs_req; dev->em28xx_read_reg_req = em28xx_read_reg_req; dev->board.is_em2800 = em28xx_boards[dev->model].is_em2800; em28xx_set_model(dev); dev->wait_after_write = 5; /* Based on the Chip ID, set the device configuration */ retval = em28xx_read_reg(dev, EM28XX_R0A_CHIPID); if (retval > 0) { dev->chip_id = retval; switch (dev->chip_id) { case CHIP_ID_EM2800: chip_name = "em2800"; 
break; case CHIP_ID_EM2710: chip_name = "em2710"; break; case CHIP_ID_EM2750: chip_name = "em2750"; break; case CHIP_ID_EM2765: chip_name = "em2765"; dev->wait_after_write = 0; dev->is_em25xx = 1; dev->eeprom_addrwidth_16bit = 1; break; case CHIP_ID_EM2820: chip_name = "em2710/2820"; if (le16_to_cpu(udev->descriptor.idVendor) == 0xeb1a) { __le16 idProd = udev->descriptor.idProduct; if (le16_to_cpu(idProd) == 0x2710) chip_name = "em2710"; else if (le16_to_cpu(idProd) == 0x2820) chip_name = "em2820"; } /* NOTE: the em2820 is used in webcams, too ! */ break; case CHIP_ID_EM2840: chip_name = "em2840"; break; case CHIP_ID_EM2860: chip_name = "em2860"; break; case CHIP_ID_EM2870: chip_name = "em2870"; dev->wait_after_write = 0; break; case CHIP_ID_EM2874: chip_name = "em2874"; dev->wait_after_write = 0; dev->eeprom_addrwidth_16bit = 1; break; case CHIP_ID_EM28174: chip_name = "em28174"; dev->wait_after_write = 0; dev->eeprom_addrwidth_16bit = 1; break; case CHIP_ID_EM28178: chip_name = "em28178"; dev->wait_after_write = 0; dev->eeprom_addrwidth_16bit = 1; break; case CHIP_ID_EM2883: chip_name = "em2882/3"; dev->wait_after_write = 0; break; case CHIP_ID_EM2884: chip_name = "em2884"; dev->wait_after_write = 0; dev->eeprom_addrwidth_16bit = 1; break; } } if (!chip_name) dev_info(&dev->intf->dev, "unknown em28xx chip ID (%d)\n", dev->chip_id); else dev_info(&dev->intf->dev, "chip ID is %s\n", chip_name); em28xx_media_device_init(dev, udev); if (dev->is_audio_only) { retval = em28xx_audio_setup(dev); if (retval) { retval = -ENODEV; goto err_deinit_media; } em28xx_init_extension(dev); return 0; } em28xx_pre_card_setup(dev); rt_mutex_init(&dev->i2c_bus_lock); /* register i2c bus 0 */ if (dev->board.is_em2800) retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM2800); else retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM28XX); if (retval < 0) { dev_err(&dev->intf->dev, "%s: em28xx_i2c_register bus 0 - error [%d]!\n", __func__, retval); goto err_deinit_media; } /* register i2c bus 1 */ if (dev->def_i2c_bus) { if (dev->is_em25xx) retval = em28xx_i2c_register(dev, 1, EM28XX_I2C_ALGO_EM25XX_BUS_B); else retval = em28xx_i2c_register(dev, 1, EM28XX_I2C_ALGO_EM28XX); if (retval < 0) { dev_err(&dev->intf->dev, "%s: em28xx_i2c_register bus 1 - error [%d]!\n", __func__, retval); goto err_unreg_i2c; } } /* Do board specific init and eeprom reading */ em28xx_card_setup(dev); return 0; err_unreg_i2c: em28xx_i2c_unregister(dev, 0); err_deinit_media: em28xx_unregister_media_device(dev); return retval; } static int em28xx_duplicate_dev(struct em28xx *dev) { int nr; struct em28xx *sec_dev = kmemdup(dev, sizeof(*sec_dev), GFP_KERNEL); if (!sec_dev) { dev->dev_next = NULL; return -ENOMEM; } /* Check to see next free device and mark as used */ do { nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS); if (nr >= EM28XX_MAXBOARDS) { /* No free device slots */ dev_warn(&dev->intf->dev, ": Supports only %i em28xx boards.\n", EM28XX_MAXBOARDS); kfree(sec_dev); dev->dev_next = NULL; return -ENOMEM; } } while (test_and_set_bit(nr, em28xx_devused)); sec_dev->devno = nr; snprintf(sec_dev->name, 28, "em28xx #%d", nr); sec_dev->dev_next = NULL; dev->dev_next = sec_dev; return 0; } /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) static void em28xx_check_usb_descriptor(struct em28xx *dev, struct usb_device *udev, struct usb_interface *intf, int alt, int ep, bool *has_vendor_audio, bool *has_video, bool *has_dvb) { const 
struct usb_endpoint_descriptor *e; int sizedescr, size; /* * NOTE: * * Old logic with support for isoc transfers only was: * 0x82 isoc => analog * 0x83 isoc => audio * 0x84 isoc => digital * * New logic with support for bulk transfers * 0x82 isoc => analog * 0x82 bulk => analog * 0x83 isoc* => audio * 0x84 isoc => digital * 0x84 bulk => analog or digital** * 0x85 isoc => digital TS2 * 0x85 bulk => digital TS2 * (*: audio should always be isoc) * (**: analog, if ep 0x82 is isoc, otherwise digital) * * The new logic preserves backwards compatibility and * reflects the endpoint configurations we have seen * so far. But there might be devices for which this * logic is not sufficient... */ e = &intf->altsetting[alt].endpoint[ep].desc; if (!usb_endpoint_dir_in(e)) return; sizedescr = le16_to_cpu(e->wMaxPacketSize); size = sizedescr & 0x7ff; if (udev->speed == USB_SPEED_HIGH) size = size * hb_mult(sizedescr); /* Only inspect input endpoints */ switch (e->bEndpointAddress) { case 0x82: *has_video = true; if (usb_endpoint_xfer_isoc(e)) { dev->analog_ep_isoc = e->bEndpointAddress; dev->alt_max_pkt_size_isoc[alt] = size; } else if (usb_endpoint_xfer_bulk(e)) { dev->analog_ep_bulk = e->bEndpointAddress; } return; case 0x83: if (usb_endpoint_xfer_isoc(e)) *has_vendor_audio = true; else dev_err(&intf->dev, "error: skipping audio endpoint 0x83, because it uses bulk transfers !\n"); return; case 0x84: if (*has_video && (usb_endpoint_xfer_bulk(e))) { dev->analog_ep_bulk = e->bEndpointAddress; } else { if (usb_endpoint_xfer_isoc(e)) { if (size > dev->dvb_max_pkt_size_isoc) { /* * 2) some manufacturers (e.g. Terratec) * disable endpoints by setting * wMaxPacketSize to 0 bytes for all * alt settings. So far, we've seen * this for DVB isoc endpoints only. */ *has_dvb = true; dev->dvb_ep_isoc = e->bEndpointAddress; dev->dvb_max_pkt_size_isoc = size; dev->dvb_alt_isoc = alt; } } else { *has_dvb = true; dev->dvb_ep_bulk = e->bEndpointAddress; } } return; case 0x85: if (usb_endpoint_xfer_isoc(e)) { if (size > dev->dvb_max_pkt_size_isoc_ts2) { dev->dvb_ep_isoc_ts2 = e->bEndpointAddress; dev->dvb_max_pkt_size_isoc_ts2 = size; dev->dvb_alt_isoc = alt; } } else { dev->dvb_ep_bulk_ts2 = e->bEndpointAddress; } return; } } /* * em28xx_usb_probe() * checks for supported devices */ static int em28xx_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev; struct em28xx *dev = NULL; int retval; bool has_vendor_audio = false, has_video = false, has_dvb = false; int i, nr, try_bulk; const int ifnum = intf->altsetting[0].desc.bInterfaceNumber; char *speed; udev = usb_get_dev(interface_to_usbdev(intf)); /* Check to see next free device and mark as used */ do { nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS); if (nr >= EM28XX_MAXBOARDS) { /* No free device slots */ dev_err(&intf->dev, "Driver supports up to %i em28xx boards.\n", EM28XX_MAXBOARDS); retval = -ENOMEM; goto err_no_slot; } } while (test_and_set_bit(nr, em28xx_devused)); /* Don't register audio interfaces */ if (intf->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) { dev_info(&intf->dev, "audio device (%04x:%04x): interface %i, class %i\n", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), ifnum, intf->altsetting[0].desc.bInterfaceClass); retval = -ENODEV; goto err; } /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { retval = -ENOMEM; goto err; } /* compute alternate max packet sizes */ dev->alt_max_pkt_size_isoc = 
kcalloc(intf->num_altsetting, sizeof(dev->alt_max_pkt_size_isoc[0]), GFP_KERNEL); if (!dev->alt_max_pkt_size_isoc) { kfree(dev); retval = -ENOMEM; goto err; } /* Get endpoints */ for (i = 0; i < intf->num_altsetting; i++) { int ep; for (ep = 0; ep < intf->altsetting[i].desc.bNumEndpoints; ep++) em28xx_check_usb_descriptor(dev, udev, intf, i, ep, &has_vendor_audio, &has_video, &has_dvb); } if (!(has_vendor_audio || has_video || has_dvb)) { retval = -ENODEV; goto err_free; } switch (udev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_UNKNOWN: case USB_SPEED_FULL: speed = "12"; break; case USB_SPEED_HIGH: speed = "480"; break; default: speed = "unknown"; } dev_info(&intf->dev, "New device %s %s @ %s Mbps (%04x:%04x, interface %d, class %d)\n", udev->manufacturer ? udev->manufacturer : "", udev->product ? udev->product : "", speed, le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), ifnum, intf->altsetting->desc.bInterfaceNumber); /* * Make sure we have 480 Mbps of bandwidth, otherwise things like * video stream wouldn't likely work, since 12 Mbps is generally * not enough even for most Digital TV streams. */ if (udev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) { dev_err(&intf->dev, "Device initialization failed.\n"); dev_err(&intf->dev, "Device must be connected to a high-speed USB 2.0 port.\n"); retval = -ENODEV; goto err_free; } kref_init(&dev->ref); dev->devno = nr; dev->model = id->driver_info; dev->alt = -1; dev->is_audio_only = has_vendor_audio && !(has_video || has_dvb); dev->has_video = has_video; dev->ifnum = ifnum; dev->ts = PRIMARY_TS; snprintf(dev->name, 28, "em28xx"); dev->dev_next = NULL; if (has_vendor_audio) { dev_info(&intf->dev, "Audio interface %i found (Vendor Class)\n", ifnum); dev->usb_audio_type = EM28XX_USB_AUDIO_VENDOR; } /* Checks if audio is provided by a USB Audio Class intf */ for (i = 0; i < udev->config->desc.bNumInterfaces; i++) { struct usb_interface *uif = udev->config->interface[i]; if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) { if (has_vendor_audio) dev_err(&intf->dev, "em28xx: device seems to have vendor AND usb audio class interfaces !\n" "\t\tThe vendor interface will be ignored. Please contact the developers <linux-media@vger.kernel.org>\n"); dev->usb_audio_type = EM28XX_USB_AUDIO_CLASS; break; } } if (has_video) dev_info(&intf->dev, "Video interface %i found:%s%s\n", ifnum, dev->analog_ep_bulk ? " bulk" : "", dev->analog_ep_isoc ? " isoc" : ""); if (has_dvb) dev_info(&intf->dev, "DVB interface %i found:%s%s\n", ifnum, dev->dvb_ep_bulk ? " bulk" : "", dev->dvb_ep_isoc ? 
" isoc" : ""); dev->num_alt = intf->num_altsetting; if ((unsigned int)card[nr] < em28xx_bcount) dev->model = card[nr]; /* save our data pointer in this intf device */ usb_set_intfdata(intf, dev); /* allocate device struct and check if the device is a webcam */ mutex_init(&dev->lock); retval = em28xx_init_dev(dev, udev, intf, nr); if (retval) goto err_free; if (usb_xfer_mode < 0) { if (dev->is_webcam) try_bulk = 1; else try_bulk = 0; } else { try_bulk = usb_xfer_mode > 0; } /* Disable V4L2 if the device doesn't have a decoder or image sensor */ if (has_video && dev->board.decoder == EM28XX_NODECODER && dev->em28xx_sensor == EM28XX_NOSENSOR) { dev_err(&intf->dev, "Currently, V4L2 is not supported on this model\n"); has_video = false; dev->has_video = false; } if (dev->board.has_dual_ts && (dev->tuner_type != TUNER_ABSENT || INPUT(0)->type)) { /* * The logic with sets alternate is not ready for dual-tuners * which analog modes. */ dev_err(&intf->dev, "We currently don't support analog TV or stream capture on dual tuners.\n"); has_video = false; } /* Select USB transfer types to use */ if (has_video) { if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk)) dev->analog_xfer_bulk = 1; dev_info(&intf->dev, "analog set to %s mode.\n", dev->analog_xfer_bulk ? "bulk" : "isoc"); } if (has_dvb) { if (!dev->dvb_ep_isoc || (try_bulk && dev->dvb_ep_bulk)) dev->dvb_xfer_bulk = 1; dev_info(&intf->dev, "dvb set to %s mode.\n", dev->dvb_xfer_bulk ? "bulk" : "isoc"); } if (dev->board.has_dual_ts && em28xx_duplicate_dev(dev) == 0) { kref_init(&dev->dev_next->ref); dev->dev_next->ts = SECONDARY_TS; dev->dev_next->alt = -1; dev->dev_next->is_audio_only = has_vendor_audio && !(has_video || has_dvb); dev->dev_next->has_video = false; dev->dev_next->ifnum = ifnum; dev->dev_next->model = id->driver_info; mutex_init(&dev->dev_next->lock); retval = em28xx_init_dev(dev->dev_next, udev, intf, dev->dev_next->devno); if (retval) goto err_free; dev->dev_next->board.ir_codes = NULL; /* No IR for 2nd tuner */ dev->dev_next->board.has_ir_i2c = 0; /* No IR for 2nd tuner */ if (usb_xfer_mode < 0) { if (dev->dev_next->is_webcam) try_bulk = 1; else try_bulk = 0; } else { try_bulk = usb_xfer_mode > 0; } /* Select USB transfer types to use */ if (has_dvb) { if (!dev->dvb_ep_isoc_ts2 || (try_bulk && dev->dvb_ep_bulk_ts2)) dev->dev_next->dvb_xfer_bulk = 1; dev_info(&dev->intf->dev, "dvb ts2 set to %s mode.\n", dev->dev_next->dvb_xfer_bulk ? "bulk" : "isoc"); } dev->dev_next->dvb_ep_isoc = dev->dvb_ep_isoc_ts2; dev->dev_next->dvb_ep_bulk = dev->dvb_ep_bulk_ts2; dev->dev_next->dvb_max_pkt_size_isoc = dev->dvb_max_pkt_size_isoc_ts2; dev->dev_next->dvb_alt_isoc = dev->dvb_alt_isoc; /* Configure hardware to support TS2*/ if (dev->dvb_xfer_bulk) { /* The ep4 and ep5 are configured for BULK */ em28xx_write_reg(dev, 0x0b, 0x96); mdelay(100); em28xx_write_reg(dev, 0x0b, 0x80); mdelay(100); } else { /* The ep4 and ep5 are configured for ISO */ em28xx_write_reg(dev, 0x0b, 0x96); mdelay(100); em28xx_write_reg(dev, 0x0b, 0x82); mdelay(100); } } request_modules(dev); /* * Do it at the end, to reduce dynamic configuration changes during * the device init. Yet, as request_modules() can be async, the * topology will likely change after the load of the em28xx subdrivers. */ #ifdef CONFIG_MEDIA_CONTROLLER /* * No need to check the return value, the device will still be * usable without media controller API. 
*/ retval = media_device_register(dev->media_dev); #endif return 0; err_free: kfree(dev->alt_max_pkt_size_isoc); kfree(dev); err: clear_bit(nr, em28xx_devused); err_no_slot: usb_put_dev(udev); return retval; } /* * em28xx_usb_disconnect() * called when the device gets disconnected * video device will be unregistered on v4l2_close in case it is still open */ static void em28xx_usb_disconnect(struct usb_interface *intf) { struct em28xx *dev; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) return; if (dev->dev_next) { dev->dev_next->disconnected = 1; dev_info(&dev->intf->dev, "Disconnecting %s\n", dev->dev_next->name); } dev->disconnected = 1; dev_info(&dev->intf->dev, "Disconnecting %s\n", dev->name); flush_request_modules(dev); em28xx_close_extension(dev); if (dev->dev_next) em28xx_release_resources(dev->dev_next); em28xx_release_resources(dev); if (dev->dev_next) { kref_put(&dev->dev_next->ref, em28xx_free_device); dev->dev_next = NULL; } kref_put(&dev->ref, em28xx_free_device); } static int em28xx_usb_suspend(struct usb_interface *intf, pm_message_t message) { struct em28xx *dev; dev = usb_get_intfdata(intf); if (!dev) return 0; em28xx_suspend_extension(dev); return 0; } static int em28xx_usb_resume(struct usb_interface *intf) { struct em28xx *dev; dev = usb_get_intfdata(intf); if (!dev) return 0; em28xx_resume_extension(dev); return 0; } static struct usb_driver em28xx_usb_driver = { .name = "em28xx", .probe = em28xx_usb_probe, .disconnect = em28xx_usb_disconnect, .suspend = em28xx_usb_suspend, .resume = em28xx_usb_resume, .reset_resume = em28xx_usb_resume, .id_table = em28xx_id_table, }; module_usb_driver(em28xx_usb_driver);
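The endpoint probing in em28xx_check_usb_descriptor() above derives the usable payload per microframe from wMaxPacketSize: bits 0..10 hold the base packet size and, on high-speed devices, bits 11..12 hold the number of additional transactions per microframe (the hb_mult() macro). A minimal standalone sketch of that arithmetic, using a made-up descriptor value rather than anything read from real hardware:

#include <stdint.h>
#include <stdio.h>

/* Same decoding as the driver's hb_mult(): bits 11..12 give the number of
 * additional transactions per microframe on high-speed endpoints. */
#define HB_MULT(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))

unsigned int ep_bytes_per_microframe(uint16_t wMaxPacketSize, int is_high_speed)
{
	unsigned int size = wMaxPacketSize & 0x7ff;	/* base packet size */

	if (is_high_speed)
		size *= HB_MULT(wMaxPacketSize);	/* up to 3 transactions */
	return size;
}

int main(void)
{
	uint16_t descr = 0x1400;	/* hypothetical: base 1024 bytes, 2 extra transactions */

	/* prints "3072 bytes per microframe" */
	printf("%u bytes per microframe\n", ep_bytes_per_microframe(descr, 1));
	return 0;
}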
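The board-hint fallback in em28xx_hint_board() keys its lookups (em28xx_eeprom_hash, em28xx_i2c_hash) on a 32-bit digest of the EEPROM contents or of the scanned I2C device list, since devices with generic USB IDs cannot be told apart otherwise. The driver's actual hashing helper is not part of this excerpt; purely as an illustration of the table-lookup idea, a hypothetical digest-and-match sketch (hash function and entries invented, not the driver's) could look like:

#include <stddef.h>
#include <stdint.h>

struct board_hash_entry {
	uint32_t hash;	/* digest expected for this board */
	int model;	/* board id to select on a match */
};

/* Hypothetical digest: fold every byte into a 32-bit accumulator
 * (djb2-style). The real driver computes its hash differently. */
uint32_t digest_bytes(const uint8_t *buf, size_t len)
{
	uint32_t h = 5381;

	while (len--)
		h = (h << 5) + h + *buf++;
	return h;
}

/* Return the model of the first entry whose digest matches, or -1. */
int hint_board_model(const struct board_hash_entry *table, size_t n,
		     const uint8_t *eeprom, size_t len)
{
	uint32_t h = digest_bytes(eeprom, len);
	size_t i;

	for (i = 0; i < n; i++)
		if (table[i].hash == h)
			return table[i].model;
	return -1;
}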
// SPDX-License-Identifier: GPL-2.0-only /* * linux/net/sunrpc/svc.c * * High-level RPC service routines * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> * * Multiple threads pools and NUMAisation * Copyright (c) 2006 Silicon Graphics, Inc. * by Greg Banks <gnb@melbourne.sgi.com> */ #include <linux/linkage.h> #include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/net.h> #include <linux/in.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/bc_xprt.h> #include <trace/events/sunrpc.h> #include "fail.h" #include "sunrpc.h" #define RPCDBG_FACILITY RPCDBG_SVCDSP static void svc_unregister(const struct svc_serv *serv, struct net *net); #define SVC_POOL_DEFAULT SVC_POOL_GLOBAL /* * Mode for mapping cpus to pools. */ enum { SVC_POOL_AUTO = -1, /* choose one of the others */ SVC_POOL_GLOBAL, /* no mapping, just a single global pool * (legacy & UP mode) */ SVC_POOL_PERCPU, /* one pool per cpu */ SVC_POOL_PERNODE /* one pool per numa node */ }; /* * Structure for mapping cpus to pools and vice versa. * Setup once during sunrpc initialisation. 
*/ struct svc_pool_map { int count; /* How many svc_servs use us */ int mode; /* Note: int not enum to avoid * warnings about "enumeration value * not handled in switch" */ unsigned int npools; unsigned int *pool_to; /* maps pool id to cpu or node */ unsigned int *to_pool; /* maps cpu or node to pool id */ }; static struct svc_pool_map svc_pool_map = { .mode = SVC_POOL_DEFAULT }; static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */ static int __param_set_pool_mode(const char *val, struct svc_pool_map *m) { int err, mode; mutex_lock(&svc_pool_map_mutex); err = 0; if (!strncmp(val, "auto", 4)) mode = SVC_POOL_AUTO; else if (!strncmp(val, "global", 6)) mode = SVC_POOL_GLOBAL; else if (!strncmp(val, "percpu", 6)) mode = SVC_POOL_PERCPU; else if (!strncmp(val, "pernode", 7)) mode = SVC_POOL_PERNODE; else err = -EINVAL; if (err) goto out; if (m->count == 0) m->mode = mode; else if (mode != m->mode) err = -EBUSY; out: mutex_unlock(&svc_pool_map_mutex); return err; } static int param_set_pool_mode(const char *val, const struct kernel_param *kp) { struct svc_pool_map *m = kp->arg; return __param_set_pool_mode(val, m); } int sunrpc_set_pool_mode(const char *val) { return __param_set_pool_mode(val, &svc_pool_map); } EXPORT_SYMBOL(sunrpc_set_pool_mode); /** * sunrpc_get_pool_mode - get the current pool_mode for the host * @buf: where to write the current pool_mode * @size: size of @buf * * Grab the current pool_mode from the svc_pool_map and write * the resulting string to @buf. Returns the number of characters * written to @buf (a'la snprintf()). */ int sunrpc_get_pool_mode(char *buf, size_t size) { struct svc_pool_map *m = &svc_pool_map; switch (m->mode) { case SVC_POOL_AUTO: return snprintf(buf, size, "auto"); case SVC_POOL_GLOBAL: return snprintf(buf, size, "global"); case SVC_POOL_PERCPU: return snprintf(buf, size, "percpu"); case SVC_POOL_PERNODE: return snprintf(buf, size, "pernode"); default: return snprintf(buf, size, "%d", m->mode); } } EXPORT_SYMBOL(sunrpc_get_pool_mode); static int param_get_pool_mode(char *buf, const struct kernel_param *kp) { char str[16]; int len; len = sunrpc_get_pool_mode(str, ARRAY_SIZE(str)); /* Ensure we have room for newline and NUL */ len = min_t(int, len, ARRAY_SIZE(str) - 2); /* tack on the newline */ str[len] = '\n'; str[len + 1] = '\0'; return sysfs_emit(buf, "%s", str); } module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode, &svc_pool_map, 0644); /* * Detect best pool mapping mode heuristically, * according to the machine's topology. */ static int svc_pool_map_choose_mode(void) { unsigned int node; if (nr_online_nodes > 1) { /* * Actually have multiple NUMA nodes, * so split pools on NUMA node boundaries */ return SVC_POOL_PERNODE; } node = first_online_node; if (nr_cpus_node(node) > 2) { /* * Non-trivial SMP, or CONFIG_NUMA on * non-NUMA hardware, e.g. with a generic * x86_64 kernel on Xeons. In this case we * want to divide the pools on cpu boundaries. */ return SVC_POOL_PERCPU; } /* default: one global pool */ return SVC_POOL_GLOBAL; } /* * Allocate the to_pool[] and pool_to[] arrays. * Returns 0 on success or an errno. 
*/ static int svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools) { m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL); if (!m->to_pool) goto fail; m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL); if (!m->pool_to) goto fail_free; return 0; fail_free: kfree(m->to_pool); m->to_pool = NULL; fail: return -ENOMEM; } /* * Initialise the pool map for SVC_POOL_PERCPU mode. * Returns number of pools or <0 on error. */ static int svc_pool_map_init_percpu(struct svc_pool_map *m) { unsigned int maxpools = nr_cpu_ids; unsigned int pidx = 0; unsigned int cpu; int err; err = svc_pool_map_alloc_arrays(m, maxpools); if (err) return err; for_each_online_cpu(cpu) { BUG_ON(pidx >= maxpools); m->to_pool[cpu] = pidx; m->pool_to[pidx] = cpu; pidx++; } /* cpus brought online later all get mapped to pool0, sorry */ return pidx; }; /* * Initialise the pool map for SVC_POOL_PERNODE mode. * Returns number of pools or <0 on error. */ static int svc_pool_map_init_pernode(struct svc_pool_map *m) { unsigned int maxpools = nr_node_ids; unsigned int pidx = 0; unsigned int node; int err; err = svc_pool_map_alloc_arrays(m, maxpools); if (err) return err; for_each_node_with_cpus(node) { /* some architectures (e.g. SN2) have cpuless nodes */ BUG_ON(pidx > maxpools); m->to_pool[node] = pidx; m->pool_to[pidx] = node; pidx++; } /* nodes brought online later all get mapped to pool0, sorry */ return pidx; } /* * Add a reference to the global map of cpus to pools (and * vice versa) if pools are in use. * Initialise the map if we're the first user. * Returns the number of pools. If this is '1', no reference * was taken. */ static unsigned int svc_pool_map_get(void) { struct svc_pool_map *m = &svc_pool_map; int npools = -1; mutex_lock(&svc_pool_map_mutex); if (m->count++) { mutex_unlock(&svc_pool_map_mutex); return m->npools; } if (m->mode == SVC_POOL_AUTO) m->mode = svc_pool_map_choose_mode(); switch (m->mode) { case SVC_POOL_PERCPU: npools = svc_pool_map_init_percpu(m); break; case SVC_POOL_PERNODE: npools = svc_pool_map_init_pernode(m); break; } if (npools <= 0) { /* default, or memory allocation failure */ npools = 1; m->mode = SVC_POOL_GLOBAL; } m->npools = npools; mutex_unlock(&svc_pool_map_mutex); return npools; } /* * Drop a reference to the global map of cpus to pools. * When the last reference is dropped, the map data is * freed; this allows the sysadmin to change the pool. */ static void svc_pool_map_put(void) { struct svc_pool_map *m = &svc_pool_map; mutex_lock(&svc_pool_map_mutex); if (!--m->count) { kfree(m->to_pool); m->to_pool = NULL; kfree(m->pool_to); m->pool_to = NULL; m->npools = 0; } mutex_unlock(&svc_pool_map_mutex); } static int svc_pool_map_get_node(unsigned int pidx) { const struct svc_pool_map *m = &svc_pool_map; if (m->count) { if (m->mode == SVC_POOL_PERCPU) return cpu_to_node(m->pool_to[pidx]); if (m->mode == SVC_POOL_PERNODE) return m->pool_to[pidx]; } return NUMA_NO_NODE; } /* * Set the given thread's cpus_allowed mask so that it * will only run on cpus in the given pool. */ static inline void svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx) { struct svc_pool_map *m = &svc_pool_map; unsigned int node = m->pool_to[pidx]; /* * The caller checks for sv_nrpools > 1, which * implies that we've been initialized. 
*/ WARN_ON_ONCE(m->count == 0); if (m->count == 0) return; switch (m->mode) { case SVC_POOL_PERCPU: { set_cpus_allowed_ptr(task, cpumask_of(node)); break; } case SVC_POOL_PERNODE: { set_cpus_allowed_ptr(task, cpumask_of_node(node)); break; } } } /** * svc_pool_for_cpu - Select pool to run a thread on this cpu * @serv: An RPC service * * Use the active CPU and the svc_pool_map's mode setting to * select the svc thread pool to use. Once initialized, the * svc_pool_map does not change. * * Return value: * A pointer to an svc_pool */ struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv) { struct svc_pool_map *m = &svc_pool_map; int cpu = raw_smp_processor_id(); unsigned int pidx = 0; if (serv->sv_nrpools <= 1) return serv->sv_pools; switch (m->mode) { case SVC_POOL_PERCPU: pidx = m->to_pool[cpu]; break; case SVC_POOL_PERNODE: pidx = m->to_pool[cpu_to_node(cpu)]; break; } return &serv->sv_pools[pidx % serv->sv_nrpools]; } static int svc_rpcb_setup(struct svc_serv *serv, struct net *net) { int err; err = rpcb_create_local(net); if (err) return err; /* Remove any stale portmap registrations */ svc_unregister(serv, net); return 0; } void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net) { svc_unregister(serv, net); rpcb_put_local(net); } EXPORT_SYMBOL_GPL(svc_rpcb_cleanup); static int svc_uses_rpcbind(struct svc_serv *serv) { unsigned int p, i; for (p = 0; p < serv->sv_nprogs; p++) { struct svc_program *progp = &serv->sv_programs[p]; for (i = 0; i < progp->pg_nvers; i++) { if (progp->pg_vers[i] == NULL) continue; if (!progp->pg_vers[i]->vs_hidden) return 1; } } return 0; } int svc_bind(struct svc_serv *serv, struct net *net) { if (!svc_uses_rpcbind(serv)) return 0; return svc_rpcb_setup(serv, net); } EXPORT_SYMBOL_GPL(svc_bind); #if defined(CONFIG_SUNRPC_BACKCHANNEL) static void __svc_init_bc(struct svc_serv *serv) { lwq_init(&serv->sv_cb_list); } #else static void __svc_init_bc(struct svc_serv *serv) { } #endif /* * Create an RPC service */ static struct svc_serv * __svc_create(struct svc_program *prog, int nprogs, struct svc_stat *stats, unsigned int bufsize, int npools, int (*threadfn)(void *data)) { struct svc_serv *serv; unsigned int vers; unsigned int xdrsize; unsigned int i; if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) return NULL; serv->sv_name = prog->pg_name; serv->sv_programs = prog; serv->sv_nprogs = nprogs; serv->sv_stats = stats; if (bufsize > RPCSVC_MAXPAYLOAD) bufsize = RPCSVC_MAXPAYLOAD; serv->sv_max_payload = bufsize? 
bufsize : 4096; serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE); serv->sv_threadfn = threadfn; xdrsize = 0; for (i = 0; i < nprogs; i++) { struct svc_program *progp = &prog[i]; progp->pg_lovers = progp->pg_nvers-1; for (vers = 0; vers < progp->pg_nvers ; vers++) if (progp->pg_vers[vers]) { progp->pg_hivers = vers; if (progp->pg_lovers > vers) progp->pg_lovers = vers; if (progp->pg_vers[vers]->vs_xdrsize > xdrsize) xdrsize = progp->pg_vers[vers]->vs_xdrsize; } } serv->sv_xdrsize = xdrsize; INIT_LIST_HEAD(&serv->sv_tempsocks); INIT_LIST_HEAD(&serv->sv_permsocks); timer_setup(&serv->sv_temptimer, NULL, 0); spin_lock_init(&serv->sv_lock); __svc_init_bc(serv); serv->sv_nrpools = npools; serv->sv_pools = kcalloc(serv->sv_nrpools, sizeof(struct svc_pool), GFP_KERNEL); if (!serv->sv_pools) { kfree(serv); return NULL; } for (i = 0; i < serv->sv_nrpools; i++) { struct svc_pool *pool = &serv->sv_pools[i]; dprintk("svc: initialising pool %u for %s\n", i, serv->sv_name); pool->sp_id = i; lwq_init(&pool->sp_xprts); INIT_LIST_HEAD(&pool->sp_all_threads); init_llist_head(&pool->sp_idle_threads); percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL); } return serv; } /** * svc_create - Create an RPC service * @prog: the RPC program the new service will handle * @bufsize: maximum message size for @prog * @threadfn: a function to service RPC requests for @prog * * Returns an instantiated struct svc_serv object or NULL. */ struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize, int (*threadfn)(void *data)) { return __svc_create(prog, 1, NULL, bufsize, 1, threadfn); } EXPORT_SYMBOL_GPL(svc_create); /** * svc_create_pooled - Create an RPC service with pooled threads * @prog: Array of RPC programs the new service will handle * @nprogs: Number of programs in the array * @stats: the stats struct if desired * @bufsize: maximum message size for @prog * @threadfn: a function to service RPC requests for @prog * * Returns an instantiated struct svc_serv object or NULL. */ struct svc_serv *svc_create_pooled(struct svc_program *prog, unsigned int nprogs, struct svc_stat *stats, unsigned int bufsize, int (*threadfn)(void *data)) { struct svc_serv *serv; unsigned int npools = svc_pool_map_get(); serv = __svc_create(prog, nprogs, stats, bufsize, npools, threadfn); if (!serv) goto out_err; serv->sv_is_pooled = true; return serv; out_err: svc_pool_map_put(); return NULL; } EXPORT_SYMBOL_GPL(svc_create_pooled); /* * Destroy an RPC service. Should be called with appropriate locking to * protect sv_permsocks and sv_tempsocks. */ void svc_destroy(struct svc_serv **servp) { struct svc_serv *serv = *servp; unsigned int i; *servp = NULL; dprintk("svc: svc_destroy(%s)\n", serv->sv_programs->pg_name); timer_shutdown_sync(&serv->sv_temptimer); /* * Remaining transports at this point are not expected. 
*/ WARN_ONCE(!list_empty(&serv->sv_permsocks), "SVC: permsocks remain for %s\n", serv->sv_programs->pg_name); WARN_ONCE(!list_empty(&serv->sv_tempsocks), "SVC: tempsocks remain for %s\n", serv->sv_programs->pg_name); cache_clean_deferred(serv); if (serv->sv_is_pooled) svc_pool_map_put(); for (i = 0; i < serv->sv_nrpools; i++) { struct svc_pool *pool = &serv->sv_pools[i]; percpu_counter_destroy(&pool->sp_messages_arrived); percpu_counter_destroy(&pool->sp_sockets_queued); percpu_counter_destroy(&pool->sp_threads_woken); } kfree(serv->sv_pools); kfree(serv); } EXPORT_SYMBOL_GPL(svc_destroy); static bool svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node) { unsigned long pages, ret; /* bc_xprt uses fore channel allocated buffers */ if (svc_is_backchannel(rqstp)) return true; pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. * We assume one is at most one page */ WARN_ON_ONCE(pages > RPCSVC_MAXPAGES); if (pages > RPCSVC_MAXPAGES) pages = RPCSVC_MAXPAGES; ret = alloc_pages_bulk_node(GFP_KERNEL, node, pages, rqstp->rq_pages); return ret == pages; } /* * Release an RPC server buffer */ static void svc_release_buffer(struct svc_rqst *rqstp) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++) if (rqstp->rq_pages[i]) put_page(rqstp->rq_pages[i]); } static void svc_rqst_free(struct svc_rqst *rqstp) { folio_batch_release(&rqstp->rq_fbatch); svc_release_buffer(rqstp); if (rqstp->rq_scratch_page) put_page(rqstp->rq_scratch_page); kfree(rqstp->rq_resp); kfree(rqstp->rq_argp); kfree(rqstp->rq_auth_data); kfree_rcu(rqstp, rq_rcu_head); } static struct svc_rqst * svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) { struct svc_rqst *rqstp; rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node); if (!rqstp) return rqstp; folio_batch_init(&rqstp->rq_fbatch); rqstp->rq_server = serv; rqstp->rq_pool = pool; rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0); if (!rqstp->rq_scratch_page) goto out_enomem; rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); if (!rqstp->rq_argp) goto out_enomem; rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); if (!rqstp->rq_resp) goto out_enomem; if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node)) goto out_enomem; rqstp->rq_err = -EAGAIN; /* No error yet */ serv->sv_nrthreads += 1; pool->sp_nrthreads += 1; /* Protected by whatever lock the service uses when calling * svc_set_num_threads() */ list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads); return rqstp; out_enomem: svc_rqst_free(rqstp); return NULL; } /** * svc_pool_wake_idle_thread - Awaken an idle thread in @pool * @pool: service thread pool * * Can be called from soft IRQ or process context. Finding an idle * service thread and marking it BUSY is atomic with respect to * other calls to svc_pool_wake_idle_thread(). * */ void svc_pool_wake_idle_thread(struct svc_pool *pool) { struct svc_rqst *rqstp; struct llist_node *ln; rcu_read_lock(); ln = READ_ONCE(pool->sp_idle_threads.first); if (ln) { rqstp = llist_entry(ln, struct svc_rqst, rq_idle); WRITE_ONCE(rqstp->rq_qtime, ktime_get()); if (!task_is_running(rqstp->rq_task)) { wake_up_process(rqstp->rq_task); trace_svc_wake_up(rqstp->rq_task->pid); percpu_counter_inc(&pool->sp_threads_woken); } rcu_read_unlock(); return; } rcu_read_unlock(); } EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread); static struct svc_pool * svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) { return pool ? 
pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools]; } static struct svc_pool * svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool, unsigned int *state) { struct svc_pool *pool; unsigned int i; pool = target_pool; if (!pool) { for (i = 0; i < serv->sv_nrpools; i++) { pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; if (pool->sp_nrthreads) break; } } if (pool && pool->sp_nrthreads) { set_bit(SP_VICTIM_REMAINS, &pool->sp_flags); set_bit(SP_NEED_VICTIM, &pool->sp_flags); return pool; } return NULL; } static int svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { struct svc_rqst *rqstp; struct task_struct *task; struct svc_pool *chosen_pool; unsigned int state = serv->sv_nrthreads-1; int node; int err; do { nrservs--; chosen_pool = svc_pool_next(serv, pool, &state); node = svc_pool_map_get_node(chosen_pool->sp_id); rqstp = svc_prepare_thread(serv, chosen_pool, node); if (!rqstp) return -ENOMEM; task = kthread_create_on_node(serv->sv_threadfn, rqstp, node, "%s", serv->sv_name); if (IS_ERR(task)) { svc_exit_thread(rqstp); return PTR_ERR(task); } rqstp->rq_task = task; if (serv->sv_nrpools > 1) svc_pool_map_set_cpumask(task, chosen_pool->sp_id); svc_sock_update_bufs(serv); wake_up_process(task); wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN); err = rqstp->rq_err; if (err) { svc_exit_thread(rqstp); return err; } } while (nrservs > 0); return 0; } static int svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { unsigned int state = serv->sv_nrthreads-1; struct svc_pool *victim; do { victim = svc_pool_victim(serv, pool, &state); if (!victim) break; svc_pool_wake_idle_thread(victim); wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS, TASK_IDLE); nrservs++; } while (nrservs < 0); return 0; } /** * svc_set_num_threads - adjust number of threads per RPC service * @serv: RPC service to adjust * @pool: Specific pool from which to choose threads, or NULL * @nrservs: New number of threads for @serv (0 or less means kill all threads) * * Create or destroy threads to make the number of threads for @serv the * given number. If @pool is non-NULL, change only threads in that pool; * otherwise, round-robin between all pools for @serv. @serv's * sv_nrthreads is adjusted for each thread created or destroyed. * * Caller must ensure mutual exclusion between this and server startup or * shutdown. * * Returns zero on success or a negative errno if an error occurred while * starting a thread. */ int svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { if (!pool) nrservs -= serv->sv_nrthreads; else nrservs -= pool->sp_nrthreads; if (nrservs > 0) return svc_start_kthreads(serv, pool, nrservs); if (nrservs < 0) return svc_stop_kthreads(serv, pool, nrservs); return 0; } EXPORT_SYMBOL_GPL(svc_set_num_threads); /** * svc_rqst_replace_page - Replace one page in rq_pages[] * @rqstp: svc_rqst with pages to replace * @page: replacement page * * When replacing a page in rq_pages, batch the release of the * replaced pages to avoid hammering the page allocator. 
* * Return values: * %true: page replaced * %false: array bounds checking failed */ bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page) { struct page **begin = rqstp->rq_pages; struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES]; if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) { trace_svc_replace_page_err(rqstp); return false; } if (*rqstp->rq_next_page) { if (!folio_batch_add(&rqstp->rq_fbatch, page_folio(*rqstp->rq_next_page))) __folio_batch_release(&rqstp->rq_fbatch); } get_page(page); *(rqstp->rq_next_page++) = page; return true; } EXPORT_SYMBOL_GPL(svc_rqst_replace_page); /** * svc_rqst_release_pages - Release Reply buffer pages * @rqstp: RPC transaction context * * Release response pages that might still be in flight after * svc_send, and any spliced filesystem-owned pages. */ void svc_rqst_release_pages(struct svc_rqst *rqstp) { int i, count = rqstp->rq_next_page - rqstp->rq_respages; if (count) { release_pages(rqstp->rq_respages, count); for (i = 0; i < count; i++) rqstp->rq_respages[i] = NULL; } } /** * svc_exit_thread - finalise the termination of a sunrpc server thread * @rqstp: the svc_rqst which represents the thread. * * When a thread started with svc_new_thread() exits it must call * svc_exit_thread() as its last act. This must be done with the * service mutex held. Normally this is held by a DIFFERENT thread, the * one that is calling svc_set_num_threads() and which will wait for * SP_VICTIM_REMAINS to be cleared before dropping the mutex. If the * thread exits for any reason other than svc_thread_should_stop() * returning %true (which indicated that svc_set_num_threads() is * waiting for it to exit), then it must take the service mutex itself, * which can only safely be done using mutex_try_lock(). */ void svc_exit_thread(struct svc_rqst *rqstp) { struct svc_serv *serv = rqstp->rq_server; struct svc_pool *pool = rqstp->rq_pool; list_del_rcu(&rqstp->rq_all); pool->sp_nrthreads -= 1; serv->sv_nrthreads -= 1; svc_sock_update_bufs(serv); svc_rqst_free(rqstp); clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags); } EXPORT_SYMBOL_GPL(svc_exit_thread); /* * Register an "inet" protocol family netid with the local * rpcbind daemon via an rpcbind v4 SET request. * * No netconfig infrastructure is available in the kernel, so * we map IP_ protocol numbers to netids by hand. * * Returns zero on success; a negative errno value is returned * if any error occurs. */ static int __svc_rpcb_register4(struct net *net, const u32 program, const u32 version, const unsigned short protocol, const unsigned short port) { const struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = htons(port), }; const char *netid; int error; switch (protocol) { case IPPROTO_UDP: netid = RPCBIND_NETID_UDP; break; case IPPROTO_TCP: netid = RPCBIND_NETID_TCP; break; default: return -ENOPROTOOPT; } error = rpcb_v4_register(net, program, version, (const struct sockaddr *)&sin, netid); /* * User space didn't support rpcbind v4, so retry this * registration request with the legacy rpcbind v2 protocol. */ if (error == -EPROTONOSUPPORT) error = rpcb_register(net, program, version, protocol, port); return error; } #if IS_ENABLED(CONFIG_IPV6) /* * Register an "inet6" protocol family netid with the local * rpcbind daemon via an rpcbind v4 SET request. * * No netconfig infrastructure is available in the kernel, so * we map IP_ protocol numbers to netids by hand. 
* * Returns zero on success; a negative errno value is returned * if any error occurs. */ static int __svc_rpcb_register6(struct net *net, const u32 program, const u32 version, const unsigned short protocol, const unsigned short port) { const struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, .sin6_port = htons(port), }; const char *netid; int error; switch (protocol) { case IPPROTO_UDP: netid = RPCBIND_NETID_UDP6; break; case IPPROTO_TCP: netid = RPCBIND_NETID_TCP6; break; default: return -ENOPROTOOPT; } error = rpcb_v4_register(net, program, version, (const struct sockaddr *)&sin6, netid); /* * User space didn't support rpcbind version 4, so we won't * use a PF_INET6 listener. */ if (error == -EPROTONOSUPPORT) error = -EAFNOSUPPORT; return error; } #endif /* IS_ENABLED(CONFIG_IPV6) */ /* * Register a kernel RPC service via rpcbind version 4. * * Returns zero on success; a negative errno value is returned * if any error occurs. */ static int __svc_register(struct net *net, const char *progname, const u32 program, const u32 version, const int family, const unsigned short protocol, const unsigned short port) { int error = -EAFNOSUPPORT; switch (family) { case PF_INET: error = __svc_rpcb_register4(net, program, version, protocol, port); break; #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: error = __svc_rpcb_register6(net, program, version, protocol, port); #endif } trace_svc_register(progname, version, family, protocol, port, error); return error; } static int svc_rpcbind_set_version(struct net *net, const struct svc_program *progp, u32 version, int family, unsigned short proto, unsigned short port) { return __svc_register(net, progp->pg_name, progp->pg_prog, version, family, proto, port); } int svc_generic_rpcbind_set(struct net *net, const struct svc_program *progp, u32 version, int family, unsigned short proto, unsigned short port) { const struct svc_version *vers = progp->pg_vers[version]; int error; if (vers == NULL) return 0; if (vers->vs_hidden) { trace_svc_noregister(progp->pg_name, version, proto, port, family, 0); return 0; } /* * Don't register a UDP port if we need congestion * control. */ if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP) return 0; error = svc_rpcbind_set_version(net, progp, version, family, proto, port); return (vers->vs_rpcb_optnl) ? 0 : error; } EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set); /** * svc_register - register an RPC service with the local portmapper * @serv: svc_serv struct for the service to register * @net: net namespace for the service to register * @family: protocol family of service's listener socket * @proto: transport protocol number to advertise * @port: port to advertise * * Service is registered for any address in the passed-in protocol family */ int svc_register(const struct svc_serv *serv, struct net *net, const int family, const unsigned short proto, const unsigned short port) { unsigned int p, i; int error = 0; WARN_ON_ONCE(proto == 0 && port == 0); if (proto == 0 && port == 0) return -EINVAL; for (p = 0; p < serv->sv_nprogs; p++) { struct svc_program *progp = &serv->sv_programs[p]; for (i = 0; i < progp->pg_nvers; i++) { error = progp->pg_rpcbind_set(net, progp, i, family, proto, port); if (error < 0) { printk(KERN_WARNING "svc: failed to register " "%sv%u RPC service (errno %d).\n", progp->pg_name, i, -error); break; } } } return error; } /* * If user space is running rpcbind, it should take the v4 UNSET * and clear everything for this [program, version]. 
If user space * is running portmap, it will reject the v4 UNSET, but won't have * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient * in this case to clear all existing entries for [program, version]. */ static void __svc_unregister(struct net *net, const u32 program, const u32 version, const char *progname) { int error; error = rpcb_v4_register(net, program, version, NULL, ""); /* * User space didn't support rpcbind v4, so retry this * request with the legacy rpcbind v2 protocol. */ if (error == -EPROTONOSUPPORT) error = rpcb_register(net, program, version, 0, 0); trace_svc_unregister(progname, version, error); } /* * All netids, bind addresses and ports registered for [program, version] * are removed from the local rpcbind database (if the service is not * hidden) to make way for a new instance of the service. * * The result of unregistration is reported via dprintk for those who want * verification of the result, but is otherwise not important. */ static void svc_unregister(const struct svc_serv *serv, struct net *net) { struct sighand_struct *sighand; unsigned long flags; unsigned int p, i; clear_thread_flag(TIF_SIGPENDING); for (p = 0; p < serv->sv_nprogs; p++) { struct svc_program *progp = &serv->sv_programs[p]; for (i = 0; i < progp->pg_nvers; i++) { if (progp->pg_vers[i] == NULL) continue; if (progp->pg_vers[i]->vs_hidden) continue; __svc_unregister(net, progp->pg_prog, i, progp->pg_name); } } rcu_read_lock(); sighand = rcu_dereference(current->sighand); spin_lock_irqsave(&sighand->siglock, flags); recalc_sigpending(); spin_unlock_irqrestore(&sighand->siglock, flags); rcu_read_unlock(); } /* * dprintk the given error with the address of the client that caused it. */ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) { struct va_format vaf; va_list args; char buf[RPC_MAX_ADDRBUFLEN]; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf); va_end(args); } #else static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {} #endif __be32 svc_generic_init_request(struct svc_rqst *rqstp, const struct svc_program *progp, struct svc_process_info *ret) { const struct svc_version *versp = NULL; /* compiler food */ const struct svc_procedure *procp = NULL; if (rqstp->rq_vers >= progp->pg_nvers ) goto err_bad_vers; versp = progp->pg_vers[rqstp->rq_vers]; if (!versp) goto err_bad_vers; /* * Some protocol versions (namely NFSv4) require some form of * congestion control. (See RFC 7530 section 3.1 paragraph 2) * In other words, UDP is not allowed. We mark those when setting * up the svc_xprt, and verify that here. * * The spec is not very clear about what error should be returned * when someone tries to access a server that is listening on UDP * for lower versions. RPC_PROG_MISMATCH seems to be the closest * fit. 
*/ if (versp->vs_need_cong_ctrl && rqstp->rq_xprt && !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags)) goto err_bad_vers; if (rqstp->rq_proc >= versp->vs_nproc) goto err_bad_proc; rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc]; /* Initialize storage for argp and resp */ memset(rqstp->rq_argp, 0, procp->pc_argzero); memset(rqstp->rq_resp, 0, procp->pc_ressize); /* Bump per-procedure stats counter */ this_cpu_inc(versp->vs_count[rqstp->rq_proc]); ret->dispatch = versp->vs_dispatch; return rpc_success; err_bad_vers: ret->mismatch.lovers = progp->pg_lovers; ret->mismatch.hivers = progp->pg_hivers; return rpc_prog_mismatch; err_bad_proc: return rpc_proc_unavail; } EXPORT_SYMBOL_GPL(svc_generic_init_request); /* * Common routine for processing the RPC request. */ static int svc_process_common(struct svc_rqst *rqstp) { struct xdr_stream *xdr = &rqstp->rq_res_stream; struct svc_program *progp = NULL; const struct svc_procedure *procp = NULL; struct svc_serv *serv = rqstp->rq_server; struct svc_process_info process; enum svc_auth_status auth_res; unsigned int aoffset; int pr, rc; __be32 *p; /* Will be turned off only when NFSv4 Sessions are used */ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags); clear_bit(RQ_DROPME, &rqstp->rq_flags); /* Construct the first words of the reply: */ svcxdr_init_encode(rqstp); xdr_stream_encode_be32(xdr, rqstp->rq_xid); xdr_stream_encode_be32(xdr, rpc_reply); p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4); if (unlikely(!p)) goto err_short_len; if (*p++ != cpu_to_be32(RPC_VERSION)) goto err_bad_rpc; xdr_stream_encode_be32(xdr, rpc_msg_accepted); rqstp->rq_prog = be32_to_cpup(p++); rqstp->rq_vers = be32_to_cpup(p++); rqstp->rq_proc = be32_to_cpup(p); for (pr = 0; pr < serv->sv_nprogs; pr++) if (rqstp->rq_prog == serv->sv_programs[pr].pg_prog) progp = &serv->sv_programs[pr]; /* * Decode auth data, and add verifier to reply buffer. * We do this before anything else in order to get a decent * auth verifier. */ auth_res = svc_authenticate(rqstp); /* Also give the program a chance to reject this call: */ if (auth_res == SVC_OK && progp) auth_res = progp->pg_authenticate(rqstp); trace_svc_authenticate(rqstp, auth_res); switch (auth_res) { case SVC_OK: break; case SVC_GARBAGE: goto err_garbage_args; case SVC_SYSERR: goto err_system_err; case SVC_DENIED: goto err_bad_auth; case SVC_CLOSE: goto close; case SVC_DROP: goto dropit; case SVC_COMPLETE: goto sendit; default: pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res); goto err_system_err; } if (progp == NULL) goto err_bad_prog; switch (progp->pg_init_request(rqstp, progp, &process)) { case rpc_success: break; case rpc_prog_unavail: goto err_bad_prog; case rpc_prog_mismatch: goto err_bad_vers; case rpc_proc_unavail: goto err_bad_proc; } procp = rqstp->rq_procinfo; /* Should this check go into the dispatcher? */ if (!procp || !procp->pc_func) goto err_bad_proc; /* Syntactic check complete */ if (serv->sv_stats) serv->sv_stats->rpccnt++; trace_svc_process(rqstp, progp->pg_name); aoffset = xdr_stream_pos(xdr); /* un-reserve some of the out-queue now that we have a * better idea of reply size */ if (procp->pc_xdrressize) svc_reserve_auth(rqstp, procp->pc_xdrressize<<2); /* Call the function that processes the request. 
*/ rc = process.dispatch(rqstp); if (procp->pc_release) procp->pc_release(rqstp); xdr_finish_decode(xdr); if (!rc) goto dropit; if (rqstp->rq_auth_stat != rpc_auth_ok) goto err_bad_auth; if (*rqstp->rq_accept_statp != rpc_success) xdr_truncate_encode(xdr, aoffset); if (procp->pc_encode == NULL) goto dropit; sendit: if (svc_authorise(rqstp)) goto close_xprt; return 1; /* Caller can now send it */ dropit: svc_authorise(rqstp); /* doesn't hurt to call this twice */ dprintk("svc: svc_process dropit\n"); return 0; close: svc_authorise(rqstp); close_xprt: if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) svc_xprt_close(rqstp->rq_xprt); dprintk("svc: svc_process close\n"); return 0; err_short_len: svc_printk(rqstp, "short len %u, dropping request\n", rqstp->rq_arg.len); goto close_xprt; err_bad_rpc: if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); xdr_stream_encode_u32(xdr, RPC_MISMATCH); /* Only RPCv2 supported */ xdr_stream_encode_u32(xdr, RPC_VERSION); xdr_stream_encode_u32(xdr, RPC_VERSION); return 1; /* don't wrap */ err_bad_auth: dprintk("svc: authentication failed (%d)\n", be32_to_cpu(rqstp->rq_auth_stat)); if (serv->sv_stats) serv->sv_stats->rpcbadauth++; /* Restore write pointer to location of reply status: */ xdr_truncate_encode(xdr, XDR_UNIT * 2); xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR); xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat); goto sendit; err_bad_prog: dprintk("svc: unknown program %d\n", rqstp->rq_prog); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_prog_unavail; goto sendit; err_bad_vers: svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n", rqstp->rq_vers, rqstp->rq_prog, progp->pg_name); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_prog_mismatch; /* * svc_authenticate() has already added the verifier and * advanced the stream just past rq_accept_statp. */ xdr_stream_encode_u32(xdr, process.mismatch.lovers); xdr_stream_encode_u32(xdr, process.mismatch.hivers); goto sendit; err_bad_proc: svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_proc_unavail; goto sendit; err_garbage_args: svc_printk(rqstp, "failed to decode RPC header\n"); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_garbage_args; goto sendit; err_system_err: if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_system_err; goto sendit; } /* * Drop request */ static void svc_drop(struct svc_rqst *rqstp) { trace_svc_drop(rqstp); } /** * svc_process - Execute one RPC transaction * @rqstp: RPC transaction context * */ void svc_process(struct svc_rqst *rqstp) { struct kvec *resv = &rqstp->rq_res.head[0]; __be32 *p; #if IS_ENABLED(CONFIG_FAIL_SUNRPC) if (!fail_sunrpc.ignore_server_disconnect && should_fail(&fail_sunrpc.attr, 1)) svc_xprt_deferred_close(rqstp->rq_xprt); #endif /* * Setup response xdr_buf. 
* Initially it has just one page */ rqstp->rq_next_page = &rqstp->rq_respages[1]; resv->iov_base = page_address(rqstp->rq_respages[0]); resv->iov_len = 0; rqstp->rq_res.pages = rqstp->rq_next_page; rqstp->rq_res.len = 0; rqstp->rq_res.page_base = 0; rqstp->rq_res.page_len = 0; rqstp->rq_res.buflen = PAGE_SIZE; rqstp->rq_res.tail[0].iov_base = NULL; rqstp->rq_res.tail[0].iov_len = 0; svcxdr_init_decode(rqstp); p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2); if (unlikely(!p)) goto out_drop; rqstp->rq_xid = *p++; if (unlikely(*p != rpc_call)) goto out_baddir; if (!svc_process_common(rqstp)) goto out_drop; svc_send(rqstp); return; out_baddir: svc_printk(rqstp, "bad direction 0x%08x, dropping request\n", be32_to_cpu(*p)); if (rqstp->rq_server->sv_stats) rqstp->rq_server->sv_stats->rpcbadfmt++; out_drop: svc_drop(rqstp); } #if defined(CONFIG_SUNRPC_BACKCHANNEL) /** * svc_process_bc - process a reverse-direction RPC request * @req: RPC request to be used for client-side processing * @rqstp: server-side execution context * */ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp) { struct rpc_timeout timeout = { .to_increment = 0, }; struct rpc_task *task; int proc_error; /* Build the svc_rqst used by the common processing routine */ rqstp->rq_xid = req->rq_xid; rqstp->rq_prot = req->rq_xprt->prot; rqstp->rq_bc_net = req->rq_xprt->xprt_net; rqstp->rq_addrlen = sizeof(req->rq_xprt->addr); memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); /* Adjust the argument buffer length */ rqstp->rq_arg.len = req->rq_private_buf.len; if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; rqstp->rq_arg.page_len = 0; } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len) rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; else rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len; /* Reset the response buffer */ rqstp->rq_res.head[0].iov_len = 0; /* * Skip the XID and calldir fields because they've already * been processed by the caller. */ svcxdr_init_decode(rqstp); if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) return; /* Parse and execute the bc call */ proc_error = svc_process_common(rqstp); atomic_dec(&req->rq_xprt->bc_slot_count); if (!proc_error) { /* Processing error: drop the request */ xprt_free_bc_request(req); return; } /* Finally, send the reply synchronously */ if (rqstp->bc_to_initval > 0) { timeout.to_initval = rqstp->bc_to_initval; timeout.to_retries = rqstp->bc_to_retries; } else { timeout.to_initval = req->rq_xprt->timeout->to_initval; timeout.to_retries = req->rq_xprt->timeout->to_retries; } timeout.to_maxval = timeout.to_initval; memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); task = rpc_run_bc_task(req, &timeout); if (IS_ERR(task)) return; WARN_ON_ONCE(atomic_read(&task->tk_count) != 1); rpc_put_task(task); } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ /** * svc_max_payload - Return transport-specific limit on the RPC payload * @rqstp: RPC transaction context * * Returns the maximum number of payload bytes the current transport * allows. 
*/ u32 svc_max_payload(const struct svc_rqst *rqstp) { u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload; if (rqstp->rq_server->sv_max_payload < max) max = rqstp->rq_server->sv_max_payload; return max; } EXPORT_SYMBOL_GPL(svc_max_payload); /** * svc_proc_name - Return RPC procedure name in string form * @rqstp: svc_rqst to operate on * * Return value: * Pointer to a NUL-terminated string */ const char *svc_proc_name(const struct svc_rqst *rqstp) { if (rqstp && rqstp->rq_procinfo) return rqstp->rq_procinfo->pc_name; return "unknown"; } /** * svc_encode_result_payload - mark a range of bytes as a result payload * @rqstp: svc_rqst to operate on * @offset: payload's byte offset in rqstp->rq_res * @length: size of payload, in bytes * * Returns zero on success, or a negative errno if a permanent * error occurred. */ int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length) { return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset, length); } EXPORT_SYMBOL_GPL(svc_encode_result_payload); /** * svc_fill_write_vector - Construct data argument for VFS write call * @rqstp: svc_rqst to operate on * @payload: xdr_buf containing only the write data payload * * Fills in rqstp::rq_vec, and returns the number of elements. */ unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct xdr_buf *payload) { struct page **pages = payload->pages; struct kvec *first = payload->head; struct kvec *vec = rqstp->rq_vec; size_t total = payload->len; unsigned int i; /* Some types of transport can present the write payload * entirely in rq_arg.pages. In this case, @first is empty. */ i = 0; if (first->iov_len) { vec[i].iov_base = first->iov_base; vec[i].iov_len = min_t(size_t, total, first->iov_len); total -= vec[i].iov_len; ++i; } while (total) { vec[i].iov_base = page_address(*pages); vec[i].iov_len = min_t(size_t, total, PAGE_SIZE); total -= vec[i].iov_len; ++i; ++pages; } WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec)); return i; } EXPORT_SYMBOL_GPL(svc_fill_write_vector); /** * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call * @rqstp: svc_rqst to operate on * @first: buffer containing first section of pathname * @p: buffer containing remaining section of pathname * @total: total length of the pathname argument * * The VFS symlink API demands a NUL-terminated pathname in mapped memory. * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free * the returned string. */ char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, void *p, size_t total) { size_t len, remaining; char *result, *dst; result = kmalloc(total + 1, GFP_KERNEL); if (!result) return ERR_PTR(-ESERVERFAULT); dst = result; remaining = total; len = min_t(size_t, total, first->iov_len); if (len) { memcpy(dst, first->iov_base, len); dst += len; remaining -= len; } if (remaining) { len = min_t(size_t, remaining, PAGE_SIZE); memcpy(dst, p, len); dst += len; } *dst = '\0'; /* Sanity check: Linux doesn't allow the pathname argument to * contain a NUL byte. */ if (strlen(result) != total) { kfree(result); return ERR_PTR(-EINVAL); } return result; } EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
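/*
 * Illustrative sketch, editor's addition (plain user-space C, not part of the
 * SUNRPC code above): how svc_set_num_threads() interprets @nrservs as a
 * target thread count and converts it into a delta, per its kernel-doc.
 * The counts used below are made-up example values.
 */
#include <stdio.h>

static int threads_delta(int have, int want)
{
	/* > 0: that many kthreads are started; < 0: that many are stopped */
	return want - have;
}

int main(void)
{
	printf("have 4, want 8 -> %+d\n", threads_delta(4, 8));
	printf("have 8, want 2 -> %+d\n", threads_delta(8, 2));
	printf("have 8, want 0 -> %+d (kill all threads)\n", threads_delta(8, 0));
	return 0;
}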
/* * net/tipc/monitor.c * * Copyright (c) 2016, Ericsson AB
* All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <net/genetlink.h> #include "core.h" #include "addr.h" #include "monitor.h" #include "bearer.h" #define MAX_MON_DOMAIN 64 #define MON_TIMEOUT 120000 #define MAX_PEER_DOWN_EVENTS 4 /* struct tipc_mon_domain: domain record to be transferred between peers * @len: actual size of domain record * @gen: current generation of sender's domain * @ack_gen: most recent generation of self's domain acked by peer * @member_cnt: number of domain member nodes described in this record * @up_map: bit map indicating which of the members the sender considers up * @members: identity of the domain members */ struct tipc_mon_domain { u16 len; u16 gen; u16 ack_gen; u16 member_cnt; u64 up_map; u32 members[MAX_MON_DOMAIN]; }; /* struct tipc_peer: state of a peer node and its domain * @addr: tipc node identity of peer * @head_map: shows which other nodes currently consider peer 'up' * @domain: most recent domain record from peer * @hash: position in hashed lookup list * @list: position in linked list, in circular ascending order by 'addr' * @applied: number of reported domain members applied on this monitor list * @is_up: peer is up as seen from this node * @is_head: peer is assigned domain head as seen from this node * @is_local: peer is in local domain and should be continuously monitored * @down_cnt: - numbers of other peers which have reported this on lost */ struct tipc_peer { u32 addr; struct tipc_mon_domain *domain; struct hlist_node hash; struct list_head list; u8 applied; u8 down_cnt; bool is_up; bool is_head; bool is_local; }; struct tipc_monitor { struct hlist_head peers[NODE_HTABLE_SIZE]; int peer_cnt; struct tipc_peer *self; rwlock_t lock; struct tipc_mon_domain cache; u16 list_gen; u16 dom_gen; struct net *net; struct timer_list timer; unsigned long timer_intv; }; static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id) { return 
tipc_net(net)->monitors[bearer_id]; } const int tipc_max_domain_size = sizeof(struct tipc_mon_domain); static inline u16 mon_cpu_to_le16(u16 val) { return (__force __u16)htons(val); } static inline u32 mon_cpu_to_le32(u32 val) { return (__force __u32)htonl(val); } static inline u64 mon_cpu_to_le64(u64 val) { return (__force __u64)cpu_to_be64(val); } static inline u16 mon_le16_to_cpu(u16 val) { return ntohs((__force __be16)val); } static inline u32 mon_le32_to_cpu(u32 val) { return ntohl((__force __be32)val); } static inline u64 mon_le64_to_cpu(u64 val) { return be64_to_cpu((__force __be64)val); } /* dom_rec_len(): actual length of domain record for transport */ static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt) { return (offsetof(struct tipc_mon_domain, members)) + (mcnt * sizeof(u32)); } /* dom_size() : calculate size of own domain based on number of peers */ static int dom_size(int peers) { int i = 0; while ((i * i) < peers) i++; return min(i, MAX_MON_DOMAIN); } static void map_set(u64 *up_map, int i, unsigned int v) { *up_map &= ~(1ULL << i); *up_map |= ((u64)v << i); } static int map_get(u64 up_map, int i) { return (up_map & (1ULL << i)) >> i; } static struct tipc_peer *peer_prev(struct tipc_peer *peer) { return list_last_entry(&peer->list, struct tipc_peer, list); } static struct tipc_peer *peer_nxt(struct tipc_peer *peer) { return list_first_entry(&peer->list, struct tipc_peer, list); } static struct tipc_peer *peer_head(struct tipc_peer *peer) { while (!peer->is_head) peer = peer_prev(peer); return peer; } static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr) { struct tipc_peer *peer; unsigned int thash = tipc_hashfn(addr); hlist_for_each_entry(peer, &mon->peers[thash], hash) { if (peer->addr == addr) return peer; } return NULL; } static struct tipc_peer *get_self(struct net *net, int bearer_id) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); return mon->self; } static inline bool tipc_mon_is_active(struct net *net, struct tipc_monitor *mon) { struct tipc_net *tn = tipc_net(net); return mon->peer_cnt > tn->mon_threshold; } /* mon_identify_lost_members() : - identify amd mark potentially lost members */ static void mon_identify_lost_members(struct tipc_peer *peer, struct tipc_mon_domain *dom_bef, int applied_bef) { struct tipc_peer *member = peer; struct tipc_mon_domain *dom_aft = peer->domain; int applied_aft = peer->applied; int i; for (i = 0; i < applied_bef; i++) { member = peer_nxt(member); /* Do nothing if self or peer already see member as down */ if (!member->is_up || !map_get(dom_bef->up_map, i)) continue; /* Loss of local node must be detected by active probing */ if (member->is_local) continue; /* Start probing if member was removed from applied domain */ if (!applied_aft || (applied_aft < i)) { member->down_cnt = 1; continue; } /* Member loss is confirmed if it is still in applied domain */ if (!map_get(dom_aft->up_map, i)) member->down_cnt++; } } /* mon_apply_domain() : match a peer's domain record against monitor list */ static void mon_apply_domain(struct tipc_monitor *mon, struct tipc_peer *peer) { struct tipc_mon_domain *dom = peer->domain; struct tipc_peer *member; u32 addr; int i; if (!dom || !peer->is_up) return; /* Scan across domain members and match against monitor list */ peer->applied = 0; member = peer_nxt(peer); for (i = 0; i < dom->member_cnt; i++) { addr = dom->members[i]; if (addr != member->addr) return; peer->applied++; member = peer_nxt(member); } } /* mon_update_local_domain() : update after peer 
addition/removal/up/down */ static void mon_update_local_domain(struct tipc_monitor *mon) { struct tipc_peer *self = mon->self; struct tipc_mon_domain *cache = &mon->cache; struct tipc_mon_domain *dom = self->domain; struct tipc_peer *peer = self; u64 prev_up_map = dom->up_map; u16 member_cnt, i; bool diff; /* Update local domain size based on current size of cluster */ member_cnt = dom_size(mon->peer_cnt) - 1; self->applied = member_cnt; /* Update native and cached outgoing local domain records */ dom->len = dom_rec_len(dom, member_cnt); diff = dom->member_cnt != member_cnt; dom->member_cnt = member_cnt; for (i = 0; i < member_cnt; i++) { peer = peer_nxt(peer); diff |= dom->members[i] != peer->addr; dom->members[i] = peer->addr; map_set(&dom->up_map, i, peer->is_up); cache->members[i] = mon_cpu_to_le32(peer->addr); } diff |= dom->up_map != prev_up_map; if (!diff) return; dom->gen = ++mon->dom_gen; cache->len = mon_cpu_to_le16(dom->len); cache->gen = mon_cpu_to_le16(dom->gen); cache->member_cnt = mon_cpu_to_le16(member_cnt); cache->up_map = mon_cpu_to_le64(dom->up_map); mon_apply_domain(mon, self); } /* mon_update_neighbors() : update preceding neighbors of added/removed peer */ static void mon_update_neighbors(struct tipc_monitor *mon, struct tipc_peer *peer) { int dz, i; dz = dom_size(mon->peer_cnt); for (i = 0; i < dz; i++) { mon_apply_domain(mon, peer); peer = peer_prev(peer); } } /* mon_assign_roles() : reassign peer roles after a network change * The monitor list is consistent at this stage; i.e., each peer is monitoring * a set of domain members as matched between domain record and the monitor list */ static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head) { struct tipc_peer *peer = peer_nxt(head); struct tipc_peer *self = mon->self; int i = 0; for (; peer != self; peer = peer_nxt(peer)) { peer->is_local = false; /* Update domain member */ if (i++ < head->applied) { peer->is_head = false; if (head == self) peer->is_local = true; continue; } /* Assign next domain head */ if (!peer->is_up) continue; if (peer->is_head) break; head = peer; head->is_head = true; i = 0; } mon->list_gen++; } void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); struct tipc_peer *self; struct tipc_peer *peer, *prev, *head; if (!mon) return; self = get_self(net, bearer_id); write_lock_bh(&mon->lock); peer = get_peer(mon, addr); if (!peer) goto exit; prev = peer_prev(peer); list_del(&peer->list); hlist_del(&peer->hash); kfree(peer->domain); kfree(peer); mon->peer_cnt--; head = peer_head(prev); if (head == self) mon_update_local_domain(mon); mon_update_neighbors(mon, prev); /* Revert to full-mesh monitoring if we reach threshold */ if (!tipc_mon_is_active(net, mon)) { list_for_each_entry(peer, &self->list, list) { kfree(peer->domain); peer->domain = NULL; peer->applied = 0; } } mon_assign_roles(mon, head); exit: write_unlock_bh(&mon->lock); } static bool tipc_mon_add_peer(struct tipc_monitor *mon, u32 addr, struct tipc_peer **peer) { struct tipc_peer *self = mon->self; struct tipc_peer *cur, *prev, *p; p = kzalloc(sizeof(*p), GFP_ATOMIC); *peer = p; if (!p) return false; p->addr = addr; /* Add new peer to lookup list */ INIT_LIST_HEAD(&p->list); hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]); /* Sort new peer into iterator list, in ascending circular order */ prev = self; list_for_each_entry(cur, &self->list, list) { if ((addr > prev->addr) && (addr < cur->addr)) break; if (((addr < cur->addr) || (addr > 
prev->addr)) && (prev->addr > cur->addr)) break; prev = cur; } list_add_tail(&p->list, &cur->list); mon->peer_cnt++; mon_update_neighbors(mon, p); return true; } void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); struct tipc_peer *self = get_self(net, bearer_id); struct tipc_peer *peer, *head; write_lock_bh(&mon->lock); peer = get_peer(mon, addr); if (!peer && !tipc_mon_add_peer(mon, addr, &peer)) goto exit; peer->is_up = true; head = peer_head(peer); if (head == self) mon_update_local_domain(mon); mon_assign_roles(mon, head); exit: write_unlock_bh(&mon->lock); } void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); struct tipc_peer *self; struct tipc_peer *peer, *head; struct tipc_mon_domain *dom; int applied; if (!mon) return; self = get_self(net, bearer_id); write_lock_bh(&mon->lock); peer = get_peer(mon, addr); if (!peer) { pr_warn("Mon: unknown link %x/%u DOWN\n", addr, bearer_id); goto exit; } applied = peer->applied; peer->applied = 0; dom = peer->domain; peer->domain = NULL; if (peer->is_head) mon_identify_lost_members(peer, dom, applied); kfree(dom); peer->is_up = false; peer->is_head = false; peer->is_local = false; peer->down_cnt = 0; head = peer_head(peer); if (head == self) mon_update_local_domain(mon); mon_assign_roles(mon, head); exit: write_unlock_bh(&mon->lock); } /* tipc_mon_rcv - process monitor domain event message */ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr, struct tipc_mon_state *state, int bearer_id) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); struct tipc_mon_domain *arrv_dom = data; struct tipc_mon_domain dom_bef; struct tipc_mon_domain *dom; struct tipc_peer *peer; u16 new_member_cnt = mon_le16_to_cpu(arrv_dom->member_cnt); int new_dlen = dom_rec_len(arrv_dom, new_member_cnt); u16 new_gen = mon_le16_to_cpu(arrv_dom->gen); u16 acked_gen = mon_le16_to_cpu(arrv_dom->ack_gen); u16 arrv_dlen = mon_le16_to_cpu(arrv_dom->len); bool probing = state->probing; int i, applied_bef; state->probing = false; /* Sanity check received domain record */ if (new_member_cnt > MAX_MON_DOMAIN) return; if (dlen < dom_rec_len(arrv_dom, 0)) return; if (dlen != dom_rec_len(arrv_dom, new_member_cnt)) return; if (dlen < new_dlen || arrv_dlen != new_dlen) return; /* Synch generation numbers with peer if link just came up */ if (!state->synched) { state->peer_gen = new_gen - 1; state->acked_gen = acked_gen; state->synched = true; } if (more(acked_gen, state->acked_gen)) state->acked_gen = acked_gen; /* Drop duplicate unless we are waiting for a probe response */ if (!more(new_gen, state->peer_gen) && !probing) return; write_lock_bh(&mon->lock); peer = get_peer(mon, addr); if (!peer || !peer->is_up) goto exit; /* Peer is confirmed, stop any ongoing probing */ peer->down_cnt = 0; /* Task is done for duplicate record */ if (!more(new_gen, state->peer_gen)) goto exit; state->peer_gen = new_gen; /* Cache current domain record for later use */ dom_bef.member_cnt = 0; dom = peer->domain; if (dom) memcpy(&dom_bef, dom, dom->len); /* Transform and store received domain record */ if (!dom || (dom->len < new_dlen)) { kfree(dom); dom = kmalloc(new_dlen, GFP_ATOMIC); peer->domain = dom; if (!dom) goto exit; } dom->len = new_dlen; dom->gen = new_gen; dom->member_cnt = new_member_cnt; dom->up_map = mon_le64_to_cpu(arrv_dom->up_map); for (i = 0; i < new_member_cnt; i++) dom->members[i] = mon_le32_to_cpu(arrv_dom->members[i]); /* 
Update peers affected by this domain record */ applied_bef = peer->applied; mon_apply_domain(mon, peer); mon_identify_lost_members(peer, &dom_bef, applied_bef); mon_assign_roles(mon, peer_head(peer)); exit: write_unlock_bh(&mon->lock); } void tipc_mon_prep(struct net *net, void *data, int *dlen, struct tipc_mon_state *state, int bearer_id) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); struct tipc_mon_domain *dom = data; u16 gen = mon->dom_gen; u16 len; /* Send invalid record if not active */ if (!tipc_mon_is_active(net, mon)) { dom->len = 0; return; } /* Send only a dummy record with ack if peer has acked our last sent */ if (likely(state->acked_gen == gen)) { len = dom_rec_len(dom, 0); *dlen = len; dom->len = mon_cpu_to_le16(len); dom->gen = mon_cpu_to_le16(gen); dom->ack_gen = mon_cpu_to_le16(state->peer_gen); dom->member_cnt = 0; return; } /* Send the full record */ read_lock_bh(&mon->lock); len = mon_le16_to_cpu(mon->cache.len); *dlen = len; memcpy(data, &mon->cache, len); read_unlock_bh(&mon->lock); dom->ack_gen = mon_cpu_to_le16(state->peer_gen); } void tipc_mon_get_state(struct net *net, u32 addr, struct tipc_mon_state *state, int bearer_id) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); struct tipc_peer *peer; if (!tipc_mon_is_active(net, mon)) { state->probing = false; state->monitoring = true; return; } /* Used cached state if table has not changed */ if (!state->probing && (state->list_gen == mon->list_gen) && (state->acked_gen == mon->dom_gen)) return; read_lock_bh(&mon->lock); peer = get_peer(mon, addr); if (peer) { state->probing = state->acked_gen != mon->dom_gen; state->probing |= peer->down_cnt; state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS; state->monitoring = peer->is_local; state->monitoring |= peer->is_head; state->list_gen = mon->list_gen; } read_unlock_bh(&mon->lock); } static void mon_timeout(struct timer_list *t) { struct tipc_monitor *mon = from_timer(mon, t, timer); struct tipc_peer *self; int best_member_cnt = dom_size(mon->peer_cnt) - 1; write_lock_bh(&mon->lock); self = mon->self; if (self && (best_member_cnt != self->applied)) { mon_update_local_domain(mon); mon_assign_roles(mon, self); } write_unlock_bh(&mon->lock); mod_timer(&mon->timer, jiffies + mon->timer_intv); } int tipc_mon_create(struct net *net, int bearer_id) { struct tipc_net *tn = tipc_net(net); struct tipc_monitor *mon; struct tipc_peer *self; struct tipc_mon_domain *dom; if (tn->monitors[bearer_id]) return 0; mon = kzalloc(sizeof(*mon), GFP_ATOMIC); self = kzalloc(sizeof(*self), GFP_ATOMIC); dom = kzalloc(sizeof(*dom), GFP_ATOMIC); if (!mon || !self || !dom) { kfree(mon); kfree(self); kfree(dom); return -ENOMEM; } tn->monitors[bearer_id] = mon; rwlock_init(&mon->lock); mon->net = net; mon->peer_cnt = 1; mon->self = self; self->domain = dom; self->addr = tipc_own_addr(net); self->is_up = true; self->is_head = true; INIT_LIST_HEAD(&self->list); timer_setup(&mon->timer, mon_timeout, 0); mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff)); mod_timer(&mon->timer, jiffies + mon->timer_intv); return 0; } void tipc_mon_delete(struct net *net, int bearer_id) { struct tipc_net *tn = tipc_net(net); struct tipc_monitor *mon = tipc_monitor(net, bearer_id); struct tipc_peer *self; struct tipc_peer *peer, *tmp; if (!mon) return; self = get_self(net, bearer_id); write_lock_bh(&mon->lock); tn->monitors[bearer_id] = NULL; list_for_each_entry_safe(peer, tmp, &self->list, list) { list_del(&peer->list); hlist_del(&peer->hash); kfree(peer->domain); kfree(peer); } 
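/* All remote peers are freed; detach self, drop the lock, quiesce the
 * timer, then release self's domain record, self, and the monitor itself.
 */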
mon->self = NULL; write_unlock_bh(&mon->lock); timer_shutdown_sync(&mon->timer); kfree(self->domain); kfree(self); kfree(mon); } void tipc_mon_reinit_self(struct net *net) { struct tipc_monitor *mon; int bearer_id; for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { mon = tipc_monitor(net, bearer_id); if (!mon) continue; write_lock_bh(&mon->lock); mon->self->addr = tipc_own_addr(net); write_unlock_bh(&mon->lock); } } int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size) { struct tipc_net *tn = tipc_net(net); if (cluster_size > TIPC_CLUSTER_SIZE) return -EINVAL; tn->mon_threshold = cluster_size; return 0; } int tipc_nl_monitor_get_threshold(struct net *net) { struct tipc_net *tn = tipc_net(net); return tn->mon_threshold; } static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer, struct tipc_nl_msg *msg) { struct tipc_mon_domain *dom = peer->domain; struct nlattr *attrs; void *hdr; hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, NLM_F_MULTI, TIPC_NL_MON_PEER_GET); if (!hdr) return -EMSGSIZE; attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON_PEER); if (!attrs) goto msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr)) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied)) goto attr_msg_full; if (peer->is_up) if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_UP)) goto attr_msg_full; if (peer->is_local) if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_LOCAL)) goto attr_msg_full; if (peer->is_head) if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_HEAD)) goto attr_msg_full; if (dom) { if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_DOMGEN, dom->gen)) goto attr_msg_full; if (nla_put_u64_64bit(msg->skb, TIPC_NLA_MON_PEER_UPMAP, dom->up_map, TIPC_NLA_MON_PEER_PAD)) goto attr_msg_full; if (nla_put(msg->skb, TIPC_NLA_MON_PEER_MEMBERS, dom->member_cnt * sizeof(u32), &dom->members)) goto attr_msg_full; } nla_nest_end(msg->skb, attrs); genlmsg_end(msg->skb, hdr); return 0; attr_msg_full: nla_nest_cancel(msg->skb, attrs); msg_full: genlmsg_cancel(msg->skb, hdr); return -EMSGSIZE; } int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg, u32 bearer_id, u32 *prev_node) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); struct tipc_peer *peer; if (!mon) return -EINVAL; read_lock_bh(&mon->lock); peer = mon->self; do { if (*prev_node) { if (peer->addr == *prev_node) *prev_node = 0; else continue; } if (__tipc_nl_add_monitor_peer(peer, msg)) { *prev_node = peer->addr; read_unlock_bh(&mon->lock); return -EMSGSIZE; } } while ((peer = peer_nxt(peer)) != mon->self); read_unlock_bh(&mon->lock); return 0; } int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg, u32 bearer_id) { struct tipc_monitor *mon = tipc_monitor(net, bearer_id); char bearer_name[TIPC_MAX_BEARER_NAME]; struct nlattr *attrs; void *hdr; int ret; ret = tipc_bearer_get_name(net, bearer_name, bearer_id); if (ret || !mon) return 0; hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, NLM_F_MULTI, TIPC_NL_MON_GET); if (!hdr) return -EMSGSIZE; attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON); if (!attrs) goto msg_full; read_lock_bh(&mon->lock); if (nla_put_u32(msg->skb, TIPC_NLA_MON_REF, bearer_id)) goto attr_msg_full; if (tipc_mon_is_active(net, mon)) if (nla_put_flag(msg->skb, TIPC_NLA_MON_ACTIVE)) goto attr_msg_full; if (nla_put_string(msg->skb, TIPC_NLA_MON_BEARER_NAME, bearer_name)) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEERCNT, mon->peer_cnt)) goto attr_msg_full; if 
(nla_put_u32(msg->skb, TIPC_NLA_MON_LISTGEN, mon->list_gen)) goto attr_msg_full; read_unlock_bh(&mon->lock); nla_nest_end(msg->skb, attrs); genlmsg_end(msg->skb, hdr); return 0; attr_msg_full: read_unlock_bh(&mon->lock); nla_nest_cancel(msg->skb, attrs); msg_full: genlmsg_cancel(msg->skb, hdr); return -EMSGSIZE; }
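/*
 * Illustrative sketch, editor's addition (plain user-space C, not part of
 * the TIPC code above): the domain-size rule implemented by dom_size() -
 * each node actively monitors roughly the square root of the cluster size,
 * capped at MAX_MON_DOMAIN (64) members.
 */
#include <stdio.h>

#define MAX_MON_DOMAIN 64

static int dom_size(int peers)
{
	int i = 0;

	while ((i * i) < peers)
		i++;
	return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
}

int main(void)
{
	const int peers[] = { 4, 16, 100, 1000, 8000 };
	unsigned int k;

	for (k = 0; k < sizeof(peers) / sizeof(peers[0]); k++)
		printf("cluster of %4d peers -> domain size %d\n",
		       peers[k], dom_size(peers[k]));
	return 0;
}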
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Patrick McHardy <kaber@trash.net> */ #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_SYNPROXY.h> #include <net/netfilter/nf_synproxy.h> static unsigned int synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_synproxy_info *info = par->targinfo; struct net *net = xt_net(par); struct synproxy_net *snet = synproxy_pernet(net); struct synproxy_options opts = {}; struct tcphdr *th, _th; if (nf_ip_checksum(skb, xt_hooknum(par), par->thoff, IPPROTO_TCP)) return NF_DROP; th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th); if (th == NULL) return NF_DROP; if (!synproxy_parse_options(skb, par->thoff, th, &opts)) return NF_DROP; if (th->syn && !(th->ack || th->fin || th->rst)) { /* Initial SYN from client */ this_cpu_inc(snet->stats->syn_received); if (th->ece && th->cwr) opts.options |= XT_SYNPROXY_OPT_ECN; opts.options &= info->options; opts.mss_encode = opts.mss_option; opts.mss_option = info->mss; if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) synproxy_init_timestamp_cookie(info, &opts); else opts.options &= ~(XT_SYNPROXY_OPT_WSCALE | XT_SYNPROXY_OPT_SACK_PERM | XT_SYNPROXY_OPT_ECN); synproxy_send_client_synack(net, skb, th, &opts); consume_skb(skb); return NF_STOLEN; } else if (th->ack && !(th->fin || th->rst || th->syn)) { /* ACK from client */ if (synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq))) { consume_skb(skb); return NF_STOLEN; } else { return NF_DROP; } } return XT_CONTINUE; } static int synproxy_tg4_check(const struct xt_tgchk_param *par) { struct synproxy_net *snet = synproxy_pernet(par->net); const struct ipt_entry *e = par->entryinfo; int err; if (e->ip.proto != IPPROTO_TCP || e->ip.invflags & XT_INV_PROTO) return -EINVAL; err = nf_ct_netns_get(par->net, par->family); if (err) return err; err = nf_synproxy_ipv4_init(snet, par->net); if (err) { nf_ct_netns_put(par->net, par->family); return err; } return err; } static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par) { struct synproxy_net *snet = synproxy_pernet(par->net); nf_synproxy_ipv4_fini(snet, par->net); nf_ct_netns_put(par->net, par->family); } static struct xt_target synproxy_tg4_reg __read_mostly = { .name = "SYNPROXY", .family = NFPROTO_IPV4, .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), .target = synproxy_tg4, .targetsize = sizeof(struct xt_synproxy_info), .checkentry = synproxy_tg4_check, .destroy = synproxy_tg4_destroy, .me = THIS_MODULE, }; static int __init synproxy_tg4_init(void) { return xt_register_target(&synproxy_tg4_reg); } static void __exit synproxy_tg4_exit(void) { xt_unregister_target(&synproxy_tg4_reg); } module_init(synproxy_tg4_init); module_exit(synproxy_tg4_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("Intercept TCP connections and establish them using syncookies");
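/*
 * Illustrative sketch, editor's addition (plain user-space C, not part of
 * the SYNPROXY module above): the option-mask step synproxy_tg4() applies to
 * an initial SYN - options offered by the client are ANDed with the options
 * the rule permits, and window scaling, SACK and ECN are dropped when the
 * timestamp option (which carries them in the cookie) is not in use.
 * The flag values below mirror the XT_SYNPROXY_OPT_* bits for demonstration
 * purposes only.
 */
#include <stdio.h>

#define OPT_MSS       0x01
#define OPT_WSCALE    0x02
#define OPT_SACK_PERM 0x04
#define OPT_TIMESTAMP 0x08
#define OPT_ECN       0x10

static unsigned int synproxy_mask_options(unsigned int client, unsigned int rule)
{
	unsigned int opts = client & rule;

	if (!(opts & OPT_TIMESTAMP))
		opts &= ~(OPT_WSCALE | OPT_SACK_PERM | OPT_ECN);
	return opts;
}

int main(void)
{
	unsigned int client = OPT_MSS | OPT_WSCALE | OPT_SACK_PERM | OPT_ECN;
	unsigned int rule = OPT_MSS | OPT_WSCALE | OPT_SACK_PERM;

	/* Without timestamps, only the MSS survives: prints 0x01 */
	printf("negotiated options: 0x%02x\n",
	       synproxy_mask_options(client, rule));
	return 0;
}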
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \ BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP) static void bpf_array_free_percpu(struct bpf_array *array) { int i; for (i = 0; i < array->map.max_entries; i++) { free_percpu(array->pptrs[i]); cond_resched(); } } static int bpf_array_alloc_percpu(struct bpf_array *array) { void __percpu *ptr; int i; for (i = 0; i < array->map.max_entries; i++) { ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8, GFP_USER | __GFP_NOWARN); if (!ptr) { bpf_array_free_percpu(array); return -ENOMEM; } array->pptrs[i] = ptr; cond_resched(); } return 0; } /* Called from syscall */ int array_map_alloc_check(union bpf_attr *attr) { bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; int numa_node = bpf_map_attr_numa_node(attr); /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || attr->value_size == 0 || attr->map_flags & ~ARRAY_CREATE_FLAG_MASK || !bpf_map_flags_access_ok(attr->map_flags) || (percpu && numa_node != NUMA_NO_NODE)) return -EINVAL; if (attr->map_type != BPF_MAP_TYPE_ARRAY && attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP)) return -EINVAL; if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY && attr->map_flags & BPF_F_PRESERVE_ELEMS) return -EINVAL; /* avoid overflow on round_up(map->value_size) */ if (attr->value_size > INT_MAX) return -E2BIG; /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */ if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE) return -E2BIG; return 0; } static struct bpf_map *array_map_alloc(union bpf_attr *attr) { bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; int numa_node = bpf_map_attr_numa_node(attr); u32 elem_size, index_mask, max_entries; bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL); u64 array_size, mask64; struct bpf_array *array; elem_size = round_up(attr->value_size, 8); max_entries = attr->max_entries; /* On 32 bit archs roundup_pow_of_two() with max_entries that has * upper most bit set in u32 space is undefined behavior due to * resulting 1U << 32, so do it manually here in u64 space. */ mask64 = fls_long(max_entries - 1); mask64 = 1ULL << mask64; mask64 -= 1; index_mask = mask64; if (!bypass_spec_v1) { /* round up array size to nearest power of 2, * since cpu will speculate within index_mask limits */ max_entries = index_mask + 1; /* Check for overflows. 
*/ if (max_entries < attr->max_entries) return ERR_PTR(-E2BIG); } array_size = sizeof(*array); if (percpu) { array_size += (u64) max_entries * sizeof(void *); } else { /* rely on vmalloc() to return page-aligned memory and * ensure array->value is exactly page-aligned */ if (attr->map_flags & BPF_F_MMAPABLE) { array_size = PAGE_ALIGN(array_size); array_size += PAGE_ALIGN((u64) max_entries * elem_size); } else { array_size += (u64) max_entries * elem_size; } } /* allocate all map elements and zero-initialize them */ if (attr->map_flags & BPF_F_MMAPABLE) { void *data; /* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */ data = bpf_map_area_mmapable_alloc(array_size, numa_node); if (!data) return ERR_PTR(-ENOMEM); array = data + PAGE_ALIGN(sizeof(struct bpf_array)) - offsetof(struct bpf_array, value); } else { array = bpf_map_area_alloc(array_size, numa_node); } if (!array) return ERR_PTR(-ENOMEM); array->index_mask = index_mask; array->map.bypass_spec_v1 = bypass_spec_v1; /* copy mandatory map attributes */ bpf_map_init_from_attr(&array->map, attr); array->elem_size = elem_size; if (percpu && bpf_array_alloc_percpu(array)) { bpf_map_area_free(array); return ERR_PTR(-ENOMEM); } return &array->map; } static void *array_map_elem_ptr(struct bpf_array* array, u32 index) { return array->value + (u64)array->elem_size * index; } /* Called from syscall or from eBPF program */ static void *array_map_lookup_elem(struct bpf_map *map, void *key) { struct bpf_array *array = container_of(map, struct bpf_array, map); u32 index = *(u32 *)key; if (unlikely(index >= array->map.max_entries)) return NULL; return array->value + (u64)array->elem_size * (index & array->index_mask); } static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off) { struct bpf_array *array = container_of(map, struct bpf_array, map); if (map->max_entries != 1) return -ENOTSUPP; if (off >= map->value_size) return -EINVAL; *imm = (unsigned long)array->value; return 0; } static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm, u32 *off) { struct bpf_array *array = container_of(map, struct bpf_array, map); u64 base = (unsigned long)array->value; u64 range = array->elem_size; if (map->max_entries != 1) return -ENOTSUPP; if (imm < base || imm >= base + range) return -ENOENT; *off = imm - base; return 0; } /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { struct bpf_array *array = container_of(map, struct bpf_array, map); struct bpf_insn *insn = insn_buf; u32 elem_size = array->elem_size; const int ret = BPF_REG_0; const int map_ptr = BPF_REG_1; const int index = BPF_REG_2; if (map->map_flags & BPF_F_INNER_MAP) return -EOPNOTSUPP; *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); if (!map->bypass_spec_v1) { *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); } else { *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); } if (is_power_of_2(elem_size)) { *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); } else { *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); } *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); *insn++ = BPF_MOV64_IMM(ret, 0); return insn - insn_buf; } /* Called from eBPF program */ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) { struct bpf_array *array = 
container_of(map, struct bpf_array, map); u32 index = *(u32 *)key; if (unlikely(index >= array->map.max_entries)) return NULL; return this_cpu_ptr(array->pptrs[index & array->index_mask]); } /* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */ static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { struct bpf_array *array = container_of(map, struct bpf_array, map); struct bpf_insn *insn = insn_buf; if (!bpf_jit_supports_percpu_insn()) return -EOPNOTSUPP; if (map->map_flags & BPF_F_INNER_MAP) return -EOPNOTSUPP; BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0); *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs)); *insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0); if (!map->bypass_spec_v1) { *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6); *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask); } else { *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5); } *insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3); *insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1); *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0); *insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0); *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); *insn++ = BPF_MOV64_IMM(BPF_REG_0, 0); return insn - insn_buf; } static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) { struct bpf_array *array = container_of(map, struct bpf_array, map); u32 index = *(u32 *)key; if (cpu >= nr_cpu_ids) return NULL; if (unlikely(index >= array->map.max_entries)) return NULL; return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu); } int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) { struct bpf_array *array = container_of(map, struct bpf_array, map); u32 index = *(u32 *)key; void __percpu *pptr; int cpu, off = 0; u32 size; if (unlikely(index >= array->map.max_entries)) return -ENOENT; /* per_cpu areas are zero-filled and bpf programs can only * access 'value_size' of them, so copying rounded areas * will not leak any kernel data */ size = array->elem_size; rcu_read_lock(); pptr = array->pptrs[index & array->index_mask]; for_each_possible_cpu(cpu) { copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu)); check_and_init_map_value(map, value + off); off += size; } rcu_read_unlock(); return 0; } /* Called from syscall */ static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) { struct bpf_array *array = container_of(map, struct bpf_array, map); u32 index = key ? 
*(u32 *)key : U32_MAX; u32 *next = (u32 *)next_key; if (index >= array->map.max_entries) { *next = 0; return 0; } if (index == array->map.max_entries - 1) return -ENOENT; *next = index + 1; return 0; } /* Called from syscall or from eBPF program */ static long array_map_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) { struct bpf_array *array = container_of(map, struct bpf_array, map); u32 index = *(u32 *)key; char *val; if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST)) /* unknown flags */ return -EINVAL; if (unlikely(index >= array->map.max_entries)) /* all elements were pre-allocated, cannot insert a new one */ return -E2BIG; if (unlikely(map_flags & BPF_NOEXIST)) /* all elements already exist */ return -EEXIST; if (unlikely((map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))) return -EINVAL; if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { val = this_cpu_ptr(array->pptrs[index & array->index_mask]); copy_map_value(map, val, value); bpf_obj_free_fields(array->map.record, val); } else { val = array->value + (u64)array->elem_size * (index & array->index_mask); if (map_flags & BPF_F_LOCK) copy_map_value_locked(map, val, value, false); else copy_map_value(map, val, value); bpf_obj_free_fields(array->map.record, val); } return 0; } int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, u64 map_flags) { struct bpf_array *array = container_of(map, struct bpf_array, map); u32 index = *(u32 *)key; void __percpu *pptr; int cpu, off = 0; u32 size; if (unlikely(map_flags > BPF_EXIST)) /* unknown flags */ return -EINVAL; if (unlikely(index >= array->map.max_entries)) /* all elements were pre-allocated, cannot insert a new one */ return -E2BIG; if (unlikely(map_flags == BPF_NOEXIST)) /* all elements already exist */ return -EEXIST; /* the user space will provide round_up(value_size, 8) bytes that * will be copied into per-cpu area. bpf programs can only access * value_size of it. During lookup the same extra bytes will be * returned or zeros which were zero-filled by percpu_alloc, * so no kernel data leaks possible */ size = array->elem_size; rcu_read_lock(); pptr = array->pptrs[index & array->index_mask]; for_each_possible_cpu(cpu) { copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off); bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu)); off += size; } rcu_read_unlock(); return 0; } /* Called from syscall or from eBPF program */ static long array_map_delete_elem(struct bpf_map *map, void *key) { return -EINVAL; } static void *array_map_vmalloc_addr(struct bpf_array *array) { return (void *)round_down((unsigned long)array, PAGE_SIZE); } static void array_map_free_timers_wq(struct bpf_map *map) { struct bpf_array *array = container_of(map, struct bpf_array, map); int i; /* We don't reset or free fields other than timer and workqueue * on uref dropping to zero. 
*/ if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) { for (i = 0; i < array->map.max_entries; i++) { if (btf_record_has_field(map->record, BPF_TIMER)) bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i)); if (btf_record_has_field(map->record, BPF_WORKQUEUE)) bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i)); } } } /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ static void array_map_free(struct bpf_map *map) { struct bpf_array *array = container_of(map, struct bpf_array, map); int i; if (!IS_ERR_OR_NULL(map->record)) { if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { for (i = 0; i < array->map.max_entries; i++) { void __percpu *pptr = array->pptrs[i & array->index_mask]; int cpu; for_each_possible_cpu(cpu) { bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu)); cond_resched(); } } } else { for (i = 0; i < array->map.max_entries; i++) bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i)); } } if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) bpf_array_free_percpu(array); if (array->map.map_flags & BPF_F_MMAPABLE) bpf_map_area_free(array_map_vmalloc_addr(array)); else bpf_map_area_free(array); } static void array_map_seq_show_elem(struct bpf_map *map, void *key, struct seq_file *m) { void *value; rcu_read_lock(); value = array_map_lookup_elem(map, key); if (!value) { rcu_read_unlock(); return; } if (map->btf_key_type_id) seq_printf(m, "%u: ", *(u32 *)key); btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); seq_putc(m, '\n'); rcu_read_unlock(); } static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, struct seq_file *m) { struct bpf_array *array = container_of(map, struct bpf_array, map); u32 index = *(u32 *)key; void __percpu *pptr; int cpu; rcu_read_lock(); seq_printf(m, "%u: {\n", *(u32 *)key); pptr = array->pptrs[index & array->index_mask]; for_each_possible_cpu(cpu) { seq_printf(m, "\tcpu%d: ", cpu); btf_type_seq_show(map->btf, map->btf_value_type_id, per_cpu_ptr(pptr, cpu), m); seq_putc(m, '\n'); } seq_puts(m, "}\n"); rcu_read_unlock(); } static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf, const struct btf_type *key_type, const struct btf_type *value_type) { u32 int_data; /* One exception for keyless BTF: .bss/.data/.rodata map */ if (btf_type_is_void(key_type)) { if (map->map_type != BPF_MAP_TYPE_ARRAY || map->max_entries != 1) return -EINVAL; if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC) return -EINVAL; return 0; } if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) return -EINVAL; int_data = *(u32 *)(key_type + 1); /* bpf array can only take a u32 key. This check makes sure * that the btf matches the attr used during map_create. */ if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data)) return -EINVAL; return 0; } static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) { struct bpf_array *array = container_of(map, struct bpf_array, map); pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT; if (!(map->map_flags & BPF_F_MMAPABLE)) return -EINVAL; if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > PAGE_ALIGN((u64)array->map.max_entries * array->elem_size)) return -EINVAL; return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), vma->vm_pgoff + pgoff); } static bool array_map_meta_equal(const struct bpf_map *meta0, const struct bpf_map *meta1) { if (!bpf_map_meta_equal(meta0, meta1)) return false; return meta0->map_flags & BPF_F_INNER_MAP ? 
true : meta0->max_entries == meta1->max_entries; } struct bpf_iter_seq_array_map_info { struct bpf_map *map; void *percpu_value_buf; u32 index; }; static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos) { struct bpf_iter_seq_array_map_info *info = seq->private; struct bpf_map *map = info->map; struct bpf_array *array; u32 index; if (info->index >= map->max_entries) return NULL; if (*pos == 0) ++*pos; array = container_of(map, struct bpf_array, map); index = info->index & array->index_mask; if (info->percpu_value_buf) return (void *)(uintptr_t)array->pptrs[index]; return array_map_elem_ptr(array, index); } static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct bpf_iter_seq_array_map_info *info = seq->private; struct bpf_map *map = info->map; struct bpf_array *array; u32 index; ++*pos; ++info->index; if (info->index >= map->max_entries) return NULL; array = container_of(map, struct bpf_array, map); index = info->index & array->index_mask; if (info->percpu_value_buf) return (void *)(uintptr_t)array->pptrs[index]; return array_map_elem_ptr(array, index); } static int __bpf_array_map_seq_show(struct seq_file *seq, void *v) { struct bpf_iter_seq_array_map_info *info = seq->private; struct bpf_iter__bpf_map_elem ctx = {}; struct bpf_map *map = info->map; struct bpf_array *array = container_of(map, struct bpf_array, map); struct bpf_iter_meta meta; struct bpf_prog *prog; int off = 0, cpu = 0; void __percpu *pptr; u32 size; meta.seq = seq; prog = bpf_iter_get_info(&meta, v == NULL); if (!prog) return 0; ctx.meta = &meta; ctx.map = info->map; if (v) { ctx.key = &info->index; if (!info->percpu_value_buf) { ctx.value = v; } else { pptr = (void __percpu *)(uintptr_t)v; size = array->elem_size; for_each_possible_cpu(cpu) { copy_map_value_long(map, info->percpu_value_buf + off, per_cpu_ptr(pptr, cpu)); check_and_init_map_value(map, info->percpu_value_buf + off); off += size; } ctx.value = info->percpu_value_buf; } } return bpf_iter_run_prog(prog, &ctx); } static int bpf_array_map_seq_show(struct seq_file *seq, void *v) { return __bpf_array_map_seq_show(seq, v); } static void bpf_array_map_seq_stop(struct seq_file *seq, void *v) { if (!v) (void)__bpf_array_map_seq_show(seq, NULL); } static int bpf_iter_init_array_map(void *priv_data, struct bpf_iter_aux_info *aux) { struct bpf_iter_seq_array_map_info *seq_info = priv_data; struct bpf_map *map = aux->map; struct bpf_array *array = container_of(map, struct bpf_array, map); void *value_buf; u32 buf_size; if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { buf_size = array->elem_size * num_possible_cpus(); value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN); if (!value_buf) return -ENOMEM; seq_info->percpu_value_buf = value_buf; } /* bpf_iter_attach_map() acquires a map uref, and the uref may be * released before or in the middle of iterating map elements, so * acquire an extra map uref for iterator. 
*/ bpf_map_inc_with_uref(map); seq_info->map = map; return 0; } static void bpf_iter_fini_array_map(void *priv_data) { struct bpf_iter_seq_array_map_info *seq_info = priv_data; bpf_map_put_with_uref(seq_info->map); kfree(seq_info->percpu_value_buf); } static const struct seq_operations bpf_array_map_seq_ops = { .start = bpf_array_map_seq_start, .next = bpf_array_map_seq_next, .stop = bpf_array_map_seq_stop, .show = bpf_array_map_seq_show, }; static const struct bpf_iter_seq_info iter_seq_info = { .seq_ops = &bpf_array_map_seq_ops, .init_seq_private = bpf_iter_init_array_map, .fini_seq_private = bpf_iter_fini_array_map, .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info), }; static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn, void *callback_ctx, u64 flags) { u32 i, key, num_elems = 0; struct bpf_array *array; bool is_percpu; u64 ret = 0; void *val; cant_migrate(); if (flags != 0) return -EINVAL; is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; array = container_of(map, struct bpf_array, map); for (i = 0; i < map->max_entries; i++) { if (is_percpu) val = this_cpu_ptr(array->pptrs[i]); else val = array_map_elem_ptr(array, i); num_elems++; key = i; ret = callback_fn((u64)(long)map, (u64)(long)&key, (u64)(long)val, (u64)(long)callback_ctx, 0); /* return value: 0 - continue, 1 - stop and return */ if (ret) break; } return num_elems; } static u64 array_map_mem_usage(const struct bpf_map *map) { struct bpf_array *array = container_of(map, struct bpf_array, map); bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; u32 elem_size = array->elem_size; u64 entries = map->max_entries; u64 usage = sizeof(*array); if (percpu) { usage += entries * sizeof(void *); usage += entries * elem_size * num_possible_cpus(); } else { if (map->map_flags & BPF_F_MMAPABLE) { usage = PAGE_ALIGN(usage); usage += PAGE_ALIGN(entries * elem_size); } else { usage += entries * elem_size; } } return usage; } BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array) const struct bpf_map_ops array_map_ops = { .map_meta_equal = array_map_meta_equal, .map_alloc_check = array_map_alloc_check, .map_alloc = array_map_alloc, .map_free = array_map_free, .map_get_next_key = array_map_get_next_key, .map_release_uref = array_map_free_timers_wq, .map_lookup_elem = array_map_lookup_elem, .map_update_elem = array_map_update_elem, .map_delete_elem = array_map_delete_elem, .map_gen_lookup = array_map_gen_lookup, .map_direct_value_addr = array_map_direct_value_addr, .map_direct_value_meta = array_map_direct_value_meta, .map_mmap = array_map_mmap, .map_seq_show_elem = array_map_seq_show_elem, .map_check_btf = array_map_check_btf, .map_lookup_batch = generic_map_lookup_batch, .map_update_batch = generic_map_update_batch, .map_set_for_each_callback_args = map_set_for_each_callback_args, .map_for_each_callback = bpf_for_each_array_elem, .map_mem_usage = array_map_mem_usage, .map_btf_id = &array_map_btf_ids[0], .iter_seq_info = &iter_seq_info, }; const struct bpf_map_ops percpu_array_map_ops = { .map_meta_equal = bpf_map_meta_equal, .map_alloc_check = array_map_alloc_check, .map_alloc = array_map_alloc, .map_free = array_map_free, .map_get_next_key = array_map_get_next_key, .map_lookup_elem = percpu_array_map_lookup_elem, .map_gen_lookup = percpu_array_map_gen_lookup, .map_update_elem = array_map_update_elem, .map_delete_elem = array_map_delete_elem, .map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem, .map_seq_show_elem = percpu_array_map_seq_show_elem, .map_check_btf = 
array_map_check_btf, .map_lookup_batch = generic_map_lookup_batch, .map_update_batch = generic_map_update_batch, .map_set_for_each_callback_args = map_set_for_each_callback_args, .map_for_each_callback = bpf_for_each_array_elem, .map_mem_usage = array_map_mem_usage, .map_btf_id = &array_map_btf_ids[0], .iter_seq_info = &iter_seq_info, }; static int fd_array_map_alloc_check(union bpf_attr *attr) { /* only file descriptors can be stored in this type of map */ if (attr->value_size != sizeof(u32)) return -EINVAL; /* Program read-only/write-only not supported for special maps yet. */ if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) return -EINVAL; return array_map_alloc_check(attr); } static void fd_array_map_free(struct bpf_map *map) { struct bpf_array *array = container_of(map, struct bpf_array, map); int i; /* make sure it's empty */ for (i = 0; i < array->map.max_entries; i++) BUG_ON(array->ptrs[i] != NULL); bpf_map_area_free(array); } static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) { return ERR_PTR(-EOPNOTSUPP); } /* only called from syscall */ int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) { void **elem, *ptr; int ret = 0; if (!map->ops->map_fd_sys_lookup_elem) return -ENOTSUPP; rcu_read_lock(); elem = array_map_lookup_elem(map, key); if (elem && (ptr = READ_ONCE(*elem))) *value = map->ops->map_fd_sys_lookup_elem(ptr); else ret = -ENOENT; rcu_read_unlock(); return ret; } /* only called from syscall */ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags) { struct bpf_array *array = container_of(map, struct bpf_array, map); void *new_ptr, *old_ptr; u32 index = *(u32 *)key, ufd; if (map_flags != BPF_ANY) return -EINVAL; if (index >= array->map.max_entries) return -E2BIG; ufd = *(u32 *)value; new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); if (IS_ERR(new_ptr)) return PTR_ERR(new_ptr); if (map->ops->map_poke_run) { mutex_lock(&array->aux->poke_mutex); old_ptr = xchg(array->ptrs + index, new_ptr); map->ops->map_poke_run(map, index, old_ptr, new_ptr); mutex_unlock(&array->aux->poke_mutex); } else { old_ptr = xchg(array->ptrs + index, new_ptr); } if (old_ptr) map->ops->map_fd_put_ptr(map, old_ptr, true); return 0; } static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer) { struct bpf_array *array = container_of(map, struct bpf_array, map); void *old_ptr; u32 index = *(u32 *)key; if (index >= array->map.max_entries) return -E2BIG; if (map->ops->map_poke_run) { mutex_lock(&array->aux->poke_mutex); old_ptr = xchg(array->ptrs + index, NULL); map->ops->map_poke_run(map, index, old_ptr, NULL); mutex_unlock(&array->aux->poke_mutex); } else { old_ptr = xchg(array->ptrs + index, NULL); } if (old_ptr) { map->ops->map_fd_put_ptr(map, old_ptr, need_defer); return 0; } else { return -ENOENT; } } static long fd_array_map_delete_elem(struct bpf_map *map, void *key) { return __fd_array_map_delete_elem(map, key, true); } static void *prog_fd_array_get_ptr(struct bpf_map *map, struct file *map_file, int fd) { struct bpf_prog *prog = bpf_prog_get(fd); bool is_extended; if (IS_ERR(prog)) return prog; if (prog->type == BPF_PROG_TYPE_EXT || !bpf_prog_map_compatible(map, prog)) { bpf_prog_put(prog); return ERR_PTR(-EINVAL); } mutex_lock(&prog->aux->ext_mutex); is_extended = prog->aux->is_extended; if (!is_extended) prog->aux->prog_array_member_cnt++; mutex_unlock(&prog->aux->ext_mutex); if (is_extended) { /* Extended prog can not be tail callee. 
It's to prevent a * potential infinite loop like: * tail callee prog entry -> tail callee prog subprog -> * freplace prog entry --tailcall-> tail callee prog entry. */ bpf_prog_put(prog); return ERR_PTR(-EBUSY); } return prog; } static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) { struct bpf_prog *prog = ptr; mutex_lock(&prog->aux->ext_mutex); prog->aux->prog_array_member_cnt--; mutex_unlock(&prog->aux->ext_mutex); /* bpf_prog is freed after one RCU or tasks trace grace period */ bpf_prog_put(prog); } static u32 prog_fd_array_sys_lookup_elem(void *ptr) { return ((struct bpf_prog *)ptr)->aux->id; } /* decrement refcnt of all bpf_progs that are stored in this map */ static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer) { struct bpf_array *array = container_of(map, struct bpf_array, map); int i; for (i = 0; i < array->map.max_entries; i++) __fd_array_map_delete_elem(map, &i, need_defer); } static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, struct seq_file *m) { void **elem, *ptr; u32 prog_id; rcu_read_lock(); elem = array_map_lookup_elem(map, key); if (elem) { ptr = READ_ONCE(*elem); if (ptr) { seq_printf(m, "%u: ", *(u32 *)key); prog_id = prog_fd_array_sys_lookup_elem(ptr); btf_type_seq_show(map->btf, map->btf_value_type_id, &prog_id, m); seq_putc(m, '\n'); } } rcu_read_unlock(); } struct prog_poke_elem { struct list_head list; struct bpf_prog_aux *aux; }; static int prog_array_map_poke_track(struct bpf_map *map, struct bpf_prog_aux *prog_aux) { struct prog_poke_elem *elem; struct bpf_array_aux *aux; int ret = 0; aux = container_of(map, struct bpf_array, map)->aux; mutex_lock(&aux->poke_mutex); list_for_each_entry(elem, &aux->poke_progs, list) { if (elem->aux == prog_aux) goto out; } elem = kmalloc(sizeof(*elem), GFP_KERNEL); if (!elem) { ret = -ENOMEM; goto out; } INIT_LIST_HEAD(&elem->list); /* We must track the program's aux info at this point in time * since the program pointer itself may not be stable yet, see * also comment in prog_array_map_poke_run(). */ elem->aux = prog_aux; list_add_tail(&elem->list, &aux->poke_progs); out: mutex_unlock(&aux->poke_mutex); return ret; } static void prog_array_map_poke_untrack(struct bpf_map *map, struct bpf_prog_aux *prog_aux) { struct prog_poke_elem *elem, *tmp; struct bpf_array_aux *aux; aux = container_of(map, struct bpf_array, map)->aux; mutex_lock(&aux->poke_mutex); list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { if (elem->aux == prog_aux) { list_del_init(&elem->list); kfree(elem); break; } } mutex_unlock(&aux->poke_mutex); } void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, struct bpf_prog *new, struct bpf_prog *old) { WARN_ON_ONCE(1); } static void prog_array_map_poke_run(struct bpf_map *map, u32 key, struct bpf_prog *old, struct bpf_prog *new) { struct prog_poke_elem *elem; struct bpf_array_aux *aux; aux = container_of(map, struct bpf_array, map)->aux; WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex)); list_for_each_entry(elem, &aux->poke_progs, list) { struct bpf_jit_poke_descriptor *poke; int i; for (i = 0; i < elem->aux->size_poke_tab; i++) { poke = &elem->aux->poke_tab[i]; /* Few things to be aware of: * * 1) We can only ever access aux in this context, but * not aux->prog since it might not be stable yet and * there could be danger of use after free otherwise. * 2) Initially when we start tracking aux, the program * is not JITed yet and also does not have a kallsyms * entry. 
We skip these as poke->tailcall_target_stable * is not active yet. The JIT will do the final fixup * before setting it stable. The various * poke->tailcall_target_stable are successively * activated, so tail call updates can arrive from here * while JIT is still finishing its final fixup for * non-activated poke entries. * 3) Also programs reaching refcount of zero while patching * is in progress is okay since we're protected under * poke_mutex and untrack the programs before the JIT * buffer is freed. */ if (!READ_ONCE(poke->tailcall_target_stable)) continue; if (poke->reason != BPF_POKE_REASON_TAIL_CALL) continue; if (poke->tail_call.map != map || poke->tail_call.key != key) continue; bpf_arch_poke_desc_update(poke, new, old); } } } static void prog_array_map_clear_deferred(struct work_struct *work) { struct bpf_map *map = container_of(work, struct bpf_array_aux, work)->map; bpf_fd_array_map_clear(map, true); bpf_map_put(map); } static void prog_array_map_clear(struct bpf_map *map) { struct bpf_array_aux *aux = container_of(map, struct bpf_array, map)->aux; bpf_map_inc(map); schedule_work(&aux->work); } static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) { struct bpf_array_aux *aux; struct bpf_map *map; aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT); if (!aux) return ERR_PTR(-ENOMEM); INIT_WORK(&aux->work, prog_array_map_clear_deferred); INIT_LIST_HEAD(&aux->poke_progs); mutex_init(&aux->poke_mutex); map = array_map_alloc(attr); if (IS_ERR(map)) { kfree(aux); return map; } container_of(map, struct bpf_array, map)->aux = aux; aux->map = map; return map; } static void prog_array_map_free(struct bpf_map *map) { struct prog_poke_elem *elem, *tmp; struct bpf_array_aux *aux; aux = container_of(map, struct bpf_array, map)->aux; list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { list_del_init(&elem->list); kfree(elem); } kfree(aux); fd_array_map_free(map); } /* prog_array->aux->{type,jited} is a runtime binding. * Doing static check alone in the verifier is not enough. * Thus, prog_array_map cannot be used as an inner_map * and map_meta_equal is not implemented. 
*/ const struct bpf_map_ops prog_array_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = prog_array_map_alloc, .map_free = prog_array_map_free, .map_poke_track = prog_array_map_poke_track, .map_poke_untrack = prog_array_map_poke_untrack, .map_poke_run = prog_array_map_poke_run, .map_get_next_key = array_map_get_next_key, .map_lookup_elem = fd_array_map_lookup_elem, .map_delete_elem = fd_array_map_delete_elem, .map_fd_get_ptr = prog_fd_array_get_ptr, .map_fd_put_ptr = prog_fd_array_put_ptr, .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, .map_release_uref = prog_array_map_clear, .map_seq_show_elem = prog_array_map_seq_show_elem, .map_mem_usage = array_map_mem_usage, .map_btf_id = &array_map_btf_ids[0], }; static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, struct file *map_file) { struct bpf_event_entry *ee; ee = kzalloc(sizeof(*ee), GFP_KERNEL); if (ee) { ee->event = perf_file->private_data; ee->perf_file = perf_file; ee->map_file = map_file; } return ee; } static void __bpf_event_entry_free(struct rcu_head *rcu) { struct bpf_event_entry *ee; ee = container_of(rcu, struct bpf_event_entry, rcu); fput(ee->perf_file); kfree(ee); } static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee) { call_rcu(&ee->rcu, __bpf_event_entry_free); } static void *perf_event_fd_array_get_ptr(struct bpf_map *map, struct file *map_file, int fd) { struct bpf_event_entry *ee; struct perf_event *event; struct file *perf_file; u64 value; perf_file = perf_event_get(fd); if (IS_ERR(perf_file)) return perf_file; ee = ERR_PTR(-EOPNOTSUPP); event = perf_file->private_data; if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP) goto err_out; ee = bpf_event_entry_gen(perf_file, map_file); if (ee) return ee; ee = ERR_PTR(-ENOMEM); err_out: fput(perf_file); return ee; } static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) { /* bpf_perf_event is freed after one RCU grace period */ bpf_event_entry_free_rcu(ptr); } static void perf_event_fd_array_release(struct bpf_map *map, struct file *map_file) { struct bpf_array *array = container_of(map, struct bpf_array, map); struct bpf_event_entry *ee; int i; if (map->map_flags & BPF_F_PRESERVE_ELEMS) return; rcu_read_lock(); for (i = 0; i < array->map.max_entries; i++) { ee = READ_ONCE(array->ptrs[i]); if (ee && ee->map_file == map_file) __fd_array_map_delete_elem(map, &i, true); } rcu_read_unlock(); } static void perf_event_fd_array_map_free(struct bpf_map *map) { if (map->map_flags & BPF_F_PRESERVE_ELEMS) bpf_fd_array_map_clear(map, false); fd_array_map_free(map); } const struct bpf_map_ops perf_event_array_map_ops = { .map_meta_equal = bpf_map_meta_equal, .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_map_alloc, .map_free = perf_event_fd_array_map_free, .map_get_next_key = array_map_get_next_key, .map_lookup_elem = fd_array_map_lookup_elem, .map_delete_elem = fd_array_map_delete_elem, .map_fd_get_ptr = perf_event_fd_array_get_ptr, .map_fd_put_ptr = perf_event_fd_array_put_ptr, .map_release = perf_event_fd_array_release, .map_check_btf = map_check_no_btf, .map_mem_usage = array_map_mem_usage, .map_btf_id = &array_map_btf_ids[0], }; #ifdef CONFIG_CGROUPS static void *cgroup_fd_array_get_ptr(struct bpf_map *map, struct file *map_file /* not used */, int fd) { return cgroup_get_from_fd(fd); } static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) { /* cgroup_put free cgrp after a rcu grace period */ cgroup_put(ptr); } static void 
cgroup_fd_array_free(struct bpf_map *map) { bpf_fd_array_map_clear(map, false); fd_array_map_free(map); } const struct bpf_map_ops cgroup_array_map_ops = { .map_meta_equal = bpf_map_meta_equal, .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_map_alloc, .map_free = cgroup_fd_array_free, .map_get_next_key = array_map_get_next_key, .map_lookup_elem = fd_array_map_lookup_elem, .map_delete_elem = fd_array_map_delete_elem, .map_fd_get_ptr = cgroup_fd_array_get_ptr, .map_fd_put_ptr = cgroup_fd_array_put_ptr, .map_check_btf = map_check_no_btf, .map_mem_usage = array_map_mem_usage, .map_btf_id = &array_map_btf_ids[0], }; #endif static struct bpf_map *array_of_map_alloc(union bpf_attr *attr) { struct bpf_map *map, *inner_map_meta; inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); if (IS_ERR(inner_map_meta)) return inner_map_meta; map = array_map_alloc(attr); if (IS_ERR(map)) { bpf_map_meta_free(inner_map_meta); return map; } map->inner_map_meta = inner_map_meta; return map; } static void array_of_map_free(struct bpf_map *map) { /* map->inner_map_meta is only accessed by syscall which * is protected by fdget/fdput. */ bpf_map_meta_free(map->inner_map_meta); bpf_fd_array_map_clear(map, false); fd_array_map_free(map); } static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) { struct bpf_map **inner_map = array_map_lookup_elem(map, key); if (!inner_map) return NULL; return READ_ONCE(*inner_map); } static int array_of_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { struct bpf_array *array = container_of(map, struct bpf_array, map); u32 elem_size = array->elem_size; struct bpf_insn *insn = insn_buf; const int ret = BPF_REG_0; const int map_ptr = BPF_REG_1; const int index = BPF_REG_2; *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); if (!map->bypass_spec_v1) { *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); } else { *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); } if (is_power_of_2(elem_size)) *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); else *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0); *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); *insn++ = BPF_MOV64_IMM(ret, 0); return insn - insn_buf; } const struct bpf_map_ops array_of_maps_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_of_map_alloc, .map_free = array_of_map_free, .map_get_next_key = array_map_get_next_key, .map_lookup_elem = array_of_map_lookup_elem, .map_delete_elem = fd_array_map_delete_elem, .map_fd_get_ptr = bpf_map_fd_get_ptr, .map_fd_put_ptr = bpf_map_fd_put_ptr, .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, .map_gen_lookup = array_of_map_gen_lookup, .map_lookup_batch = generic_map_lookup_batch, .map_update_batch = generic_map_update_batch, .map_check_btf = map_check_no_btf, .map_mem_usage = array_map_mem_usage, .map_btf_id = &array_map_btf_ids[0], };
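
The array map code above is driven from user space through the bpf() syscall. Below is a minimal, hypothetical userspace sketch (not part of this file) assuming libbpf >= 0.7 is available; it creates a BPF_MAP_TYPE_ARRAY, updates one slot, and reads it back. The map name "example_array" and the sizes are illustrative only, and error handling is abbreviated.

/* Hypothetical userspace usage sketch for BPF_MAP_TYPE_ARRAY (assumes libbpf). */
#include <stdio.h>
#include <bpf/bpf.h>

int main(void)
{
	__u32 key = 3, value = 42, out = 0;
	int fd;

	/* key_size must be 4, as enforced by array_map_alloc_check() above. */
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_array",
			    sizeof(__u32), sizeof(__u32), 16, NULL);
	if (fd < 0)
		return 1;

	/* All elements are pre-allocated; BPF_NOEXIST would fail with -EEXIST. */
	if (bpf_map_update_elem(fd, &key, &value, BPF_ANY))
		return 1;

	if (bpf_map_lookup_elem(fd, &key, &out))
		return 1;

	printf("slot %u = %u\n", key, out);
	return 0;
}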
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stream Parser
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 */

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <net/strparser.h>
#include <net/netns/generic.h>
#include <net/sock.h>

static struct workqueue_struct *strp_wq;

static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
{
	return (struct _strp_msg *)((void *)skb->cb +
		offsetof(struct sk_skb_cb, strp));
}

/* Lower lock held */
static void strp_abort_strp(struct strparser *strp, int err)
{
	/* Unrecoverable error in receive */

	cancel_delayed_work(&strp->msg_timer_work);

	if (strp->stopped)
		return;

	strp->stopped = 1;

	if (strp->sk) {
		struct sock *sk = strp->sk;

		/* Report an error on the lower socket */
		sk->sk_err = -err;
		sk_error_report(sk);
	}
}

static void strp_start_timer(struct strparser *strp, long timeo)
{
	if (timeo && timeo != LONG_MAX)
		mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
}

/* Lower lock held */
static void strp_parser_err(struct strparser *strp, int err,
			    read_descriptor_t *desc)
{
	desc->error =
err; kfree_skb(strp->skb_head); strp->skb_head = NULL; strp->cb.abort_parser(strp, err); } static inline int strp_peek_len(struct strparser *strp) { if (strp->sk) { struct socket *sock = strp->sk->sk_socket; return sock->ops->peek_len(sock); } /* If we don't have an associated socket there's nothing to peek. * Return int max to avoid stopping the strparser. */ return INT_MAX; } /* Lower socket lock held */ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, unsigned int orig_offset, size_t orig_len, size_t max_msg_size, long timeo) { struct strparser *strp = (struct strparser *)desc->arg.data; struct _strp_msg *stm; struct sk_buff *head, *skb; size_t eaten = 0, cand_len; ssize_t extra; int err; bool cloned_orig = false; if (strp->paused) return 0; head = strp->skb_head; if (head) { /* Message already in progress */ if (unlikely(orig_offset)) { /* Getting data with a non-zero offset when a message is * in progress is not expected. If it does happen, we * need to clone and pull since we can't deal with * offsets in the skbs for a message expect in the head. */ orig_skb = skb_clone(orig_skb, GFP_ATOMIC); if (!orig_skb) { STRP_STATS_INCR(strp->stats.mem_fail); desc->error = -ENOMEM; return 0; } if (!pskb_pull(orig_skb, orig_offset)) { STRP_STATS_INCR(strp->stats.mem_fail); kfree_skb(orig_skb); desc->error = -ENOMEM; return 0; } cloned_orig = true; orig_offset = 0; } if (!strp->skb_nextp) { /* We are going to append to the frags_list of head. * Need to unshare the frag_list. */ err = skb_unclone(head, GFP_ATOMIC); if (err) { STRP_STATS_INCR(strp->stats.mem_fail); desc->error = err; return 0; } if (unlikely(skb_shinfo(head)->frag_list)) { /* We can't append to an sk_buff that already * has a frag_list. We create a new head, point * the frag_list of that to the old head, and * then are able to use the old head->next for * appending to the message. */ if (WARN_ON(head->next)) { desc->error = -EINVAL; return 0; } skb = alloc_skb_for_msg(head); if (!skb) { STRP_STATS_INCR(strp->stats.mem_fail); desc->error = -ENOMEM; return 0; } strp->skb_nextp = &head->next; strp->skb_head = skb; head = skb; } else { strp->skb_nextp = &skb_shinfo(head)->frag_list; } } } while (eaten < orig_len) { /* Always clone since we will consume something */ skb = skb_clone(orig_skb, GFP_ATOMIC); if (!skb) { STRP_STATS_INCR(strp->stats.mem_fail); desc->error = -ENOMEM; break; } cand_len = orig_len - eaten; head = strp->skb_head; if (!head) { head = skb; strp->skb_head = head; /* Will set skb_nextp on next packet if needed */ strp->skb_nextp = NULL; stm = _strp_msg(head); memset(stm, 0, sizeof(*stm)); stm->strp.offset = orig_offset + eaten; } else { /* Unclone if we are appending to an skb that we * already share a frag_list with. 
*/ if (skb_has_frag_list(skb)) { err = skb_unclone(skb, GFP_ATOMIC); if (err) { STRP_STATS_INCR(strp->stats.mem_fail); desc->error = err; break; } } stm = _strp_msg(head); *strp->skb_nextp = skb; strp->skb_nextp = &skb->next; head->data_len += skb->len; head->len += skb->len; head->truesize += skb->truesize; } if (!stm->strp.full_len) { ssize_t len; len = (*strp->cb.parse_msg)(strp, head); if (!len) { /* Need more header to determine length */ if (!stm->accum_len) { /* Start RX timer for new message */ strp_start_timer(strp, timeo); } stm->accum_len += cand_len; eaten += cand_len; STRP_STATS_INCR(strp->stats.need_more_hdr); WARN_ON(eaten != orig_len); break; } else if (len < 0) { if (len == -ESTRPIPE && stm->accum_len) { len = -ENODATA; strp->unrecov_intr = 1; } else { strp->interrupted = 1; } strp_parser_err(strp, len, desc); break; } else if (len > max_msg_size) { /* Message length exceeds maximum allowed */ STRP_STATS_INCR(strp->stats.msg_too_big); strp_parser_err(strp, -EMSGSIZE, desc); break; } else if (len <= (ssize_t)head->len - skb->len - stm->strp.offset) { /* Length must be into new skb (and also * greater than zero) */ STRP_STATS_INCR(strp->stats.bad_hdr_len); strp_parser_err(strp, -EPROTO, desc); break; } stm->strp.full_len = len; } extra = (ssize_t)(stm->accum_len + cand_len) - stm->strp.full_len; if (extra < 0) { /* Message not complete yet. */ if (stm->strp.full_len - stm->accum_len > strp_peek_len(strp)) { /* Don't have the whole message in the socket * buffer. Set strp->need_bytes to wait for * the rest of the message. Also, set "early * eaten" since we've already buffered the skb * but don't consume yet per strp_read_sock. */ if (!stm->accum_len) { /* Start RX timer for new message */ strp_start_timer(strp, timeo); } stm->accum_len += cand_len; eaten += cand_len; strp->need_bytes = stm->strp.full_len - stm->accum_len; STRP_STATS_ADD(strp->stats.bytes, cand_len); desc->count = 0; /* Stop reading socket */ break; } stm->accum_len += cand_len; eaten += cand_len; WARN_ON(eaten != orig_len); break; } /* Positive extra indicates more bytes than needed for the * message */ WARN_ON(extra > cand_len); eaten += (cand_len - extra); /* Hurray, we have a new message! 
*/ cancel_delayed_work(&strp->msg_timer_work); strp->skb_head = NULL; strp->need_bytes = 0; STRP_STATS_INCR(strp->stats.msgs); /* Give skb to upper layer */ strp->cb.rcv_msg(strp, head); if (unlikely(strp->paused)) { /* Upper layer paused strp */ break; } } if (cloned_orig) kfree_skb(orig_skb); STRP_STATS_ADD(strp->stats.bytes, eaten); return eaten; } int strp_process(struct strparser *strp, struct sk_buff *orig_skb, unsigned int orig_offset, size_t orig_len, size_t max_msg_size, long timeo) { read_descriptor_t desc; /* Dummy arg to strp_recv */ desc.arg.data = strp; return __strp_recv(&desc, orig_skb, orig_offset, orig_len, max_msg_size, timeo); } EXPORT_SYMBOL_GPL(strp_process); static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, unsigned int orig_offset, size_t orig_len) { struct strparser *strp = (struct strparser *)desc->arg.data; return __strp_recv(desc, orig_skb, orig_offset, orig_len, strp->sk->sk_rcvbuf, strp->sk->sk_rcvtimeo); } static int default_read_sock_done(struct strparser *strp, int err) { return err; } /* Called with lock held on lower socket */ static int strp_read_sock(struct strparser *strp) { struct socket *sock = strp->sk->sk_socket; read_descriptor_t desc; if (unlikely(!sock || !sock->ops)) return -EBUSY; if (unlikely(!strp->cb.read_sock && !sock->ops->read_sock)) return -EBUSY; desc.arg.data = strp; desc.error = 0; desc.count = 1; /* give more than one skb per call */ /* sk should be locked here, so okay to do read_sock */ if (strp->cb.read_sock) strp->cb.read_sock(strp, &desc, strp_recv); else sock->ops->read_sock(strp->sk, &desc, strp_recv); desc.error = strp->cb.read_sock_done(strp, desc.error); return desc.error; } /* Lower sock lock held */ void strp_data_ready(struct strparser *strp) { if (unlikely(strp->stopped) || strp->paused) return; /* This check is needed to synchronize with do_strp_work. * do_strp_work acquires a process lock (lock_sock) whereas * the lock held here is bh_lock_sock. The two locks can be * held by different threads at the same time, but bh_lock_sock * allows a thread in BH context to safely check if the process * lock is held. In this case, if the lock is held, queue work. */ if (sock_owned_by_user_nocheck(strp->sk)) { queue_work(strp_wq, &strp->work); return; } if (strp->need_bytes) { if (strp_peek_len(strp) < strp->need_bytes) return; } if (strp_read_sock(strp) == -ENOMEM) queue_work(strp_wq, &strp->work); } EXPORT_SYMBOL_GPL(strp_data_ready); static void do_strp_work(struct strparser *strp) { /* We need the read lock to synchronize with strp_data_ready. We * need the socket lock for calling strp_read_sock. 
*/ strp->cb.lock(strp); if (unlikely(strp->stopped)) goto out; if (strp->paused) goto out; if (strp_read_sock(strp) == -ENOMEM) queue_work(strp_wq, &strp->work); out: strp->cb.unlock(strp); } static void strp_work(struct work_struct *w) { do_strp_work(container_of(w, struct strparser, work)); } static void strp_msg_timeout(struct work_struct *w) { struct strparser *strp = container_of(w, struct strparser, msg_timer_work.work); /* Message assembly timed out */ STRP_STATS_INCR(strp->stats.msg_timeouts); strp->cb.lock(strp); strp->cb.abort_parser(strp, -ETIMEDOUT); strp->cb.unlock(strp); } static void strp_sock_lock(struct strparser *strp) { lock_sock(strp->sk); } static void strp_sock_unlock(struct strparser *strp) { release_sock(strp->sk); } int strp_init(struct strparser *strp, struct sock *sk, const struct strp_callbacks *cb) { if (!cb || !cb->rcv_msg || !cb->parse_msg) return -EINVAL; /* The sk (sock) arg determines the mode of the stream parser. * * If the sock is set then the strparser is in receive callback mode. * The upper layer calls strp_data_ready to kick receive processing * and strparser calls the read_sock function on the socket to * get packets. * * If the sock is not set then the strparser is in general mode. * The upper layer calls strp_process for each skb to be parsed. */ if (!sk) { if (!cb->lock || !cb->unlock) return -EINVAL; } memset(strp, 0, sizeof(*strp)); strp->sk = sk; strp->cb.lock = cb->lock ? : strp_sock_lock; strp->cb.unlock = cb->unlock ? : strp_sock_unlock; strp->cb.rcv_msg = cb->rcv_msg; strp->cb.parse_msg = cb->parse_msg; strp->cb.read_sock = cb->read_sock; strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp; INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout); INIT_WORK(&strp->work, strp_work); return 0; } EXPORT_SYMBOL_GPL(strp_init); /* Sock process lock held (lock_sock) */ void __strp_unpause(struct strparser *strp) { strp->paused = 0; if (strp->need_bytes) { if (strp_peek_len(strp) < strp->need_bytes) return; } strp_read_sock(strp); } EXPORT_SYMBOL_GPL(__strp_unpause); void strp_unpause(struct strparser *strp) { strp->paused = 0; /* Sync setting paused with RX work */ smp_mb(); queue_work(strp_wq, &strp->work); } EXPORT_SYMBOL_GPL(strp_unpause); /* strp must already be stopped so that strp_recv will no longer be called. * Note that strp_done is not called with the lower socket held. */ void strp_done(struct strparser *strp) { WARN_ON(!strp->stopped); cancel_delayed_work_sync(&strp->msg_timer_work); cancel_work_sync(&strp->work); if (strp->skb_head) { kfree_skb(strp->skb_head); strp->skb_head = NULL; } } EXPORT_SYMBOL_GPL(strp_done); void strp_stop(struct strparser *strp) { strp->stopped = 1; } EXPORT_SYMBOL_GPL(strp_stop); void strp_check_rcv(struct strparser *strp) { queue_work(strp_wq, &strp->work); } EXPORT_SYMBOL_GPL(strp_check_rcv); static int __init strp_dev_init(void) { BUILD_BUG_ON(sizeof(struct sk_skb_cb) > sizeof_field(struct sk_buff, cb)); strp_wq = create_singlethread_workqueue("kstrp"); if (unlikely(!strp_wq)) return -ENOMEM; return 0; } device_initcall(strp_dev_init);
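
As the comment in strp_init() above describes, a stream-parser user supplies parse_msg/rcv_msg callbacks. The following hypothetical sketch (not part of this file) assumes a simple framing of a 4-byte big-endian length header; the my_* names are illustrative only, and a real consumer would do more in rcv_msg than drop the skb.

/* Illustrative strparser consumer sketch (assumed 4-byte BE length-header framing). */
#include <linux/skbuff.h>
#include <net/strparser.h>

/* Return full message length (header + payload), or 0 if more data is needed. */
static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_msg *stm = strp_msg(skb);
	__be32 hdr;

	if (skb_copy_bits(skb, stm->offset, &hdr, sizeof(hdr)))
		return 0;	/* length header not fully received yet */

	return (int)ntohl(hdr) + sizeof(hdr);
}

/* Called once a complete message has been assembled in @skb. */
static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
	consume_skb(skb);	/* a real user would queue or process the message */
}

static const struct strp_callbacks my_cb = {
	.parse_msg = my_parse_msg,
	.rcv_msg   = my_rcv_msg,
};

/* In receive-callback mode: strp_init(&strp, sk, &my_cb), then strp_data_ready()
 * kicks parsing; in general mode (sk == NULL) the caller also supplies lock/unlock
 * callbacks and feeds skbs via strp_process().
 */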
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* Note : this module is expected to remain private, do not expose it */

#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE

/* ****************************************
*  Dependencies
******************************************/
#include <linux/zstd_errors.h>  /* enum list */
#include "compiler.h"
#include "debug.h"
#include "zstd_deps.h"          /* size_t */


/* ****************************************
*  Compiler-specific
******************************************/
#define ERR_STATIC static __attribute__((unused))


/*-****************************************
*  Customization (error_public.h)
******************************************/
typedef ZSTD_ErrorCode ERR_enum;
#define PREFIX(name) ZSTD_error_##name


/*-****************************************
*  Error codes handling
******************************************/
#undef ERROR   /* already defined on Visual Studio */
#define ERROR(name) ZSTD_ERROR(name)
#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))

ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }

ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }

/* check and forward error code */
#define CHECK_V_F(e, f)     \
    size_t const e = f;     \
    do {                    \
        if (ERR_isError(e)) \
            return e;       \
    } while (0)
#define CHECK_F(f)   do { CHECK_V_F(_var_err__, f); } while (0)


/*-****************************************
*  Error Strings
******************************************/

const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */

ERR_STATIC const char* ERR_getErrorName(size_t code)
{
    return ERR_getErrorString(ERR_getErrorCode(code));
}

/*
 * Ignore: this is an internal helper.
 *
 * This is a helper function to help force C99-correctness during compilation.
 * Under strict compilation modes, variadic macro arguments can't be empty.
 * However, variadic function arguments can be. Using a function therefore lets
 * us statically check that at least one (string) argument was passed,
 * independent of the compilation flags.
 */
static INLINE_KEYWORD UNUSED_ATTR
void _force_has_format_string(const char *format, ...)
{
    (void)format;
}

/*
 * Ignore: this is an internal helper.
 *
 * We want to force this function invocation to be syntactically correct, but
 * we don't want to force runtime evaluation of its arguments.
 */
#define _FORCE_HAS_FORMAT_STRING(...)              \
    do {                                           \
        if (0) {                                   \
            _force_has_format_string(__VA_ARGS__); \
        }                                          \
    } while (0)

#define ERR_QUOTE(str) #str

/*
 * Return the specified error if the condition evaluates to true.
 *
 * In debug modes, prints additional information.
* In order to do that (particularly, printing the conditional that failed), * this can't just wrap RETURN_ERROR(). */ #define RETURN_ERROR_IF(cond, err, ...) \ do { \ if (cond) { \ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ RAWLOG(3, ": " __VA_ARGS__); \ RAWLOG(3, "\n"); \ return ERROR(err); \ } \ } while (0) /* * Unconditionally return the specified error. * * In debug modes, prints additional information. */ #define RETURN_ERROR(err, ...) \ do { \ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ RAWLOG(3, ": " __VA_ARGS__); \ RAWLOG(3, "\n"); \ return ERROR(err); \ } while(0) /* * If the provided expression evaluates to an error code, returns that error code. * * In debug modes, prints additional information. */ #define FORWARD_IF_ERROR(err, ...) \ do { \ size_t const err_code = (err); \ if (ERR_isError(err_code)) { \ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ RAWLOG(3, ": " __VA_ARGS__); \ RAWLOG(3, "\n"); \ return err_code; \ } \ } while(0) #endif /* ERROR_H_MODULE */
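As a hedged illustration of how these macros are meant to compose: RETURN_ERROR_IF() converts a failed precondition into an encoded error value, and FORWARD_IF_ERROR() propagates an error returned by a callee. The helpers below are hypothetical; srcSize_wrong expands (via ERROR()) to ZSTD_error_srcSize_wrong, an existing code from linux/zstd_errors.h.

/* Hypothetical helpers showing the intended usage pattern. */
static size_t demo_check_src_size(size_t srcSize, size_t minSize)
{
        RETURN_ERROR_IF(srcSize < minSize, srcSize_wrong,
                        "source too small: %zu < %zu", srcSize, minSize);
        return srcSize;
}

static size_t demo_caller(const void *src, size_t srcSize)
{
        (void)src;
        /* Returns the callee's encoded error unchanged if it is one. */
        FORWARD_IF_ERROR(demo_check_src_size(srcSize, 4), "header check failed");
        return srcSize;
}
/* ERR_isError(demo_caller(buf, len)) tells the caller whether the size_t
 * result encodes an error, and ERR_getErrorName() gives its name. */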
// SPDX-License-Identifier: GPL-2.0 /* * To speed up listener socket lookup, create an array to store all sockets * listening on the same port. This allows a decision to be made after finding * the first socket. An optional BPF program can also be configured for * selecting the socket index from the array of available sockets.
*/ #include <net/ip.h> #include <net/sock_reuseport.h> #include <linux/bpf.h> #include <linux/idr.h> #include <linux/filter.h> #include <linux/rcupdate.h> #define INIT_SOCKS 128 DEFINE_SPINLOCK(reuseport_lock); static DEFINE_IDA(reuseport_ida); static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse, struct sock_reuseport *reuse, bool bind_inany); void reuseport_has_conns_set(struct sock *sk) { struct sock_reuseport *reuse; if (!rcu_access_pointer(sk->sk_reuseport_cb)) return; spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); if (likely(reuse)) reuse->has_conns = 1; spin_unlock_bh(&reuseport_lock); } EXPORT_SYMBOL(reuseport_has_conns_set); static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse) { /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */ WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1); } static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse) { /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */ WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1); } static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse) { if (sk->sk_incoming_cpu >= 0) __reuseport_get_incoming_cpu(reuse); } static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse) { if (sk->sk_incoming_cpu >= 0) __reuseport_put_incoming_cpu(reuse); } void reuseport_update_incoming_cpu(struct sock *sk, int val) { struct sock_reuseport *reuse; int old_sk_incoming_cpu; if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) { /* Paired with REAE_ONCE() in sk_incoming_cpu_update() * and compute_score(). */ WRITE_ONCE(sk->sk_incoming_cpu, val); return; } spin_lock_bh(&reuseport_lock); /* This must be done under reuseport_lock to avoid a race with * reuseport_grow(), which accesses sk->sk_incoming_cpu without * lock_sock() when detaching a shutdown()ed sk. * * Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */ old_sk_incoming_cpu = sk->sk_incoming_cpu; WRITE_ONCE(sk->sk_incoming_cpu, val); reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); /* reuseport_grow() has detached a closed sk. 
*/ if (!reuse) goto out; if (old_sk_incoming_cpu < 0 && val >= 0) __reuseport_get_incoming_cpu(reuse); else if (old_sk_incoming_cpu >= 0 && val < 0) __reuseport_put_incoming_cpu(reuse); out: spin_unlock_bh(&reuseport_lock); } static int reuseport_sock_index(struct sock *sk, const struct sock_reuseport *reuse, bool closed) { int left, right; if (!closed) { left = 0; right = reuse->num_socks; } else { left = reuse->max_socks - reuse->num_closed_socks; right = reuse->max_socks; } for (; left < right; left++) if (reuse->socks[left] == sk) return left; return -1; } static void __reuseport_add_sock(struct sock *sk, struct sock_reuseport *reuse) { reuse->socks[reuse->num_socks] = sk; /* paired with smp_rmb() in reuseport_(select|migrate)_sock() */ smp_wmb(); reuse->num_socks++; reuseport_get_incoming_cpu(sk, reuse); } static bool __reuseport_detach_sock(struct sock *sk, struct sock_reuseport *reuse) { int i = reuseport_sock_index(sk, reuse, false); if (i == -1) return false; reuse->socks[i] = reuse->socks[reuse->num_socks - 1]; reuse->num_socks--; reuseport_put_incoming_cpu(sk, reuse); return true; } static void __reuseport_add_closed_sock(struct sock *sk, struct sock_reuseport *reuse) { reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk; /* paired with READ_ONCE() in inet_csk_bind_conflict() */ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1); reuseport_get_incoming_cpu(sk, reuse); } static bool __reuseport_detach_closed_sock(struct sock *sk, struct sock_reuseport *reuse) { int i = reuseport_sock_index(sk, reuse, true); if (i == -1) return false; reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks]; /* paired with READ_ONCE() in inet_csk_bind_conflict() */ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1); reuseport_put_incoming_cpu(sk, reuse); return true; } static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks) { struct sock_reuseport *reuse; reuse = kzalloc(struct_size(reuse, socks, max_socks), GFP_ATOMIC); if (!reuse) return NULL; reuse->max_socks = max_socks; RCU_INIT_POINTER(reuse->prog, NULL); return reuse; } int reuseport_alloc(struct sock *sk, bool bind_inany) { struct sock_reuseport *reuse; int id, ret = 0; /* bh lock used since this function call may precede hlist lock in * soft irq of receive path or setsockopt from process context */ spin_lock_bh(&reuseport_lock); /* Allocation attempts can occur concurrently via the setsockopt path * and the bind/hash path. Nothing to do when we lose the race. */ reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); if (reuse) { if (reuse->num_closed_socks) { /* sk was shutdown()ed before */ ret = reuseport_resurrect(sk, reuse, NULL, bind_inany); goto out; } /* Only set reuse->bind_inany if the bind_inany is true. * Otherwise, it will overwrite the reuse->bind_inany * which was set by the bind/hash path. 
*/ if (bind_inany) reuse->bind_inany = bind_inany; goto out; } reuse = __reuseport_alloc(INIT_SOCKS); if (!reuse) { ret = -ENOMEM; goto out; } id = ida_alloc(&reuseport_ida, GFP_ATOMIC); if (id < 0) { kfree(reuse); ret = id; goto out; } reuse->reuseport_id = id; reuse->bind_inany = bind_inany; reuse->socks[0] = sk; reuse->num_socks = 1; reuseport_get_incoming_cpu(sk, reuse); rcu_assign_pointer(sk->sk_reuseport_cb, reuse); out: spin_unlock_bh(&reuseport_lock); return ret; } EXPORT_SYMBOL(reuseport_alloc); static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) { struct sock_reuseport *more_reuse; u32 more_socks_size, i; more_socks_size = reuse->max_socks * 2U; if (more_socks_size > U16_MAX) { if (reuse->num_closed_socks) { /* Make room by removing a closed sk. * The child has already been migrated. * Only reqsk left at this point. */ struct sock *sk; sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks]; RCU_INIT_POINTER(sk->sk_reuseport_cb, NULL); __reuseport_detach_closed_sock(sk, reuse); return reuse; } return NULL; } more_reuse = __reuseport_alloc(more_socks_size); if (!more_reuse) return NULL; more_reuse->num_socks = reuse->num_socks; more_reuse->num_closed_socks = reuse->num_closed_socks; more_reuse->prog = reuse->prog; more_reuse->reuseport_id = reuse->reuseport_id; more_reuse->bind_inany = reuse->bind_inany; more_reuse->has_conns = reuse->has_conns; more_reuse->incoming_cpu = reuse->incoming_cpu; memcpy(more_reuse->socks, reuse->socks, reuse->num_socks * sizeof(struct sock *)); memcpy(more_reuse->socks + (more_reuse->max_socks - more_reuse->num_closed_socks), reuse->socks + (reuse->max_socks - reuse->num_closed_socks), reuse->num_closed_socks * sizeof(struct sock *)); more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts); for (i = 0; i < reuse->max_socks; ++i) rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb, more_reuse); /* Note: we use kfree_rcu here instead of reuseport_free_rcu so * that reuse and more_reuse can temporarily share a reference * to prog. */ kfree_rcu(reuse, rcu); return more_reuse; } static void reuseport_free_rcu(struct rcu_head *head) { struct sock_reuseport *reuse; reuse = container_of(head, struct sock_reuseport, rcu); sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1)); ida_free(&reuseport_ida, reuse->reuseport_id); kfree(reuse); } /** * reuseport_add_sock - Add a socket to the reuseport group of another. * @sk: New socket to add to the group. * @sk2: Socket belonging to the existing reuseport group. * @bind_inany: Whether or not the group is bound to a local INANY address. * * May return ENOMEM and not add socket to group under memory pressure. 
*/ int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany) { struct sock_reuseport *old_reuse, *reuse; if (!rcu_access_pointer(sk2->sk_reuseport_cb)) { int err = reuseport_alloc(sk2, bind_inany); if (err) return err; } spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk2->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); if (old_reuse && old_reuse->num_closed_socks) { /* sk was shutdown()ed before */ int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany); spin_unlock_bh(&reuseport_lock); return err; } if (old_reuse && old_reuse->num_socks != 1) { spin_unlock_bh(&reuseport_lock); return -EBUSY; } if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) { reuse = reuseport_grow(reuse); if (!reuse) { spin_unlock_bh(&reuseport_lock); return -ENOMEM; } } __reuseport_add_sock(sk, reuse); rcu_assign_pointer(sk->sk_reuseport_cb, reuse); spin_unlock_bh(&reuseport_lock); if (old_reuse) call_rcu(&old_reuse->rcu, reuseport_free_rcu); return 0; } EXPORT_SYMBOL(reuseport_add_sock); static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse, struct sock_reuseport *reuse, bool bind_inany) { if (old_reuse == reuse) { /* If sk was in the same reuseport group, just pop sk out of * the closed section and push sk into the listening section. */ __reuseport_detach_closed_sock(sk, old_reuse); __reuseport_add_sock(sk, old_reuse); return 0; } if (!reuse) { /* In bind()/listen() path, we cannot carry over the eBPF prog * for the shutdown()ed socket. In setsockopt() path, we should * not change the eBPF prog of listening sockets by attaching a * prog to the shutdown()ed socket. Thus, we will allocate a new * reuseport group and detach sk from the old group. */ int id; reuse = __reuseport_alloc(INIT_SOCKS); if (!reuse) return -ENOMEM; id = ida_alloc(&reuseport_ida, GFP_ATOMIC); if (id < 0) { kfree(reuse); return id; } reuse->reuseport_id = id; reuse->bind_inany = bind_inany; } else { /* Move sk from the old group to the new one if * - all the other listeners in the old group were close()d or * shutdown()ed, and then sk2 has listen()ed on the same port * OR * - sk listen()ed without bind() (or with autobind), was * shutdown()ed, and then listen()s on another port which * sk2 listen()s on. */ if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) { reuse = reuseport_grow(reuse); if (!reuse) return -ENOMEM; } } __reuseport_detach_closed_sock(sk, old_reuse); __reuseport_add_sock(sk, reuse); rcu_assign_pointer(sk->sk_reuseport_cb, reuse); if (old_reuse->num_socks + old_reuse->num_closed_socks == 0) call_rcu(&old_reuse->rcu, reuseport_free_rcu); return 0; } void reuseport_detach_sock(struct sock *sk) { struct sock_reuseport *reuse; spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); /* reuseport_grow() has detached a closed sk */ if (!reuse) goto out; /* Notify the bpf side. The sk may be added to a sockarray * map. If so, sockarray logic will remove it from the map. * * Other bpf map types that work with reuseport, like sockmap, * don't need an explicit callback from here. They override sk * unhash/close ops to remove the sk from the map before we * get to this point. 
*/ bpf_sk_reuseport_detach(sk); rcu_assign_pointer(sk->sk_reuseport_cb, NULL); if (!__reuseport_detach_closed_sock(sk, reuse)) __reuseport_detach_sock(sk, reuse); if (reuse->num_socks + reuse->num_closed_socks == 0) call_rcu(&reuse->rcu, reuseport_free_rcu); out: spin_unlock_bh(&reuseport_lock); } EXPORT_SYMBOL(reuseport_detach_sock); void reuseport_stop_listen_sock(struct sock *sk) { if (sk->sk_protocol == IPPROTO_TCP) { struct sock_reuseport *reuse; struct bpf_prog *prog; spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); prog = rcu_dereference_protected(reuse->prog, lockdep_is_held(&reuseport_lock)); if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) || (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) { /* Migration capable, move sk from the listening section * to the closed section. */ bpf_sk_reuseport_detach(sk); __reuseport_detach_sock(sk, reuse); __reuseport_add_closed_sock(sk, reuse); spin_unlock_bh(&reuseport_lock); return; } spin_unlock_bh(&reuseport_lock); } /* Not capable to do migration, detach immediately */ reuseport_detach_sock(sk); } EXPORT_SYMBOL(reuseport_stop_listen_sock); static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks, struct bpf_prog *prog, struct sk_buff *skb, int hdr_len) { struct sk_buff *nskb = NULL; u32 index; if (skb_shared(skb)) { nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return NULL; skb = nskb; } /* temporarily advance data past protocol header */ if (!pskb_pull(skb, hdr_len)) { kfree_skb(nskb); return NULL; } index = bpf_prog_run_save_cb(prog, skb); __skb_push(skb, hdr_len); consume_skb(nskb); if (index >= socks) return NULL; return reuse->socks[index]; } static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse, u32 hash, u16 num_socks) { struct sock *first_valid_sk = NULL; int i, j; i = j = reciprocal_scale(hash, num_socks); do { struct sock *sk = reuse->socks[i]; if (sk->sk_state != TCP_ESTABLISHED) { /* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */ if (!READ_ONCE(reuse->incoming_cpu)) return sk; /* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) return sk; if (!first_valid_sk) first_valid_sk = sk; } i++; if (i >= num_socks) i = 0; } while (i != j); return first_valid_sk; } /** * reuseport_select_sock - Select a socket from an SO_REUSEPORT group. * @sk: First socket in the group. * @hash: When no BPF filter is available, use this hash to select. * @skb: skb to run through BPF filter. * @hdr_len: BPF filter expects skb data pointer at payload data. If * the skb does not yet point at the payload, this parameter represents * how far the pointer needs to advance to reach the payload. * Returns a socket that should receive the packet (or NULL on error). 
*/ struct sock *reuseport_select_sock(struct sock *sk, u32 hash, struct sk_buff *skb, int hdr_len) { struct sock_reuseport *reuse; struct bpf_prog *prog; struct sock *sk2 = NULL; u16 socks; rcu_read_lock(); reuse = rcu_dereference(sk->sk_reuseport_cb); /* if memory allocation failed or add call is not yet complete */ if (!reuse) goto out; prog = rcu_dereference(reuse->prog); socks = READ_ONCE(reuse->num_socks); if (likely(socks)) { /* paired with smp_wmb() in __reuseport_add_sock() */ smp_rmb(); if (!prog || !skb) goto select_by_hash; if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash); else sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len); select_by_hash: /* no bpf or invalid bpf result: fall back to hash usage */ if (!sk2) sk2 = reuseport_select_sock_by_hash(reuse, hash, socks); } out: rcu_read_unlock(); return sk2; } EXPORT_SYMBOL(reuseport_select_sock); /** * reuseport_migrate_sock - Select a socket from an SO_REUSEPORT group. * @sk: close()ed or shutdown()ed socket in the group. * @migrating_sk: ESTABLISHED/SYN_RECV full socket in the accept queue or * NEW_SYN_RECV request socket during 3WHS. * @skb: skb to run through BPF filter. * Returns a socket (with sk_refcnt +1) that should accept the child socket * (or NULL on error). */ struct sock *reuseport_migrate_sock(struct sock *sk, struct sock *migrating_sk, struct sk_buff *skb) { struct sock_reuseport *reuse; struct sock *nsk = NULL; bool allocated = false; struct bpf_prog *prog; u16 socks; u32 hash; rcu_read_lock(); reuse = rcu_dereference(sk->sk_reuseport_cb); if (!reuse) goto out; socks = READ_ONCE(reuse->num_socks); if (unlikely(!socks)) goto failure; /* paired with smp_wmb() in __reuseport_add_sock() */ smp_rmb(); hash = migrating_sk->sk_hash; prog = rcu_dereference(reuse->prog); if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) { if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req)) goto select_by_hash; goto failure; } if (!skb) { skb = alloc_skb(0, GFP_ATOMIC); if (!skb) goto failure; allocated = true; } nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash); if (allocated) kfree_skb(skb); select_by_hash: if (!nsk) nsk = reuseport_select_sock_by_hash(reuse, hash, socks); if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) { nsk = NULL; goto failure; } out: rcu_read_unlock(); return nsk; failure: __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE); goto out; } EXPORT_SYMBOL(reuseport_migrate_sock); int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog) { struct sock_reuseport *reuse; struct bpf_prog *old_prog; if (sk_unhashed(sk)) { int err; if (!sk->sk_reuseport) return -EINVAL; err = reuseport_alloc(sk, false); if (err) return err; } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) { /* The socket wasn't bound with SO_REUSEPORT */ return -EINVAL; } spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); old_prog = rcu_dereference_protected(reuse->prog, lockdep_is_held(&reuseport_lock)); rcu_assign_pointer(reuse->prog, prog); spin_unlock_bh(&reuseport_lock); sk_reuseport_prog_free(old_prog); return 0; } EXPORT_SYMBOL(reuseport_attach_prog); int reuseport_detach_prog(struct sock *sk) { struct sock_reuseport *reuse; struct bpf_prog *old_prog; old_prog = NULL; spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); /* reuse must be checked after 
acquiring the reuseport_lock * because reuseport_grow() can detach a closed sk. */ if (!reuse) { spin_unlock_bh(&reuseport_lock); return sk->sk_reuseport ? -ENOENT : -EINVAL; } if (sk_unhashed(sk) && reuse->num_closed_socks) { spin_unlock_bh(&reuseport_lock); return -ENOENT; } old_prog = rcu_replace_pointer(reuse->prog, old_prog, lockdep_is_held(&reuseport_lock)); spin_unlock_bh(&reuseport_lock); if (!old_prog) return -ENOENT; sk_reuseport_prog_free(old_prog); return 0; } EXPORT_SYMBOL(reuseport_detach_prog);
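Everything above is the kernel-side bookkeeping for an SO_REUSEPORT group; from userspace the group is formed simply by setting SO_REUSEPORT on each socket before bind(). A hypothetical userspace sketch follows (abbreviated error handling, illustrative names):

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int demo_reuseport_listener(unsigned short port)
{
        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr;

        if (fd < 0)
                return -1;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(port);
        addr.sin_addr.s_addr = htonl(INADDR_ANY);

        /* Must be set on every socket in the group before bind(). */
        if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) ||
            bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
            listen(fd, 128)) {
                close(fd);
                return -1;
        }
        return fd;
}
/* Calling demo_reuseport_listener(8080) twice (in one process or two)
 * places both sockets in the same reuseport group; incoming connections
 * are then distributed by reuseport_select_sock() above. */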
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_CHECKSUM_H #define _BCACHEFS_CHECKSUM_H #include "bcachefs.h" #include "extents_types.h" #include "super-io.h" #include <linux/crc64.h> #include <crypto/chacha.h> static inline bool bch2_checksum_mergeable(unsigned type) { switch (type) { case BCH_CSUM_none: case BCH_CSUM_crc32c: case BCH_CSUM_crc64: return true; default: return false; } } struct bch_csum bch2_checksum_merge(unsigned, struct bch_csum, struct bch_csum, size_t); #define BCH_NONCE_EXTENT cpu_to_le32(1 << 28) #define BCH_NONCE_BTREE cpu_to_le32(2 << 28) #define BCH_NONCE_JOURNAL cpu_to_le32(3 << 28) #define BCH_NONCE_PRIO cpu_to_le32(4 << 28) #define BCH_NONCE_POLY cpu_to_le32(1 << 31) struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce, const void *, size_t); /* * This is used for various on disk data structures - bch_sb, prio_set, bset, * jset: The checksum is _always_ the first field of these structs */ #define csum_vstruct(_c, _type, _nonce, _i) \ ({ \ const void *_start = ((const void *) (_i)) + sizeof((_i)->csum);\ \ bch2_checksum(_c, _type, _nonce, _start, vstruct_end(_i) - _start);\ }) static inline void bch2_csum_to_text(struct printbuf *out, enum bch_csum_type type, struct bch_csum csum) { const u8 *p = (u8 *) &csum; unsigned bytes = type < BCH_CSUM_NR ? bch_crc_bytes[type] : 16; for (unsigned i = 0; i < bytes; i++) prt_hex_byte(out, p[i]); } static inline void bch2_csum_err_msg(struct printbuf *out, enum bch_csum_type type, struct bch_csum expected, struct bch_csum got) { prt_str(out, "checksum error, type "); bch2_prt_csum_type(out, type); prt_str(out, ": got "); bch2_csum_to_text(out, type, got); prt_str(out, " should be "); bch2_csum_to_text(out, type, expected); } int bch2_request_key(struct bch_sb *, struct bch_key *); #ifndef __KERNEL__ int bch2_revoke_key(struct bch_sb *); #endif int bch2_encrypt(struct bch_fs *, unsigned, struct nonce, void *data, size_t); struct bch_csum bch2_checksum_bio(struct bch_fs *, unsigned, struct nonce, struct bio *); int bch2_rechecksum_bio(struct bch_fs *, struct bio *, struct bversion, struct bch_extent_crc_unpacked, struct bch_extent_crc_unpacked *, struct bch_extent_crc_unpacked *, unsigned, unsigned, unsigned); int __bch2_encrypt_bio(struct bch_fs *, unsigned, struct nonce, struct bio *); static inline int bch2_encrypt_bio(struct bch_fs *c, unsigned type, struct nonce nonce, struct bio *bio) { return bch2_csum_type_is_encryption(type) ?
__bch2_encrypt_bio(c, type, nonce, bio) : 0; } extern const struct bch_sb_field_ops bch_sb_field_ops_crypt; int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *, struct bch_key *); #if 0 int bch2_disable_encryption(struct bch_fs *); int bch2_enable_encryption(struct bch_fs *, bool); #endif void bch2_fs_encryption_exit(struct bch_fs *); int bch2_fs_encryption_init(struct bch_fs *); static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opt type, bool data) { switch (type) { case BCH_CSUM_OPT_none: return BCH_CSUM_none; case BCH_CSUM_OPT_crc32c: return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero; case BCH_CSUM_OPT_crc64: return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero; case BCH_CSUM_OPT_xxhash: return BCH_CSUM_xxhash; default: BUG(); } } static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c, struct bch_io_opts opts) { if (opts.nocow) return 0; if (c->sb.encryption_type) return c->opts.wide_macs ? BCH_CSUM_chacha20_poly1305_128 : BCH_CSUM_chacha20_poly1305_80; return bch2_csum_opt_to_type(opts.data_checksum, true); } static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c) { if (c->sb.encryption_type) return BCH_CSUM_chacha20_poly1305_128; return bch2_csum_opt_to_type(c->opts.metadata_checksum, false); } static inline bool bch2_checksum_type_valid(const struct bch_fs *c, unsigned type) { if (type >= BCH_CSUM_NR) return false; if (bch2_csum_type_is_encryption(type) && !c->chacha20_key_set) return false; return true; } /* returns true if not equal */ static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r) { /* * XXX: need some way of preventing the compiler from optimizing this * into a form that isn't constant time.. */ return ((l.lo ^ r.lo) | (l.hi ^ r.hi)) != 0; } /* for skipping ahead and encrypting/decrypting at an offset: */ static inline struct nonce nonce_add(struct nonce nonce, unsigned offset) { EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1)); le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE); return nonce; } static inline struct nonce null_nonce(void) { struct nonce ret; memset(&ret, 0, sizeof(ret)); return ret; } static inline struct nonce extent_nonce(struct bversion version, struct bch_extent_crc_unpacked crc) { unsigned compression_type = crc_is_compressed(crc) ? crc.compression_type : 0; unsigned size = compression_type ? crc.uncompressed_size : 0; struct nonce nonce = (struct nonce) {{ [0] = cpu_to_le32(size << 22), [1] = cpu_to_le32(version.lo), [2] = cpu_to_le32(version.lo >> 32), [3] = cpu_to_le32(version.hi| (compression_type << 24))^BCH_NONCE_EXTENT, }}; return nonce_add(nonce, crc.nonce << 9); } static inline bool bch2_key_is_encrypted(struct bch_encrypted_key *key) { return le64_to_cpu(key->magic) != BCH_KEY_MAGIC; } static inline struct nonce __bch2_sb_key_nonce(struct bch_sb *sb) { __le64 magic = __bch2_sb_magic(sb); return (struct nonce) {{ [0] = 0, [1] = 0, [2] = ((__le32 *) &magic)[0], [3] = ((__le32 *) &magic)[1], }}; } static inline struct nonce bch2_sb_key_nonce(struct bch_fs *c) { __le64 magic = bch2_sb_magic(c); return (struct nonce) {{ [0] = 0, [1] = 0, [2] = ((__le32 *) &magic)[0], [3] = ((__le32 *) &magic)[1], }}; } #endif /* _BCACHEFS_CHECKSUM_H */
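A small sketch of the verification pattern these declarations support: recompute a checksum over a buffer with bch2_checksum() and compare it with bch2_crc_cmp(), which returns true on mismatch. The helper and its arguments are illustrative, not an actual bcachefs call site.

/* Hypothetical verification helper built only on declarations above. */
static inline int demo_verify_csum(struct bch_fs *c, unsigned csum_type,
                                   struct nonce nonce,
                                   const void *buf, size_t len,
                                   struct bch_csum expected)
{
        struct bch_csum got;

        if (!bch2_checksum_type_valid(c, csum_type))
                return -EINVAL;

        got = bch2_checksum(c, csum_type, nonce, buf, len);

        /* bch2_crc_cmp() is "returns true if not equal". */
        return bch2_crc_cmp(got, expected) ? -EIO : 0;
}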
//
SPDX-License-Identifier: 0BSD /* * .xz Stream decoder * * Author: Lasse Collin <lasse.collin@tukaani.org> */ #include "xz_private.h" #include "xz_stream.h" /* Hash used to validate the Index field */ struct xz_dec_hash { vli_type unpadded; vli_type uncompressed; uint32_t crc32; }; struct xz_dec { /* Position in dec_main() */ enum { SEQ_STREAM_HEADER, SEQ_BLOCK_START, SEQ_BLOCK_HEADER, SEQ_BLOCK_UNCOMPRESS, SEQ_BLOCK_PADDING, SEQ_BLOCK_CHECK, SEQ_INDEX, SEQ_INDEX_PADDING, SEQ_INDEX_CRC32, SEQ_STREAM_FOOTER } sequence; /* Position in variable-length integers and Check fields */ uint32_t pos; /* Variable-length integer decoded by dec_vli() */ vli_type vli; /* Saved in_pos and out_pos */ size_t in_start; size_t out_start; /* CRC32 value in Block or Index */ uint32_t crc32; /* Type of the integrity check calculated from uncompressed data */ enum xz_check check_type; /* Operation mode */ enum xz_mode mode; /* * True if the next call to xz_dec_run() is allowed to return * XZ_BUF_ERROR. */ bool allow_buf_error; /* Information stored in Block Header */ struct { /* * Value stored in the Compressed Size field, or * VLI_UNKNOWN if Compressed Size is not present. */ vli_type compressed; /* * Value stored in the Uncompressed Size field, or * VLI_UNKNOWN if Uncompressed Size is not present. */ vli_type uncompressed; /* Size of the Block Header field */ uint32_t size; } block_header; /* Information collected when decoding Blocks */ struct { /* Observed compressed size of the current Block */ vli_type compressed; /* Observed uncompressed size of the current Block */ vli_type uncompressed; /* Number of Blocks decoded so far */ vli_type count; /* * Hash calculated from the Block sizes. This is used to * validate the Index field. */ struct xz_dec_hash hash; } block; /* Variables needed when verifying the Index field */ struct { /* Position in dec_index() */ enum { SEQ_INDEX_COUNT, SEQ_INDEX_UNPADDED, SEQ_INDEX_UNCOMPRESSED } sequence; /* Size of the Index in bytes */ vli_type size; /* Number of Records (matches block.count in valid files) */ vli_type count; /* * Hash calculated from the Records (matches block.hash in * valid files). */ struct xz_dec_hash hash; } index; /* * Temporary buffer needed to hold Stream Header, Block Header, * and Stream Footer. The Block Header is the biggest (1 KiB) * so we reserve space according to that. buf[] has to be aligned * to a multiple of four bytes; the size_t variables before it * should guarantee this. */ struct { size_t pos; size_t size; uint8_t buf[1024]; } temp; struct xz_dec_lzma2 *lzma2; #ifdef XZ_DEC_BCJ struct xz_dec_bcj *bcj; bool bcj_active; #endif }; #ifdef XZ_DEC_ANY_CHECK /* Sizes of the Check field with different Check IDs */ static const uint8_t check_sizes[16] = { 0, 4, 4, 4, 8, 8, 8, 16, 16, 16, 32, 32, 32, 64, 64, 64 }; #endif /* * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller * must have set s->temp.pos to indicate how much data we are supposed * to copy into s->temp.buf. Return true once s->temp.pos has reached * s->temp.size. 
*/ static bool fill_temp(struct xz_dec *s, struct xz_buf *b) { size_t copy_size = min_t(size_t, b->in_size - b->in_pos, s->temp.size - s->temp.pos); memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); b->in_pos += copy_size; s->temp.pos += copy_size; if (s->temp.pos == s->temp.size) { s->temp.pos = 0; return true; } return false; } /* Decode a variable-length integer (little-endian base-128 encoding) */ static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in, size_t *in_pos, size_t in_size) { uint8_t byte; if (s->pos == 0) s->vli = 0; while (*in_pos < in_size) { byte = in[*in_pos]; ++*in_pos; s->vli |= (vli_type)(byte & 0x7F) << s->pos; if ((byte & 0x80) == 0) { /* Don't allow non-minimal encodings. */ if (byte == 0 && s->pos != 0) return XZ_DATA_ERROR; s->pos = 0; return XZ_STREAM_END; } s->pos += 7; if (s->pos == 7 * VLI_BYTES_MAX) return XZ_DATA_ERROR; } return XZ_OK; } /* * Decode the Compressed Data field from a Block. Update and validate * the observed compressed and uncompressed sizes of the Block so that * they don't exceed the values possibly stored in the Block Header * (validation assumes that no integer overflow occurs, since vli_type * is normally uint64_t). Update the CRC32 if presence of the CRC32 * field was indicated in Stream Header. * * Once the decoding is finished, validate that the observed sizes match * the sizes possibly stored in the Block Header. Update the hash and * Block count, which are later used to validate the Index field. */ static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b) { enum xz_ret ret; s->in_start = b->in_pos; s->out_start = b->out_pos; #ifdef XZ_DEC_BCJ if (s->bcj_active) ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); else #endif ret = xz_dec_lzma2_run(s->lzma2, b); s->block.compressed += b->in_pos - s->in_start; s->block.uncompressed += b->out_pos - s->out_start; /* * There is no need to separately check for VLI_UNKNOWN, since * the observed sizes are always smaller than VLI_UNKNOWN. */ if (s->block.compressed > s->block_header.compressed || s->block.uncompressed > s->block_header.uncompressed) return XZ_DATA_ERROR; if (s->check_type == XZ_CHECK_CRC32) s->crc32 = xz_crc32(b->out + s->out_start, b->out_pos - s->out_start, s->crc32); if (ret == XZ_STREAM_END) { if (s->block_header.compressed != VLI_UNKNOWN && s->block_header.compressed != s->block.compressed) return XZ_DATA_ERROR; if (s->block_header.uncompressed != VLI_UNKNOWN && s->block_header.uncompressed != s->block.uncompressed) return XZ_DATA_ERROR; s->block.hash.unpadded += s->block_header.size + s->block.compressed; #ifdef XZ_DEC_ANY_CHECK s->block.hash.unpadded += check_sizes[s->check_type]; #else if (s->check_type == XZ_CHECK_CRC32) s->block.hash.unpadded += 4; #endif s->block.hash.uncompressed += s->block.uncompressed; s->block.hash.crc32 = xz_crc32( (const uint8_t *)&s->block.hash, sizeof(s->block.hash), s->block.hash.crc32); ++s->block.count; } return ret; } /* Update the Index size and the CRC32 value. */ static void index_update(struct xz_dec *s, const struct xz_buf *b) { size_t in_used = b->in_pos - s->in_start; s->index.size += in_used; s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32); } /* * Decode the Number of Records, Unpadded Size, and Uncompressed Size * fields from the Index field. That is, Index Padding and CRC32 are not * decoded by this function. * * This can return XZ_OK (more input needed), XZ_STREAM_END (everything * successfully decoded), or XZ_DATA_ERROR (input is corrupt). 
*/ static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b) { enum xz_ret ret; do { ret = dec_vli(s, b->in, &b->in_pos, b->in_size); if (ret != XZ_STREAM_END) { index_update(s, b); return ret; } switch (s->index.sequence) { case SEQ_INDEX_COUNT: s->index.count = s->vli; /* * Validate that the Number of Records field * indicates the same number of Records as * there were Blocks in the Stream. */ if (s->index.count != s->block.count) return XZ_DATA_ERROR; s->index.sequence = SEQ_INDEX_UNPADDED; break; case SEQ_INDEX_UNPADDED: s->index.hash.unpadded += s->vli; s->index.sequence = SEQ_INDEX_UNCOMPRESSED; break; case SEQ_INDEX_UNCOMPRESSED: s->index.hash.uncompressed += s->vli; s->index.hash.crc32 = xz_crc32( (const uint8_t *)&s->index.hash, sizeof(s->index.hash), s->index.hash.crc32); --s->index.count; s->index.sequence = SEQ_INDEX_UNPADDED; break; } } while (s->index.count > 0); return XZ_STREAM_END; } /* * Validate that the next four input bytes match the value of s->crc32. * s->pos must be zero when starting to validate the first byte. */ static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b) { do { if (b->in_pos == b->in_size) return XZ_OK; if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++]) return XZ_DATA_ERROR; s->pos += 8; } while (s->pos < 32); s->crc32 = 0; s->pos = 0; return XZ_STREAM_END; } #ifdef XZ_DEC_ANY_CHECK /* * Skip over the Check field when the Check ID is not supported. * Returns true once the whole Check field has been skipped over. */ static bool check_skip(struct xz_dec *s, struct xz_buf *b) { while (s->pos < check_sizes[s->check_type]) { if (b->in_pos == b->in_size) return false; ++b->in_pos; ++s->pos; } s->pos = 0; return true; } #endif /* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */ static enum xz_ret dec_stream_header(struct xz_dec *s) { if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE)) return XZ_FORMAT_ERROR; if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2)) return XZ_DATA_ERROR; if (s->temp.buf[HEADER_MAGIC_SIZE] != 0) return XZ_OPTIONS_ERROR; /* * Of integrity checks, we support only none (Check ID = 0) and * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined, * we will accept other check types too, but then the check won't * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given. */ if (s->temp.buf[HEADER_MAGIC_SIZE + 1] > XZ_CHECK_MAX) return XZ_OPTIONS_ERROR; s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; #ifdef XZ_DEC_ANY_CHECK if (s->check_type > XZ_CHECK_CRC32) return XZ_UNSUPPORTED_CHECK; #else if (s->check_type > XZ_CHECK_CRC32) return XZ_OPTIONS_ERROR; #endif return XZ_OK; } /* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */ static enum xz_ret dec_stream_footer(struct xz_dec *s) { if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE)) return XZ_DATA_ERROR; if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf)) return XZ_DATA_ERROR; /* * Validate Backward Size. Note that we never added the size of the * Index CRC32 field to s->index.size, thus we use s->index.size / 4 * instead of s->index.size / 4 - 1. */ if ((s->index.size >> 2) != get_le32(s->temp.buf + 4)) return XZ_DATA_ERROR; if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type) return XZ_DATA_ERROR; /* * Use XZ_STREAM_END instead of XZ_OK to be more convenient * for the caller. */ return XZ_STREAM_END; } /* Decode the Block Header and initialize the filter chain. 
*/ static enum xz_ret dec_block_header(struct xz_dec *s) { enum xz_ret ret; /* * Validate the CRC32. We know that the temp buffer is at least * eight bytes so this is safe. */ s->temp.size -= 4; if (xz_crc32(s->temp.buf, s->temp.size, 0) != get_le32(s->temp.buf + s->temp.size)) return XZ_DATA_ERROR; s->temp.pos = 2; /* * Catch unsupported Block Flags. We support only one or two filters * in the chain, so we catch that with the same test. */ #ifdef XZ_DEC_BCJ if (s->temp.buf[1] & 0x3E) #else if (s->temp.buf[1] & 0x3F) #endif return XZ_OPTIONS_ERROR; /* Compressed Size */ if (s->temp.buf[1] & 0x40) { if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END) return XZ_DATA_ERROR; s->block_header.compressed = s->vli; } else { s->block_header.compressed = VLI_UNKNOWN; } /* Uncompressed Size */ if (s->temp.buf[1] & 0x80) { if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) != XZ_STREAM_END) return XZ_DATA_ERROR; s->block_header.uncompressed = s->vli; } else { s->block_header.uncompressed = VLI_UNKNOWN; } #ifdef XZ_DEC_BCJ /* If there are two filters, the first one must be a BCJ filter. */ s->bcj_active = s->temp.buf[1] & 0x01; if (s->bcj_active) { if (s->temp.size - s->temp.pos < 2) return XZ_OPTIONS_ERROR; ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); if (ret != XZ_OK) return ret; /* * We don't support custom start offset, * so Size of Properties must be zero. */ if (s->temp.buf[s->temp.pos++] != 0x00) return XZ_OPTIONS_ERROR; } #endif /* Valid Filter Flags always take at least two bytes. */ if (s->temp.size - s->temp.pos < 2) return XZ_DATA_ERROR; /* Filter ID = LZMA2 */ if (s->temp.buf[s->temp.pos++] != 0x21) return XZ_OPTIONS_ERROR; /* Size of Properties = 1-byte Filter Properties */ if (s->temp.buf[s->temp.pos++] != 0x01) return XZ_OPTIONS_ERROR; /* Filter Properties contains LZMA2 dictionary size. */ if (s->temp.size - s->temp.pos < 1) return XZ_DATA_ERROR; ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); if (ret != XZ_OK) return ret; /* The rest must be Header Padding. */ while (s->temp.pos < s->temp.size) if (s->temp.buf[s->temp.pos++] != 0x00) return XZ_OPTIONS_ERROR; s->temp.pos = 0; s->block.compressed = 0; s->block.uncompressed = 0; return XZ_OK; } static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) { enum xz_ret ret; /* * Store the start position for the case when we are in the middle * of the Index field. */ s->in_start = b->in_pos; while (true) { switch (s->sequence) { case SEQ_STREAM_HEADER: /* * Stream Header is copied to s->temp, and then * decoded from there. This way if the caller * gives us only little input at a time, we can * still keep the Stream Header decoding code * simple. Similar approach is used in many places * in this file. */ if (!fill_temp(s, b)) return XZ_OK; /* * If dec_stream_header() returns * XZ_UNSUPPORTED_CHECK, it is still possible * to continue decoding if working in multi-call * mode. Thus, update s->sequence before calling * dec_stream_header(). */ s->sequence = SEQ_BLOCK_START; ret = dec_stream_header(s); if (ret != XZ_OK) return ret; fallthrough; case SEQ_BLOCK_START: /* We need one byte of input to continue. */ if (b->in_pos == b->in_size) return XZ_OK; /* See if this is the beginning of the Index field. */ if (b->in[b->in_pos] == 0) { s->in_start = b->in_pos++; s->sequence = SEQ_INDEX; break; } /* * Calculate the size of the Block Header and * prepare to decode it. 
*/ s->block_header.size = ((uint32_t)b->in[b->in_pos] + 1) * 4; s->temp.size = s->block_header.size; s->temp.pos = 0; s->sequence = SEQ_BLOCK_HEADER; fallthrough; case SEQ_BLOCK_HEADER: if (!fill_temp(s, b)) return XZ_OK; ret = dec_block_header(s); if (ret != XZ_OK) return ret; s->sequence = SEQ_BLOCK_UNCOMPRESS; fallthrough; case SEQ_BLOCK_UNCOMPRESS: ret = dec_block(s, b); if (ret != XZ_STREAM_END) return ret; s->sequence = SEQ_BLOCK_PADDING; fallthrough; case SEQ_BLOCK_PADDING: /* * Size of Compressed Data + Block Padding * must be a multiple of four. We don't need * s->block.compressed for anything else * anymore, so we use it here to test the size * of the Block Padding field. */ while (s->block.compressed & 3) { if (b->in_pos == b->in_size) return XZ_OK; if (b->in[b->in_pos++] != 0) return XZ_DATA_ERROR; ++s->block.compressed; } s->sequence = SEQ_BLOCK_CHECK; fallthrough; case SEQ_BLOCK_CHECK: if (s->check_type == XZ_CHECK_CRC32) { ret = crc32_validate(s, b); if (ret != XZ_STREAM_END) return ret; } #ifdef XZ_DEC_ANY_CHECK else if (!check_skip(s, b)) { return XZ_OK; } #endif s->sequence = SEQ_BLOCK_START; break; case SEQ_INDEX: ret = dec_index(s, b); if (ret != XZ_STREAM_END) return ret; s->sequence = SEQ_INDEX_PADDING; fallthrough; case SEQ_INDEX_PADDING: while ((s->index.size + (b->in_pos - s->in_start)) & 3) { if (b->in_pos == b->in_size) { index_update(s, b); return XZ_OK; } if (b->in[b->in_pos++] != 0) return XZ_DATA_ERROR; } /* Finish the CRC32 value and Index size. */ index_update(s, b); /* Compare the hashes to validate the Index field. */ if (!memeq(&s->block.hash, &s->index.hash, sizeof(s->block.hash))) return XZ_DATA_ERROR; s->sequence = SEQ_INDEX_CRC32; fallthrough; case SEQ_INDEX_CRC32: ret = crc32_validate(s, b); if (ret != XZ_STREAM_END) return ret; s->temp.size = STREAM_HEADER_SIZE; s->sequence = SEQ_STREAM_FOOTER; fallthrough; case SEQ_STREAM_FOOTER: if (!fill_temp(s, b)) return XZ_OK; return dec_stream_footer(s); } } /* Never reached */ } /* * xz_dec_run() is a wrapper for dec_main() to handle some special cases in * multi-call and single-call decoding. * * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we * are not going to make any progress anymore. This is to prevent the caller * from calling us infinitely when the input file is truncated or otherwise * corrupt. Since zlib-style API allows that the caller fills the input buffer * only when the decoder doesn't produce any new output, we have to be careful * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only * after the second consecutive call to xz_dec_run() that makes no progress. * * In single-call mode, if we couldn't decode everything and no error * occurred, either the input is truncated or the output buffer is too small. * Since we know that the last input byte never produces any output, we know * that if all the input was consumed and decoding wasn't finished, the file * must be corrupt. Otherwise the output buffer has to be too small or the * file is corrupt in a way that decoding it produces too big output. * * If single-call decoding fails, we reset b->in_pos and b->out_pos back to * their original values. This is because with some filter chains there won't * be any valid uncompressed data in the output buffer unless the decoding * actually succeeds (that's the price to pay of using the output buffer as * the workspace). 
*/ enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b) { size_t in_start; size_t out_start; enum xz_ret ret; if (DEC_IS_SINGLE(s->mode)) xz_dec_reset(s); in_start = b->in_pos; out_start = b->out_pos; ret = dec_main(s, b); if (DEC_IS_SINGLE(s->mode)) { if (ret == XZ_OK) ret = b->in_pos == b->in_size ? XZ_DATA_ERROR : XZ_BUF_ERROR; if (ret != XZ_STREAM_END) { b->in_pos = in_start; b->out_pos = out_start; } } else if (ret == XZ_OK && in_start == b->in_pos && out_start == b->out_pos) { if (s->allow_buf_error) ret = XZ_BUF_ERROR; s->allow_buf_error = true; } else { s->allow_buf_error = false; } return ret; } struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max) { struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL); if (s == NULL) return NULL; s->mode = mode; #ifdef XZ_DEC_BCJ s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode)); if (s->bcj == NULL) goto error_bcj; #endif s->lzma2 = xz_dec_lzma2_create(mode, dict_max); if (s->lzma2 == NULL) goto error_lzma2; xz_dec_reset(s); return s; error_lzma2: #ifdef XZ_DEC_BCJ xz_dec_bcj_end(s->bcj); error_bcj: #endif kfree(s); return NULL; } void xz_dec_reset(struct xz_dec *s) { s->sequence = SEQ_STREAM_HEADER; s->allow_buf_error = false; s->pos = 0; s->crc32 = 0; memzero(&s->block, sizeof(s->block)); memzero(&s->index, sizeof(s->index)); s->temp.pos = 0; s->temp.size = STREAM_HEADER_SIZE; } void xz_dec_end(struct xz_dec *s) { if (s != NULL) { xz_dec_lzma2_end(s->lzma2); #ifdef XZ_DEC_BCJ xz_dec_bcj_end(s->bcj); #endif kfree(s); } }
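A hedged sketch of the single-call usage that the xz_dec_run() comment describes: the whole compressed image and a large-enough output buffer are handed over at once, so anything other than XZ_STREAM_END is treated as failure. It assumes CRC32 support is already available (builds with XZ_INTERNAL_CRC32 enabled must call xz_crc32_init() first); the demo_* name is illustrative.

#include <linux/errno.h>
#include <linux/xz.h>

static int demo_unxz_single(const u8 *in, size_t in_size,
                            u8 *out, size_t out_size)
{
        struct xz_buf b = {
                .in = in,   .in_pos = 0,  .in_size = in_size,
                .out = out, .out_pos = 0, .out_size = out_size,
        };
        struct xz_dec *s;
        enum xz_ret ret;

        /* dict_max is ignored in single-call mode; the output buffer
         * doubles as the LZMA2 dictionary. */
        s = xz_dec_init(XZ_SINGLE, 0);
        if (!s)
                return -ENOMEM;

        ret = xz_dec_run(s, &b);
        xz_dec_end(s);

        /* Only XZ_STREAM_END means the whole stream decoded cleanly. */
        return ret == XZ_STREAM_END ? 0 : -EINVAL;
}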
// SPDX-License-Identifier: GPL-2.0-or-later /* Linux driver for Philips webcam Decompression for chipset version 1 (C) 2004-2006 Luc Saillard (luc@saillard.org) NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx driver and thus may have bugs that are not present in the original version. Please send bug reports and support requests to <luc@saillard.org>. The decompression routines have been implemented by reverse-engineering the Nemosoft binary pwcx module. Caveat emptor. */ #include "pwc.h" void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd) { struct pwc_dec1_private *pdec = &pdev->dec1; pdec->version = pdev->release; }
// SPDX-License-Identifier: GPL-2.0 /* * Some IBSS support code for cfg80211.
* * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2020-2024 Intel Corporation */ #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/export.h> #include <net/cfg80211.h> #include "wext-compat.h" #include "nl80211.h" #include "rdev-ops.h" void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, struct ieee80211_channel *channel) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_bss *bss; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return; if (!wdev->u.ibss.ssid_len) return; bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY_ANY); if (WARN_ON(!bss)) return; if (wdev->u.ibss.current_bss) { cfg80211_unhold_bss(wdev->u.ibss.current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->u.ibss.current_bss->pub); } cfg80211_hold_bss(bss_from_pub(bss)); wdev->u.ibss.current_bss = bss_from_pub(bss); cfg80211_upload_connect_keys(wdev); nl80211_send_ibss_bssid(wiphy_to_rdev(wdev->wiphy), dev, bssid, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT memset(&wrqu, 0, sizeof(wrqu)); memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); #endif } void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, struct ieee80211_channel *channel, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; trace_cfg80211_ibss_joined(dev, bssid, channel); if (WARN_ON(!channel)) return; ev = kzalloc(sizeof(*ev), gfp); if (!ev) return; ev->type = EVENT_IBSS_JOINED; memcpy(ev->ij.bssid, bssid, ETH_ALEN); ev->ij.channel = channel; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_ibss_joined); int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ibss_params *params, struct cfg80211_cached_keys *connkeys) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; lockdep_assert_held(&rdev->wiphy.mtx); if (wdev->links[0].cac_started) return -EBUSY; if (wdev->u.ibss.ssid_len) return -EALREADY; if (!params->basic_rates) { /* * If no rates were explicitly configured, * use the mandatory rate set for 11b or * 11a for maximum compatibility. 
*/ struct ieee80211_supported_band *sband; enum nl80211_band band; u32 flag; int j; band = params->chandef.chan->band; if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) flag = IEEE80211_RATE_MANDATORY_A; else flag = IEEE80211_RATE_MANDATORY_B; sband = rdev->wiphy.bands[band]; for (j = 0; j < sband->n_bitrates; j++) { if (sband->bitrates[j].flags & flag) params->basic_rates |= BIT(j); } } if (WARN_ON(connkeys && connkeys->def < 0)) return -EINVAL; if (WARN_ON(wdev->connect_keys)) kfree_sensitive(wdev->connect_keys); wdev->connect_keys = connkeys; wdev->u.ibss.chandef = params->chandef; if (connkeys) { params->wep_keys = connkeys->params; params->wep_tx_key = connkeys->def; } #ifdef CONFIG_CFG80211_WEXT wdev->wext.ibss.chandef = params->chandef; #endif err = rdev_join_ibss(rdev, dev, params); if (err) { wdev->connect_keys = NULL; return err; } memcpy(wdev->u.ibss.ssid, params->ssid, params->ssid_len); wdev->u.ibss.ssid_len = params->ssid_len; return 0; } void cfg80211_clear_ibss(struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int i; lockdep_assert_wiphy(wdev->wiphy); kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; rdev_set_qos_map(rdev, dev, NULL); /* * Delete all the keys ... pairwise keys can't really * exist any more anyway, but default keys might. */ if (rdev->ops->del_key) for (i = 0; i < 6; i++) rdev_del_key(rdev, dev, -1, i, false, NULL); if (wdev->u.ibss.current_bss) { cfg80211_unhold_bss(wdev->u.ibss.current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->u.ibss.current_bss->pub); } wdev->u.ibss.current_bss = NULL; wdev->u.ibss.ssid_len = 0; memset(&wdev->u.ibss.chandef, 0, sizeof(wdev->u.ibss.chandef)); #ifdef CONFIG_CFG80211_WEXT if (!nowext) wdev->wext.ibss.ssid_len = 0; #endif cfg80211_sched_dfs_chan_update(rdev); } int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; lockdep_assert_wiphy(wdev->wiphy); if (!wdev->u.ibss.ssid_len) return -ENOLINK; err = rdev_leave_ibss(rdev, dev); if (err) return err; wdev->conn_owner_nlportid = 0; cfg80211_clear_ibss(dev, nowext); return 0; } #ifdef CONFIG_CFG80211_WEXT int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct cfg80211_cached_keys *ck = NULL; enum nl80211_band band; int i, err; lockdep_assert_wiphy(wdev->wiphy); if (!wdev->wext.ibss.beacon_interval) wdev->wext.ibss.beacon_interval = 100; /* try to find an IBSS channel if none requested ... 
*/ if (!wdev->wext.ibss.chandef.chan) { struct ieee80211_channel *new_chan = NULL; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; sband = rdev->wiphy.bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; if (chan->flags & IEEE80211_CHAN_NO_IR) continue; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; new_chan = chan; break; } if (new_chan) break; } if (!new_chan) return -EINVAL; cfg80211_chandef_create(&wdev->wext.ibss.chandef, new_chan, NL80211_CHAN_NO_HT); } /* don't join -- SSID is not there */ if (!wdev->wext.ibss.ssid_len) return 0; if (!netif_running(wdev->netdev)) return 0; if (wdev->wext.keys) wdev->wext.keys->def = wdev->wext.default_key; wdev->wext.ibss.privacy = wdev->wext.default_key != -1; if (wdev->wext.keys && wdev->wext.keys->def != -1) { ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); if (!ck) return -ENOMEM; for (i = 0; i < 4; i++) ck->params[i].key = ck->data[i]; } err = __cfg80211_join_ibss(rdev, wdev->netdev, &wdev->wext.ibss, ck); if (err) kfree(ck); return err; } int cfg80211_ibss_wext_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wextfreq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_channel *chan = NULL; int err, freq; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; freq = cfg80211_wext_freq(wextfreq); if (freq < 0) return freq; if (freq) { chan = ieee80211_get_channel(wdev->wiphy, freq); if (!chan) return -EINVAL; if (chan->flags & IEEE80211_CHAN_NO_IR || chan->flags & IEEE80211_CHAN_DISABLED) return -EINVAL; } if (wdev->wext.ibss.chandef.chan == chan) return 0; err = 0; if (wdev->u.ibss.ssid_len) err = cfg80211_leave_ibss(rdev, dev, true); if (err) return err; if (chan) { cfg80211_chandef_create(&wdev->wext.ibss.chandef, chan, NL80211_CHAN_NO_HT); wdev->wext.ibss.channel_fixed = true; } else { /* cfg80211_ibss_wext_join will pick one if needed */ wdev->wext.ibss.channel_fixed = false; } return cfg80211_ibss_wext_join(rdev, wdev); } int cfg80211_ibss_wext_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_channel *chan = NULL; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (wdev->u.ibss.current_bss) chan = wdev->u.ibss.current_bss->pub.channel; else if (wdev->wext.ibss.chandef.chan) chan = wdev->wext.ibss.chandef.chan; if (chan) { freq->m = chan->center_freq; freq->e = 6; return 0; } /* no channel if not joining */ return -EINVAL; } int cfg80211_ibss_wext_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); size_t len = data->length; int err; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; err = 0; if (wdev->u.ibss.ssid_len) err = cfg80211_leave_ibss(rdev, dev, true); if (err) return err; /* iwconfig uses nul termination in SSID.. 
*/ if (len > 0 && ssid[len - 1] == '\0') len--; memcpy(wdev->u.ibss.ssid, ssid, len); wdev->wext.ibss.ssid = wdev->u.ibss.ssid; wdev->wext.ibss.ssid_len = len; return cfg80211_ibss_wext_join(rdev, wdev); } int cfg80211_ibss_wext_giwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; data->flags = 0; if (wdev->u.ibss.ssid_len) { data->flags = 1; data->length = wdev->u.ibss.ssid_len; memcpy(ssid, wdev->u.ibss.ssid, data->length); } else if (wdev->wext.ibss.ssid && wdev->wext.ibss.ssid_len) { data->flags = 1; data->length = wdev->wext.ibss.ssid_len; memcpy(ssid, wdev->wext.ibss.ssid, data->length); } return 0; } int cfg80211_ibss_wext_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *bssid = ap_addr->sa_data; int err; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; if (ap_addr->sa_family != ARPHRD_ETHER) return -EINVAL; /* automatic mode */ if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) bssid = NULL; if (bssid && !is_valid_ether_addr(bssid)) return -EINVAL; /* both automatic */ if (!bssid && !wdev->wext.ibss.bssid) return 0; /* fixed already - and no change */ if (wdev->wext.ibss.bssid && bssid && ether_addr_equal(bssid, wdev->wext.ibss.bssid)) return 0; err = 0; if (wdev->u.ibss.ssid_len) err = cfg80211_leave_ibss(rdev, dev, true); if (err) return err; if (bssid) { memcpy(wdev->wext.bssid, bssid, ETH_ALEN); wdev->wext.ibss.bssid = wdev->wext.bssid; } else wdev->wext.ibss.bssid = NULL; return cfg80211_ibss_wext_join(rdev, wdev); } int cfg80211_ibss_wext_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; ap_addr->sa_family = ARPHRD_ETHER; if (wdev->u.ibss.current_bss) memcpy(ap_addr->sa_data, wdev->u.ibss.current_bss->pub.bssid, ETH_ALEN); else if (wdev->wext.ibss.bssid) memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN); else eth_zero_addr(ap_addr->sa_data); return 0; } #endif
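As a usage illustration only (a hedged sketch, not part of ibss.c): a cfg80211-based driver is expected to call the exported cfg80211_ibss_joined() helper from its join-completion path, which queues the EVENT_IBSS_JOINED work that __cfg80211_ibss_joined() above then processes. The mydrv_* names below are hypothetical.

#include <net/cfg80211.h>

struct mydrv_vif {			/* hypothetical driver state */
	struct net_device *ndev;
};

static void mydrv_ibss_join_complete(struct mydrv_vif *vif, const u8 *bssid,
				     struct ieee80211_channel *chan)
{
	/*
	 * Safe to call from an atomic completion handler: the helper only
	 * allocates and queues an event, deferring the real work to
	 * rdev->event_work.
	 */
	cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_ATOMIC);
}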
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NDISC_H #define _NDISC_H #include <net/ipv6_stubs.h> /* * ICMP codes for neighbour discovery messages */ #define NDISC_ROUTER_SOLICITATION 133 #define NDISC_ROUTER_ADVERTISEMENT 134 #define NDISC_NEIGHBOUR_SOLICITATION 135 #define NDISC_NEIGHBOUR_ADVERTISEMENT 136 #define NDISC_REDIRECT 137 /* * Router type: cross-layer information from link-layer to * IPv6 layer reported by certain link types (e.g., RFC4214). */ #define NDISC_NODETYPE_UNSPEC 0 /* unspecified (default) */ #define NDISC_NODETYPE_HOST 1 /* host or unauthorized router */ #define NDISC_NODETYPE_NODEFAULT 2 /* non-default router */ #define NDISC_NODETYPE_DEFAULT 3 /* default router */ /* * ndisc options */ enum { __ND_OPT_PREFIX_INFO_END = 0, ND_OPT_SOURCE_LL_ADDR = 1, /* RFC2461 */ ND_OPT_TARGET_LL_ADDR = 2, /* RFC2461 */ ND_OPT_PREFIX_INFO = 3, /* RFC2461 */ ND_OPT_REDIRECT_HDR = 4, /* RFC2461 */ ND_OPT_MTU = 5, /* RFC2461 */ ND_OPT_NONCE = 14, /* RFC7527 */ __ND_OPT_ARRAY_MAX, ND_OPT_ROUTE_INFO = 24, /* RFC4191 */ ND_OPT_RDNSS = 25, /* RFC5006 */ ND_OPT_DNSSL = 31, /* RFC6106 */ ND_OPT_6CO = 34, /* RFC6775 */ ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */ ND_OPT_PREF64 = 38, /* RFC8781 */ __ND_OPT_MAX }; #define MAX_RTR_SOLICITATION_DELAY HZ #define ND_REACHABLE_TIME (30*HZ) #define ND_RETRANS_TIMER HZ #include <linux/compiler.h> #include <linux/icmpv6.h> #include <linux/in6.h> #include <linux/types.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/hash.h> #include <net/neighbour.h> /* Set to 3 to get tracing... */ #define ND_DEBUG 1 #define ND_PRINTK(val, level, fmt, ...)
\ do { \ if (val <= ND_DEBUG) \ net_##level##_ratelimited(fmt, ##__VA_ARGS__); \ } while (0) struct ctl_table; struct inet6_dev; struct net_device; struct net_proto_family; struct sk_buff; struct prefix_info; extern struct neigh_table nd_tbl; struct nd_msg { struct icmp6hdr icmph; struct in6_addr target; __u8 opt[]; }; struct rs_msg { struct icmp6hdr icmph; __u8 opt[]; }; struct ra_msg { struct icmp6hdr icmph; __be32 reachable_time; __be32 retrans_timer; }; struct rd_msg { struct icmp6hdr icmph; struct in6_addr target; struct in6_addr dest; __u8 opt[]; }; struct nd_opt_hdr { __u8 nd_opt_type; __u8 nd_opt_len; } __packed; /* ND options */ struct ndisc_options { struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX]; #ifdef CONFIG_IPV6_ROUTE_INFO struct nd_opt_hdr *nd_opts_ri; struct nd_opt_hdr *nd_opts_ri_end; #endif struct nd_opt_hdr *nd_useropts; struct nd_opt_hdr *nd_useropts_end; #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) struct nd_opt_hdr *nd_802154_opt_array[ND_OPT_TARGET_LL_ADDR + 1]; #endif }; #define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR] #define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR] #define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO] #define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END] #define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR] #define nd_opts_mtu nd_opt_array[ND_OPT_MTU] #define nd_opts_nonce nd_opt_array[ND_OPT_NONCE] #define nd_802154_opts_src_lladdr nd_802154_opt_array[ND_OPT_SOURCE_LL_ADDR] #define nd_802154_opts_tgt_lladdr nd_802154_opt_array[ND_OPT_TARGET_LL_ADDR] #define NDISC_OPT_SPACE(len) (((len)+2+7)&~7) struct ndisc_options *ndisc_parse_options(const struct net_device *dev, u8 *opt, int opt_len, struct ndisc_options *ndopts); void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data, int data_len, int pad); #define NDISC_OPS_REDIRECT_DATA_SPACE 2 /* * This structure defines the hooks for IPv6 neighbour discovery. * The following hooks can be defined; unless noted otherwise, they are * optional and can be filled with a null pointer. * * int (*parse_options)(const struct net_device *dev, * struct nd_opt_hdr *nd_opt, * struct ndisc_options *ndopts): * This function is called while parsing the ndisc options and puts each * recognized option's position as a pointer into ndopts. If this function * returns non-zero, it has taken care of the ndisc option; if it returns 0, * the IPv6 ndisc option parser will take care of that option. * * void (*update)(const struct net_device *dev, struct neighbour *n, * u32 flags, u8 icmp6_type, * const struct ndisc_options *ndopts): * This function is called when IPv6 ndisc updates the neighbour cache * entry. Additional options which can be updated may have been parsed * previously by the parse_options callback and are accessible via the * ndopts parameter. * * int (*opt_addr_space)(const struct net_device *dev, u8 icmp6_type, * struct neighbour *neigh, u8 *ha_buf, * u8 **ha): * This function is called when the necessary option space is calculated * before allocating an skb. The parameters neigh, ha_buf * and ha are available on NDISC_REDIRECT messages only. * * void (*fill_addr_option)(const struct net_device *dev, * struct sk_buff *skb, u8 icmp6_type, * const u8 *ha): * This function is called when the option fields are finally filled into * the skb. NOTE: this callback should fill the option fields in the skb * whose size was previously indicated by the opt_addr_space callback. * That means the decision to add such an option must not be lost between * these two callbacks, e.g. by protecting it with the interface * up state.
* * void (*prefix_rcv_add_addr)(struct net *net, struct net_device *dev, * const struct prefix_info *pinfo, * struct inet6_dev *in6_dev, * struct in6_addr *addr, * int addr_type, u32 addr_flags, * bool sllao, bool tokenized, * __u32 valid_lft, u32 prefered_lft, * bool dev_addr_generated): * This function is called when a RA messages is received with valid * PIO option fields and an IPv6 address will be added to the interface * for autoconfiguration. The parameter dev_addr_generated reports about * if the address was based on dev->dev_addr or not. This can be used * to add a second address if link-layer operates with two link layer * addresses. E.g. 802.15.4 6LoWPAN. */ struct ndisc_ops { int (*parse_options)(const struct net_device *dev, struct nd_opt_hdr *nd_opt, struct ndisc_options *ndopts); void (*update)(const struct net_device *dev, struct neighbour *n, u32 flags, u8 icmp6_type, const struct ndisc_options *ndopts); int (*opt_addr_space)(const struct net_device *dev, u8 icmp6_type, struct neighbour *neigh, u8 *ha_buf, u8 **ha); void (*fill_addr_option)(const struct net_device *dev, struct sk_buff *skb, u8 icmp6_type, const u8 *ha); void (*prefix_rcv_add_addr)(struct net *net, struct net_device *dev, const struct prefix_info *pinfo, struct inet6_dev *in6_dev, struct in6_addr *addr, int addr_type, u32 addr_flags, bool sllao, bool tokenized, __u32 valid_lft, u32 prefered_lft, bool dev_addr_generated); }; #if IS_ENABLED(CONFIG_IPV6) static inline int ndisc_ops_parse_options(const struct net_device *dev, struct nd_opt_hdr *nd_opt, struct ndisc_options *ndopts) { if (dev->ndisc_ops && dev->ndisc_ops->parse_options) return dev->ndisc_ops->parse_options(dev, nd_opt, ndopts); else return 0; } static inline void ndisc_ops_update(const struct net_device *dev, struct neighbour *n, u32 flags, u8 icmp6_type, const struct ndisc_options *ndopts) { if (dev->ndisc_ops && dev->ndisc_ops->update) dev->ndisc_ops->update(dev, n, flags, icmp6_type, ndopts); } static inline int ndisc_ops_opt_addr_space(const struct net_device *dev, u8 icmp6_type) { if (dev->ndisc_ops && dev->ndisc_ops->opt_addr_space && icmp6_type != NDISC_REDIRECT) return dev->ndisc_ops->opt_addr_space(dev, icmp6_type, NULL, NULL, NULL); else return 0; } static inline int ndisc_ops_redirect_opt_addr_space(const struct net_device *dev, struct neighbour *neigh, u8 *ha_buf, u8 **ha) { if (dev->ndisc_ops && dev->ndisc_ops->opt_addr_space) return dev->ndisc_ops->opt_addr_space(dev, NDISC_REDIRECT, neigh, ha_buf, ha); else return 0; } static inline void ndisc_ops_fill_addr_option(const struct net_device *dev, struct sk_buff *skb, u8 icmp6_type) { if (dev->ndisc_ops && dev->ndisc_ops->fill_addr_option && icmp6_type != NDISC_REDIRECT) dev->ndisc_ops->fill_addr_option(dev, skb, icmp6_type, NULL); } static inline void ndisc_ops_fill_redirect_addr_option(const struct net_device *dev, struct sk_buff *skb, const u8 *ha) { if (dev->ndisc_ops && dev->ndisc_ops->fill_addr_option) dev->ndisc_ops->fill_addr_option(dev, skb, NDISC_REDIRECT, ha); } static inline void ndisc_ops_prefix_rcv_add_addr(struct net *net, struct net_device *dev, const struct prefix_info *pinfo, struct inet6_dev *in6_dev, struct in6_addr *addr, int addr_type, u32 addr_flags, bool sllao, bool tokenized, __u32 valid_lft, u32 prefered_lft, bool dev_addr_generated) { if (dev->ndisc_ops && dev->ndisc_ops->prefix_rcv_add_addr) dev->ndisc_ops->prefix_rcv_add_addr(net, dev, pinfo, in6_dev, addr, addr_type, addr_flags, sllao, tokenized, valid_lft, prefered_lft, dev_addr_generated); } #endif /* * 
Return the padding between the option length and the start of the * link addr. Currently only IP-over-InfiniBand needs this, although * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may * also need a pad of 2. */ static inline int ndisc_addr_option_pad(unsigned short type) { switch (type) { case ARPHRD_INFINIBAND: return 2; default: return 0; } } static inline int __ndisc_opt_addr_space(unsigned char addr_len, int pad) { return NDISC_OPT_SPACE(addr_len + pad); } #if IS_ENABLED(CONFIG_IPV6) static inline int ndisc_opt_addr_space(struct net_device *dev, u8 icmp6_type) { return __ndisc_opt_addr_space(dev->addr_len, ndisc_addr_option_pad(dev->type)) + ndisc_ops_opt_addr_space(dev, icmp6_type); } static inline int ndisc_redirect_opt_addr_space(struct net_device *dev, struct neighbour *neigh, u8 *ops_data_buf, u8 **ops_data) { return __ndisc_opt_addr_space(dev->addr_len, ndisc_addr_option_pad(dev->type)) + ndisc_ops_redirect_opt_addr_space(dev, neigh, ops_data_buf, ops_data); } #endif static inline u8 *__ndisc_opt_addr_data(struct nd_opt_hdr *p, unsigned char addr_len, int prepad) { u8 *lladdr = (u8 *)(p + 1); int lladdrlen = p->nd_opt_len << 3; if (lladdrlen != __ndisc_opt_addr_space(addr_len, prepad)) return NULL; return lladdr + prepad; } static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p, struct net_device *dev) { return __ndisc_opt_addr_data(p, dev->addr_len, ndisc_addr_option_pad(dev->type)); } static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd) { const u32 *p32 = pkey; return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) + (p32[1] * hash_rnd[1]) + (p32[2] * hash_rnd[2]) + (p32[3] * hash_rnd[3])); } static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey) { return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev); } static inline struct neighbour *__ipv6_neigh_lookup_noref_stub(struct net_device *dev, const void *pkey) { return ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev); } static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey) { struct neighbour *n; rcu_read_lock(); n = __ipv6_neigh_lookup_noref(dev, pkey); if (n && !refcount_inc_not_zero(&n->refcnt)) n = NULL; rcu_read_unlock(); return n; } static inline void __ipv6_confirm_neigh(struct net_device *dev, const void *pkey) { struct neighbour *n; rcu_read_lock(); n = __ipv6_neigh_lookup_noref(dev, pkey); neigh_confirm(n); rcu_read_unlock(); } static inline void __ipv6_confirm_neigh_stub(struct net_device *dev, const void *pkey) { struct neighbour *n; rcu_read_lock(); n = __ipv6_neigh_lookup_noref_stub(dev, pkey); neigh_confirm(n); rcu_read_unlock(); } /* uses ipv6_stub and is meant for use outside of IPv6 core */ static inline struct neighbour *ip_neigh_gw6(struct net_device *dev, const void *addr) { struct neighbour *neigh; neigh = __ipv6_neigh_lookup_noref_stub(dev, addr); if (unlikely(!neigh)) neigh = __neigh_create(ipv6_stub->nd_tbl, addr, dev, false); return neigh; } int ndisc_init(void); int ndisc_late_init(void); void ndisc_late_cleanup(void); void ndisc_cleanup(void); enum skb_drop_reason ndisc_rcv(struct sk_buff *skb); struct sk_buff *ndisc_ns_create(struct net_device *dev, const struct in6_addr *solicit, const struct in6_addr *saddr, u64 nonce); void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, const struct in6_addr *daddr, const struct in6_addr *saddr, u64 nonce); void ndisc_send_skb(struct sk_buff 
*skb, const struct in6_addr *daddr, const struct in6_addr *saddr); void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr); void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr, const struct in6_addr *solicited_addr, bool router, bool solicited, bool override, bool inc_opt); void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target); int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir); void ndisc_update(const struct net_device *dev, struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags, u8 icmp6_type, struct ndisc_options *ndopts); /* * IGMP */ int igmp6_init(void); int igmp6_late_init(void); void igmp6_cleanup(void); void igmp6_late_cleanup(void); void igmp6_event_query(struct sk_buff *skb); void igmp6_event_report(struct sk_buff *skb); #ifdef CONFIG_SYSCTL int ndisc_ifinfo_sysctl_change(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); #endif void inet6_ifinfo_notify(int event, struct inet6_dev *idev); #endif
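A minimal sketch, for illustration only (the example_* helper is hypothetical and assumes CONFIG_IPV6 is enabled): how a caller might use the parsing helpers declared above to extract the source link-layer address option from a received neighbour solicitation.

#include <net/ndisc.h>

static u8 *example_get_src_lladdr(struct net_device *dev, u8 *opt, int opt_len,
				  struct ndisc_options *ndopts)
{
	if (!ndisc_parse_options(dev, opt, opt_len, ndopts))
		return NULL;			/* malformed option block */

	if (!ndopts->nd_opts_src_lladdr)
		return NULL;			/* option not present */

	/*
	 * ndisc_opt_addr_data() re-checks nd_opt_len against the device
	 * address length plus the per-link-type pad; e.g. for Ethernet
	 * NDISC_OPT_SPACE(6) == 8, so a valid source lladdr option is one
	 * 8-byte unit (nd_opt_len == 1).
	 */
	return ndisc_opt_addr_data(ndopts->nd_opts_src_lladdr, dev);
}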
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021 Hans de Goede <hdegoede@redhat.com> * * Driver for the LetSketch / VSON WP9620N drawing tablet. * This drawing tablet is also sold under other brand names such as Case U, * presumably this driver will work for all of them. But it has only been * tested with a LetSketch WP9620N model. * * These tablets also work without a special HID driver, but then only part * of the active area works and both the pad and stylus buttons are hardwired * to special key-combos. E.g. the 2 stylus buttons send right mouse clicks / * resp. "e" key presses. * * This device has 4 USB interfaces: * * Interface 0 EP 0x81 bootclass mouse, rdesc len 18, report id 0x08, * Application(ff00.0001) * This interface sends raw event input reports in a custom format, but only * after doing the special dance from letsketch_probe(). After enabling this * interface the other 3 interfaces are disabled. * * Interface 1 EP 0x82 bootclass mouse, rdesc len 83, report id 0x0a, Tablet * This interface sends absolute events for the pen, including pressure, * but only for some part of the active area due to special "aspect ratio" * correction and only half by default since it assumes it will be used * with a phone in portrait mode, while using the tablet in landscape mode. * Also stylus + pad button events are not reported here.
* * Interface 2 EP 0x83 bootclass keybd, rdesc len 64, report id none, Std Kbd * This interface sends various hard-coded key-combos for the pad buttons * and "e" keypresses for the 2nd stylus button * * Interface 3 EP 0x84 bootclass mouse, rdesc len 75, report id 0x01, Std Mouse * This reports right-click mouse-button events for the 1st stylus button */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/usb.h> #include <linux/unaligned.h> #include "hid-ids.h" #define LETSKETCH_RAW_IF 0 #define LETSKETCH_RAW_DATA_LEN 12 #define LETSKETCH_RAW_REPORT_ID 8 #define LETSKETCH_PAD_BUTTONS 5 #define LETSKETCH_INFO_STR_IDX_BEGIN 0xc8 #define LETSKETCH_INFO_STR_IDX_END 0xca #define LETSKETCH_GET_STRING_RETRIES 5 struct letsketch_data { struct hid_device *hdev; struct input_dev *input_tablet; struct input_dev *input_tablet_pad; struct timer_list inrange_timer; }; static int letsketch_open(struct input_dev *dev) { struct letsketch_data *data = input_get_drvdata(dev); return hid_hw_open(data->hdev); } static void letsketch_close(struct input_dev *dev) { struct letsketch_data *data = input_get_drvdata(dev); hid_hw_close(data->hdev); } static struct input_dev *letsketch_alloc_input_dev(struct letsketch_data *data) { struct input_dev *input; input = devm_input_allocate_device(&data->hdev->dev); if (!input) return NULL; input->id.bustype = data->hdev->bus; input->id.vendor = data->hdev->vendor; input->id.product = data->hdev->product; input->id.version = data->hdev->bus; input->phys = data->hdev->phys; input->uniq = data->hdev->uniq; input->open = letsketch_open; input->close = letsketch_close; input_set_drvdata(input, data); return input; } static int letsketch_setup_input_tablet(struct letsketch_data *data) { struct input_dev *input; input = letsketch_alloc_input_dev(data); if (!input) return -ENOMEM; input_set_abs_params(input, ABS_X, 0, 50800, 0, 0); input_set_abs_params(input, ABS_Y, 0, 31750, 0, 0); input_set_abs_params(input, ABS_PRESSURE, 0, 8192, 0, 0); input_abs_set_res(input, ABS_X, 240); input_abs_set_res(input, ABS_Y, 225); input_set_capability(input, EV_KEY, BTN_TOUCH); input_set_capability(input, EV_KEY, BTN_TOOL_PEN); input_set_capability(input, EV_KEY, BTN_STYLUS); input_set_capability(input, EV_KEY, BTN_STYLUS2); /* All known brands selling this tablet use WP9620[N] as model name */ input->name = "WP9620 Tablet"; data->input_tablet = input; return input_register_device(data->input_tablet); } static int letsketch_setup_input_tablet_pad(struct letsketch_data *data) { struct input_dev *input; int i; input = letsketch_alloc_input_dev(data); if (!input) return -ENOMEM; for (i = 0; i < LETSKETCH_PAD_BUTTONS; i++) input_set_capability(input, EV_KEY, BTN_0 + i); /* * These are never sent on the pad input_dev, but must be set * on the Pad to make udev / libwacom happy.
*/ input_set_abs_params(input, ABS_X, 0, 1, 0, 0); input_set_abs_params(input, ABS_Y, 0, 1, 0, 0); input_set_capability(input, EV_KEY, BTN_STYLUS); input->name = "WP9620 Pad"; data->input_tablet_pad = input; return input_register_device(data->input_tablet_pad); } static void letsketch_inrange_timeout(struct timer_list *t) { struct letsketch_data *data = from_timer(data, t, inrange_timer); struct input_dev *input = data->input_tablet; input_report_key(input, BTN_TOOL_PEN, 0); input_sync(input); } static int letsketch_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *raw_data, int size) { struct letsketch_data *data = hid_get_drvdata(hdev); struct input_dev *input; int i; if (size != LETSKETCH_RAW_DATA_LEN || raw_data[0] != LETSKETCH_RAW_REPORT_ID) return 0; switch (raw_data[1] & 0xf0) { case 0x80: /* Pen data */ input = data->input_tablet; input_report_key(input, BTN_TOOL_PEN, 1); input_report_key(input, BTN_TOUCH, raw_data[1] & 0x01); input_report_key(input, BTN_STYLUS, raw_data[1] & 0x02); input_report_key(input, BTN_STYLUS2, raw_data[1] & 0x04); input_report_abs(input, ABS_X, get_unaligned_le16(raw_data + 2)); input_report_abs(input, ABS_Y, get_unaligned_le16(raw_data + 4)); input_report_abs(input, ABS_PRESSURE, get_unaligned_le16(raw_data + 6)); /* * There is no out-of-range event, so use a timer for this; * while in range we get an event approx. every 8 ms. */ mod_timer(&data->inrange_timer, jiffies + msecs_to_jiffies(100)); break; case 0xe0: /* Pad data */ input = data->input_tablet_pad; for (i = 0; i < LETSKETCH_PAD_BUTTONS; i++) input_report_key(input, BTN_0 + i, raw_data[4] == (i + 1)); break; default: hid_warn(data->hdev, "Warning unknown data header: 0x%02x\n", raw_data[0]); return 0; } input_sync(input); return 0; } /* * The tablet's magic handshake to put it in raw mode relies on getting * string descriptors. But the firmware is buggy and does not like it if * we do this too fast. Even if we go slow, sometimes the usb_string() call * fails. Ignore errors and retry it a couple of times if necessary. */ static int letsketch_get_string(struct usb_device *udev, int index, char *buf, int size) { int i, ret; for (i = 0; i < LETSKETCH_GET_STRING_RETRIES; i++) { usleep_range(5000, 7000); ret = usb_string(udev, index, buf, size); if (ret > 0) return 0; } dev_err(&udev->dev, "Max retries (%d) exceeded reading string descriptor %d\n", LETSKETCH_GET_STRING_RETRIES, index); return ret ? ret : -EIO; } static int letsketch_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct device *dev = &hdev->dev; struct letsketch_data *data; struct usb_interface *intf; struct usb_device *udev; char buf[256]; int i, ret; if (!hid_is_usb(hdev)) return -ENODEV; intf = to_usb_interface(hdev->dev.parent); if (intf->altsetting->desc.bInterfaceNumber != LETSKETCH_RAW_IF) return -ENODEV; /* Ignore the other interfaces */ udev = interface_to_usbdev(intf); /* * Instead of using a set-feature request, or even a custom USB ctrl * message, the tablet needs this elaborate magic reading of USB * string descriptors to kick it into raw mode. This is what the * Windows drivers are seen doing in a USB trace under Windows.
*/ for (i = LETSKETCH_INFO_STR_IDX_BEGIN; i <= LETSKETCH_INFO_STR_IDX_END; i++) { ret = letsketch_get_string(udev, i, buf, sizeof(buf)); if (ret) return ret; hid_info(hdev, "Device info: %s\n", buf); } for (i = 1; i <= 250; i++) { ret = letsketch_get_string(udev, i, buf, sizeof(buf)); if (ret) return ret; } ret = letsketch_get_string(udev, 0x64, buf, sizeof(buf)); if (ret) return ret; ret = letsketch_get_string(udev, LETSKETCH_INFO_STR_IDX_BEGIN, buf, sizeof(buf)); if (ret) return ret; /* * The tablet should be in raw mode now, end with a final delay before * doing further IO to the device. */ usleep_range(5000, 7000); ret = hid_parse(hdev); if (ret) return ret; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->hdev = hdev; timer_setup(&data->inrange_timer, letsketch_inrange_timeout, 0); hid_set_drvdata(hdev, data); ret = letsketch_setup_input_tablet(data); if (ret) return ret; ret = letsketch_setup_input_tablet_pad(data); if (ret) return ret; return hid_hw_start(hdev, HID_CONNECT_HIDRAW); } static const struct hid_device_id letsketch_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LETSKETCH, USB_DEVICE_ID_WP9620N) }, { } }; MODULE_DEVICE_TABLE(hid, letsketch_devices); static struct hid_driver letsketch_driver = { .name = "letsketch", .id_table = letsketch_devices, .probe = letsketch_probe, .raw_event = letsketch_raw_event, }; module_hid_driver(letsketch_driver); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Driver for the LetSketch / VSON WP9620N drawing tablet"); MODULE_LICENSE("GPL");
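Purely as an illustration of the 12-byte raw report format that letsketch_raw_event() parses above (the struct and helper below are hypothetical and not part of the driver), a pen packet could be decoded like this:

#include <linux/types.h>
#include <linux/unaligned.h>

struct example_pen_sample {		/* hypothetical, for illustration */
	bool touch, stylus, stylus2;
	u16 x, y, pressure;
};

/*
 * Decode a report that the driver treats as pen data: raw[0] == 0x08
 * (report id) and (raw[1] & 0xf0) == 0x80.
 */
static void example_decode_pen(const u8 *raw, struct example_pen_sample *s)
{
	s->touch    = raw[1] & 0x01;
	s->stylus   = raw[1] & 0x02;
	s->stylus2  = raw[1] & 0x04;
	s->x        = get_unaligned_le16(raw + 2);	/* 0..50800 */
	s->y        = get_unaligned_le16(raw + 4);	/* 0..31750 */
	s->pressure = get_unaligned_le16(raw + 6);	/* 0..8192 */
}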
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2019 Oracle. All Rights Reserved. * Author: Darrick J. Wong <darrick.wong@oracle.com> */ #ifndef __XFS_HEALTH_H__ #define __XFS_HEALTH_H__ struct xfs_group; /* * In-Core Filesystem Health Assessments * ===================================== * * We'd like to be able to summarize the current health status of the * filesystem so that the administrator knows when it's necessary to schedule * some downtime for repairs. Until then, we would also like to avoid abrupt * shutdowns due to corrupt metadata. * * The online scrub feature evaluates the health of all filesystem metadata. * When scrub detects corruption in a piece of metadata it will set the * corresponding sickness flag, and repair will clear it if successful. If * problems remain at unmount time, we can also request manual intervention by * logging a notice to run xfs_repair. * * Each health tracking group uses a pair of fields for reporting. The * "checked" field tells us if a given piece of metadata has ever been examined, * and the "sick" field tells us if that piece was found to need repairs. * Therefore we can conclude that for a given sick flag value: * * - checked && sick => metadata needs repair * - checked && !sick => metadata is ok * - !checked && sick => errors have been observed during normal operation, * but the metadata has not been checked thoroughly * - !checked && !sick => has not been examined since mount * * Evidence of health problems can be sorted into three basic categories: * * a) Primary evidence, which signals that something is defective within the * general grouping of metadata. * * b) Secondary evidence, which are side effects of a primary problem but are * not themselves problems. These can be forgotten when the primary * health problems are addressed. * * c) Indirect evidence, which points to something being wrong in another * group, but we had to release resources and this is all that's left of * that state. */ struct xfs_mount; struct xfs_perag; struct xfs_inode; struct xfs_fsop_geom; struct xfs_btree_cur; struct xfs_da_args; struct xfs_rtgroup; /* Observable health issues for metadata spanning the entire filesystem.
*/ #define XFS_SICK_FS_COUNTERS (1 << 0) /* summary counters */ #define XFS_SICK_FS_UQUOTA (1 << 1) /* user quota */ #define XFS_SICK_FS_GQUOTA (1 << 2) /* group quota */ #define XFS_SICK_FS_PQUOTA (1 << 3) /* project quota */ #define XFS_SICK_FS_QUOTACHECK (1 << 4) /* quota counts */ #define XFS_SICK_FS_NLINKS (1 << 5) /* inode link counts */ #define XFS_SICK_FS_METADIR (1 << 6) /* metadata directory tree */ #define XFS_SICK_FS_METAPATH (1 << 7) /* metadata directory tree path */ /* Observable health issues for realtime group metadata. */ #define XFS_SICK_RG_SUPER (1 << 0) /* rt group superblock */ #define XFS_SICK_RG_BITMAP (1 << 1) /* rt group bitmap */ #define XFS_SICK_RG_SUMMARY (1 << 2) /* rt groups summary */ #define XFS_SICK_RG_RMAPBT (1 << 3) /* reverse mappings */ #define XFS_SICK_RG_REFCNTBT (1 << 4) /* reference counts */ /* Observable health issues for AG metadata. */ #define XFS_SICK_AG_SB (1 << 0) /* superblock */ #define XFS_SICK_AG_AGF (1 << 1) /* AGF header */ #define XFS_SICK_AG_AGFL (1 << 2) /* AGFL header */ #define XFS_SICK_AG_AGI (1 << 3) /* AGI header */ #define XFS_SICK_AG_BNOBT (1 << 4) /* free space by block */ #define XFS_SICK_AG_CNTBT (1 << 5) /* free space by length */ #define XFS_SICK_AG_INOBT (1 << 6) /* inode index */ #define XFS_SICK_AG_FINOBT (1 << 7) /* free inode index */ #define XFS_SICK_AG_RMAPBT (1 << 8) /* reverse mappings */ #define XFS_SICK_AG_REFCNTBT (1 << 9) /* reference counts */ #define XFS_SICK_AG_INODES (1 << 10) /* inactivated bad inodes */ /* Observable health issues for inode metadata. */ #define XFS_SICK_INO_CORE (1 << 0) /* inode core */ #define XFS_SICK_INO_BMBTD (1 << 1) /* data fork */ #define XFS_SICK_INO_BMBTA (1 << 2) /* attr fork */ #define XFS_SICK_INO_BMBTC (1 << 3) /* cow fork */ #define XFS_SICK_INO_DIR (1 << 4) /* directory */ #define XFS_SICK_INO_XATTR (1 << 5) /* extended attributes */ #define XFS_SICK_INO_SYMLINK (1 << 6) /* symbolic link remote target */ #define XFS_SICK_INO_PARENT (1 << 7) /* parent pointers */ #define XFS_SICK_INO_BMBTD_ZAPPED (1 << 8) /* data fork erased */ #define XFS_SICK_INO_BMBTA_ZAPPED (1 << 9) /* attr fork erased */ #define XFS_SICK_INO_DIR_ZAPPED (1 << 10) /* directory erased */ #define XFS_SICK_INO_SYMLINK_ZAPPED (1 << 11) /* symlink erased */ /* Don't propagate sick status to ag health summary during inactivation */ #define XFS_SICK_INO_FORGET (1 << 12) #define XFS_SICK_INO_DIRTREE (1 << 13) /* directory tree structure */ /* Primary evidence of health problems in a given group. 
*/ #define XFS_SICK_FS_PRIMARY (XFS_SICK_FS_COUNTERS | \ XFS_SICK_FS_UQUOTA | \ XFS_SICK_FS_GQUOTA | \ XFS_SICK_FS_PQUOTA | \ XFS_SICK_FS_QUOTACHECK | \ XFS_SICK_FS_NLINKS | \ XFS_SICK_FS_METADIR | \ XFS_SICK_FS_METAPATH) #define XFS_SICK_RG_PRIMARY (XFS_SICK_RG_SUPER | \ XFS_SICK_RG_BITMAP | \ XFS_SICK_RG_SUMMARY | \ XFS_SICK_RG_RMAPBT | \ XFS_SICK_RG_REFCNTBT) #define XFS_SICK_AG_PRIMARY (XFS_SICK_AG_SB | \ XFS_SICK_AG_AGF | \ XFS_SICK_AG_AGFL | \ XFS_SICK_AG_AGI | \ XFS_SICK_AG_BNOBT | \ XFS_SICK_AG_CNTBT | \ XFS_SICK_AG_INOBT | \ XFS_SICK_AG_FINOBT | \ XFS_SICK_AG_RMAPBT | \ XFS_SICK_AG_REFCNTBT) #define XFS_SICK_INO_PRIMARY (XFS_SICK_INO_CORE | \ XFS_SICK_INO_BMBTD | \ XFS_SICK_INO_BMBTA | \ XFS_SICK_INO_BMBTC | \ XFS_SICK_INO_DIR | \ XFS_SICK_INO_XATTR | \ XFS_SICK_INO_SYMLINK | \ XFS_SICK_INO_PARENT | \ XFS_SICK_INO_DIRTREE) #define XFS_SICK_INO_ZAPPED (XFS_SICK_INO_BMBTD_ZAPPED | \ XFS_SICK_INO_BMBTA_ZAPPED | \ XFS_SICK_INO_DIR_ZAPPED | \ XFS_SICK_INO_SYMLINK_ZAPPED) /* Secondary state related to (but not primary evidence of) health problems. */ #define XFS_SICK_FS_SECONDARY (0) #define XFS_SICK_RG_SECONDARY (0) #define XFS_SICK_AG_SECONDARY (0) #define XFS_SICK_INO_SECONDARY (XFS_SICK_INO_FORGET) /* Evidence of health problems elsewhere. */ #define XFS_SICK_FS_INDIRECT (0) #define XFS_SICK_RG_INDIRECT (0) #define XFS_SICK_AG_INDIRECT (XFS_SICK_AG_INODES) #define XFS_SICK_INO_INDIRECT (0) /* All health masks. */ #define XFS_SICK_FS_ALL (XFS_SICK_FS_PRIMARY | \ XFS_SICK_FS_SECONDARY | \ XFS_SICK_FS_INDIRECT) #define XFS_SICK_RG_ALL (XFS_SICK_RG_PRIMARY | \ XFS_SICK_RG_SECONDARY | \ XFS_SICK_RG_INDIRECT) #define XFS_SICK_AG_ALL (XFS_SICK_AG_PRIMARY | \ XFS_SICK_AG_SECONDARY | \ XFS_SICK_AG_INDIRECT) #define XFS_SICK_INO_ALL (XFS_SICK_INO_PRIMARY | \ XFS_SICK_INO_SECONDARY | \ XFS_SICK_INO_INDIRECT | \ XFS_SICK_INO_ZAPPED) /* * These functions must be provided by the xfs implementation. Function * behavior with respect to the first argument should be as follows: * * xfs_*_mark_sick: Set the sick flags and do not set checked flags. * Runtime code should call this upon encountering * a corruption. * * xfs_*_mark_corrupt: Set the sick and checked flags simultaneously. * Fsck tools should call this when corruption is * found. * * xfs_*_mark_healthy: Clear the sick flags and set the checked flags. * Fsck tools should call this after correcting errors. * * xfs_*_measure_sickness: Return the sick and check status in the provided * out parameters. 
*/ void xfs_fs_mark_sick(struct xfs_mount *mp, unsigned int mask); void xfs_fs_mark_corrupt(struct xfs_mount *mp, unsigned int mask); void xfs_fs_mark_healthy(struct xfs_mount *mp, unsigned int mask); void xfs_fs_measure_sickness(struct xfs_mount *mp, unsigned int *sick, unsigned int *checked); void xfs_rgno_mark_sick(struct xfs_mount *mp, xfs_rgnumber_t rgno, unsigned int mask); void xfs_agno_mark_sick(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int mask); void xfs_group_mark_sick(struct xfs_group *xg, unsigned int mask); #define xfs_ag_mark_sick(pag, mask) \ xfs_group_mark_sick(pag_group(pag), (mask)) void xfs_group_mark_corrupt(struct xfs_group *xg, unsigned int mask); void xfs_group_mark_healthy(struct xfs_group *xg, unsigned int mask); void xfs_group_measure_sickness(struct xfs_group *xg, unsigned int *sick, unsigned int *checked); void xfs_inode_mark_sick(struct xfs_inode *ip, unsigned int mask); void xfs_inode_mark_corrupt(struct xfs_inode *ip, unsigned int mask); void xfs_inode_mark_healthy(struct xfs_inode *ip, unsigned int mask); void xfs_inode_measure_sickness(struct xfs_inode *ip, unsigned int *sick, unsigned int *checked); void xfs_health_unmount(struct xfs_mount *mp); void xfs_bmap_mark_sick(struct xfs_inode *ip, int whichfork); void xfs_btree_mark_sick(struct xfs_btree_cur *cur); void xfs_dirattr_mark_sick(struct xfs_inode *ip, int whichfork); void xfs_da_mark_sick(struct xfs_da_args *args); /* Now some helpers. */ static inline bool xfs_fs_has_sickness(struct xfs_mount *mp, unsigned int mask) { unsigned int sick, checked; xfs_fs_measure_sickness(mp, &sick, &checked); return sick & mask; } static inline bool xfs_group_has_sickness( struct xfs_group *xg, unsigned int mask) { unsigned int sick, checked; xfs_group_measure_sickness(xg, &sick, &checked); return sick & mask; } #define xfs_ag_has_sickness(pag, mask) \ xfs_group_has_sickness(pag_group(pag), (mask)) #define xfs_ag_is_healthy(pag) \ (!xfs_ag_has_sickness((pag), UINT_MAX)) #define xfs_rtgroup_has_sickness(rtg, mask) \ xfs_group_has_sickness(rtg_group(rtg), (mask)) #define xfs_rtgroup_is_healthy(rtg) \ (!xfs_rtgroup_has_sickness((rtg), UINT_MAX)) static inline bool xfs_inode_has_sickness(struct xfs_inode *ip, unsigned int mask) { unsigned int sick, checked; xfs_inode_measure_sickness(ip, &sick, &checked); return sick & mask; } static inline bool xfs_fs_is_healthy(struct xfs_mount *mp) { return !xfs_fs_has_sickness(mp, -1U); } static inline bool xfs_inode_is_healthy(struct xfs_inode *ip) { return !xfs_inode_has_sickness(ip, -1U); } void xfs_fsop_geom_health(struct xfs_mount *mp, struct xfs_fsop_geom *geo); void xfs_ag_geom_health(struct xfs_perag *pag, struct xfs_ag_geometry *ageo); void xfs_rtgroup_geom_health(struct xfs_rtgroup *rtg, struct xfs_rtgroup_geometry *rgeo); void xfs_bulkstat_health(struct xfs_inode *ip, struct xfs_bulkstat *bs); #define xfs_metadata_is_sick(error) \ (unlikely((error) == -EFSCORRUPTED || (error) == -EFSBADCRC)) #endif /* __XFS_HEALTH_H__ */
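To illustrate the contract spelled out above (a hedged sketch only; the example_* function is hypothetical), runtime code that trips over a corrupt free-space btree might report it like this:

static int example_note_bnobt_corruption(struct xfs_perag *pag, int error)
{
	if (xfs_metadata_is_sick(error))
		/*
		 * Runtime corruption: set only the sick flag and leave the
		 * checked flag for scrub/repair to manage.
		 */
		xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);

	return error;
}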
// SPDX-License-Identifier: GPL-2.0-only #include <linux/fault-inject.h> #include <linux/fault-inject-usercopy.h> static struct { struct fault_attr attr; } fail_usercopy = { .attr = FAULT_ATTR_INITIALIZER, }; static int __init setup_fail_usercopy(char *str) { return setup_fault_attr(&fail_usercopy.attr, str); } __setup("fail_usercopy=", setup_fail_usercopy); #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS static int __init fail_usercopy_debugfs(void) { struct dentry *dir; dir = fault_create_debugfs_attr("fail_usercopy", NULL, &fail_usercopy.attr); if (IS_ERR(dir)) return PTR_ERR(dir); return 0; } late_initcall(fail_usercopy_debugfs); #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ bool should_fail_usercopy(void) { return should_fail(&fail_usercopy.attr, 1); } EXPORT_SYMBOL_GPL(should_fail_usercopy);
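A call-site sketch, for illustration only (the example_* wrapper is hypothetical; the real consumers live in the uaccess/usercopy code): should_fail_usercopy() is meant to be consulted on the user-copy path so that a simulated failure looks like a short copy. Injection can then be enabled via the "fail_usercopy=" boot parameter or the debugfs attributes created above.

#include <linux/fault-inject-usercopy.h>
#include <linux/uaccess.h>

static inline unsigned long
example_copy_from_user(void *dst, const void __user *src, unsigned long n)
{
	if (should_fail_usercopy())
		return n;		/* simulate: nothing was copied */

	return copy_from_user(dst, src, n);
}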
// SPDX-License-Identifier: GPL-2.0-only /* * drivers/net/veth.c * * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc * * Author: Pavel Emelianov <xemul@openvz.org> * Ethtool interface from: Eric W.
Biederman <ebiederm@xmission.com> * */ #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/u64_stats_sync.h> #include <net/rtnetlink.h> #include <net/dst.h> #include <net/netdev_lock.h> #include <net/xfrm.h> #include <net/xdp.h> #include <linux/veth.h> #include <linux/module.h> #include <linux/bpf.h> #include <linux/filter.h> #include <linux/ptr_ring.h> #include <linux/bpf_trace.h> #include <linux/net_tstamp.h> #include <linux/skbuff_ref.h> #include <net/page_pool/helpers.h> #define DRV_NAME "veth" #define DRV_VERSION "1.0" #define VETH_XDP_FLAG BIT(0) #define VETH_RING_SIZE 256 #define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN) #define VETH_XDP_TX_BULK_SIZE 16 #define VETH_XDP_BATCH 16 struct veth_stats { u64 rx_drops; /* xdp */ u64 xdp_packets; u64 xdp_bytes; u64 xdp_redirect; u64 xdp_drops; u64 xdp_tx; u64 xdp_tx_err; u64 peer_tq_xdp_xmit; u64 peer_tq_xdp_xmit_err; }; struct veth_rq_stats { struct veth_stats vs; struct u64_stats_sync syncp; }; struct veth_rq { struct napi_struct xdp_napi; struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */ struct net_device *dev; struct bpf_prog __rcu *xdp_prog; struct xdp_mem_info xdp_mem; struct veth_rq_stats stats; bool rx_notify_masked; struct ptr_ring xdp_ring; struct xdp_rxq_info xdp_rxq; struct page_pool *page_pool; }; struct veth_priv { struct net_device __rcu *peer; atomic64_t dropped; struct bpf_prog *_xdp_prog; struct veth_rq *rq; unsigned int requested_headroom; }; struct veth_xdp_tx_bq { struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE]; unsigned int count; }; /* * ethtool interface */ struct veth_q_stat_desc { char desc[ETH_GSTRING_LEN]; size_t offset; }; #define VETH_RQ_STAT(m) offsetof(struct veth_stats, m) static const struct veth_q_stat_desc veth_rq_stats_desc[] = { { "xdp_packets", VETH_RQ_STAT(xdp_packets) }, { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) }, { "drops", VETH_RQ_STAT(rx_drops) }, { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) }, { "xdp_drops", VETH_RQ_STAT(xdp_drops) }, { "xdp_tx", VETH_RQ_STAT(xdp_tx) }, { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) }, }; #define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc) static const struct veth_q_stat_desc veth_tq_stats_desc[] = { { "xdp_xmit", VETH_RQ_STAT(peer_tq_xdp_xmit) }, { "xdp_xmit_errors", VETH_RQ_STAT(peer_tq_xdp_xmit_err) }, }; #define VETH_TQ_STATS_LEN ARRAY_SIZE(veth_tq_stats_desc) static struct { const char string[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "peer_ifindex" }, }; struct veth_xdp_buff { struct xdp_buff xdp; struct sk_buff *skb; }; static int veth_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { cmd->base.speed = SPEED_10000; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_TP; cmd->base.autoneg = AUTONEG_DISABLE; return 0; } static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); } static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { u8 *p = buf; int i, j; switch(stringset) { case ETH_SS_STATS: memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); p += sizeof(ethtool_stats_keys); for (i = 0; i < dev->real_num_rx_queues; i++) for (j = 0; j < VETH_RQ_STATS_LEN; j++) ethtool_sprintf(&p, "rx_queue_%u_%.18s", i, veth_rq_stats_desc[j].desc); for (i = 0; i < dev->real_num_tx_queues; i++) for (j = 0; j < VETH_TQ_STATS_LEN; j++) 
ethtool_sprintf(&p, "tx_queue_%u_%.18s", i, veth_tq_stats_desc[j].desc); page_pool_ethtool_stats_get_strings(p); break; } } static int veth_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(ethtool_stats_keys) + VETH_RQ_STATS_LEN * dev->real_num_rx_queues + VETH_TQ_STATS_LEN * dev->real_num_tx_queues + page_pool_ethtool_stats_get_count(); default: return -EOPNOTSUPP; } } static void veth_get_page_pool_stats(struct net_device *dev, u64 *data) { #ifdef CONFIG_PAGE_POOL_STATS struct veth_priv *priv = netdev_priv(dev); struct page_pool_stats pp_stats = {}; int i; for (i = 0; i < dev->real_num_rx_queues; i++) { if (!priv->rq[i].page_pool) continue; page_pool_get_stats(priv->rq[i].page_pool, &pp_stats); } page_pool_ethtool_stats_get(data, &pp_stats); #endif /* CONFIG_PAGE_POOL_STATS */ } static void veth_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); int i, j, idx, pp_idx; data[0] = peer ? peer->ifindex : 0; idx = 1; for (i = 0; i < dev->real_num_rx_queues; i++) { const struct veth_rq_stats *rq_stats = &priv->rq[i].stats; const void *stats_base = (void *)&rq_stats->vs; unsigned int start; size_t offset; do { start = u64_stats_fetch_begin(&rq_stats->syncp); for (j = 0; j < VETH_RQ_STATS_LEN; j++) { offset = veth_rq_stats_desc[j].offset; data[idx + j] = *(u64 *)(stats_base + offset); } } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); idx += VETH_RQ_STATS_LEN; } pp_idx = idx; if (!peer) goto page_pool_stats; rcv_priv = netdev_priv(peer); for (i = 0; i < peer->real_num_rx_queues; i++) { const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats; const void *base = (void *)&rq_stats->vs; unsigned int start, tx_idx = idx; size_t offset; tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN; do { start = u64_stats_fetch_begin(&rq_stats->syncp); for (j = 0; j < VETH_TQ_STATS_LEN; j++) { offset = veth_tq_stats_desc[j].offset; data[tx_idx + j] += *(u64 *)(base + offset); } } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); } pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN; page_pool_stats: veth_get_page_pool_stats(dev, &data[pp_idx]); } static void veth_get_channels(struct net_device *dev, struct ethtool_channels *channels) { channels->tx_count = dev->real_num_tx_queues; channels->rx_count = dev->real_num_rx_queues; channels->max_tx = dev->num_tx_queues; channels->max_rx = dev->num_rx_queues; } static int veth_set_channels(struct net_device *dev, struct ethtool_channels *ch); static const struct ethtool_ops veth_ethtool_ops = { .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = veth_get_strings, .get_sset_count = veth_get_sset_count, .get_ethtool_stats = veth_get_ethtool_stats, .get_link_ksettings = veth_get_link_ksettings, .get_ts_info = ethtool_op_get_ts_info, .get_channels = veth_get_channels, .set_channels = veth_set_channels, }; /* general routines */ static bool veth_is_xdp_frame(void *ptr) { return (unsigned long)ptr & VETH_XDP_FLAG; } static struct xdp_frame *veth_ptr_to_xdp(void *ptr) { return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG); } static void *veth_xdp_to_ptr(struct xdp_frame *xdp) { return (void *)((unsigned long)xdp | VETH_XDP_FLAG); } static void veth_ptr_free(void *ptr) { if (veth_is_xdp_frame(ptr)) xdp_return_frame(veth_ptr_to_xdp(ptr)); else kfree_skb(ptr); } static void __veth_xdp_flush(struct veth_rq *rq) { /* Write 
ptr_ring before reading rx_notify_masked */ smp_mb(); if (!READ_ONCE(rq->rx_notify_masked) && napi_schedule_prep(&rq->xdp_napi)) { WRITE_ONCE(rq->rx_notify_masked, true); __napi_schedule(&rq->xdp_napi); } } static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb) { if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) { dev_kfree_skb_any(skb); return NET_RX_DROP; } return NET_RX_SUCCESS; } static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, struct veth_rq *rq, bool xdp) { return __dev_forward_skb(dev, skb) ?: xdp ? veth_xdp_rx(rq, skb) : __netif_rx(skb); } /* return true if the specified skb has chances of GRO aggregation * Don't strive for accuracy, but try to avoid GRO overhead in the most * common scenarios. * When XDP is enabled, all traffic is considered eligible, as the xmit * device has TSO off. * When TSO is enabled on the xmit device, we are likely interested only * in UDP aggregation, explicitly check for that if the skb is suspected * - the sock_wfree destructor is used by UDP, ICMP and XDP sockets - * to belong to locally generated UDP traffic. */ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev, const struct net_device *rcv, const struct sk_buff *skb) { return !(dev->features & NETIF_F_ALL_TSO) || (skb->destructor == sock_wfree && rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)); } static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct veth_rq *rq = NULL; int ret = NETDEV_TX_OK; struct net_device *rcv; int length = skb->len; bool use_napi = false; int rxq; rcu_read_lock(); rcv = rcu_dereference(priv->peer); if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) { kfree_skb(skb); goto drop; } rcv_priv = netdev_priv(rcv); rxq = skb_get_queue_mapping(skb); if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; /* The napi pointer is available when an XDP program is * attached or when GRO is enabled * Don't bother with napi/GRO if the skb can't be aggregated */ use_napi = rcu_access_pointer(rq->napi) && veth_skb_is_eligible_for_gro(dev, rcv, skb); } skb_tx_timestamp(skb); if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { if (!use_napi) dev_sw_netstats_tx_add(dev, 1, length); else __veth_xdp_flush(rq); } else { drop: atomic64_inc(&priv->dropped); ret = NET_XMIT_DROP; } rcu_read_unlock(); return ret; } static void veth_stats_rx(struct veth_stats *result, struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); int i; result->peer_tq_xdp_xmit_err = 0; result->xdp_packets = 0; result->xdp_tx_err = 0; result->xdp_bytes = 0; result->rx_drops = 0; for (i = 0; i < dev->num_rx_queues; i++) { u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err; struct veth_rq_stats *stats = &priv->rq[i].stats; unsigned int start; do { start = u64_stats_fetch_begin(&stats->syncp); peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err; xdp_tx_err = stats->vs.xdp_tx_err; packets = stats->vs.xdp_packets; bytes = stats->vs.xdp_bytes; drops = stats->vs.rx_drops; } while (u64_stats_fetch_retry(&stats->syncp, start)); result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err; result->xdp_tx_err += xdp_tx_err; result->xdp_packets += packets; result->xdp_bytes += bytes; result->rx_drops += drops; } } static void veth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; struct veth_stats rx; tot->tx_dropped = atomic64_read(&priv->dropped); 
dev_fetch_sw_netstats(tot, dev->tstats); veth_stats_rx(&rx, dev); tot->tx_dropped += rx.xdp_tx_err; tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err; tot->rx_bytes += rx.xdp_bytes; tot->rx_packets += rx.xdp_packets; rcu_read_lock(); peer = rcu_dereference(priv->peer); if (peer) { struct rtnl_link_stats64 tot_peer = {}; dev_fetch_sw_netstats(&tot_peer, peer->tstats); tot->rx_bytes += tot_peer.tx_bytes; tot->rx_packets += tot_peer.tx_packets; veth_stats_rx(&rx, peer); tot->tx_dropped += rx.peer_tq_xdp_xmit_err; tot->rx_dropped += rx.xdp_tx_err; tot->tx_bytes += rx.xdp_bytes; tot->tx_packets += rx.xdp_packets; } rcu_read_unlock(); } /* fake multicast ability */ static void veth_set_multicast_list(struct net_device *dev) { } static int veth_select_rxq(struct net_device *dev) { return smp_processor_id() % dev->real_num_rx_queues; } static struct net_device *veth_peer_dev(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); /* Callers must be under RCU read side. */ return rcu_dereference(priv->peer); } static int veth_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags, bool ndo_xmit) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); int i, ret = -ENXIO, nxmit = 0; struct net_device *rcv; unsigned int max_len; struct veth_rq *rq; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; rcu_read_lock(); rcv = rcu_dereference(priv->peer); if (unlikely(!rcv)) goto out; rcv_priv = netdev_priv(rcv); rq = &rcv_priv->rq[veth_select_rxq(rcv)]; /* The napi pointer is set if NAPI is enabled, which ensures that * xdp_ring is initialized on receive side and the peer device is up. */ if (!rcu_access_pointer(rq->napi)) goto out; max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN; spin_lock(&rq->xdp_ring.producer_lock); for (i = 0; i < n; i++) { struct xdp_frame *frame = frames[i]; void *ptr = veth_xdp_to_ptr(frame); if (unlikely(xdp_get_frame_len(frame) > max_len || __ptr_ring_produce(&rq->xdp_ring, ptr))) break; nxmit++; } spin_unlock(&rq->xdp_ring.producer_lock); if (flags & XDP_XMIT_FLUSH) __veth_xdp_flush(rq); ret = nxmit; if (ndo_xmit) { u64_stats_update_begin(&rq->stats.syncp); rq->stats.vs.peer_tq_xdp_xmit += nxmit; rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit; u64_stats_update_end(&rq->stats.syncp); } out: rcu_read_unlock(); return ret; } static int veth_ndo_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { int err; err = veth_xdp_xmit(dev, n, frames, flags, true); if (err < 0) { struct veth_priv *priv = netdev_priv(dev); atomic64_add(n, &priv->dropped); } return err; } static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) { int sent, i, err = 0, drops; sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false); if (sent < 0) { err = sent; sent = 0; } for (i = sent; unlikely(i < bq->count); i++) xdp_return_frame(bq->q[i]); drops = bq->count - sent; trace_xdp_bulk_tx(rq->dev, sent, drops, err); u64_stats_update_begin(&rq->stats.syncp); rq->stats.vs.xdp_tx += sent; rq->stats.vs.xdp_tx_err += drops; u64_stats_update_end(&rq->stats.syncp); bq->count = 0; } static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) { struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev); struct net_device *rcv; struct veth_rq *rcv_rq; rcu_read_lock(); veth_xdp_flush_bq(rq, bq); rcv = rcu_dereference(priv->peer); if (unlikely(!rcv)) goto out; rcv_priv = netdev_priv(rcv); rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)]; /* xdp_ring is initialized on receive side? 
*/ if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog))) goto out; __veth_xdp_flush(rcv_rq); out: rcu_read_unlock(); } static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp, struct veth_xdp_tx_bq *bq) { struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); if (unlikely(!frame)) return -EOVERFLOW; if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE)) veth_xdp_flush_bq(rq, bq); bq->q[bq->count++] = frame; return 0; } static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq, struct xdp_frame *frame, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) { struct xdp_frame orig_frame; struct bpf_prog *xdp_prog; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (likely(xdp_prog)) { struct veth_xdp_buff vxbuf; struct xdp_buff *xdp = &vxbuf.xdp; u32 act; xdp_convert_frame_to_buff(frame, xdp); xdp->rxq = &rq->xdp_rxq; vxbuf.skb = NULL; act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: if (xdp_update_frame_from_buff(xdp, frame)) goto err_xdp; break; case XDP_TX: orig_frame = *frame; xdp->rxq->mem.type = frame->mem_type; if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) { trace_xdp_exception(rq->dev, xdp_prog, act); frame = &orig_frame; stats->rx_drops++; goto err_xdp; } stats->xdp_tx++; rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: orig_frame = *frame; xdp->rxq->mem.type = frame->mem_type; if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) { frame = &orig_frame; stats->rx_drops++; goto err_xdp; } stats->xdp_redirect++; rcu_read_unlock(); goto xdp_xmit; default: bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); fallthrough; case XDP_DROP: stats->xdp_drops++; goto err_xdp; } } rcu_read_unlock(); return frame; err_xdp: rcu_read_unlock(); xdp_return_frame(frame); xdp_xmit: return NULL; } /* frames array contains VETH_XDP_BATCH at most */ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames, int n_xdpf, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) { void *skbs[VETH_XDP_BATCH]; int i; if (unlikely(!napi_skb_cache_get_bulk(skbs, n_xdpf))) { for (i = 0; i < n_xdpf; i++) xdp_return_frame(frames[i]); stats->rx_drops += n_xdpf; return; } for (i = 0; i < n_xdpf; i++) { struct sk_buff *skb = skbs[i]; skb = __xdp_build_skb_from_frame(frames[i], skb, rq->dev); if (!skb) { xdp_return_frame(frames[i]); stats->rx_drops++; continue; } napi_gro_receive(&rq->xdp_napi, skb); } } static void veth_xdp_get(struct xdp_buff *xdp) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); int i; get_page(virt_to_page(xdp->data)); if (likely(!xdp_buff_has_frags(xdp))) return; for (i = 0; i < sinfo->nr_frags; i++) __skb_frag_ref(&sinfo->frags[i]); } static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, struct xdp_buff *xdp, struct sk_buff **pskb) { struct sk_buff *skb = *pskb; u32 frame_sz; if (skb_shared(skb) || skb_head_is_locked(skb) || skb_shinfo(skb)->nr_frags || skb_headroom(skb) < XDP_PACKET_HEADROOM) { if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM)) goto drop; skb = *pskb; } /* SKB "head" area always have tailroom for skb_shared_info */ frame_sz = skb_end_pointer(skb) - skb->head; frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); xdp_prepare_buff(xdp, skb->head, skb_headroom(skb), skb_headlen(skb), true); if (skb_is_nonlinear(skb)) { skb_shinfo(skb)->xdp_frags_size = skb->data_len; xdp_buff_set_frags_flag(xdp); } else { xdp_buff_clear_frags_flag(xdp); } *pskb = skb; return 0; drop: consume_skb(skb); 
*pskb = NULL; return -ENOMEM; } static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) { void *orig_data, *orig_data_end; struct bpf_prog *xdp_prog; struct veth_xdp_buff vxbuf; struct xdp_buff *xdp = &vxbuf.xdp; u32 act, metalen; int off; skb_prepare_for_gro(skb); rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (unlikely(!xdp_prog)) { rcu_read_unlock(); goto out; } __skb_push(skb, skb->data - skb_mac_header(skb)); if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb)) goto drop; vxbuf.skb = skb; orig_data = xdp->data; orig_data_end = xdp->data_end; act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: break; case XDP_TX: veth_xdp_get(xdp); consume_skb(skb); xdp->rxq->mem = rq->xdp_mem; if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) { trace_xdp_exception(rq->dev, xdp_prog, act); stats->rx_drops++; goto err_xdp; } stats->xdp_tx++; rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: veth_xdp_get(xdp); consume_skb(skb); xdp->rxq->mem = rq->xdp_mem; if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) { stats->rx_drops++; goto err_xdp; } stats->xdp_redirect++; rcu_read_unlock(); goto xdp_xmit; default: bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); fallthrough; case XDP_DROP: stats->xdp_drops++; goto xdp_drop; } rcu_read_unlock(); /* check if bpf_xdp_adjust_head was used */ off = orig_data - xdp->data; if (off > 0) __skb_push(skb, off); else if (off < 0) __skb_pull(skb, -off); skb_reset_mac_header(skb); /* check if bpf_xdp_adjust_tail was used */ off = xdp->data_end - orig_data_end; if (off != 0) __skb_put(skb, off); /* positive on grow, negative on shrink */ /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers * (e.g. bpf_xdp_adjust_tail), we need to update data_len here. 
*/ if (xdp_buff_has_frags(xdp)) skb->data_len = skb_shinfo(skb)->xdp_frags_size; else skb->data_len = 0; skb->protocol = eth_type_trans(skb, rq->dev); metalen = xdp->data - xdp->data_meta; if (metalen) skb_metadata_set(skb, metalen); out: return skb; drop: stats->rx_drops++; xdp_drop: rcu_read_unlock(); kfree_skb(skb); return NULL; err_xdp: rcu_read_unlock(); xdp_return_buff(xdp); xdp_xmit: return NULL; } static int veth_xdp_rcv(struct veth_rq *rq, int budget, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) { int i, done = 0, n_xdpf = 0; void *xdpf[VETH_XDP_BATCH]; for (i = 0; i < budget; i++) { void *ptr = __ptr_ring_consume(&rq->xdp_ring); if (!ptr) break; if (veth_is_xdp_frame(ptr)) { /* ndo_xdp_xmit */ struct xdp_frame *frame = veth_ptr_to_xdp(ptr); stats->xdp_bytes += xdp_get_frame_len(frame); frame = veth_xdp_rcv_one(rq, frame, bq, stats); if (frame) { /* XDP_PASS */ xdpf[n_xdpf++] = frame; if (n_xdpf == VETH_XDP_BATCH) { veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats); n_xdpf = 0; } } } else { /* ndo_start_xmit */ struct sk_buff *skb = ptr; stats->xdp_bytes += skb->len; skb = veth_xdp_rcv_skb(rq, skb, bq, stats); if (skb) { if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC)) netif_receive_skb(skb); else napi_gro_receive(&rq->xdp_napi, skb); } } done++; } if (n_xdpf) veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats); u64_stats_update_begin(&rq->stats.syncp); rq->stats.vs.xdp_redirect += stats->xdp_redirect; rq->stats.vs.xdp_bytes += stats->xdp_bytes; rq->stats.vs.xdp_drops += stats->xdp_drops; rq->stats.vs.rx_drops += stats->rx_drops; rq->stats.vs.xdp_packets += done; u64_stats_update_end(&rq->stats.syncp); return done; } static int veth_poll(struct napi_struct *napi, int budget) { struct veth_rq *rq = container_of(napi, struct veth_rq, xdp_napi); struct veth_stats stats = {}; struct veth_xdp_tx_bq bq; int done; bq.count = 0; xdp_set_return_frame_no_direct(); done = veth_xdp_rcv(rq, budget, &bq, &stats); if (stats.xdp_redirect > 0) xdp_do_flush(); if (done < budget && napi_complete_done(napi, done)) { /* Write rx_notify_masked before reading ptr_ring */ smp_store_mb(rq->rx_notify_masked, false); if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { if (napi_schedule_prep(&rq->xdp_napi)) { WRITE_ONCE(rq->rx_notify_masked, true); __napi_schedule(&rq->xdp_napi); } } } if (stats.xdp_tx > 0) veth_xdp_flush(rq, &bq); xdp_clear_return_frame_no_direct(); return done; } static int veth_create_page_pool(struct veth_rq *rq) { struct page_pool_params pp_params = { .order = 0, .pool_size = VETH_RING_SIZE, .nid = NUMA_NO_NODE, .dev = &rq->dev->dev, }; rq->page_pool = page_pool_create(&pp_params); if (IS_ERR(rq->page_pool)) { int err = PTR_ERR(rq->page_pool); rq->page_pool = NULL; return err; } return 0; } static int __veth_napi_enable_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int err, i; for (i = start; i < end; i++) { err = veth_create_page_pool(&priv->rq[i]); if (err) goto err_page_pool; } for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); if (err) goto err_xdp_ring; } for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; napi_enable(&rq->xdp_napi); rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); } return 0; err_xdp_ring: for (i--; i >= start; i--) ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); i = end; err_page_pool: for (i--; i >= start; i--) { page_pool_destroy(priv->rq[i].page_pool); priv->rq[i].page_pool = NULL; } 
return err; } static int __veth_napi_enable(struct net_device *dev) { return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); } static void veth_napi_del_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int i; for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; rcu_assign_pointer(priv->rq[i].napi, NULL); napi_disable(&rq->xdp_napi); __netif_napi_del(&rq->xdp_napi); } synchronize_net(); for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; rq->rx_notify_masked = false; ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free); } for (i = start; i < end; i++) { page_pool_destroy(priv->rq[i].page_pool); priv->rq[i].page_pool = NULL; } } static void veth_napi_del(struct net_device *dev) { veth_napi_del_range(dev, 0, dev->real_num_rx_queues); } static bool veth_gro_requested(const struct net_device *dev) { return !!(dev->wanted_features & NETIF_F_GRO); } static int veth_enable_xdp_range(struct net_device *dev, int start, int end, bool napi_already_on) { struct veth_priv *priv = netdev_priv(dev); int err, i; for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; if (!napi_already_on) netif_napi_add(dev, &rq->xdp_napi, veth_poll); err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); if (err < 0) goto err_rxq_reg; err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (err < 0) goto err_reg_mem; /* Save original mem info as it can be overwritten */ rq->xdp_mem = rq->xdp_rxq.mem; } return 0; err_reg_mem: xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); err_rxq_reg: for (i--; i >= start; i--) { struct veth_rq *rq = &priv->rq[i]; xdp_rxq_info_unreg(&rq->xdp_rxq); if (!napi_already_on) netif_napi_del(&rq->xdp_napi); } return err; } static void veth_disable_xdp_range(struct net_device *dev, int start, int end, bool delete_napi) { struct veth_priv *priv = netdev_priv(dev); int i; for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; rq->xdp_rxq.mem = rq->xdp_mem; xdp_rxq_info_unreg(&rq->xdp_rxq); if (delete_napi) netif_napi_del(&rq->xdp_napi); } } static int veth_enable_xdp(struct net_device *dev) { bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP); struct veth_priv *priv = netdev_priv(dev); int err, i; if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) { err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on); if (err) return err; if (!napi_already_on) { err = __veth_napi_enable(dev); if (err) { veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true); return err; } } } for (i = 0; i < dev->real_num_rx_queues; i++) { rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog); rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); } return 0; } static void veth_disable_xdp(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); int i; for (i = 0; i < dev->real_num_rx_queues; i++) rcu_assign_pointer(priv->rq[i].xdp_prog, NULL); if (!netif_running(dev) || !veth_gro_requested(dev)) veth_napi_del(dev); veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false); } static int veth_napi_enable_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int err, i; for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; netif_napi_add(dev, &rq->xdp_napi, veth_poll); } err = __veth_napi_enable_range(dev, start, end); if (err) { for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; netif_napi_del(&rq->xdp_napi); } return err; } return err; } static int 
veth_napi_enable(struct net_device *dev) { return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); } static void veth_disable_range_safe(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); if (start >= end) return; if (priv->_xdp_prog) { veth_napi_del_range(dev, start, end); veth_disable_xdp_range(dev, start, end, false); } else if (veth_gro_requested(dev)) { veth_napi_del_range(dev, start, end); } } static int veth_enable_range_safe(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int err; if (start >= end) return 0; if (priv->_xdp_prog) { /* these channels are freshly initialized, napi is not on there even * when GRO is requeste */ err = veth_enable_xdp_range(dev, start, end, false); if (err) return err; err = __veth_napi_enable_range(dev, start, end); if (err) { /* on error always delete the newly added napis */ veth_disable_xdp_range(dev, start, end, true); return err; } } else if (veth_gro_requested(dev)) { return veth_napi_enable_range(dev, start, end); } return 0; } static void veth_set_xdp_features(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; peer = rtnl_dereference(priv->peer); if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) { struct veth_priv *priv_peer = netdev_priv(peer); xdp_features_t val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_RX_SG; if (priv_peer->_xdp_prog || veth_gro_requested(peer)) val |= NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG; xdp_set_features_flag(dev, val); } else { xdp_clear_features_flag(dev); } } static int veth_set_channels(struct net_device *dev, struct ethtool_channels *ch) { struct veth_priv *priv = netdev_priv(dev); unsigned int old_rx_count, new_rx_count; struct veth_priv *peer_priv; struct net_device *peer; int err; /* sanity check. Upper bounds are already enforced by the caller */ if (!ch->rx_count || !ch->tx_count) return -EINVAL; /* avoid braking XDP, if that is enabled */ peer = rtnl_dereference(priv->peer); peer_priv = peer ? netdev_priv(peer) : NULL; if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues) return -EINVAL; if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues) return -EINVAL; old_rx_count = dev->real_num_rx_queues; new_rx_count = ch->rx_count; if (netif_running(dev)) { /* turn device off */ netif_carrier_off(dev); if (peer) netif_carrier_off(peer); /* try to allocate new resurces, as needed*/ err = veth_enable_range_safe(dev, old_rx_count, new_rx_count); if (err) goto out; } err = netif_set_real_num_rx_queues(dev, ch->rx_count); if (err) goto revert; err = netif_set_real_num_tx_queues(dev, ch->tx_count); if (err) { int err2 = netif_set_real_num_rx_queues(dev, old_rx_count); /* this error condition could happen only if rx and tx change * in opposite directions (e.g. 
tx nr raises, rx nr decreases) * and we can't do anything to fully restore the original * status */ if (err2) pr_warn("Can't restore rx queues config %d -> %d %d", new_rx_count, old_rx_count, err2); else goto revert; } out: if (netif_running(dev)) { /* note that we need to swap the arguments WRT the enable part * to identify the range we have to disable */ veth_disable_range_safe(dev, new_rx_count, old_rx_count); netif_carrier_on(dev); if (peer) netif_carrier_on(peer); } /* update XDP supported features */ veth_set_xdp_features(dev); if (peer) veth_set_xdp_features(peer); return err; revert: new_rx_count = old_rx_count; old_rx_count = ch->rx_count; goto out; } static int veth_open(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); int err; if (!peer) return -ENOTCONN; if (priv->_xdp_prog) { err = veth_enable_xdp(dev); if (err) return err; } else if (veth_gro_requested(dev)) { err = veth_napi_enable(dev); if (err) return err; } if (peer->flags & IFF_UP) { netif_carrier_on(dev); netif_carrier_on(peer); } veth_set_xdp_features(dev); return 0; } static int veth_close(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); netif_carrier_off(dev); if (peer) netif_carrier_off(peer); if (priv->_xdp_prog) veth_disable_xdp(dev); else if (veth_gro_requested(dev)) veth_napi_del(dev); return 0; } static int is_valid_veth_mtu(int mtu) { return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU; } static int veth_alloc_queues(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); int i; priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); if (!priv->rq) return -ENOMEM; for (i = 0; i < dev->num_rx_queues; i++) { priv->rq[i].dev = dev; u64_stats_init(&priv->rq[i].stats.syncp); } return 0; } static void veth_free_queues(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); kvfree(priv->rq); } static int veth_dev_init(struct net_device *dev) { netdev_lockdep_set_classes(dev); return veth_alloc_queues(dev); } static void veth_dev_free(struct net_device *dev) { veth_free_queues(dev); } #ifdef CONFIG_NET_POLL_CONTROLLER static void veth_poll_controller(struct net_device *dev) { /* veth only receives frames when its peer sends one * Since it has nothing to do with disabling irqs, we are guaranteed * never to have pending data when we poll for it so * there is nothing to do here. * * We need this though so netpoll recognizes us as an interface that * supports polling, which enables bridge devices in virt setups to * still use netconsole */ } #endif /* CONFIG_NET_POLL_CONTROLLER */ static int veth_get_iflink(const struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; int iflink; rcu_read_lock(); peer = rcu_dereference(priv->peer); iflink = peer ? 
READ_ONCE(peer->ifindex) : 0; rcu_read_unlock(); return iflink; } static netdev_features_t veth_fix_features(struct net_device *dev, netdev_features_t features) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; peer = rtnl_dereference(priv->peer); if (peer) { struct veth_priv *peer_priv = netdev_priv(peer); if (peer_priv->_xdp_prog) features &= ~NETIF_F_GSO_SOFTWARE; } return features; } static int veth_set_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = features ^ dev->features; struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; int err; if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog) return 0; peer = rtnl_dereference(priv->peer); if (features & NETIF_F_GRO) { err = veth_napi_enable(dev); if (err) return err; if (peer) xdp_features_set_redirect_target(peer, true); } else { if (peer) xdp_features_clear_redirect_target(peer); veth_napi_del(dev); } return 0; } static void veth_set_rx_headroom(struct net_device *dev, int new_hr) { struct veth_priv *peer_priv, *priv = netdev_priv(dev); struct net_device *peer; if (new_hr < 0) new_hr = 0; rcu_read_lock(); peer = rcu_dereference(priv->peer); if (unlikely(!peer)) goto out; peer_priv = netdev_priv(peer); priv->requested_headroom = new_hr; new_hr = max(priv->requested_headroom, peer_priv->requested_headroom); dev->needed_headroom = new_hr; peer->needed_headroom = new_hr; out: rcu_read_unlock(); } static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct veth_priv *priv = netdev_priv(dev); struct bpf_prog *old_prog; struct net_device *peer; unsigned int max_mtu; int err; old_prog = priv->_xdp_prog; priv->_xdp_prog = prog; peer = rtnl_dereference(priv->peer); if (prog) { if (!peer) { NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached"); err = -ENOTCONN; goto err; } max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) - peer->hard_header_len; /* Allow increasing the max_mtu if the program supports * XDP fragments. 
*/ if (prog->aux->xdp_has_frags) max_mtu += PAGE_SIZE * MAX_SKB_FRAGS; if (peer->mtu > max_mtu) { NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP"); err = -ERANGE; goto err; } if (dev->real_num_rx_queues < peer->real_num_tx_queues) { NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues"); err = -ENOSPC; goto err; } if (dev->flags & IFF_UP) { err = veth_enable_xdp(dev); if (err) { NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed"); goto err; } } if (!old_prog) { peer->hw_features &= ~NETIF_F_GSO_SOFTWARE; peer->max_mtu = max_mtu; } xdp_features_set_redirect_target(peer, true); } if (old_prog) { if (!prog) { if (peer && !veth_gro_requested(dev)) xdp_features_clear_redirect_target(peer); if (dev->flags & IFF_UP) veth_disable_xdp(dev); if (peer) { peer->hw_features |= NETIF_F_GSO_SOFTWARE; peer->max_mtu = ETH_MAX_MTU; } } bpf_prog_put(old_prog); } if ((!!old_prog ^ !!prog) && peer) netdev_update_features(peer); return 0; err: priv->_xdp_prog = old_prog; return err; } static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return veth_xdp_set(dev, xdp->prog, xdp->extack); default: return -EINVAL; } } static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) { struct veth_xdp_buff *_ctx = (void *)ctx; if (!_ctx->skb) return -ENODATA; *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp; return 0; } static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, enum xdp_rss_hash_type *rss_type) { struct veth_xdp_buff *_ctx = (void *)ctx; struct sk_buff *skb = _ctx->skb; if (!skb) return -ENODATA; *hash = skb_get_hash(skb); *rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE; return 0; } static int veth_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, u16 *vlan_tci) { const struct veth_xdp_buff *_ctx = (void *)ctx; const struct sk_buff *skb = _ctx->skb; int err; if (!skb) return -ENODATA; err = __vlan_hwaccel_get_tag(skb, vlan_tci); if (err) return err; *vlan_proto = skb->vlan_proto; return err; } static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, .ndo_stop = veth_close, .ndo_start_xmit = veth_xmit, .ndo_get_stats64 = veth_get_stats64, .ndo_set_rx_mode = veth_set_multicast_list, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = veth_poll_controller, #endif .ndo_get_iflink = veth_get_iflink, .ndo_fix_features = veth_fix_features, .ndo_set_features = veth_set_features, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = veth_set_rx_headroom, .ndo_bpf = veth_xdp, .ndo_xdp_xmit = veth_ndo_xdp_xmit, .ndo_get_peer_dev = veth_peer_dev, }; static const struct xdp_metadata_ops veth_xdp_metadata_ops = { .xmo_rx_timestamp = veth_xdp_rx_timestamp, .xmo_rx_hash = veth_xdp_rx_hash, .xmo_rx_vlan_tag = veth_xdp_rx_vlan_tag, }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX ) static void veth_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_PHONY_HEADROOM; dev->priv_flags |= IFF_DISABLE_NETPOLL; dev->lltx = true; dev->netdev_ops = &veth_netdev_ops; dev->xdp_metadata_ops = 
&veth_xdp_metadata_ops; dev->ethtool_ops = &veth_ethtool_ops; dev->features |= VETH_FEATURES; dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); dev->needs_free_netdev = true; dev->priv_destructor = veth_dev_free; dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->max_mtu = ETH_MAX_MTU; dev->hw_features = VETH_FEATURES; dev->hw_enc_features = VETH_FEATURES; dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; netif_set_tso_max_size(dev, GSO_MAX_SIZE); } /* * netlink interface */ static int veth_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (tb[IFLA_MTU]) { if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU]))) return -EINVAL; } return 0; } static struct rtnl_link_ops veth_link_ops; static void veth_disable_gro(struct net_device *dev) { dev->features &= ~NETIF_F_GRO; dev->wanted_features &= ~NETIF_F_GRO; netdev_update_features(dev); } static int veth_init_queues(struct net_device *dev, struct nlattr *tb[]) { int err; if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) { err = netif_set_real_num_tx_queues(dev, 1); if (err) return err; } if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) { err = netif_set_real_num_rx_queues(dev, 1); if (err) return err; } return 0; } static int veth_newlink(struct net_device *dev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct net *peer_net = rtnl_newlink_peer_net(params); struct nlattr **data = params->data; struct nlattr **tb = params->tb; int err; struct net_device *peer; struct veth_priv *priv; char ifname[IFNAMSIZ]; struct nlattr *peer_tb[IFLA_MAX + 1], **tbp; unsigned char name_assign_type; struct ifinfomsg *ifmp; /* * create and register peer first */ if (data && data[VETH_INFO_PEER]) { struct nlattr *nla_peer = data[VETH_INFO_PEER]; ifmp = nla_data(nla_peer); rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); tbp = peer_tb; } else { ifmp = NULL; tbp = tb; } if (ifmp && tbp[IFLA_IFNAME]) { nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); name_assign_type = NET_NAME_ENUM; } peer = rtnl_create_link(peer_net, ifname, name_assign_type, &veth_link_ops, tbp, extack); if (IS_ERR(peer)) return PTR_ERR(peer); if (!ifmp || !tbp[IFLA_ADDRESS]) eth_hw_addr_random(peer); if (ifmp && (dev->ifindex != 0)) peer->ifindex = ifmp->ifi_index; netif_inherit_tso_max(peer, dev); err = register_netdevice(peer); if (err < 0) goto err_register_peer; /* keep GRO disabled by default to be consistent with the established * veth behavior */ veth_disable_gro(peer); netif_carrier_off(peer); err = rtnl_configure_link(peer, ifmp, 0, NULL); if (err < 0) goto err_configure_peer; /* * register dev last * * note, that since we've registered new device the dev's name * should be re-allocated */ if (tb[IFLA_ADDRESS] == NULL) eth_hw_addr_random(dev); if (tb[IFLA_IFNAME]) nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); else snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); err = register_netdevice(dev); if (err < 0) goto err_register_dev; netif_carrier_off(dev); /* * tie the deviced together */ priv = netdev_priv(dev); rcu_assign_pointer(priv->peer, peer); err = veth_init_queues(dev, tb); if (err) goto err_queues; priv = netdev_priv(peer); rcu_assign_pointer(priv->peer, 
dev); err = veth_init_queues(peer, tb); if (err) goto err_queues; veth_disable_gro(dev); /* update XDP supported features */ veth_set_xdp_features(dev); veth_set_xdp_features(peer); return 0; err_queues: unregister_netdevice(dev); err_register_dev: /* nothing to do */ err_configure_peer: unregister_netdevice(peer); return err; err_register_peer: free_netdev(peer); return err; } static void veth_dellink(struct net_device *dev, struct list_head *head) { struct veth_priv *priv; struct net_device *peer; priv = netdev_priv(dev); peer = rtnl_dereference(priv->peer); /* Note : dellink() is called from default_device_exit_batch(), * before a rcu_synchronize() point. The devices are guaranteed * not being freed before one RCU grace period. */ RCU_INIT_POINTER(priv->peer, NULL); unregister_netdevice_queue(dev, head); if (peer) { priv = netdev_priv(peer); RCU_INIT_POINTER(priv->peer, NULL); unregister_netdevice_queue(peer, head); } } static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = { [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) }, }; static struct net *veth_get_link_net(const struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); return peer ? dev_net(peer) : dev_net(dev); } static unsigned int veth_get_num_queues(void) { /* enforce the same queue limit as rtnl_create_link */ int queues = num_possible_cpus(); if (queues > 4096) queues = 4096; return queues; } static struct rtnl_link_ops veth_link_ops = { .kind = DRV_NAME, .priv_size = sizeof(struct veth_priv), .setup = veth_setup, .validate = veth_validate, .newlink = veth_newlink, .dellink = veth_dellink, .policy = veth_policy, .peer_type = VETH_INFO_PEER, .maxtype = VETH_INFO_MAX, .get_link_net = veth_get_link_net, .get_num_tx_queues = veth_get_num_queues, .get_num_rx_queues = veth_get_num_queues, }; /* * init/fini */ static __init int veth_init(void) { return rtnl_link_register(&veth_link_ops); } static __exit void veth_exit(void) { rtnl_link_unregister(&veth_link_ops); } module_init(veth_init); module_exit(veth_exit); MODULE_DESCRIPTION("Virtual Ethernet Tunnel"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_RTNL_LINK(DRV_NAME);
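
The ring shared by veth_xmit() and ndo_xdp_xmit carries two different object types, and the driver tells them apart with the low pointer bit (VETH_XDP_FLAG) in veth_xdp_to_ptr()/veth_ptr_to_xdp()/veth_is_xdp_frame(). Below is a minimal standalone userspace sketch of that tagging trick, for illustration only: TAG_BIT and the demo_frame/demo_skb types are made up stand-ins, not driver code.

/* Minimal sketch of low-bit pointer tagging: both object types are at least
 * word aligned, so bit 0 of the pointer is free to mark "this is a frame".
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

#define TAG_BIT 0x1UL

struct demo_frame { int id; };	/* stands in for struct xdp_frame */
struct demo_skb   { int id; };	/* stands in for struct sk_buff */

static void *tag_frame(struct demo_frame *f)
{
	return (void *)((uintptr_t)f | TAG_BIT);
}

static bool is_frame(void *ptr)
{
	return (uintptr_t)ptr & TAG_BIT;
}

static struct demo_frame *untag_frame(void *ptr)
{
	return (struct demo_frame *)((uintptr_t)ptr & ~TAG_BIT);
}

int main(void)
{
	struct demo_frame *f = malloc(sizeof(*f));
	struct demo_skb *s = malloc(sizeof(*s));
	void *ring[2] = { tag_frame(f), s };	/* mixed entries in one ring */

	for (int i = 0; i < 2; i++) {
		if (is_frame(ring[i]))
			printf("entry %d: frame %p\n", i, (void *)untag_frame(ring[i]));
		else
			printf("entry %d: skb %p\n", i, ring[i]);
	}
	free(f);
	free(s);
	return 0;
}
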
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/sfp.h>
#include <net/devlink.h>
#include <net/netdev_lock.h>

#include "netlink.h"
#include "common.h"
#include "bitset.h"
#include "module_fw.h"

struct module_req_info {
	struct ethnl_req_info base;
};

struct module_reply_data {
	struct ethnl_reply_data base;
	struct ethtool_module_power_mode_params power;
};

#define MODULE_REPDATA(__reply_base)	\
	container_of(__reply_base, struct module_reply_data, base)

/* MODULE_GET */

const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER + 1] = {
	[ETHTOOL_A_MODULE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
};

static int module_get_power_mode(struct net_device *dev,
				 struct module_reply_data *data,
				 struct netlink_ext_ack *extack)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;

	if (!ops->get_module_power_mode)
		return 0;

	if (dev->ethtool->module_fw_flash_in_progress) {
		NL_SET_ERR_MSG(extack, "Module firmware flashing is in progress");
		return -EBUSY;
	}

	return ops->get_module_power_mode(dev, &data->power, extack);
}

static int module_prepare_data(const struct ethnl_req_info *req_base,
			       struct ethnl_reply_data *reply_base,
			       const struct genl_info *info)
{
	struct module_reply_data *data = MODULE_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	ret = ethnl_ops_begin(dev);
	if
(ret < 0) return ret; ret = module_get_power_mode(dev, data, info->extack); if (ret < 0) goto out_complete; out_complete: ethnl_ops_complete(dev); return ret; } static int module_reply_size(const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { struct module_reply_data *data = MODULE_REPDATA(reply_base); int len = 0; if (data->power.policy) len += nla_total_size(sizeof(u8)); /* _MODULE_POWER_MODE_POLICY */ if (data->power.mode) len += nla_total_size(sizeof(u8)); /* _MODULE_POWER_MODE */ return len; } static int module_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { const struct module_reply_data *data = MODULE_REPDATA(reply_base); if (data->power.policy && nla_put_u8(skb, ETHTOOL_A_MODULE_POWER_MODE_POLICY, data->power.policy)) return -EMSGSIZE; if (data->power.mode && nla_put_u8(skb, ETHTOOL_A_MODULE_POWER_MODE, data->power.mode)) return -EMSGSIZE; return 0; } /* MODULE_SET */ const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1] = { [ETHTOOL_A_MODULE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), [ETHTOOL_A_MODULE_POWER_MODE_POLICY] = NLA_POLICY_RANGE(NLA_U8, ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH, ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO), }; static int ethnl_set_module_validate(struct ethnl_req_info *req_info, struct genl_info *info) { const struct ethtool_ops *ops = req_info->dev->ethtool_ops; struct nlattr **tb = info->attrs; if (!tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]) return 0; if (req_info->dev->ethtool->module_fw_flash_in_progress) { NL_SET_ERR_MSG(info->extack, "Module firmware flashing is in progress"); return -EBUSY; } if (!ops->get_module_power_mode || !ops->set_module_power_mode) { NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY], "Setting power mode policy is not supported by this device"); return -EOPNOTSUPP; } return 1; } static int ethnl_set_module(struct ethnl_req_info *req_info, struct genl_info *info) { struct ethtool_module_power_mode_params power = {}; struct ethtool_module_power_mode_params power_new; const struct ethtool_ops *ops; struct net_device *dev = req_info->dev; struct nlattr **tb = info->attrs; int ret; ops = dev->ethtool_ops; power_new.policy = nla_get_u8(tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]); ret = ops->get_module_power_mode(dev, &power, info->extack); if (ret < 0) return ret; if (power_new.policy == power.policy) return 0; ret = ops->set_module_power_mode(dev, &power_new, info->extack); return ret < 0 ? 
ret : 1; } const struct ethnl_request_ops ethnl_module_request_ops = { .request_cmd = ETHTOOL_MSG_MODULE_GET, .reply_cmd = ETHTOOL_MSG_MODULE_GET_REPLY, .hdr_attr = ETHTOOL_A_MODULE_HEADER, .req_info_size = sizeof(struct module_req_info), .reply_data_size = sizeof(struct module_reply_data), .prepare_data = module_prepare_data, .reply_size = module_reply_size, .fill_reply = module_fill_reply, .set_validate = ethnl_set_module_validate, .set = ethnl_set_module, .set_ntf_cmd = ETHTOOL_MSG_MODULE_NTF, }; /* MODULE_FW_FLASH_ACT */ const struct nla_policy ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1] = { [ETHTOOL_A_MODULE_FW_FLASH_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), [ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME] = { .type = NLA_NUL_STRING }, [ETHTOOL_A_MODULE_FW_FLASH_PASSWORD] = { .type = NLA_U32 }, }; static LIST_HEAD(module_fw_flash_work_list); static DEFINE_SPINLOCK(module_fw_flash_work_list_lock); static int module_flash_fw_work_list_add(struct ethtool_module_fw_flash *module_fw, struct genl_info *info) { struct ethtool_module_fw_flash *work; /* First, check if already registered. */ spin_lock(&module_fw_flash_work_list_lock); list_for_each_entry(work, &module_fw_flash_work_list, list) { if (work->fw_update.ntf_params.portid == info->snd_portid && work->fw_update.dev == module_fw->fw_update.dev) { spin_unlock(&module_fw_flash_work_list_lock); return -EALREADY; } } list_add_tail(&module_fw->list, &module_fw_flash_work_list); spin_unlock(&module_fw_flash_work_list_lock); return 0; } static void module_flash_fw_work_list_del(struct list_head *list) { spin_lock(&module_fw_flash_work_list_lock); list_del(list); spin_unlock(&module_fw_flash_work_list_lock); } static void module_flash_fw_work(struct work_struct *work) { struct ethtool_module_fw_flash *module_fw; module_fw = container_of(work, struct ethtool_module_fw_flash, work); ethtool_cmis_fw_update(&module_fw->fw_update); module_flash_fw_work_list_del(&module_fw->list); module_fw->fw_update.dev->ethtool->module_fw_flash_in_progress = false; netdev_put(module_fw->fw_update.dev, &module_fw->dev_tracker); release_firmware(module_fw->fw_update.fw); kfree(module_fw); } #define MODULE_EEPROM_PHYS_ID_PAGE 0 #define MODULE_EEPROM_PHYS_ID_I2C_ADDR 0x50 static int module_flash_fw_work_init(struct ethtool_module_fw_flash *module_fw, struct net_device *dev, struct netlink_ext_ack *extack) { const struct ethtool_ops *ops = dev->ethtool_ops; struct ethtool_module_eeprom page_data = {}; u8 phys_id; int err; /* Fetch the SFF-8024 Identifier Value. For all supported standards, it * is located at I2C address 0x50, byte 0. See section 4.1 in SFF-8024, * revision 4.9. 
*/ page_data.page = MODULE_EEPROM_PHYS_ID_PAGE; page_data.offset = SFP_PHYS_ID; page_data.length = sizeof(phys_id); page_data.i2c_address = MODULE_EEPROM_PHYS_ID_I2C_ADDR; page_data.data = &phys_id; err = ops->get_module_eeprom_by_page(dev, &page_data, extack); if (err < 0) return err; switch (phys_id) { case SFF8024_ID_QSFP_DD: case SFF8024_ID_OSFP: case SFF8024_ID_DSFP: case SFF8024_ID_QSFP_PLUS_CMIS: case SFF8024_ID_SFP_DD_CMIS: case SFF8024_ID_SFP_PLUS_CMIS: INIT_WORK(&module_fw->work, module_flash_fw_work); break; default: NL_SET_ERR_MSG(extack, "Module type does not support firmware flashing"); return -EOPNOTSUPP; } return 0; } void ethnl_module_fw_flash_sock_destroy(struct ethnl_sock_priv *sk_priv) { struct ethtool_module_fw_flash *work; spin_lock(&module_fw_flash_work_list_lock); list_for_each_entry(work, &module_fw_flash_work_list, list) { if (work->fw_update.dev == sk_priv->dev && work->fw_update.ntf_params.portid == sk_priv->portid) { work->fw_update.ntf_params.closed_sock = true; break; } } spin_unlock(&module_fw_flash_work_list_lock); } static int module_flash_fw_schedule(struct net_device *dev, const char *file_name, struct ethtool_module_fw_flash_params *params, struct sk_buff *skb, struct genl_info *info) { struct ethtool_cmis_fw_update_params *fw_update; struct ethtool_module_fw_flash *module_fw; int err; module_fw = kzalloc(sizeof(*module_fw), GFP_KERNEL); if (!module_fw) return -ENOMEM; fw_update = &module_fw->fw_update; fw_update->params = *params; err = request_firmware_direct(&fw_update->fw, file_name, &dev->dev); if (err) { NL_SET_ERR_MSG(info->extack, "Failed to request module firmware image"); goto err_free; } err = module_flash_fw_work_init(module_fw, dev, info->extack); if (err < 0) goto err_release_firmware; dev->ethtool->module_fw_flash_in_progress = true; netdev_hold(dev, &module_fw->dev_tracker, GFP_KERNEL); fw_update->dev = dev; fw_update->ntf_params.portid = info->snd_portid; fw_update->ntf_params.seq = info->snd_seq; fw_update->ntf_params.closed_sock = false; err = ethnl_sock_priv_set(skb, dev, fw_update->ntf_params.portid, ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH); if (err < 0) goto err_release_firmware; err = module_flash_fw_work_list_add(module_fw, info); if (err < 0) goto err_release_firmware; schedule_work(&module_fw->work); return 0; err_release_firmware: release_firmware(fw_update->fw); err_free: kfree(module_fw); return err; } static int module_flash_fw(struct net_device *dev, struct nlattr **tb, struct sk_buff *skb, struct genl_info *info) { struct ethtool_module_fw_flash_params params = {}; const char *file_name; struct nlattr *attr; if (GENL_REQ_ATTR_CHECK(info, ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME)) return -EINVAL; file_name = nla_data(tb[ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME]); attr = tb[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD]; if (attr) { params.password = cpu_to_be32(nla_get_u32(attr)); params.password_valid = true; } return module_flash_fw_schedule(dev, file_name, &params, skb, info); } static int ethnl_module_fw_flash_validate(struct net_device *dev, struct netlink_ext_ack *extack) { struct devlink_port *devlink_port = dev->devlink_port; const struct ethtool_ops *ops = dev->ethtool_ops; if (!ops->set_module_eeprom_by_page || !ops->get_module_eeprom_by_page) { NL_SET_ERR_MSG(extack, "Flashing module firmware is not supported by this device"); return -EOPNOTSUPP; } if (!ops->reset) { NL_SET_ERR_MSG(extack, "Reset module is not supported by this device, so flashing is not permitted"); return -EOPNOTSUPP; } if (dev->ethtool->module_fw_flash_in_progress) { 
NL_SET_ERR_MSG(extack, "Module firmware flashing already in progress"); return -EBUSY; } if (dev->flags & IFF_UP) { NL_SET_ERR_MSG(extack, "Netdevice is up, so flashing is not permitted"); return -EBUSY; } if (devlink_port && devlink_port->attrs.split) { NL_SET_ERR_MSG(extack, "Can't perform firmware flashing on a split port"); return -EOPNOTSUPP; } return 0; } int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info) { struct ethnl_req_info req_info = {}; struct nlattr **tb = info->attrs; struct net_device *dev; int ret; ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_MODULE_FW_FLASH_HEADER], genl_info_net(info), info->extack, true); if (ret < 0) return ret; dev = req_info.dev; rtnl_lock(); netdev_lock_ops(dev); ret = ethnl_ops_begin(dev); if (ret < 0) goto out_unlock; ret = ethnl_module_fw_flash_validate(dev, info->extack); if (ret < 0) goto out_unlock; ret = module_flash_fw(dev, tb, skb, info); ethnl_ops_complete(dev); out_unlock: netdev_unlock_ops(dev); rtnl_unlock(); ethnl_parse_header_dev_put(&req_info); return ret; } /* MODULE_FW_FLASH_NTF */ static int ethnl_module_fw_flash_ntf_put_err(struct sk_buff *skb, char *err_msg, char *sub_err_msg) { int err_msg_len, sub_err_msg_len, total_len; struct nlattr *attr; if (!err_msg) return 0; err_msg_len = strlen(err_msg); total_len = err_msg_len + 2; /* For period and NUL. */ if (sub_err_msg) { sub_err_msg_len = strlen(sub_err_msg); total_len += sub_err_msg_len + 2; /* For ", ". */ } attr = nla_reserve(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG, total_len); if (!attr) return -ENOMEM; if (sub_err_msg) sprintf(nla_data(attr), "%s, %s.", err_msg, sub_err_msg); else sprintf(nla_data(attr), "%s.", err_msg); return 0; } static void ethnl_module_fw_flash_ntf(struct net_device *dev, enum ethtool_module_fw_flash_status status, struct ethnl_module_fw_flash_ntf_params *ntf_params, char *err_msg, char *sub_err_msg, u64 done, u64 total) { struct sk_buff *skb; void *hdr; int ret; if (ntf_params->closed_sock) return; skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return; hdr = ethnl_unicast_put(skb, ntf_params->portid, ++ntf_params->seq, ETHTOOL_MSG_MODULE_FW_FLASH_NTF); if (!hdr) goto err_skb; ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_MODULE_FW_FLASH_HEADER); if (ret < 0) goto err_skb; if (nla_put_u32(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS, status)) goto err_skb; ret = ethnl_module_fw_flash_ntf_put_err(skb, err_msg, sub_err_msg); if (ret < 0) goto err_skb; if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_DONE, done)) goto err_skb; if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_TOTAL, total)) goto err_skb; genlmsg_end(skb, hdr); genlmsg_unicast(dev_net(dev), skb, ntf_params->portid); return; err_skb: nlmsg_free(skb); } void ethnl_module_fw_flash_ntf_err(struct net_device *dev, struct ethnl_module_fw_flash_ntf_params *params, char *err_msg, char *sub_err_msg) { ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR, params, err_msg, sub_err_msg, 0, 0); } void ethnl_module_fw_flash_ntf_start(struct net_device *dev, struct ethnl_module_fw_flash_ntf_params *params) { ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED, params, NULL, NULL, 0, 0); } void ethnl_module_fw_flash_ntf_complete(struct net_device *dev, struct ethnl_module_fw_flash_ntf_params *params) { ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED, params, NULL, NULL, 0, 0); } void ethnl_module_fw_flash_ntf_in_progress(struct net_device *dev, struct ethnl_module_fw_flash_ntf_params *params, u64 done, u64 
					  total)
{
	ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS,
				  params, NULL, NULL, done, total);
}
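
The status-message attribute reserved by ethnl_module_fw_flash_ntf_put_err() is sized as strlen(err_msg) + 2 (trailing period plus NUL) plus, when present, strlen(sub_err_msg) + 2 (the ", " separator). A standalone sketch of that sizing arithmetic follows; the error strings are arbitrary examples, not taken from the kernel.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *err = "Write FW block to EEPROM failed";	/* example only */
	const char *sub = "Memory allocation failed";		/* example only */
	size_t total = strlen(err) + 2;		/* period + NUL */

	total += strlen(sub) + 2;		/* ", " separator */

	char *buf = malloc(total);
	if (!buf)
		return 1;

	/* "%s, %s." writes exactly total - 1 characters plus the NUL */
	int n = snprintf(buf, total, "%s, %s.", err, sub);
	printf("need %zu bytes, wrote %d chars: %s\n", total, n, buf);
	free(buf);
	return 0;
}
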
// SPDX-License-Identifier: GPL-2.0
/* RTT/RTO calculation.
 *
 * Adapted from TCP for AF_RXRPC by David Howells (dhowells@redhat.com)
 *
 * https://tools.ietf.org/html/rfc6298
 * https://tools.ietf.org/html/rfc1122#section-4.2.3.1
 * http://ccr.sigcomm.org/archive/1995/jan95/ccr-9501-partridge87.pdf
 */

#include <linux/net.h>
#include "ar-internal.h"

#define RXRPC_RTO_MAX		(120 * USEC_PER_SEC)
#define RXRPC_TIMEOUT_INIT	((unsigned int)(1 * USEC_PER_SEC)) /* RFC6298 2.1 initial RTO value */
#define rxrpc_jiffies32		((u32)jiffies)	/* As rxrpc_jiffies32 */

static u32 rxrpc_rto_min_us(struct rxrpc_call *call)
{
	return 200;
}

static u32 __rxrpc_set_rto(const struct rxrpc_call *call)
{
	return (call->srtt_us >> 3) + call->rttvar_us;
}

static u32 rxrpc_bound_rto(u32 rto)
{
	return clamp(200000, rto + 100000, RXRPC_RTO_MAX);
}

/*
 * Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void rxrpc_rtt_estimator(struct rxrpc_call *call, long sample_rtt_us)
{
	long m = sample_rtt_us; /* RTT */
	u32 srtt = call->srtt_us;

	/* The following amusing code comes from Jacobson's
	 * article in SIGCOMM '88. Note that rtt and mdev
	 * are scaled versions of rtt and mean deviation.
	 * This is designed to be as fast as possible
	 * m stands for "measurement".
	 *
	 * On a 1990 paper the rto value is changed to:
	 * RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO, when it should be decreased, increase
	 * too slowly, when it should be increased quickly, decrease too quickly
	 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
	 * does not matter how to _calculate_ it. Seems, it was trap
	 * that VJ failed to avoid. 8)
	 */
	if (srtt != 0) {
		m -= (srtt >> 3);	/* m is now error in rtt est */
		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (call->mdev_us >> 2);	/* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
*/ if (m > 0) m >>= 3; } else { m -= (call->mdev_us >> 2); /* similar update on mdev */ } call->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ if (call->mdev_us > call->mdev_max_us) { call->mdev_max_us = call->mdev_us; if (call->mdev_max_us > call->rttvar_us) call->rttvar_us = call->mdev_max_us; } } else { /* no previous measure. */ srtt = m << 3; /* take the measured time to be rtt */ call->mdev_us = m << 1; /* make sure rto = 3*rtt */ call->rttvar_us = umax(call->mdev_us, rxrpc_rto_min_us(call)); call->mdev_max_us = call->rttvar_us; } call->srtt_us = umax(srtt, 1); } /* * Calculate rto without backoff. This is the second half of Van Jacobson's * routine referred to above. */ static void rxrpc_set_rto(struct rxrpc_call *call) { u32 rto; /* 1. If rtt variance happened to be less 50msec, it is hallucination. * It cannot be less due to utterly erratic ACK generation made * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ * to do with delayed acks, because at cwnd>2 true delack timeout * is invisible. Actually, Linux-2.4 also generates erratic * ACKs in some circumstances. */ rto = __rxrpc_set_rto(call); /* 2. Fixups made earlier cannot be right. * If we do not estimate RTO correctly without them, * all the algo is pure shit and should be replaced * with correct one. It is exactly, which we pretend to do. */ /* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo * guarantees that rto is higher. */ call->rto_us = rxrpc_bound_rto(rto); } static void rxrpc_update_rtt_min(struct rxrpc_call *call, ktime_t resp_time, long rtt_us) { /* Window size 5mins in approx usec (ipv4.sysctl_tcp_min_rtt_wlen) */ u32 wlen_us = 5ULL * NSEC_PER_SEC / 1024; minmax_running_min(&call->min_rtt, wlen_us, resp_time / 1024, (u32)rtt_us ? : jiffies_to_usecs(1)); } static void rxrpc_ack_update_rtt(struct rxrpc_call *call, ktime_t resp_time, long rtt_us) { if (rtt_us < 0) return; /* Update RACK min RTT [RFC8985 6.1 Step 1]. */ rxrpc_update_rtt_min(call, resp_time, rtt_us); rxrpc_rtt_estimator(call, rtt_us); rxrpc_set_rto(call); /* Only reset backoff on valid RTT measurement [RFC6298]. */ call->backoff = 0; } /* * Add RTT information to cache. This is called in softirq mode and has * exclusive access to the call RTT data. */ void rxrpc_call_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, int rtt_slot, rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, ktime_t send_time, ktime_t resp_time) { s64 rtt_us; rtt_us = ktime_to_us(ktime_sub(resp_time, send_time)); if (rtt_us < 0) return; rxrpc_ack_update_rtt(call, resp_time, rtt_us); if (call->rtt_count < 3) call->rtt_count++; call->rtt_taken++; WRITE_ONCE(call->peer->recent_srtt_us, call->srtt_us / 8); WRITE_ONCE(call->peer->recent_rto_us, call->rto_us); trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial, rtt_us, call->srtt_us, call->rto_us); } /* * Get the retransmission timeout to set in nanoseconds, backing it off each * time we retransmit. */ ktime_t rxrpc_get_rto_backoff(struct rxrpc_call *call, bool retrans) { u64 timo_us; u32 backoff = READ_ONCE(call->backoff); timo_us = call->rto_us; timo_us <<= backoff; if (retrans && timo_us * 2 <= RXRPC_RTO_MAX) WRITE_ONCE(call->backoff, backoff + 1); if (timo_us < 1) timo_us = 1; return ns_to_ktime(timo_us * NSEC_PER_USEC); } void rxrpc_call_init_rtt(struct rxrpc_call *call) { call->rtt_last_req = KTIME_MIN; call->rto_us = RXRPC_TIMEOUT_INIT; call->mdev_us = RXRPC_TIMEOUT_INIT; call->backoff = 0; //minmax_reset(&call->rtt_min, rxrpc_jiffies32, ~0U); }
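
The estimator above keeps srtt scaled by 8 and mdev by 4, so srtt >> 3 is the current RTT estimate and the RTO is srtt/8 + rttvar. The following minimal userspace sketch replays that fixed-point smoothing on a few sample RTTs; it is illustrative only, the variable names and sample values are made up, and the Eifel-style damping branch of the original is omitted.

#include <stdio.h>

static long srtt_x8;	/* smoothed RTT, scaled by 8 */
static long mdev_x4;	/* mean deviation, scaled by 4 */
static long rttvar;

static void rtt_sample(long m)
{
	if (srtt_x8) {
		m -= srtt_x8 >> 3;	/* error vs current estimate */
		srtt_x8 += m;		/* srtt = 7/8 srtt + 1/8 new */
		if (m < 0)
			m = -m;		/* finer-gain (Eifel-like) step omitted */
		m -= mdev_x4 >> 2;
		mdev_x4 += m;		/* mdev = 3/4 mdev + 1/4 |error| */
		if (mdev_x4 > rttvar)
			rttvar = mdev_x4;
	} else {
		srtt_x8 = m << 3;	/* first sample seeds the estimate */
		mdev_x4 = m << 1;	/* so the initial rto is about 3*rtt */
		rttvar = mdev_x4;
	}
}

int main(void)
{
	long samples[] = { 500, 800, 400, 1200 };	/* RTT samples in usec */

	for (int i = 0; i < 4; i++) {
		rtt_sample(samples[i]);
		printf("sample %ldus -> srtt %ldus, rto %ldus\n",
		       samples[i], srtt_x8 >> 3, (srtt_x8 >> 3) + rttvar);
	}
	return 0;
}
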
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
*/ #include <linux/module.h> #include <linux/netlink.h> #include <linux/qrtr.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/spinlock.h> #include <linux/wait.h> #include <net/sock.h> #include "qrtr.h" #define QRTR_PROTO_VER_1 1 #define QRTR_PROTO_VER_2 3 /* auto-bind range */ #define QRTR_MIN_EPH_SOCKET 0x4000 #define QRTR_MAX_EPH_SOCKET 0x7fff #define QRTR_EPH_PORT_RANGE \ XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET) #define QRTR_PORT_CTRL_LEGACY 0xffff /** * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1 * @version: protocol version * @type: packet type; one of QRTR_TYPE_* * @src_node_id: source node * @src_port_id: source port * @confirm_rx: boolean; whether a resume-tx packet should be send in reply * @size: length of packet, excluding this header * @dst_node_id: destination node * @dst_port_id: destination port */ struct qrtr_hdr_v1 { __le32 version; __le32 type; __le32 src_node_id; __le32 src_port_id; __le32 confirm_rx; __le32 size; __le32 dst_node_id; __le32 dst_port_id; } __packed; /** * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions * @version: protocol version * @type: packet type; one of QRTR_TYPE_* * @flags: bitmask of QRTR_FLAGS_* * @optlen: length of optional header data * @size: length of packet, excluding this header and optlen * @src_node_id: source node * @src_port_id: source port * @dst_node_id: destination node * @dst_port_id: destination port */ struct qrtr_hdr_v2 { u8 version; u8 type; u8 flags; u8 optlen; __le32 size; __le16 src_node_id; __le16 src_port_id; __le16 dst_node_id; __le16 dst_port_id; }; #define QRTR_FLAGS_CONFIRM_RX BIT(0) struct qrtr_cb { u32 src_node; u32 src_port; u32 dst_node; u32 dst_port; u8 type; u8 confirm_rx; }; #define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \ sizeof(struct qrtr_hdr_v2)) struct qrtr_sock { /* WARNING: sk must be the first member */ struct sock sk; struct sockaddr_qrtr us; struct sockaddr_qrtr peer; }; static inline struct qrtr_sock *qrtr_sk(struct sock *sk) { BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0); return container_of(sk, struct qrtr_sock, sk); } static unsigned int qrtr_local_nid = 1; /* for node ids */ static RADIX_TREE(qrtr_nodes, GFP_ATOMIC); static DEFINE_SPINLOCK(qrtr_nodes_lock); /* broadcast list */ static LIST_HEAD(qrtr_all_nodes); /* lock for qrtr_all_nodes and node reference */ static DEFINE_MUTEX(qrtr_node_lock); /* local port allocation management */ static DEFINE_XARRAY_ALLOC(qrtr_ports); /** * struct qrtr_node - endpoint node * @ep_lock: lock for endpoint management and callbacks * @ep: endpoint * @ref: reference count for node * @nid: node id * @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port * @qrtr_tx_lock: lock for qrtr_tx_flow inserts * @rx_queue: receive queue * @item: list item for broadcast list */ struct qrtr_node { struct mutex ep_lock; struct qrtr_endpoint *ep; struct kref ref; unsigned int nid; struct radix_tree_root qrtr_tx_flow; struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */ struct sk_buff_head rx_queue; struct list_head item; }; /** * struct qrtr_tx_flow - tx flow control * @resume_tx: waiters for a resume tx from the remote * @pending: number of waiting senders * @tx_failed: indicates that a message with confirm_rx flag was lost */ struct qrtr_tx_flow { struct wait_queue_head resume_tx; int pending; int tx_failed; }; #define QRTR_TX_FLOW_HIGH 10 #define QRTR_TX_FLOW_LOW 5 static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type, struct sockaddr_qrtr *from, struct 
sockaddr_qrtr *to); static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type, struct sockaddr_qrtr *from, struct sockaddr_qrtr *to); static struct qrtr_sock *qrtr_port_lookup(int port); static void qrtr_port_put(struct qrtr_sock *ipc); /* Release node resources and free the node. * * Do not call directly, use qrtr_node_release. To be used with * kref_put_mutex. As such, the node mutex is expected to be locked on call. */ static void __qrtr_node_release(struct kref *kref) { struct qrtr_node *node = container_of(kref, struct qrtr_node, ref); struct radix_tree_iter iter; struct qrtr_tx_flow *flow; unsigned long flags; void __rcu **slot; spin_lock_irqsave(&qrtr_nodes_lock, flags); /* If the node is a bridge for other nodes, there are possibly * multiple entries pointing to our released node, delete them all. */ radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) { if (*slot == node) radix_tree_iter_delete(&qrtr_nodes, &iter, slot); } spin_unlock_irqrestore(&qrtr_nodes_lock, flags); list_del(&node->item); mutex_unlock(&qrtr_node_lock); skb_queue_purge(&node->rx_queue); /* Free tx flow counters */ radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) { flow = *slot; radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot); kfree(flow); } kfree(node); } /* Increment reference to node. */ static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node) { if (node) kref_get(&node->ref); return node; } /* Decrement reference to node and release as necessary. */ static void qrtr_node_release(struct qrtr_node *node) { if (!node) return; kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock); } /** * qrtr_tx_resume() - reset flow control counter * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on * @skb: resume_tx packet */ static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb) { struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)skb->data; u64 remote_node = le32_to_cpu(pkt->client.node); u32 remote_port = le32_to_cpu(pkt->client.port); struct qrtr_tx_flow *flow; unsigned long key; key = remote_node << 32 | remote_port; rcu_read_lock(); flow = radix_tree_lookup(&node->qrtr_tx_flow, key); rcu_read_unlock(); if (flow) { spin_lock(&flow->resume_tx.lock); flow->pending = 0; spin_unlock(&flow->resume_tx.lock); wake_up_interruptible_all(&flow->resume_tx); } consume_skb(skb); } /** * qrtr_tx_wait() - flow control for outgoing packets * @node: qrtr_node that the packet is to be send to * @dest_node: node id of the destination * @dest_port: port number of the destination * @type: type of message * * The flow control scheme is based around the low and high "watermarks". When * the low watermark is passed the confirm_rx flag is set on the outgoing * message, which will trigger the remote to send a control message of the type * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit * further transmision should be paused. 
* * Return: 1 if confirm_rx should be set, 0 otherwise or errno failure */ static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port, int type) { unsigned long key = (u64)dest_node << 32 | dest_port; struct qrtr_tx_flow *flow; int confirm_rx = 0; int ret; /* Never set confirm_rx on non-data packets */ if (type != QRTR_TYPE_DATA) return 0; mutex_lock(&node->qrtr_tx_lock); flow = radix_tree_lookup(&node->qrtr_tx_flow, key); if (!flow) { flow = kzalloc(sizeof(*flow), GFP_KERNEL); if (flow) { init_waitqueue_head(&flow->resume_tx); if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) { kfree(flow); flow = NULL; } } } mutex_unlock(&node->qrtr_tx_lock); /* Set confirm_rx if we where unable to find and allocate a flow */ if (!flow) return 1; spin_lock_irq(&flow->resume_tx.lock); ret = wait_event_interruptible_locked_irq(flow->resume_tx, flow->pending < QRTR_TX_FLOW_HIGH || flow->tx_failed || !node->ep); if (ret < 0) { confirm_rx = ret; } else if (!node->ep) { confirm_rx = -EPIPE; } else if (flow->tx_failed) { flow->tx_failed = 0; confirm_rx = 1; } else { flow->pending++; confirm_rx = flow->pending == QRTR_TX_FLOW_LOW; } spin_unlock_irq(&flow->resume_tx.lock); return confirm_rx; } /** * qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed * @node: qrtr_node that the packet is to be send to * @dest_node: node id of the destination * @dest_port: port number of the destination * * Signal that the transmission of a message with confirm_rx flag failed. The * flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH, * at which point transmission would stall forever waiting for the resume TX * message associated with the dropped confirm_rx message. * Work around this by marking the flow as having a failed transmission and * cause the next transmission attempt to be sent with the confirm_rx. */ static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node, int dest_port) { unsigned long key = (u64)dest_node << 32 | dest_port; struct qrtr_tx_flow *flow; rcu_read_lock(); flow = radix_tree_lookup(&node->qrtr_tx_flow, key); rcu_read_unlock(); if (flow) { spin_lock_irq(&flow->resume_tx.lock); flow->tx_failed = 1; spin_unlock_irq(&flow->resume_tx.lock); } } /* Pass an outgoing packet socket buffer to the endpoint driver. 
*/ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type, struct sockaddr_qrtr *from, struct sockaddr_qrtr *to) { struct qrtr_hdr_v1 *hdr; size_t len = skb->len; int rc, confirm_rx; confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type); if (confirm_rx < 0) { kfree_skb(skb); return confirm_rx; } hdr = skb_push(skb, sizeof(*hdr)); hdr->version = cpu_to_le32(QRTR_PROTO_VER_1); hdr->type = cpu_to_le32(type); hdr->src_node_id = cpu_to_le32(from->sq_node); hdr->src_port_id = cpu_to_le32(from->sq_port); if (to->sq_port == QRTR_PORT_CTRL) { hdr->dst_node_id = cpu_to_le32(node->nid); hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL); } else { hdr->dst_node_id = cpu_to_le32(to->sq_node); hdr->dst_port_id = cpu_to_le32(to->sq_port); } hdr->size = cpu_to_le32(len); hdr->confirm_rx = !!confirm_rx; rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); if (!rc) { mutex_lock(&node->ep_lock); rc = -ENODEV; if (node->ep) rc = node->ep->xmit(node->ep, skb); else kfree_skb(skb); mutex_unlock(&node->ep_lock); } /* Need to ensure that a subsequent message carries the otherwise lost * confirm_rx flag if we dropped this one */ if (rc && confirm_rx) qrtr_tx_flow_failed(node, to->sq_node, to->sq_port); return rc; } /* Lookup node by id. * * callers must release with qrtr_node_release() */ static struct qrtr_node *qrtr_node_lookup(unsigned int nid) { struct qrtr_node *node; unsigned long flags; mutex_lock(&qrtr_node_lock); spin_lock_irqsave(&qrtr_nodes_lock, flags); node = radix_tree_lookup(&qrtr_nodes, nid); node = qrtr_node_acquire(node); spin_unlock_irqrestore(&qrtr_nodes_lock, flags); mutex_unlock(&qrtr_node_lock); return node; } /* Assign node id to node. * * This is mostly useful for automatic node id assignment, based on * the source id in the incoming packet. 
*/ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid) { unsigned long flags; if (nid == QRTR_EP_NID_AUTO) return; spin_lock_irqsave(&qrtr_nodes_lock, flags); radix_tree_insert(&qrtr_nodes, nid, node); if (node->nid == QRTR_EP_NID_AUTO) node->nid = nid; spin_unlock_irqrestore(&qrtr_nodes_lock, flags); } /** * qrtr_endpoint_post() - post incoming data * @ep: endpoint handle * @data: data pointer * @len: size of data in bytes * * Return: 0 on success; negative error code on failure */ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) { struct qrtr_node *node = ep->node; const struct qrtr_hdr_v1 *v1; const struct qrtr_hdr_v2 *v2; struct qrtr_sock *ipc; struct sk_buff *skb; struct qrtr_cb *cb; size_t size; unsigned int ver; size_t hdrlen; if (len == 0 || len & 3) return -EINVAL; skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN); if (!skb) return -ENOMEM; cb = (struct qrtr_cb *)skb->cb; /* Version field in v1 is little endian, so this works for both cases */ ver = *(u8*)data; switch (ver) { case QRTR_PROTO_VER_1: if (len < sizeof(*v1)) goto err; v1 = data; hdrlen = sizeof(*v1); cb->type = le32_to_cpu(v1->type); cb->src_node = le32_to_cpu(v1->src_node_id); cb->src_port = le32_to_cpu(v1->src_port_id); cb->confirm_rx = !!v1->confirm_rx; cb->dst_node = le32_to_cpu(v1->dst_node_id); cb->dst_port = le32_to_cpu(v1->dst_port_id); size = le32_to_cpu(v1->size); break; case QRTR_PROTO_VER_2: if (len < sizeof(*v2)) goto err; v2 = data; hdrlen = sizeof(*v2) + v2->optlen; cb->type = v2->type; cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX); cb->src_node = le16_to_cpu(v2->src_node_id); cb->src_port = le16_to_cpu(v2->src_port_id); cb->dst_node = le16_to_cpu(v2->dst_node_id); cb->dst_port = le16_to_cpu(v2->dst_port_id); if (cb->src_port == (u16)QRTR_PORT_CTRL) cb->src_port = QRTR_PORT_CTRL; if (cb->dst_port == (u16)QRTR_PORT_CTRL) cb->dst_port = QRTR_PORT_CTRL; size = le32_to_cpu(v2->size); break; default: pr_err("qrtr: Invalid version %d\n", ver); goto err; } if (cb->dst_port == QRTR_PORT_CTRL_LEGACY) cb->dst_port = QRTR_PORT_CTRL; if (!size || len != ALIGN(size, 4) + hdrlen) goto err; if ((cb->type == QRTR_TYPE_NEW_SERVER || cb->type == QRTR_TYPE_RESUME_TX) && size < sizeof(struct qrtr_ctrl_pkt)) goto err; if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA && cb->type != QRTR_TYPE_RESUME_TX) goto err; skb_put_data(skb, data + hdrlen, size); qrtr_node_assign(node, cb->src_node); if (cb->type == QRTR_TYPE_NEW_SERVER) { /* Remote node endpoint can bridge other distant nodes */ const struct qrtr_ctrl_pkt *pkt; pkt = data + hdrlen; qrtr_node_assign(node, le32_to_cpu(pkt->server.node)); } if (cb->type == QRTR_TYPE_RESUME_TX) { qrtr_tx_resume(node, skb); } else { ipc = qrtr_port_lookup(cb->dst_port); if (!ipc) goto err; if (sock_queue_rcv_skb(&ipc->sk, skb)) { qrtr_port_put(ipc); goto err; } qrtr_port_put(ipc); } return 0; err: kfree_skb(skb); return -EINVAL; } EXPORT_SYMBOL_GPL(qrtr_endpoint_post); /** * qrtr_alloc_ctrl_packet() - allocate control packet skb * @pkt: reference to qrtr_ctrl_pkt pointer * @flags: the type of memory to allocate * * Returns newly allocated sk_buff, or NULL on failure * * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and * on success returns a reference to the control packet in @pkt. 
*/ static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt, gfp_t flags) { const int pkt_len = sizeof(struct qrtr_ctrl_pkt); struct sk_buff *skb; skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, flags); if (!skb) return NULL; skb_reserve(skb, QRTR_HDR_MAX_SIZE); *pkt = skb_put_zero(skb, pkt_len); return skb; } /** * qrtr_endpoint_register() - register a new endpoint * @ep: endpoint to register * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment * Return: 0 on success; negative error code on failure * * The specified endpoint must have the xmit function pointer set on call. */ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid) { struct qrtr_node *node; if (!ep || !ep->xmit) return -EINVAL; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; kref_init(&node->ref); mutex_init(&node->ep_lock); skb_queue_head_init(&node->rx_queue); node->nid = QRTR_EP_NID_AUTO; node->ep = ep; INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL); mutex_init(&node->qrtr_tx_lock); qrtr_node_assign(node, nid); mutex_lock(&qrtr_node_lock); list_add(&node->item, &qrtr_all_nodes); mutex_unlock(&qrtr_node_lock); ep->node = node; return 0; } EXPORT_SYMBOL_GPL(qrtr_endpoint_register); /** * qrtr_endpoint_unregister - unregister endpoint * @ep: endpoint to unregister */ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep) { struct qrtr_node *node = ep->node; struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL}; struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL}; struct radix_tree_iter iter; struct qrtr_ctrl_pkt *pkt; struct qrtr_tx_flow *flow; struct sk_buff *skb; unsigned long flags; void __rcu **slot; mutex_lock(&node->ep_lock); node->ep = NULL; mutex_unlock(&node->ep_lock); /* Notify the local controller about the event */ spin_lock_irqsave(&qrtr_nodes_lock, flags); radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) { if (*slot != node) continue; src.sq_node = iter.index; skb = qrtr_alloc_ctrl_packet(&pkt, GFP_ATOMIC); if (skb) { pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE); qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst); } } spin_unlock_irqrestore(&qrtr_nodes_lock, flags); /* Wake up any transmitters waiting for resume-tx from the node */ mutex_lock(&node->qrtr_tx_lock); radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) { flow = *slot; wake_up_interruptible_all(&flow->resume_tx); } mutex_unlock(&node->qrtr_tx_lock); qrtr_node_release(node); ep->node = NULL; } EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister); /* Lookup socket by port. * * Callers must release with qrtr_port_put() */ static struct qrtr_sock *qrtr_port_lookup(int port) { struct qrtr_sock *ipc; if (port == QRTR_PORT_CTRL) port = 0; rcu_read_lock(); ipc = xa_load(&qrtr_ports, port); if (ipc) sock_hold(&ipc->sk); rcu_read_unlock(); return ipc; } /* Release acquired socket. */ static void qrtr_port_put(struct qrtr_sock *ipc) { sock_put(&ipc->sk); } /* Remove port assignment. 
*/ static void qrtr_port_remove(struct qrtr_sock *ipc) { struct qrtr_ctrl_pkt *pkt; struct sk_buff *skb; int port = ipc->us.sq_port; struct sockaddr_qrtr to; to.sq_family = AF_QIPCRTR; to.sq_node = QRTR_NODE_BCAST; to.sq_port = QRTR_PORT_CTRL; skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL); if (skb) { pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT); pkt->client.node = cpu_to_le32(ipc->us.sq_node); pkt->client.port = cpu_to_le32(ipc->us.sq_port); skb_set_owner_w(skb, &ipc->sk); qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us, &to); } if (port == QRTR_PORT_CTRL) port = 0; __sock_put(&ipc->sk); xa_erase(&qrtr_ports, port); /* Ensure that if qrtr_port_lookup() did enter the RCU read section we * wait for it to up increment the refcount */ synchronize_rcu(); } /* Assign port number to socket. * * Specify port in the integer pointed to by port, and it will be adjusted * on return as necesssary. * * Port may be: * 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET] * <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN * >QRTR_MIN_EPH_SOCKET: Specified; available to all */ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) { int rc; if (!*port) { rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_EPH_PORT_RANGE, GFP_KERNEL); } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) { rc = -EACCES; } else if (*port == QRTR_PORT_CTRL) { rc = xa_insert(&qrtr_ports, 0, ipc, GFP_KERNEL); } else { rc = xa_insert(&qrtr_ports, *port, ipc, GFP_KERNEL); } if (rc == -EBUSY) return -EADDRINUSE; else if (rc < 0) return rc; sock_hold(&ipc->sk); return 0; } /* Reset all non-control ports */ static void qrtr_reset_ports(void) { struct qrtr_sock *ipc; unsigned long index; rcu_read_lock(); xa_for_each_start(&qrtr_ports, index, ipc, 1) { sock_hold(&ipc->sk); ipc->sk.sk_err = ENETRESET; sk_error_report(&ipc->sk); sock_put(&ipc->sk); } rcu_read_unlock(); } /* Bind socket to address. * * Socket should be locked upon call. */ static int __qrtr_bind(struct socket *sock, const struct sockaddr_qrtr *addr, int zapped) { struct qrtr_sock *ipc = qrtr_sk(sock->sk); struct sock *sk = sock->sk; int port; int rc; /* rebinding ok */ if (!zapped && addr->sq_port == ipc->us.sq_port) return 0; port = addr->sq_port; rc = qrtr_port_assign(ipc, &port); if (rc) return rc; /* unbind previous, if any */ if (!zapped) qrtr_port_remove(ipc); ipc->us.sq_port = port; sock_reset_flag(sk, SOCK_ZAPPED); /* Notify all open ports about the new controller */ if (port == QRTR_PORT_CTRL) qrtr_reset_ports(); return 0; } /* Auto bind to an ephemeral port. */ static int qrtr_autobind(struct socket *sock) { struct sock *sk = sock->sk; struct sockaddr_qrtr addr; if (!sock_flag(sk, SOCK_ZAPPED)) return 0; addr.sq_family = AF_QIPCRTR; addr.sq_node = qrtr_local_nid; addr.sq_port = 0; return __qrtr_bind(sock, &addr, 1); } /* Bind socket to specified sockaddr. */ static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len) { DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr); struct qrtr_sock *ipc = qrtr_sk(sock->sk); struct sock *sk = sock->sk; int rc; if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR) return -EINVAL; if (addr->sq_node != ipc->us.sq_node) return -EINVAL; lock_sock(sk); rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED)); release_sock(sk); return rc; } /* Queue packet to local peer socket. 
*/ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type, struct sockaddr_qrtr *from, struct sockaddr_qrtr *to) { struct qrtr_sock *ipc; struct qrtr_cb *cb; ipc = qrtr_port_lookup(to->sq_port); if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */ if (ipc) qrtr_port_put(ipc); kfree_skb(skb); return -ENODEV; } cb = (struct qrtr_cb *)skb->cb; cb->src_node = from->sq_node; cb->src_port = from->sq_port; if (sock_queue_rcv_skb(&ipc->sk, skb)) { qrtr_port_put(ipc); kfree_skb(skb); return -ENOSPC; } qrtr_port_put(ipc); return 0; } /* Queue packet for broadcast. */ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type, struct sockaddr_qrtr *from, struct sockaddr_qrtr *to) { struct sk_buff *skbn; mutex_lock(&qrtr_node_lock); list_for_each_entry(node, &qrtr_all_nodes, item) { skbn = pskb_copy(skb, GFP_KERNEL); if (!skbn) break; skb_set_owner_w(skbn, skb->sk); qrtr_node_enqueue(node, skbn, type, from, to); } mutex_unlock(&qrtr_node_lock); qrtr_local_enqueue(NULL, skb, type, from, to); return 0; } static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name); int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int, struct sockaddr_qrtr *, struct sockaddr_qrtr *); __le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA); struct qrtr_sock *ipc = qrtr_sk(sock->sk); struct sock *sk = sock->sk; struct qrtr_node *node; struct sk_buff *skb; size_t plen; u32 type; int rc; if (msg->msg_flags & ~(MSG_DONTWAIT)) return -EINVAL; if (len > 65535) return -EMSGSIZE; lock_sock(sk); if (addr) { if (msg->msg_namelen < sizeof(*addr)) { release_sock(sk); return -EINVAL; } if (addr->sq_family != AF_QIPCRTR) { release_sock(sk); return -EINVAL; } rc = qrtr_autobind(sock); if (rc) { release_sock(sk); return rc; } } else if (sk->sk_state == TCP_ESTABLISHED) { addr = &ipc->peer; } else { release_sock(sk); return -ENOTCONN; } node = NULL; if (addr->sq_node == QRTR_NODE_BCAST) { if (addr->sq_port != QRTR_PORT_CTRL && qrtr_local_nid != QRTR_NODE_BCAST) { release_sock(sk); return -ENOTCONN; } enqueue_fn = qrtr_bcast_enqueue; } else if (addr->sq_node == ipc->us.sq_node) { enqueue_fn = qrtr_local_enqueue; } else { node = qrtr_node_lookup(addr->sq_node); if (!node) { release_sock(sk); return -ECONNRESET; } enqueue_fn = qrtr_node_enqueue; } plen = (len + 3) & ~3; skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE, msg->msg_flags & MSG_DONTWAIT, &rc); if (!skb) { rc = -ENOMEM; goto out_node; } skb_reserve(skb, QRTR_HDR_MAX_SIZE); rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc) { kfree_skb(skb); goto out_node; } if (ipc->us.sq_port == QRTR_PORT_CTRL) { if (len < 4) { rc = -EINVAL; kfree_skb(skb); goto out_node; } /* control messages already require the type as 'command' */ skb_copy_bits(skb, 0, &qrtr_type, 4); } type = le32_to_cpu(qrtr_type); rc = enqueue_fn(node, skb, type, &ipc->us, addr); if (rc >= 0) rc = len; out_node: qrtr_node_release(node); release_sock(sk); return rc; } static int qrtr_send_resume_tx(struct qrtr_cb *cb) { struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port }; struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port }; struct qrtr_ctrl_pkt *pkt; struct qrtr_node *node; struct sk_buff *skb; int ret; node = qrtr_node_lookup(remote.sq_node); if (!node) return -EINVAL; skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL); if (!skb) return -ENOMEM; pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX); pkt->client.node = cpu_to_le32(cb->dst_node); 
pkt->client.port = cpu_to_le32(cb->dst_port); ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, &local, &remote); qrtr_node_release(node); return ret; } static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name); struct sock *sk = sock->sk; struct sk_buff *skb; struct qrtr_cb *cb; int copied, rc; lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) { release_sock(sk); return -EADDRNOTAVAIL; } skb = skb_recv_datagram(sk, flags, &rc); if (!skb) { release_sock(sk); return rc; } cb = (struct qrtr_cb *)skb->cb; copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } rc = skb_copy_datagram_msg(skb, 0, msg, copied); if (rc < 0) goto out; rc = copied; if (addr) { /* There is an anonymous 2-byte hole after sq_family, * make sure to clear it. */ memset(addr, 0, sizeof(*addr)); addr->sq_family = AF_QIPCRTR; addr->sq_node = cb->src_node; addr->sq_port = cb->src_port; msg->msg_namelen = sizeof(*addr); } out: if (cb->confirm_rx) qrtr_send_resume_tx(cb); skb_free_datagram(sk, skb); release_sock(sk); return rc; } static int qrtr_connect(struct socket *sock, struct sockaddr *saddr, int len, int flags) { DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr); struct qrtr_sock *ipc = qrtr_sk(sock->sk); struct sock *sk = sock->sk; int rc; if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR) return -EINVAL; lock_sock(sk); sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rc = qrtr_autobind(sock); if (rc) { release_sock(sk); return rc; } ipc->peer = *addr; sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; release_sock(sk); return 0; } static int qrtr_getname(struct socket *sock, struct sockaddr *saddr, int peer) { struct qrtr_sock *ipc = qrtr_sk(sock->sk); struct sockaddr_qrtr qaddr; struct sock *sk = sock->sk; lock_sock(sk); if (peer) { if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); return -ENOTCONN; } qaddr = ipc->peer; } else { qaddr = ipc->us; } release_sock(sk); qaddr.sq_family = AF_QIPCRTR; memcpy(saddr, &qaddr, sizeof(qaddr)); return sizeof(qaddr); } static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; struct qrtr_sock *ipc = qrtr_sk(sock->sk); struct sock *sk = sock->sk; struct sockaddr_qrtr *sq; struct sk_buff *skb; struct ifreq ifr; long len = 0; int rc = 0; lock_sock(sk); switch (cmd) { case TIOCOUTQ: len = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (len < 0) len = 0; rc = put_user(len, (int __user *)argp); break; case TIOCINQ: skb = skb_peek(&sk->sk_receive_queue); if (skb) len = skb->len; rc = put_user(len, (int __user *)argp); break; case SIOCGIFADDR: if (get_user_ifreq(&ifr, NULL, argp)) { rc = -EFAULT; break; } sq = (struct sockaddr_qrtr *)&ifr.ifr_addr; *sq = ipc->us; if (put_user_ifreq(&ifr, argp)) { rc = -EFAULT; break; } break; case SIOCADDRT: case SIOCDELRT: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: rc = -EINVAL; break; default: rc = -ENOIOCTLCMD; break; } release_sock(sk); return rc; } static int qrtr_release(struct socket *sock) { struct sock *sk = sock->sk; struct qrtr_sock *ipc; if (!sk) return 0; lock_sock(sk); ipc = qrtr_sk(sk); sk->sk_shutdown = SHUTDOWN_MASK; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_orphan(sk); sock->sk = NULL; if (!sock_flag(sk, SOCK_ZAPPED)) qrtr_port_remove(ipc); 
skb_queue_purge(&sk->sk_receive_queue); release_sock(sk); sock_put(sk); return 0; } static const struct proto_ops qrtr_proto_ops = { .owner = THIS_MODULE, .family = AF_QIPCRTR, .bind = qrtr_bind, .connect = qrtr_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .listen = sock_no_listen, .sendmsg = qrtr_sendmsg, .recvmsg = qrtr_recvmsg, .getname = qrtr_getname, .ioctl = qrtr_ioctl, .gettstamp = sock_gettstamp, .poll = datagram_poll, .shutdown = sock_no_shutdown, .release = qrtr_release, .mmap = sock_no_mmap, }; static struct proto qrtr_proto = { .name = "QIPCRTR", .owner = THIS_MODULE, .obj_size = sizeof(struct qrtr_sock), }; static int qrtr_create(struct net *net, struct socket *sock, int protocol, int kern) { struct qrtr_sock *ipc; struct sock *sk; if (sock->type != SOCK_DGRAM) return -EPROTOTYPE; sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern); if (!sk) return -ENOMEM; sock_set_flag(sk, SOCK_ZAPPED); sock_init_data(sock, sk); sock->ops = &qrtr_proto_ops; ipc = qrtr_sk(sk); ipc->us.sq_family = AF_QIPCRTR; ipc->us.sq_node = qrtr_local_nid; ipc->us.sq_port = 0; return 0; } static const struct net_proto_family qrtr_family = { .owner = THIS_MODULE, .family = AF_QIPCRTR, .create = qrtr_create, }; static int __init qrtr_proto_init(void) { int rc; rc = proto_register(&qrtr_proto, 1); if (rc) return rc; rc = sock_register(&qrtr_family); if (rc) goto err_proto; rc = qrtr_ns_init(); if (rc) goto err_sock; return 0; err_sock: sock_unregister(qrtr_family.family); err_proto: proto_unregister(&qrtr_proto); return rc; } postcore_initcall(qrtr_proto_init); static void __exit qrtr_proto_fini(void) { qrtr_ns_remove(); sock_unregister(qrtr_family.family); proto_unregister(&qrtr_proto); } module_exit(qrtr_proto_fini); MODULE_DESCRIPTION("Qualcomm IPC-router driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_NETPROTO(PF_QIPCRTR);
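/*
 * For reference, the socket family implemented above is driven from user space
 * as an ordinary datagram socket.  A minimal sketch, assuming placeholder
 * destination node/port values (a real destination would be some service
 * registered with the name server); error handling is trimmed:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/qrtr.h>
 *
 *	#ifndef AF_QIPCRTR
 *	#define AF_QIPCRTR 42
 *	#endif
 *
 *	int main(void)
 *	{
 *		struct sockaddr_qrtr self, dst;
 *		socklen_t len = sizeof(self);
 *		const char msg[] = "hello";
 *		int sk;
 *
 *		sk = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
 *		if (sk < 0)
 *			return 1;
 *
 *		memset(&dst, 0, sizeof(dst));
 *		dst.sq_family = AF_QIPCRTR;
 *		dst.sq_node = 1;	// placeholder: node hosting a service
 *		dst.sq_port = 42;	// placeholder: that service's port
 *
 *		// qrtr_sendmsg() auto-binds an ephemeral local port on first use.
 *		sendto(sk, msg, sizeof(msg), 0,
 *		       (struct sockaddr *)&dst, sizeof(dst));
 *
 *		getsockname(sk, (struct sockaddr *)&self, &len);
 *		printf("local node %u, port %u\n", self.sq_node, self.sq_port);
 *		return 0;
 *	}
 */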
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/proc/net.c
 *
 *  Copyright (C) 2007
 *
 *  Author: Eric Biederman <ebiederm@xmission.com>
 *
 *  proc net directory handling functions
 */
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/mount.h>
#include <linux/nsproxy.h>
#include <linux/uidgid.h>
#include <net/net_namespace.h>
#include <linux/seq_file.h>

#include "internal.h"

static inline struct net *PDE_NET(struct proc_dir_entry *pde)
{
	return pde->parent->data;
}

static struct net *get_proc_net(const struct inode *inode)
{
	return maybe_get_net(PDE_NET(PDE(inode)));
}

static int seq_open_net(struct inode *inode, struct file *file)
{
	unsigned int state_size = PDE(inode)->state_size;
	struct seq_net_private *p;
	struct net *net;

	WARN_ON_ONCE(state_size < sizeof(*p));

	if (file->f_mode & FMODE_WRITE && !PDE(inode)->write)
		return -EACCES;

	net = get_proc_net(inode);
	if (!net)
		return -ENXIO;

	p = __seq_open_private(file, PDE(inode)->seq_ops, state_size);
	if (!p) {
		put_net(net);
		return -ENOMEM;
	}
#ifdef CONFIG_NET_NS
	p->net = net;
	netns_tracker_alloc(net, &p->ns_tracker, GFP_KERNEL);
#endif
	return 0;
}

static void seq_file_net_put_net(struct seq_file *seq)
{
#ifdef CONFIG_NET_NS
	struct seq_net_private *priv = seq->private;

	put_net_track(priv->net, &priv->ns_tracker);
#else
	put_net(&init_net);
#endif
}

static int seq_release_net(struct inode *ino, struct file *f)
{
	struct seq_file *seq = f->private_data;

	seq_file_net_put_net(seq);
	seq_release_private(ino, f);
	return 0;
}

static const struct proc_ops proc_net_seq_ops = {
	.proc_open	= seq_open_net,
	.proc_read	= seq_read,
	.proc_write	=
proc_simple_write, .proc_lseek = seq_lseek, .proc_release = seq_release_net, }; int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux) { #ifdef CONFIG_NET_NS struct seq_net_private *p = priv_data; p->net = get_net_track(current->nsproxy->net_ns, &p->ns_tracker, GFP_KERNEL); #endif return 0; } void bpf_iter_fini_seq_net(void *priv_data) { #ifdef CONFIG_NET_NS struct seq_net_private *p = priv_data; put_net_track(p->net, &p->ns_tracker); #endif } struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct seq_operations *ops, unsigned int state_size, void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; pde_force_lookup(p); p->proc_ops = &proc_net_seq_ops; p->seq_ops = ops; p->state_size = state_size; return proc_register(parent, p); } EXPORT_SYMBOL_GPL(proc_create_net_data); /** * proc_create_net_data_write - Create a writable net_ns-specific proc file * @name: The name of the file. * @mode: The file's access mode. * @parent: The parent directory in which to create. * @ops: The seq_file ops with which to read the file. * @write: The write method with which to 'modify' the file. * @state_size: The size of the per-file private state to allocate. * @data: Data for retrieval by pde_data(). * * Create a network namespaced proc file in the @parent directory with the * specified @name and @mode that allows reading of a file that displays a * series of elements and also provides for the file accepting writes that have * some arbitrary effect. * * The functions in the @ops table are used to iterate over items to be * presented and extract the readable content using the seq_file interface. * * The @write function is called with the data copied into a kernel space * scratch buffer and has a NUL appended for convenience. The buffer may be * modified by the @write function. @write should return 0 on success. * * The @data value is accessible from the @show and @write functions by calling * pde_data() on the file inode. The network namespace must be accessed by * calling seq_file_net() on the seq_file struct. 
*/ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct seq_operations *ops, proc_write_t write, unsigned int state_size, void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; pde_force_lookup(p); p->proc_ops = &proc_net_seq_ops; p->seq_ops = ops; p->state_size = state_size; p->write = write; return proc_register(parent, p); } EXPORT_SYMBOL_GPL(proc_create_net_data_write); static int single_open_net(struct inode *inode, struct file *file) { struct proc_dir_entry *de = PDE(inode); struct net *net; int err; net = get_proc_net(inode); if (!net) return -ENXIO; err = single_open(file, de->single_show, net); if (err) put_net(net); return err; } static int single_release_net(struct inode *ino, struct file *f) { struct seq_file *seq = f->private_data; put_net(seq->private); return single_release(ino, f); } static const struct proc_ops proc_net_single_ops = { .proc_open = single_open_net, .proc_read = seq_read, .proc_write = proc_simple_write, .proc_lseek = seq_lseek, .proc_release = single_release_net, }; struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode, struct proc_dir_entry *parent, int (*show)(struct seq_file *, void *), void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; pde_force_lookup(p); p->proc_ops = &proc_net_single_ops; p->single_show = show; return proc_register(parent, p); } EXPORT_SYMBOL_GPL(proc_create_net_single); /** * proc_create_net_single_write - Create a writable net_ns-specific proc file * @name: The name of the file. * @mode: The file's access mode. * @parent: The parent directory in which to create. * @show: The seqfile show method with which to read the file. * @write: The write method with which to 'modify' the file. * @data: Data for retrieval by pde_data(). * * Create a network-namespaced proc file in the @parent directory with the * specified @name and @mode that allows reading of a file that displays a * single element rather than a series and also provides for the file accepting * writes that have some arbitrary effect. * * The @show function is called to extract the readable content via the * seq_file interface. * * The @write function is called with the data copied into a kernel space * scratch buffer and has a NUL appended for convenience. The buffer may be * modified by the @write function. @write should return 0 on success. * * The @data value is accessible from the @show and @write functions by calling * pde_data() on the file inode. The network namespace must be accessed by * calling seq_file_single_net() on the seq_file struct. 
*/ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode, struct proc_dir_entry *parent, int (*show)(struct seq_file *, void *), proc_write_t write, void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; pde_force_lookup(p); p->proc_ops = &proc_net_single_ops; p->single_show = show; p->write = write; return proc_register(parent, p); } EXPORT_SYMBOL_GPL(proc_create_net_single_write); static struct net *get_proc_task_net(struct inode *dir) { struct task_struct *task; struct nsproxy *ns; struct net *net = NULL; rcu_read_lock(); task = pid_task(proc_pid(dir), PIDTYPE_PID); if (task != NULL) { task_lock(task); ns = task->nsproxy; if (ns != NULL) net = get_net(ns->net_ns); task_unlock(task); } rcu_read_unlock(); return net; } static struct dentry *proc_tgid_net_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct dentry *de; struct net *net; de = ERR_PTR(-ENOENT); net = get_proc_task_net(dir); if (net != NULL) { de = proc_lookup_de(dir, dentry, net->proc_net); put_net(net); } return de; } static int proc_tgid_net_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct net *net; net = get_proc_task_net(inode); generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); if (net != NULL) { stat->nlink = net->proc_net->nlink; put_net(net); } return 0; } const struct inode_operations proc_net_inode_operations = { .lookup = proc_tgid_net_lookup, .getattr = proc_tgid_net_getattr, .setattr = proc_setattr, }; static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx) { int ret; struct net *net; ret = -EINVAL; net = get_proc_task_net(file_inode(file)); if (net != NULL) { ret = proc_readdir_de(file, ctx, net->proc_net); put_net(net); } return ret; } const struct file_operations proc_net_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = proc_tgid_net_readdir, }; static __net_init int proc_net_ns_init(struct net *net) { struct proc_dir_entry *netd, *net_statd; kuid_t uid; kgid_t gid; int err; /* * This PDE acts only as an anchor for /proc/${pid}/net hierarchy. * Corresponding inode (PDE(inode) == net->proc_net) is never * instantiated therefore blanket zeroing is fine. * net->proc_net_stat inode is instantiated normally. */ err = -ENOMEM; netd = kmem_cache_zalloc(proc_dir_entry_cache, GFP_KERNEL); if (!netd) goto out; netd->subdir = RB_ROOT; netd->data = net; netd->nlink = 2; netd->namelen = 3; netd->parent = &proc_root; netd->name = netd->inline_name; memcpy(netd->name, "net", 4); uid = make_kuid(net->user_ns, 0); if (!uid_valid(uid)) uid = netd->uid; gid = make_kgid(net->user_ns, 0); if (!gid_valid(gid)) gid = netd->gid; proc_set_user(netd, uid, gid); /* Seed dentry revalidation for /proc/${pid}/net */ pde_force_lookup(netd); err = -EEXIST; net_statd = proc_net_mkdir(net, "stat", netd); if (!net_statd) goto free_net; net->proc_net = netd; net->proc_net_stat = net_statd; return 0; free_net: pde_free(netd); out: return err; } static __net_exit void proc_net_ns_exit(struct net *net) { remove_proc_entry("stat", net->proc_net); pde_free(net->proc_net); } static struct pernet_operations __net_initdata proc_net_ns_ops = { .init = proc_net_ns_init, .exit = proc_net_ns_exit, }; int __init proc_net_init(void) { proc_symlink("net", NULL, "self/net"); return register_pernet_subsys(&proc_net_ns_ops); }
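/*
 * A sketch of how a networking subsystem typically consumes the helpers above
 * from its pernet init hook.  "foo" and foo_seq_show() are hypothetical names;
 * proc_create_net_single(), seq_file_single_net() and remove_proc_entry() are
 * the interfaces defined in this file and in the proc/seq_file headers.
 *
 *	#include <linux/proc_fs.h>
 *	#include <linux/seq_file.h>
 *	#include <linux/seq_file_net.h>
 *	#include <net/net_namespace.h>
 *
 *	static int foo_seq_show(struct seq_file *seq, void *v)
 *	{
 *		struct net *net = seq_file_single_net(seq);
 *
 *		seq_printf(seq, "inum: %u\n", net->ns.inum);
 *		return 0;
 *	}
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		if (!proc_create_net_single("foo", 0444, net->proc_net,
 *					    foo_seq_show, NULL))
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_net_exit(struct net *net)
 *	{
 *		remove_proc_entry("foo", net->proc_net);
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *	};
 *
 * Registering foo_net_ops with register_pernet_subsys() then gives every
 * network namespace its own /proc/net/foo backed by that namespace's data.
 */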
// SPDX-License-Identifier: GPL-2.0
/* Lock down the kernel
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/security.h>
#include <linux/export.h>
#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>

static enum lockdown_reason kernel_locked_down;

static const enum lockdown_reason lockdown_levels[] = {LOCKDOWN_NONE,
						       LOCKDOWN_INTEGRITY_MAX,
						       LOCKDOWN_CONFIDENTIALITY_MAX};

/*
 * Put the kernel into lock-down mode.
 */
static int lock_kernel_down(const char *where, enum lockdown_reason level)
{
	if (kernel_locked_down >= level)
		return -EPERM;

	kernel_locked_down = level;
	pr_notice("Kernel is locked down from %s; see man kernel_lockdown.7\n",
		  where);
	return 0;
}

static int __init lockdown_param(char *level)
{
	if (!level)
		return -EINVAL;

	if (strcmp(level, "integrity") == 0)
		lock_kernel_down("command line", LOCKDOWN_INTEGRITY_MAX);
	else if (strcmp(level, "confidentiality") == 0)
		lock_kernel_down("command line", LOCKDOWN_CONFIDENTIALITY_MAX);
	else
		return -EINVAL;

	return 0;
}

early_param("lockdown", lockdown_param);

/**
 * lockdown_is_locked_down - Find out if the kernel is locked down
 * @what: Tag to use in notice generated if lockdown is in effect
 */
static int lockdown_is_locked_down(enum lockdown_reason what)
{
	if (WARN(what >= LOCKDOWN_CONFIDENTIALITY_MAX,
		 "Invalid lockdown reason"))
		return -EPERM;

	if (kernel_locked_down >= what) {
		if (lockdown_reasons[what])
			pr_notice_ratelimited("Lockdown: %s: %s is restricted; see man kernel_lockdown.7\n",
					      current->comm, lockdown_reasons[what]);
		return -EPERM;
	}

	return 0;
}

static struct security_hook_list lockdown_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(locked_down, lockdown_is_locked_down),
};

static const struct lsm_id lockdown_lsmid = {
	.name = "lockdown",
	.id = LSM_ID_LOCKDOWN,
};

static int __init lockdown_lsm_init(void)
{
#if defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY)
	lock_kernel_down("Kernel configuration", LOCKDOWN_INTEGRITY_MAX);
#elif defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY)
	lock_kernel_down("Kernel configuration", LOCKDOWN_CONFIDENTIALITY_MAX);
#endif
	security_add_hooks(lockdown_hooks, ARRAY_SIZE(lockdown_hooks),
			   &lockdown_lsmid);
	return 0;
}

static ssize_t lockdown_read(struct file *filp, char __user *buf, size_t count,
			     loff_t *ppos)
{
	char temp[80] = "";
	int i, offset = 0;

	for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) {
		enum lockdown_reason level = lockdown_levels[i];

		if (lockdown_reasons[level]) {
			const char *label = lockdown_reasons[level];

			if (kernel_locked_down == level)
				offset += sprintf(temp+offset, "[%s] ", label);
			else
				offset += sprintf(temp+offset, "%s ", label);
		}
	}

	/* Convert the last space to a newline
	 * if needed. */
	if (offset > 0)
		temp[offset-1] = '\n';

	return simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
}

static ssize_t lockdown_write(struct file *file, const char __user *buf,
			      size_t n, loff_t *ppos)
{
	char *state;
	int i, len, err = -EINVAL;

	state = memdup_user_nul(buf, n);
	if (IS_ERR(state))
		return PTR_ERR(state);

	len = strlen(state);
	if (len && state[len-1] == '\n') {
		state[len-1] = '\0';
		len--;
	}

	for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) {
		enum lockdown_reason level = lockdown_levels[i];
		const char *label = lockdown_reasons[level];

		if (label && !strcmp(state, label))
			err = lock_kernel_down("securityfs", level);
	}

	kfree(state);
	return err ? err : n;
}

static const struct file_operations lockdown_ops = {
	.read  = lockdown_read,
	.write = lockdown_write,
};

static int __init lockdown_secfs_init(void)
{
	struct dentry *dentry;

	dentry = securityfs_create_file("lockdown", 0644, NULL, NULL,
					&lockdown_ops);
	return PTR_ERR_OR_ZERO(dentry);
}

core_initcall(lockdown_secfs_init);

#ifdef CONFIG_SECURITY_LOCKDOWN_LSM_EARLY
DEFINE_EARLY_LSM(lockdown) = {
#else
DEFINE_LSM(lockdown) = {
#endif
	.name = "lockdown",
	.init = lockdown_lsm_init,
};
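/*
 * Other kernel code does not call into this LSM directly; it asks the security
 * layer, which dispatches to lockdown_is_locked_down() through the locked_down
 * hook registered above.  A sketch of a typical caller follows: the surrounding
 * function is hypothetical, while security_locked_down() and the
 * LOCKDOWN_DEV_MEM reason are the real interface from <linux/security.h>.
 *
 *	#include <linux/security.h>
 *	#include <linux/errno.h>
 *
 *	static int example_open_dev_mem(void)
 *	{
 *		int ret;
 *
 *		// Refuse raw memory access once integrity lockdown is on.
 *		ret = security_locked_down(LOCKDOWN_DEV_MEM);
 *		if (ret)
 *			return ret;	// -EPERM when locked down
 *
 *		// ... proceed with the privileged operation ...
 *		return 0;
 *	}
 */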
// SPDX-License-Identifier: GPL-2.0
/*
 * Some of this code is credited to Linux USB open source files that are
 * distributed with Linux.
 *
 * Copyright: 2007 Metrologic Instruments. All rights reserved.
 * Copyright: 2011 Azimut Ltd. <http://azimutrzn.ru/>
 */

#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb/serial.h>

#define DRIVER_DESC "Metrologic Instruments Inc. - USB-POS driver"

/* Product information. */
#define FOCUS_VENDOR_ID			0x0C2E
#define FOCUS_PRODUCT_ID_BI		0x0720
#define FOCUS_PRODUCT_ID_UNI		0x0700

#define METROUSB_SET_REQUEST_TYPE	0x40
#define METROUSB_SET_MODEM_CTRL_REQUEST	10
#define METROUSB_SET_BREAK_REQUEST	0x40
#define METROUSB_MCR_NONE		0x08	/* Deactivate DTR and RTS. */
#define METROUSB_MCR_RTS		0x0a	/* Activate RTS. */
#define METROUSB_MCR_DTR		0x09	/* Activate DTR. */
#define WDR_TIMEOUT			5000	/* default urb timeout. */

/* Private data structure. */
struct metrousb_private {
	spinlock_t lock;
	int throttled;
	unsigned long control_state;
};

/* Device table list. */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
	{ USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) },	/* MS7820 */
	{ }, /* Terminating entry.
*/ }; MODULE_DEVICE_TABLE(usb, id_table); /* UNI-Directional mode commands for device configure */ #define UNI_CMD_OPEN 0x80 #define UNI_CMD_CLOSE 0xFF static int metrousb_is_unidirectional_mode(struct usb_serial *serial) { u16 product_id = le16_to_cpu(serial->dev->descriptor.idProduct); return product_id == FOCUS_PRODUCT_ID_UNI; } static int metrousb_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds) { if (metrousb_is_unidirectional_mode(serial)) { if (epds->num_interrupt_out == 0) { dev_err(&serial->interface->dev, "interrupt-out endpoint missing\n"); return -ENODEV; } } return 1; } static int metrousb_send_unidirectional_cmd(u8 cmd, struct usb_serial_port *port) { int ret; int actual_len; u8 *buffer_cmd = NULL; if (!metrousb_is_unidirectional_mode(port->serial)) return 0; buffer_cmd = kzalloc(sizeof(cmd), GFP_KERNEL); if (!buffer_cmd) return -ENOMEM; *buffer_cmd = cmd; ret = usb_interrupt_msg(port->serial->dev, usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress), buffer_cmd, sizeof(cmd), &actual_len, USB_CTRL_SET_TIMEOUT); kfree(buffer_cmd); if (ret < 0) return ret; else if (actual_len != sizeof(cmd)) return -EIO; return 0; } static void metrousb_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct metrousb_private *metro_priv = usb_get_serial_port_data(port); unsigned char *data = urb->transfer_buffer; unsigned long flags; int throttled = 0; int result = 0; dev_dbg(&port->dev, "%s\n", __func__); switch (urb->status) { case 0: /* Success status, read from the port. */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* urb has been terminated. */ dev_dbg(&port->dev, "%s - urb shutting down, error code=%d\n", __func__, urb->status); return; default: dev_dbg(&port->dev, "%s - non-zero urb received, error code=%d\n", __func__, urb->status); goto exit; } /* Set the data read from the usb port into the serial port buffer. */ if (urb->actual_length) { /* Loop through the data copying each byte to the tty layer. */ tty_insert_flip_string(&port->port, data, urb->actual_length); /* Force the data to the tty layer. */ tty_flip_buffer_push(&port->port); } /* Set any port variables. */ spin_lock_irqsave(&metro_priv->lock, flags); throttled = metro_priv->throttled; spin_unlock_irqrestore(&metro_priv->lock, flags); if (throttled) return; exit: /* Try to resubmit the urb. */ result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_err(&port->dev, "%s - failed submitting interrupt in urb, error code=%d\n", __func__, result); } static void metrousb_cleanup(struct usb_serial_port *port) { usb_kill_urb(port->interrupt_in_urb); metrousb_send_unidirectional_cmd(UNI_CMD_CLOSE, port); } static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct metrousb_private *metro_priv = usb_get_serial_port_data(port); unsigned long flags; int result = 0; /* Set the private data information for the port. */ spin_lock_irqsave(&metro_priv->lock, flags); metro_priv->control_state = 0; metro_priv->throttled = 0; spin_unlock_irqrestore(&metro_priv->lock, flags); /* Clear the urb pipe. 
*/ usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe); /* Start reading from the device */ usb_fill_int_urb(port->interrupt_in_urb, serial->dev, usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress), port->interrupt_in_urb->transfer_buffer, port->interrupt_in_urb->transfer_buffer_length, metrousb_read_int_callback, port, 1); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "%s - failed submitting interrupt in urb, error code=%d\n", __func__, result); return result; } /* Send activate cmd to device */ result = metrousb_send_unidirectional_cmd(UNI_CMD_OPEN, port); if (result) { dev_err(&port->dev, "%s - failed to configure device, error code=%d\n", __func__, result); goto err_kill_urb; } return 0; err_kill_urb: usb_kill_urb(port->interrupt_in_urb); return result; } static int metrousb_set_modem_ctrl(struct usb_serial *serial, unsigned int control_state) { int retval = 0; unsigned char mcr = METROUSB_MCR_NONE; dev_dbg(&serial->dev->dev, "%s - control state = %d\n", __func__, control_state); /* Set the modem control value. */ if (control_state & TIOCM_DTR) mcr |= METROUSB_MCR_DTR; if (control_state & TIOCM_RTS) mcr |= METROUSB_MCR_RTS; /* Send the command to the usb port. */ retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), METROUSB_SET_REQUEST_TYPE, METROUSB_SET_MODEM_CTRL_REQUEST, control_state, 0, NULL, 0, WDR_TIMEOUT); if (retval < 0) dev_err(&serial->dev->dev, "%s - set modem ctrl=0x%x failed, error code=%d\n", __func__, mcr, retval); return retval; } static int metrousb_port_probe(struct usb_serial_port *port) { struct metrousb_private *metro_priv; metro_priv = kzalloc(sizeof(*metro_priv), GFP_KERNEL); if (!metro_priv) return -ENOMEM; spin_lock_init(&metro_priv->lock); usb_set_serial_port_data(port, metro_priv); return 0; } static void metrousb_port_remove(struct usb_serial_port *port) { struct metrousb_private *metro_priv; metro_priv = usb_get_serial_port_data(port); kfree(metro_priv); } static void metrousb_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct metrousb_private *metro_priv = usb_get_serial_port_data(port); unsigned long flags; /* Set the private information for the port to stop reading data. */ spin_lock_irqsave(&metro_priv->lock, flags); metro_priv->throttled = 1; spin_unlock_irqrestore(&metro_priv->lock, flags); } static int metrousb_tiocmget(struct tty_struct *tty) { unsigned long control_state = 0; struct usb_serial_port *port = tty->driver_data; struct metrousb_private *metro_priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&metro_priv->lock, flags); control_state = metro_priv->control_state; spin_unlock_irqrestore(&metro_priv->lock, flags); return control_state; } static int metrousb_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; struct metrousb_private *metro_priv = usb_get_serial_port_data(port); unsigned long flags; unsigned long control_state = 0; dev_dbg(&port->dev, "%s - set=%d, clear=%d\n", __func__, set, clear); spin_lock_irqsave(&metro_priv->lock, flags); control_state = metro_priv->control_state; /* Set the RTS and DTR values. 
*/ if (set & TIOCM_RTS) control_state |= TIOCM_RTS; if (set & TIOCM_DTR) control_state |= TIOCM_DTR; if (clear & TIOCM_RTS) control_state &= ~TIOCM_RTS; if (clear & TIOCM_DTR) control_state &= ~TIOCM_DTR; metro_priv->control_state = control_state; spin_unlock_irqrestore(&metro_priv->lock, flags); return metrousb_set_modem_ctrl(serial, control_state); } static void metrousb_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct metrousb_private *metro_priv = usb_get_serial_port_data(port); unsigned long flags; int result = 0; /* Set the private information for the port to resume reading data. */ spin_lock_irqsave(&metro_priv->lock, flags); metro_priv->throttled = 0; spin_unlock_irqrestore(&metro_priv->lock, flags); /* Submit the urb to read from the port. */ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); if (result) dev_err(&port->dev, "failed submitting interrupt in urb error code=%d\n", result); } static struct usb_serial_driver metrousb_device = { .driver = { .name = "metro-usb", }, .description = "Metrologic USB to Serial", .id_table = id_table, .num_interrupt_in = 1, .calc_num_ports = metrousb_calc_num_ports, .open = metrousb_open, .close = metrousb_cleanup, .read_int_callback = metrousb_read_int_callback, .port_probe = metrousb_port_probe, .port_remove = metrousb_port_remove, .throttle = metrousb_throttle, .unthrottle = metrousb_unthrottle, .tiocmget = metrousb_tiocmget, .tiocmset = metrousb_tiocmset, }; static struct usb_serial_driver * const serial_drivers[] = { &metrousb_device, NULL, }; module_usb_serial_driver(serial_drivers, id_table); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Philip Nicastro"); MODULE_AUTHOR("Aleksey Babahin <tamerlan311@gmail.com>"); MODULE_DESCRIPTION(DRIVER_DESC);
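/*
 * Illustrative userspace sketch (not part of the driver above): once the
 * metro-usb driver binds, the port appears as an ordinary USB serial tty,
 * and the tiocmset/tiocmget paths can be exercised with the standard
 * modem-control ioctls. The device node name is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);	/* node name is hypothetical */
	int bits = TIOCM_DTR | TIOCM_RTS;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Assert DTR and RTS; the driver turns this into a
	 * METROUSB_SET_MODEM_CTRL_REQUEST control transfer. */
	if (ioctl(fd, TIOCMBIS, &bits) < 0)
		perror("TIOCMBIS");

	/* Read back the cached control state (metrousb_tiocmget). */
	if (ioctl(fd, TIOCMGET, &bits) == 0)
		printf("modem bits: 0x%x\n", bits);

	close(fd);
	return 0;
}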
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Nano River Technologies viperboard i2c controller driver
 *
 * (C) 2012 by Lemonage GmbH
 * Author: Lars Poeschel <poeschel@lemonage.de>
 * All rights reserved.
*/ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/mfd/viperboard.h> struct vprbrd_i2c { struct i2c_adapter i2c; u8 bus_freq_param; }; /* i2c bus frequency module parameter */ static u8 i2c_bus_param; static unsigned int i2c_bus_freq = 100; module_param(i2c_bus_freq, int, 0); MODULE_PARM_DESC(i2c_bus_freq, "i2c bus frequency in khz (default is 100) valid values: 10, 100, 200, 400, 1000, 3000, 6000"); static int vprbrd_i2c_status(struct i2c_adapter *i2c, struct vprbrd_i2c_status *status, bool prev_error) { u16 bytes_xfer; int ret; struct vprbrd *vb = (struct vprbrd *)i2c->algo_data; /* check for protocol error */ bytes_xfer = sizeof(struct vprbrd_i2c_status); ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_I2C, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, status, bytes_xfer, VPRBRD_USB_TIMEOUT_MS); if (ret != bytes_xfer) prev_error = true; if (prev_error) { dev_err(&i2c->dev, "failure in usb communication\n"); return -EREMOTEIO; } dev_dbg(&i2c->dev, " status = %d\n", status->status); if (status->status != 0x00) { dev_err(&i2c->dev, "failure: i2c protocol error\n"); return -EPROTO; } return 0; } static int vprbrd_i2c_receive(struct usb_device *usb_dev, struct vprbrd_i2c_read_msg *rmsg, int bytes_xfer) { int ret, bytes_actual; int error = 0; /* send the read request */ ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), rmsg, sizeof(struct vprbrd_i2c_read_hdr), &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (bytes_actual != sizeof(struct vprbrd_i2c_read_hdr))) { dev_err(&usb_dev->dev, "failure transmitting usb\n"); error = -EREMOTEIO; } /* read the actual data */ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, VPRBRD_EP_IN), rmsg, bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (bytes_xfer != bytes_actual)) { dev_err(&usb_dev->dev, "failure receiving usb\n"); error = -EREMOTEIO; } return error; } static int vprbrd_i2c_addr(struct usb_device *usb_dev, struct vprbrd_i2c_addr_msg *amsg) { int ret, bytes_actual; ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), amsg, sizeof(struct vprbrd_i2c_addr_msg), &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (sizeof(struct vprbrd_i2c_addr_msg) != bytes_actual)) { dev_err(&usb_dev->dev, "failure transmitting usb\n"); return -EREMOTEIO; } return 0; } static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg) { int ret; u16 remain_len, len1, len2, start = 0x0000; struct vprbrd_i2c_read_msg *rmsg = (struct vprbrd_i2c_read_msg *)vb->buf; remain_len = msg->len; rmsg->header.cmd = VPRBRD_I2C_CMD_READ; while (remain_len > 0) { rmsg->header.addr = cpu_to_le16(start + 0x4000); if (remain_len <= 255) { len1 = remain_len; len2 = 0x00; rmsg->header.len0 = remain_len; rmsg->header.len1 = 0x00; rmsg->header.len2 = 0x00; rmsg->header.len3 = 0x00; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 510) { len1 = remain_len; len2 = 0x00; rmsg->header.len0 = remain_len - 255; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0x00; rmsg->header.len3 = 0x00; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 512) { len1 = remain_len; len2 = 0x00; rmsg->header.len0 = remain_len - 510; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = 0x00; rmsg->header.len4 = 0x00; 
rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 767) { len1 = 512; len2 = remain_len - 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = remain_len - 512; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 1022) { len1 = 512; len2 = remain_len - 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = remain_len - 767; rmsg->header.len4 = 0xff; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 1024) { len1 = 512; len2 = remain_len - 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = remain_len - 1022; rmsg->header.len4 = 0xff; rmsg->header.len5 = 0xff; remain_len = 0; } else { len1 = 512; len2 = 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = 0x02; rmsg->header.len4 = 0xff; rmsg->header.len5 = 0xff; remain_len -= 1024; start += 1024; } rmsg->header.tf1 = cpu_to_le16(len1); rmsg->header.tf2 = cpu_to_le16(len2); /* first read transfer */ ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len1); if (ret < 0) return ret; /* copy the received data */ memcpy(msg->buf + start, rmsg, len1); /* second read transfer if neccessary */ if (len2 > 0) { ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2); if (ret < 0) return ret; /* copy the received data */ memcpy(msg->buf + start + 512, rmsg, len2); } } return 0; } static int vprbrd_i2c_write(struct vprbrd *vb, struct i2c_msg *msg) { int ret, bytes_actual; u16 remain_len, bytes_xfer, start = 0x0000; struct vprbrd_i2c_write_msg *wmsg = (struct vprbrd_i2c_write_msg *)vb->buf; remain_len = msg->len; wmsg->header.cmd = VPRBRD_I2C_CMD_WRITE; wmsg->header.last = 0x00; wmsg->header.chan = 0x00; wmsg->header.spi = 0x0000; while (remain_len > 0) { wmsg->header.addr = cpu_to_le16(start + 0x4000); if (remain_len > 503) { wmsg->header.len1 = 0xff; wmsg->header.len2 = 0xf8; remain_len -= 503; bytes_xfer = 503 + sizeof(struct vprbrd_i2c_write_hdr); start += 503; } else if (remain_len > 255) { wmsg->header.len1 = 0xff; wmsg->header.len2 = (remain_len - 255); bytes_xfer = remain_len + sizeof(struct vprbrd_i2c_write_hdr); remain_len = 0; } else { wmsg->header.len1 = remain_len; wmsg->header.len2 = 0x00; bytes_xfer = remain_len + sizeof(struct vprbrd_i2c_write_hdr); remain_len = 0; } memcpy(wmsg->data, msg->buf + start, bytes_xfer - sizeof(struct vprbrd_i2c_write_hdr)); ret = usb_bulk_msg(vb->usb_dev, usb_sndbulkpipe(vb->usb_dev, VPRBRD_EP_OUT), wmsg, bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (bytes_xfer != bytes_actual)) return -EREMOTEIO; } return 0; } static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs, int num) { struct i2c_msg *pmsg; int i, ret, error = 0; struct vprbrd *vb = (struct vprbrd *)i2c->algo_data; struct vprbrd_i2c_addr_msg *amsg = (struct vprbrd_i2c_addr_msg *)vb->buf; struct vprbrd_i2c_status *smsg = (struct vprbrd_i2c_status *)vb->buf; for (i = 0 ; i < num ; i++) { pmsg = &msgs[i]; dev_dbg(&i2c->dev, " %d: %s (flags %d) %d bytes to 0x%02x\n", i, pmsg->flags & I2C_M_RD ? 
"read" : "write", pmsg->flags, pmsg->len, pmsg->addr); mutex_lock(&vb->lock); /* directly send the message */ if (pmsg->flags & I2C_M_RD) { /* read data */ amsg->cmd = VPRBRD_I2C_CMD_ADDR; amsg->unknown2 = 0x00; amsg->unknown3 = 0x00; amsg->addr = pmsg->addr; amsg->unknown1 = 0x01; amsg->len = cpu_to_le16(pmsg->len); /* send the addr and len, we're interested to board */ ret = vprbrd_i2c_addr(vb->usb_dev, amsg); if (ret < 0) error = ret; ret = vprbrd_i2c_read(vb, pmsg); if (ret < 0) error = ret; ret = vprbrd_i2c_status(i2c, smsg, error); if (ret < 0) error = ret; /* in case of protocol error, return the error */ if (error < 0) goto error; } else { /* write data */ ret = vprbrd_i2c_write(vb, pmsg); amsg->cmd = VPRBRD_I2C_CMD_ADDR; amsg->unknown2 = 0x00; amsg->unknown3 = 0x00; amsg->addr = pmsg->addr; amsg->unknown1 = 0x00; amsg->len = cpu_to_le16(pmsg->len); /* send the addr, the data goes to to board */ ret = vprbrd_i2c_addr(vb->usb_dev, amsg); if (ret < 0) error = ret; ret = vprbrd_i2c_status(i2c, smsg, error); if (ret < 0) error = ret; if (error < 0) goto error; } mutex_unlock(&vb->lock); } return num; error: mutex_unlock(&vb->lock); return error; } static u32 vprbrd_i2c_func(struct i2c_adapter *i2c) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } /* This is the actual algorithm we define */ static const struct i2c_algorithm vprbrd_algorithm = { .xfer = vprbrd_i2c_xfer, .functionality = vprbrd_i2c_func, }; static const struct i2c_adapter_quirks vprbrd_quirks = { .max_read_len = 2048, .max_write_len = 2048, }; static int vprbrd_i2c_probe(struct platform_device *pdev) { struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent); struct vprbrd_i2c *vb_i2c; int ret; int pipe; vb_i2c = devm_kzalloc(&pdev->dev, sizeof(*vb_i2c), GFP_KERNEL); if (vb_i2c == NULL) return -ENOMEM; /* setup i2c adapter description */ vb_i2c->i2c.owner = THIS_MODULE; vb_i2c->i2c.class = I2C_CLASS_HWMON; vb_i2c->i2c.algo = &vprbrd_algorithm; vb_i2c->i2c.quirks = &vprbrd_quirks; vb_i2c->i2c.algo_data = vb; /* save the param in usb capabable memory */ vb_i2c->bus_freq_param = i2c_bus_param; snprintf(vb_i2c->i2c.name, sizeof(vb_i2c->i2c.name), "viperboard at bus %03d device %03d", vb->usb_dev->bus->busnum, vb->usb_dev->devnum); /* setting the bus frequency */ if ((i2c_bus_param <= VPRBRD_I2C_FREQ_10KHZ) && (i2c_bus_param >= VPRBRD_I2C_FREQ_6MHZ)) { pipe = usb_sndctrlpipe(vb->usb_dev, 0); ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, &vb_i2c->bus_freq_param, 1, VPRBRD_USB_TIMEOUT_MS); if (ret != 1) { dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n", i2c_bus_freq); return -EIO; } } else { dev_err(&pdev->dev, "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq); return -EIO; } vb_i2c->i2c.dev.parent = &pdev->dev; /* attach to i2c layer */ i2c_add_adapter(&vb_i2c->i2c); platform_set_drvdata(pdev, vb_i2c); return 0; } static void vprbrd_i2c_remove(struct platform_device *pdev) { struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev); i2c_del_adapter(&vb_i2c->i2c); } static struct platform_driver vprbrd_i2c_driver = { .driver.name = "viperboard-i2c", .probe = vprbrd_i2c_probe, .remove = vprbrd_i2c_remove, }; static int __init vprbrd_i2c_init(void) { switch (i2c_bus_freq) { case 6000: i2c_bus_param = VPRBRD_I2C_FREQ_6MHZ; break; case 3000: i2c_bus_param = VPRBRD_I2C_FREQ_3MHZ; break; case 1000: i2c_bus_param = VPRBRD_I2C_FREQ_1MHZ; break; case 400: i2c_bus_param = VPRBRD_I2C_FREQ_400KHZ; break; case 200: i2c_bus_param = VPRBRD_I2C_FREQ_200KHZ; break; case 100: 
i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ; break; case 10: i2c_bus_param = VPRBRD_I2C_FREQ_10KHZ; break; default: pr_warn("invalid i2c_bus_freq (%d)\n", i2c_bus_freq); i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ; } return platform_driver_register(&vprbrd_i2c_driver); } subsys_initcall(vprbrd_i2c_init); static void __exit vprbrd_i2c_exit(void) { platform_driver_unregister(&vprbrd_i2c_driver); } module_exit(vprbrd_i2c_exit); MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>"); MODULE_DESCRIPTION("I2C controller driver for Nano River Techs Viperboard"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:viperboard-i2c");
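/*
 * Illustrative userspace sketch (not part of the driver above): a one-byte
 * read through the i2c-dev interface, which the i2c core hands to
 * vprbrd_i2c_xfer() as an i2c_msg when the Viperboard adapter is used.
 * The adapter number and the 0x50 slave address are assumptions.
 */
#include <fcntl.h>
#include <linux/i2c-dev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[1];
	int fd = open("/dev/i2c-1", O_RDWR);	/* adapter number is hypothetical */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Select a placeholder slave address (e.g. an EEPROM at 0x50). */
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0) {
		perror("I2C_SLAVE");
		close(fd);
		return 1;
	}

	/* A plain read() becomes an I2C_M_RD message in the adapter's xfer op. */
	if (read(fd, buf, 1) == 1)
		printf("read 0x%02x\n", buf[0]);
	else
		perror("read");

	close(fd);
	return 0;
}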
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SOCK_REUSEPORT_H
#define _SOCK_REUSEPORT_H

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <net/sock.h>

extern spinlock_t reuseport_lock;

struct sock_reuseport {
	struct rcu_head		rcu;

	u16			max_socks;		/* length of socks */
	u16			num_socks;		/* elements in socks */
	u16			num_closed_socks;	/* closed elements in socks */
	u16			incoming_cpu;
	/* The last synq overflow event timestamp of this
	 * reuse->socks[] group.
	 */
	unsigned int		synq_overflow_ts;
	/* ID stays the same even after the size of socks[] grows. */
	unsigned int		reuseport_id;
	unsigned int		bind_inany:1;
	unsigned int		has_conns:1;
	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
	struct sock		*socks[] __counted_by(max_socks);
};

extern int reuseport_alloc(struct sock *sk, bool bind_inany);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
			      bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
void reuseport_stop_listen_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
					  u32 hash,
					  struct sk_buff *skb,
					  int hdr_len);
struct sock *reuseport_migrate_sock(struct sock *sk,
				    struct sock *migrating_sk,
				    struct sk_buff *skb);
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);

static inline bool reuseport_has_conns(struct sock *sk)
{
	struct sock_reuseport *reuse;
	bool ret = false;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (reuse && reuse->has_conns)
		ret = true;
	rcu_read_unlock();

	return ret;
}

void reuseport_has_conns_set(struct sock *sk);
void reuseport_update_incoming_cpu(struct sock *sk, int val);

#endif  /* _SOCK_REUSEPORT_H */
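/*
 * Illustrative userspace sketch (not part of the header above): binding two
 * UDP sockets to the same port with SO_REUSEPORT is what causes the kernel
 * to build a struct sock_reuseport group via reuseport_alloc() and
 * reuseport_add_sock(). The port number is arbitrary.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int make_reuseport_socket(unsigned short port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* Must be set before bind() so the socket can form or join a group. */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0)
		goto err;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}

int main(void)
{
	int a = make_reuseport_socket(5555);
	int b = make_reuseport_socket(5555);	/* joins the same reuseport group */

	printf("sockets: %d %d\n", a, b);
	if (a >= 0)
		close(a);
	if (b >= 0)
		close(b);
	return 0;
}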
837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 // SPDX-License-Identifier: GPL-2.0-only /* * Generic helpers for smp ipi calls * * (C) Jens Axboe <jens.axboe@oracle.com> 2008 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/irq_work.h> #include <linux/rcupdate.h> #include <linux/rculist.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/gfp.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/sched/idle.h> #include <linux/hypervisor.h> #include <linux/sched/clock.h> #include <linux/nmi.h> #include <linux/sched/debug.h> #include <linux/jump_label.h> #include <linux/string_choices.h> #include <trace/events/ipi.h> #define CREATE_TRACE_POINTS #include <trace/events/csd.h> #undef CREATE_TRACE_POINTS #include "smpboot.h" #include "sched/smp.h" #define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK) struct call_function_data { call_single_data_t __percpu *csd; cpumask_var_t cpumask; cpumask_var_t cpumask_ipi; }; static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data); static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue); static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1); static void __flush_smp_call_function_queue(bool warn_cpu_offline); int smpcfd_prepare_cpu(unsigned int cpu) { struct call_function_data *cfd = &per_cpu(cfd_data, cpu); if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, cpu_to_node(cpu))) return -ENOMEM; if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, cpu_to_node(cpu))) { free_cpumask_var(cfd->cpumask); return -ENOMEM; } cfd->csd = alloc_percpu(call_single_data_t); if (!cfd->csd) { free_cpumask_var(cfd->cpumask); free_cpumask_var(cfd->cpumask_ipi); return -ENOMEM; } return 0; } int smpcfd_dead_cpu(unsigned int cpu) { struct call_function_data *cfd = &per_cpu(cfd_data, cpu); free_cpumask_var(cfd->cpumask); free_cpumask_var(cfd->cpumask_ipi); free_percpu(cfd->csd); return 0; } int smpcfd_dying_cpu(unsigned int cpu) { /* * The IPIs for the smp-call-function callbacks queued by other * CPUs might 
arrive late, either due to hardware latencies or * because this CPU disabled interrupts (inside stop-machine) * before the IPIs were sent. So flush out any pending callbacks * explicitly (without waiting for the IPIs to arrive), to * ensure that the outgoing CPU doesn't go offline with work * still pending. */ __flush_smp_call_function_queue(false); irq_work_run(); return 0; } void __init call_function_init(void) { int i; for_each_possible_cpu(i) init_llist_head(&per_cpu(call_single_queue, i)); smpcfd_prepare_cpu(smp_processor_id()); } static __always_inline void send_call_function_single_ipi(int cpu) { if (call_function_single_prep_ipi(cpu)) { trace_ipi_send_cpu(cpu, _RET_IP_, generic_smp_call_function_single_interrupt); arch_send_call_function_single_ipi(cpu); } } static __always_inline void send_call_function_ipi_mask(struct cpumask *mask) { trace_ipi_send_cpumask(mask, _RET_IP_, generic_smp_call_function_single_interrupt); arch_send_call_function_ipi_mask(mask); } static __always_inline void csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd) { trace_csd_function_entry(func, csd); func(info); trace_csd_function_exit(func, csd); } #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled); /* * Parse the csdlock_debug= kernel boot parameter. * * If you need to restore the old "ext" value that once provided * additional debugging information, reapply the following commits: * * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging") * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging") */ static int __init csdlock_debug(char *str) { int ret; unsigned int val = 0; ret = get_option(&str, &val); if (ret) { if (val) static_branch_enable(&csdlock_debug_enabled); else static_branch_disable(&csdlock_debug_enabled); } return 1; } __setup("csdlock_debug=", csdlock_debug); static DEFINE_PER_CPU(call_single_data_t *, cur_csd); static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func); static DEFINE_PER_CPU(void *, cur_csd_info); static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */ module_param(csd_lock_timeout, ulong, 0644); static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */ module_param(panic_on_ipistall, int, 0644); static atomic_t csd_bug_count = ATOMIC_INIT(0); /* Record current CSD work for current CPU, NULL to erase. */ static void __csd_lock_record(call_single_data_t *csd) { if (!csd) { smp_mb(); /* NULL cur_csd after unlock. */ __this_cpu_write(cur_csd, NULL); return; } __this_cpu_write(cur_csd_func, csd->func); __this_cpu_write(cur_csd_info, csd->info); smp_wmb(); /* func and info before csd. */ __this_cpu_write(cur_csd, csd); smp_mb(); /* Update cur_csd before function call. */ /* Or before unlock, as the case may be. */ } static __always_inline void csd_lock_record(call_single_data_t *csd) { if (static_branch_unlikely(&csdlock_debug_enabled)) __csd_lock_record(csd); } static int csd_lock_wait_getcpu(call_single_data_t *csd) { unsigned int csd_type; csd_type = CSD_TYPE(csd); if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC) return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */ return -1; } static atomic_t n_csd_lock_stuck; /** * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long? * * Returns @true if a CSD-lock acquisition is stuck and has been stuck * long enough for a "non-responsive CSD lock" message to be printed. 
*/ bool csd_lock_is_stuck(void) { return !!atomic_read(&n_csd_lock_stuck); } /* * Complain if too much time spent waiting. Note that only * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU, * so waiting on other types gets much less information. */ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id, unsigned long *nmessages) { int cpu = -1; int cpux; bool firsttime; u64 ts2, ts_delta; call_single_data_t *cpu_cur_csd; unsigned int flags = READ_ONCE(csd->node.u_flags); unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC; if (!(flags & CSD_FLAG_LOCK)) { if (!unlikely(*bug_id)) return true; cpu = csd_lock_wait_getcpu(csd); pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n", *bug_id, raw_smp_processor_id(), cpu); atomic_dec(&n_csd_lock_stuck); return true; } ts2 = ktime_get_mono_fast_ns(); /* How long since we last checked for a stuck CSD lock.*/ ts_delta = ts2 - *ts1; if (likely(ts_delta <= csd_lock_timeout_ns * (*nmessages + 1) * (!*nmessages ? 1 : (ilog2(num_online_cpus()) / 2 + 1)) || csd_lock_timeout_ns == 0)) return false; if (ts0 > ts2) { /* Our own sched_clock went backward; don't blame another CPU. */ ts_delta = ts0 - ts2; pr_alert("sched_clock on CPU %d went backward by %llu ns\n", raw_smp_processor_id(), ts_delta); *ts1 = ts2; return false; } firsttime = !*bug_id; if (firsttime) *bug_id = atomic_inc_return(&csd_bug_count); cpu = csd_lock_wait_getcpu(csd); if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu)) cpux = 0; else cpux = cpu; cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */ /* How long since this CSD lock was stuck. */ ts_delta = ts2 - ts0; pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\n", firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), (s64)ts_delta, cpu, csd->func, csd->info); (*nmessages)++; if (firsttime) atomic_inc(&n_csd_lock_stuck); /* * If the CSD lock is still stuck after 5 minutes, it is unlikely * to become unstuck. Use a signed comparison to avoid triggering * on underflows when the TSC is out of sync between sockets. */ BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC)); if (cpu_cur_csd && csd != cpu_cur_csd) { pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n", *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)), READ_ONCE(per_cpu(cur_csd_info, cpux))); } else { pr_alert("\tcsd: CSD lock (#%d) %s.\n", *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request"); } if (cpu >= 0) { if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0)) dump_cpu_task(cpu); if (!cpu_cur_csd) { pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu); arch_send_call_function_single_ipi(cpu); } } if (firsttime) dump_stack(); *ts1 = ts2; return false; } /* * csd_lock/csd_unlock used to serialize access to per-cpu csd resources * * For non-synchronous ipi calls the csd can still be in use by the * previous function call. For multi-cpu calls its even more interesting * as we'll have to ensure no other cpu is observing our csd. 
*/ static void __csd_lock_wait(call_single_data_t *csd) { unsigned long nmessages = 0; int bug_id = 0; u64 ts0, ts1; ts1 = ts0 = ktime_get_mono_fast_ns(); for (;;) { if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id, &nmessages)) break; cpu_relax(); } smp_acquire__after_ctrl_dep(); } static __always_inline void csd_lock_wait(call_single_data_t *csd) { if (static_branch_unlikely(&csdlock_debug_enabled)) { __csd_lock_wait(csd); return; } smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); } #else static void csd_lock_record(call_single_data_t *csd) { } static __always_inline void csd_lock_wait(call_single_data_t *csd) { smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); } #endif static __always_inline void csd_lock(call_single_data_t *csd) { csd_lock_wait(csd); csd->node.u_flags |= CSD_FLAG_LOCK; /* * prevent CPU from reordering the above assignment * to ->flags with any subsequent assignments to other * fields of the specified call_single_data_t structure: */ smp_wmb(); } static __always_inline void csd_unlock(call_single_data_t *csd) { WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK)); /* * ensure we're all done before releasing data: */ smp_store_release(&csd->node.u_flags, 0); } static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data); void __smp_call_single_queue(int cpu, struct llist_node *node) { /* * We have to check the type of the CSD before queueing it, because * once queued it can have its flags cleared by * flush_smp_call_function_queue() * even if we haven't sent the smp_call IPI yet (e.g. the stopper * executes migration_cpu_stop() on the remote CPU). */ if (trace_csd_queue_cpu_enabled()) { call_single_data_t *csd; smp_call_func_t func; csd = container_of(node, call_single_data_t, node.llist); func = CSD_TYPE(csd) == CSD_TYPE_TTWU ? sched_ttwu_pending : csd->func; trace_csd_queue_cpu(cpu, _RET_IP_, func, csd); } /* * The list addition should be visible to the target CPU when it pops * the head of the list to pull the entry off it in the IPI handler * because of normal cache coherency rules implied by the underlying * llist ops. * * If IPIs can go out of order to the cache coherency protocol * in an architecture, sufficient synchronisation should be added * to arch code to make it appear to obey cache coherency WRT * locking and barrier primitives. Generic code isn't really * equipped to do the right thing... */ if (llist_add(node, &per_cpu(call_single_queue, cpu))) send_call_function_single_ipi(cpu); } /* * Insert a previously allocated call_single_data_t element * for execution on the given CPU. data must already have * ->func, ->info, and ->flags set. */ static int generic_exec_single(int cpu, call_single_data_t *csd) { if (cpu == smp_processor_id()) { smp_call_func_t func = csd->func; void *info = csd->info; unsigned long flags; /* * We can unlock early even for the synchronous on-stack case, * since we're doing this from the same CPU.. */ csd_lock_record(csd); csd_unlock(csd); local_irq_save(flags); csd_do_func(func, info, NULL); csd_lock_record(NULL); local_irq_restore(flags); return 0; } if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { csd_unlock(csd); return -ENXIO; } __smp_call_single_queue(cpu, &csd->node.llist); return 0; } /** * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks * * Invoked by arch to handle an IPI for call function single. * Must be called with interrupts disabled. 
*/ void generic_smp_call_function_single_interrupt(void) { __flush_smp_call_function_queue(true); } /** * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks * * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an * offline CPU. Skip this check if set to 'false'. * * Flush any pending smp-call-function callbacks queued on this CPU. This is * invoked by the generic IPI handler, as well as by a CPU about to go offline, * to ensure that all pending IPI callbacks are run before it goes completely * offline. * * Loop through the call_single_queue and run all the queued callbacks. * Must be called with interrupts disabled. */ static void __flush_smp_call_function_queue(bool warn_cpu_offline) { call_single_data_t *csd, *csd_next; struct llist_node *entry, *prev; struct llist_head *head; static bool warned; atomic_t *tbt; lockdep_assert_irqs_disabled(); /* Allow waiters to send backtrace NMI from here onwards */ tbt = this_cpu_ptr(&trigger_backtrace); atomic_set_release(tbt, 1); head = this_cpu_ptr(&call_single_queue); entry = llist_del_all(head); entry = llist_reverse_order(entry); /* There shouldn't be any pending callbacks on an offline CPU. */ if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) && !warned && entry != NULL)) { warned = true; WARN(1, "IPI on offline CPU %d\n", smp_processor_id()); /* * We don't have to use the _safe() variant here * because we are not invoking the IPI handlers yet. */ llist_for_each_entry(csd, entry, node.llist) { switch (CSD_TYPE(csd)) { case CSD_TYPE_ASYNC: case CSD_TYPE_SYNC: case CSD_TYPE_IRQ_WORK: pr_warn("IPI callback %pS sent to offline CPU\n", csd->func); break; case CSD_TYPE_TTWU: pr_warn("IPI task-wakeup sent to offline CPU\n"); break; default: pr_warn("IPI callback, unknown type %d, sent to offline CPU\n", CSD_TYPE(csd)); break; } } } /* * First; run all SYNC callbacks, people are waiting for us. */ prev = NULL; llist_for_each_entry_safe(csd, csd_next, entry, node.llist) { /* Do we wait until *after* callback? */ if (CSD_TYPE(csd) == CSD_TYPE_SYNC) { smp_call_func_t func = csd->func; void *info = csd->info; if (prev) { prev->next = &csd_next->node.llist; } else { entry = &csd_next->node.llist; } csd_lock_record(csd); csd_do_func(func, info, csd); csd_unlock(csd); csd_lock_record(NULL); } else { prev = &csd->node.llist; } } if (!entry) return; /* * Second; run all !SYNC callbacks. */ prev = NULL; llist_for_each_entry_safe(csd, csd_next, entry, node.llist) { int type = CSD_TYPE(csd); if (type != CSD_TYPE_TTWU) { if (prev) { prev->next = &csd_next->node.llist; } else { entry = &csd_next->node.llist; } if (type == CSD_TYPE_ASYNC) { smp_call_func_t func = csd->func; void *info = csd->info; csd_lock_record(csd); csd_unlock(csd); csd_do_func(func, info, csd); csd_lock_record(NULL); } else if (type == CSD_TYPE_IRQ_WORK) { irq_work_single(csd); } } else { prev = &csd->node.llist; } } /* * Third; only CSD_TYPE_TTWU is left, issue those. */ if (entry) { csd = llist_entry(entry, typeof(*csd), node.llist); csd_do_func(sched_ttwu_pending, entry, csd); } } /** * flush_smp_call_function_queue - Flush pending smp-call-function callbacks * from task context (idle, migration thread) * * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to * handle queued SMP function calls before scheduling. 
* * The migration thread has to ensure that an eventually pending wakeup has * been handled before it migrates a task. */ void flush_smp_call_function_queue(void) { unsigned int was_pending; unsigned long flags; if (llist_empty(this_cpu_ptr(&call_single_queue))) return; local_irq_save(flags); /* Get the already pending soft interrupts for RT enabled kernels */ was_pending = local_softirq_pending(); __flush_smp_call_function_queue(true); if (local_softirq_pending()) do_softirq_post_smp_call_flush(was_pending); local_irq_restore(flags); } /* * smp_call_function_single - Run a function on a specific CPU * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait until function has completed on other CPUs. * * Returns 0 on success, else a negative status code. */ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, int wait) { call_single_data_t *csd; call_single_data_t csd_stack = { .node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, }, }; int this_cpu; int err; /* * prevent preemption and reschedule on another processor, * as well as CPU removal */ this_cpu = get_cpu(); /* * Can deadlock when called with interrupts disabled. * We allow cpu's that are not yet online though, as no one else can * send smp call function interrupt to this cpu and as such deadlocks * can't happen. */ WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() && !oops_in_progress); /* * When @wait we can deadlock when we interrupt between llist_add() and * arch_send_call_function_ipi*(); when !@wait we can deadlock due to * csd_lock() on because the interrupt context uses the same csd * storage. */ WARN_ON_ONCE(!in_task()); csd = &csd_stack; if (!wait) { csd = this_cpu_ptr(&csd_data); csd_lock(csd); } csd->func = func; csd->info = info; #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG csd->node.src = smp_processor_id(); csd->node.dst = cpu; #endif err = generic_exec_single(cpu, csd); if (wait) csd_lock_wait(csd); put_cpu(); return err; } EXPORT_SYMBOL(smp_call_function_single); /** * smp_call_function_single_async() - Run an asynchronous function on a * specific CPU. * @cpu: The CPU to run on. * @csd: Pre-allocated and setup data structure * * Like smp_call_function_single(), but the call is asynchonous and * can thus be done from contexts with disabled interrupts. * * The caller passes his own pre-allocated data structure * (ie: embedded in an object) and is responsible for synchronizing it * such that the IPIs performed on the @csd are strictly serialized. * * If the function is called with one csd which has not yet been * processed by previous call to smp_call_function_single_async(), the * function will return immediately with -EBUSY showing that the csd * object is still in progress. * * NOTE: Be careful, there is unfortunately no current debugging facility to * validate the correctness of this serialization. * * Return: %0 on success or negative errno value on error */ int smp_call_function_single_async(int cpu, call_single_data_t *csd) { int err = 0; preempt_disable(); if (csd->node.u_flags & CSD_FLAG_LOCK) { err = -EBUSY; goto out; } csd->node.u_flags = CSD_FLAG_LOCK; smp_wmb(); err = generic_exec_single(cpu, csd); out: preempt_enable(); return err; } EXPORT_SYMBOL_GPL(smp_call_function_single_async); /* * smp_call_function_any - Run a function on any of the given cpus * @mask: The mask of cpus it can run on. * @func: The function to run. This must be fast and non-blocking. 
* @info: An arbitrary pointer to pass to the function. * @wait: If true, wait until function has completed. * * Returns 0 on success, else a negative status code (if no cpus were online). * * Selection preference: * 1) current cpu if in @mask * 2) any cpu of current node if in @mask * 3) any other online cpu in @mask */ int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait) { unsigned int cpu; const struct cpumask *nodemask; int ret; /* Try for same CPU (cheapest) */ cpu = get_cpu(); if (cpumask_test_cpu(cpu, mask)) goto call; /* Try for same node. */ nodemask = cpumask_of_node(cpu_to_node(cpu)); for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; cpu = cpumask_next_and(cpu, nodemask, mask)) { if (cpu_online(cpu)) goto call; } /* Any online will do: smp_call_function_single handles nr_cpu_ids. */ cpu = cpumask_any_and(mask, cpu_online_mask); call: ret = smp_call_function_single(cpu, func, info, wait); put_cpu(); return ret; } EXPORT_SYMBOL_GPL(smp_call_function_any); /* * Flags to be used as scf_flags argument of smp_call_function_many_cond(). * * %SCF_WAIT: Wait until function execution is completed * %SCF_RUN_LOCAL: Run also locally if local cpu is set in cpumask */ #define SCF_WAIT (1U << 0) #define SCF_RUN_LOCAL (1U << 1) static void smp_call_function_many_cond(const struct cpumask *mask, smp_call_func_t func, void *info, unsigned int scf_flags, smp_cond_func_t cond_func) { int cpu, last_cpu, this_cpu = smp_processor_id(); struct call_function_data *cfd; bool wait = scf_flags & SCF_WAIT; int nr_cpus = 0; bool run_remote = false; bool run_local = false; lockdep_assert_preemption_disabled(); /* * Can deadlock when called with interrupts disabled. * We allow cpu's that are not yet online though, as no one else can * send smp call function interrupt to this cpu and as such deadlocks * can't happen. */ if (cpu_online(this_cpu) && !oops_in_progress && !early_boot_irqs_disabled) lockdep_assert_irqs_enabled(); /* * When @wait we can deadlock when we interrupt between llist_add() and * arch_send_call_function_ipi*(); when !@wait we can deadlock due to * csd_lock() on because the interrupt context uses the same csd * storage. */ WARN_ON_ONCE(!in_task()); /* Check if we need local execution. */ if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) && (!cond_func || cond_func(this_cpu, info))) run_local = true; /* Check if we need remote execution, i.e., any CPU excluding this one. */ cpu = cpumask_first_and(mask, cpu_online_mask); if (cpu == this_cpu) cpu = cpumask_next_and(cpu, mask, cpu_online_mask); if (cpu < nr_cpu_ids) run_remote = true; if (run_remote) { cfd = this_cpu_ptr(&cfd_data); cpumask_and(cfd->cpumask, mask, cpu_online_mask); __cpumask_clear_cpu(this_cpu, cfd->cpumask); cpumask_clear(cfd->cpumask_ipi); for_each_cpu(cpu, cfd->cpumask) { call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu); if (cond_func && !cond_func(cpu, info)) { __cpumask_clear_cpu(cpu, cfd->cpumask); continue; } csd_lock(csd); if (wait) csd->node.u_flags |= CSD_TYPE_SYNC; csd->func = func; csd->info = info; #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG csd->node.src = smp_processor_id(); csd->node.dst = cpu; #endif trace_csd_queue_cpu(cpu, _RET_IP_, func, csd); if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) { __cpumask_set_cpu(cpu, cfd->cpumask_ipi); nr_cpus++; last_cpu = cpu; } } /* * Choose the most efficient way to send an IPI. Note that the * number of CPUs might be zero due to concurrent changes to the * provided mask. 
*/ if (nr_cpus == 1) send_call_function_single_ipi(last_cpu); else if (likely(nr_cpus > 1)) send_call_function_ipi_mask(cfd->cpumask_ipi); } if (run_local) { unsigned long flags; local_irq_save(flags); csd_do_func(func, info, NULL); local_irq_restore(flags); } if (run_remote && wait) { for_each_cpu(cpu, cfd->cpumask) { call_single_data_t *csd; csd = per_cpu_ptr(cfd->csd, cpu); csd_lock_wait(csd); } } } /** * smp_call_function_many(): Run a function on a set of CPUs. * @mask: The set of cpus to run on (only runs on online subset). * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait * (atomically) until function has completed on other CPUs. If * %SCF_RUN_LOCAL is set, the function will also be run locally * if the local CPU is set in the @cpumask. * * If @wait is true, then returns once @func has returned. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. Preemption * must be disabled when calling this function. */ void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) { smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL); } EXPORT_SYMBOL(smp_call_function_many); /** * smp_call_function(): Run a function on all other CPUs. * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait (atomically) until function has completed * on other CPUs. * * Returns 0. * * If @wait is true, then returns once @func has returned; otherwise * it returns just before the target cpu calls @func. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ void smp_call_function(smp_call_func_t func, void *info, int wait) { preempt_disable(); smp_call_function_many(cpu_online_mask, func, info, wait); preempt_enable(); } EXPORT_SYMBOL(smp_call_function); /* Setup configured maximum number of CPUs to activate */ unsigned int setup_max_cpus = NR_CPUS; EXPORT_SYMBOL(setup_max_cpus); /* * Setup routine for controlling SMP activation * * Command-line option of "nosmp" or "maxcpus=0" will disable SMP * activation entirely (the MPS table probe still happens, though). * * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer * greater than 0, limits the maximum number of CPUs activated in * SMP mode to <NUM>. 
*/ void __weak __init arch_disable_smp_support(void) { } static int __init nosmp(char *str) { setup_max_cpus = 0; arch_disable_smp_support(); return 0; } early_param("nosmp", nosmp); /* this is hard limit */ static int __init nrcpus(char *str) { int nr_cpus; if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids) set_nr_cpu_ids(nr_cpus); return 0; } early_param("nr_cpus", nrcpus); static int __init maxcpus(char *str) { get_option(&str, &setup_max_cpus); if (setup_max_cpus == 0) arch_disable_smp_support(); return 0; } early_param("maxcpus", maxcpus); #if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS) /* Setup number of possible processor ids */ unsigned int nr_cpu_ids __read_mostly = NR_CPUS; EXPORT_SYMBOL(nr_cpu_ids); #endif /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */ void __init setup_nr_cpu_ids(void) { set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1); } /* Called by boot processor to activate the rest. */ void __init smp_init(void) { int num_nodes, num_cpus; idle_threads_init(); cpuhp_threads_init(); pr_info("Bringing up secondary CPUs ...\n"); bringup_nonboot_cpus(setup_max_cpus); num_nodes = num_online_nodes(); num_cpus = num_online_cpus(); pr_info("Brought up %d node%s, %d CPU%s\n", num_nodes, str_plural(num_nodes), num_cpus, str_plural(num_cpus)); /* Any cleanup work */ smp_cpus_done(setup_max_cpus); } /* * on_each_cpu_cond(): Call a function on each processor for which * the supplied function cond_func returns true, optionally waiting * for all the required CPUs to finish. This may include the local * processor. * @cond_func: A callback function that is passed a cpu id and * the info parameter. The function is called * with preemption disabled. The function should * return a blooean value indicating whether to IPI * the specified CPU. * @func: The function to run on all applicable CPUs. * This must be fast and non-blocking. * @info: An arbitrary pointer to pass to both functions. * @wait: If true, wait (atomically) until function has * completed on other CPUs. * * Preemption is disabled to protect against CPUs going offline but not online. * CPUs going online during the call will not be seen or sent an IPI. * * You must not call this function with disabled interrupts or * from a hardware interrupt handler or from a bottom half handler. */ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, void *info, bool wait, const struct cpumask *mask) { unsigned int scf_flags = SCF_RUN_LOCAL; if (wait) scf_flags |= SCF_WAIT; preempt_disable(); smp_call_function_many_cond(mask, func, info, scf_flags, cond_func); preempt_enable(); } EXPORT_SYMBOL(on_each_cpu_cond_mask); static void do_nothing(void *unused) { } /** * kick_all_cpus_sync - Force all cpus out of idle * * Used to synchronize the update of pm_idle function pointer. It's * called after the pointer is updated and returns after the dummy * callback function has been executed on all cpus. The execution of * the function can only happen on the remote cpus after they have * left the idle function which had been called via pm_idle function * pointer. So it's guaranteed that nothing uses the previous pointer * anymore. 
*/ void kick_all_cpus_sync(void) { /* Make sure the change is visible before we kick the cpus */ smp_mb(); smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(kick_all_cpus_sync); /** * wake_up_all_idle_cpus - break all cpus out of idle * wake_up_all_idle_cpus try to break all cpus which is in idle state even * including idle polling cpus, for non-idle cpus, we will do nothing * for them. */ void wake_up_all_idle_cpus(void) { int cpu; for_each_possible_cpu(cpu) { preempt_disable(); if (cpu != smp_processor_id() && cpu_online(cpu)) wake_up_if_idle(cpu); preempt_enable(); } } EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus); /** * struct smp_call_on_cpu_struct - Call a function on a specific CPU * @work: &work_struct * @done: &completion to signal * @func: function to call * @data: function's data argument * @ret: return value from @func * @cpu: target CPU (%-1 for any CPU) * * Used to call a function on a specific cpu and wait for it to return. * Optionally make sure the call is done on a specified physical cpu via vcpu * pinning in order to support virtualized environments. */ struct smp_call_on_cpu_struct { struct work_struct work; struct completion done; int (*func)(void *); void *data; int ret; int cpu; }; static void smp_call_on_cpu_callback(struct work_struct *work) { struct smp_call_on_cpu_struct *sscs; sscs = container_of(work, struct smp_call_on_cpu_struct, work); if (sscs->cpu >= 0) hypervisor_pin_vcpu(sscs->cpu); sscs->ret = sscs->func(sscs->data); if (sscs->cpu >= 0) hypervisor_pin_vcpu(-1); complete(&sscs->done); } int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) { struct smp_call_on_cpu_struct sscs = { .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done), .func = func, .data = par, .cpu = phys ? cpu : -1, }; INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback); if (cpu >= nr_cpu_ids || !cpu_online(cpu)) return -ENXIO; queue_work_on(cpu, system_wq, &sscs.work); wait_for_completion(&sscs.done); destroy_work_on_stack(&sscs.work); return sscs.ret; } EXPORT_SYMBOL_GPL(smp_call_on_cpu);
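/*
 * Illustrative kernel-module sketch (not part of smp.c above): runs a trivial
 * callback on another CPU with wait=1, exercising the synchronous CSD path
 * (csd_lock() / generic_exec_single() / csd_lock_wait()). The module and the
 * target CPU number are assumptions, not anything defined in this file.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>

static void demo_func(void *info)
{
	pr_info("csd demo: running on CPU %d, info=%s\n",
		smp_processor_id(), (const char *)info);
}

static int __init csd_demo_init(void)
{
	int err;

	/* CPU 1 is arbitrary; smp_call_function_single() returns -ENXIO
	 * if that CPU is not online. */
	err = smp_call_function_single(1, demo_func, "hello", 1);
	pr_info("csd demo: smp_call_function_single() returned %d\n", err);
	return 0;
}

static void __exit csd_demo_exit(void)
{
}

module_init(csd_demo_init);
module_exit(csd_demo_exit);
MODULE_LICENSE("GPL");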