// SPDX-License-Identifier: GPL-2.0
#include <linux/build_bug.h>
#include <linux/errno.h>
#include <linux/errname.h>
#include <linux/kernel.h>
#include <linux/math.h>

/*
 * Ensure these tables do not accidentally become gigantic if some
 * huge errno makes it in. On most architectures, the first table will
 * only have about 140 entries, but mips and parisc have more sparsely
 * allocated errnos (with EHWPOISON = 257 on parisc, and EDQUOT = 1133
 * on mips), so this wastes a bit of space on those - though we
 * special-case EDQUOT.
 */
#define E(err) [err + BUILD_BUG_ON_ZERO(err <= 0 || err > 300)] = "-" #err
static const char *names_0[] = {
	E(E2BIG),
	E(EACCES),
	E(EADDRINUSE),
	E(EADDRNOTAVAIL),
	E(EADV),
	E(EAFNOSUPPORT),
	E(EAGAIN), /* EWOULDBLOCK */
	E(EALREADY),
	E(EBADE),
	E(EBADF),
	E(EBADFD),
	E(EBADMSG),
	E(EBADR),
	E(EBADRQC),
	E(EBADSLT),
	E(EBFONT),
	E(EBUSY),
	E(ECANCELED), /* ECANCELLED */
	E(ECHILD),
	E(ECHRNG),
	E(ECOMM),
	E(ECONNABORTED),
	E(ECONNREFUSED), /* EREFUSED */
	E(ECONNRESET),
	E(EDEADLK), /* EDEADLOCK */
#if EDEADLK != EDEADLOCK /* mips, sparc, powerpc */
	E(EDEADLOCK),
#endif
	E(EDESTADDRREQ),
	E(EDOM),
	E(EDOTDOT),
#ifndef CONFIG_MIPS
	E(EDQUOT),
#endif
	E(EEXIST),
	E(EFAULT),
	E(EFBIG),
	E(EHOSTDOWN),
	E(EHOSTUNREACH),
	E(EHWPOISON),
	E(EIDRM),
	E(EILSEQ),
#ifdef EINIT
	E(EINIT),
#endif
	E(EINPROGRESS),
	E(EINTR),
	E(EINVAL),
	E(EIO),
	E(EISCONN),
	E(EISDIR),
	E(EISNAM),
	E(EKEYEXPIRED),
	E(EKEYREJECTED),
	E(EKEYREVOKED),
	E(EL2HLT),
	E(EL2NSYNC),
	E(EL3HLT),
	E(EL3RST),
	E(ELIBACC),
	E(ELIBBAD),
	E(ELIBEXEC),
	E(ELIBMAX),
	E(ELIBSCN),
	E(ELNRNG),
	E(ELOOP),
	E(EMEDIUMTYPE),
	E(EMFILE),
	E(EMLINK),
	E(EMSGSIZE),
	E(EMULTIHOP),
	E(ENAMETOOLONG),
	E(ENAVAIL),
	E(ENETDOWN),
	E(ENETRESET),
	E(ENETUNREACH),
	E(ENFILE),
	E(ENOANO),
	E(ENOBUFS),
	E(ENOCSI),
	E(ENODATA),
	E(ENODEV),
	E(ENOENT),
	E(ENOEXEC),
	E(ENOKEY),
	E(ENOLCK),
	E(ENOLINK),
	E(ENOMEDIUM),
	E(ENOMEM),
	E(ENOMSG),
	E(ENONET),
	E(ENOPKG),
	E(ENOPROTOOPT),
	E(ENOSPC),
	E(ENOSR),
	E(ENOSTR),
	E(ENOSYS),
	E(ENOTBLK),
	E(ENOTCONN),
	E(ENOTDIR),
	E(ENOTEMPTY),
	E(ENOTNAM),
	E(ENOTRECOVERABLE),
	E(ENOTSOCK),
	E(ENOTTY),
	E(ENOTUNIQ),
	E(ENXIO),
	E(EOPNOTSUPP),
	E(EOVERFLOW),
	E(EOWNERDEAD),
	E(EPERM),
	E(EPFNOSUPPORT),
	E(EPIPE),
#ifdef EPROCLIM
	E(EPROCLIM),
#endif
	E(EPROTO),
	E(EPROTONOSUPPORT),
	E(EPROTOTYPE),
	E(ERANGE),
	E(EREMCHG),
#ifdef EREMDEV
	E(EREMDEV),
#endif
	E(EREMOTE),
	E(EREMOTEIO),
	E(ERESTART),
	E(ERFKILL),
	E(EROFS),
#ifdef ERREMOTE
	E(ERREMOTE),
#endif
	E(ESHUTDOWN),
	E(ESOCKTNOSUPPORT),
	E(ESPIPE),
	E(ESRCH),
	E(ESRMNT),
	E(ESTALE),
	E(ESTRPIPE),
	E(ETIME),
	E(ETIMEDOUT),
	E(ETOOMANYREFS),
	E(ETXTBSY),
	E(EUCLEAN),
	E(EUNATCH),
	E(EUSERS),
	E(EXDEV),
	E(EXFULL),
};
#undef E

#ifdef EREFUSED /* parisc */
static_assert(EREFUSED == ECONNREFUSED);
#endif
#ifdef ECANCELLED /* parisc */
static_assert(ECANCELLED == ECANCELED);
#endif
static_assert(EAGAIN == EWOULDBLOCK); /* everywhere */

#define E(err) [err - 512 + BUILD_BUG_ON_ZERO(err < 512 || err > 550)] = "-" #err
static const char *names_512[] = {
	E(ERESTARTSYS),
	E(ERESTARTNOINTR),
	E(ERESTARTNOHAND),
	E(ENOIOCTLCMD),
	E(ERESTART_RESTARTBLOCK),
	E(EPROBE_DEFER),
	E(EOPENSTALE),
	E(ENOPARAM),
	E(EBADHANDLE),
	E(ENOTSYNC),
	E(EBADCOOKIE),
	E(ENOTSUPP),
	E(ETOOSMALL),
	E(ESERVERFAULT),
	E(EBADTYPE),
	E(EJUKEBOX),
	E(EIOCBQUEUED),
	E(ERECALLCONFLICT),
};
#undef E

static const char *__errname(unsigned err)
{
	if (err < ARRAY_SIZE(names_0))
		return names_0[err];
	if (err >= 512 && err - 512 < ARRAY_SIZE(names_512))
		return names_512[err - 512];
	/* But why? */
	if (IS_ENABLED(CONFIG_MIPS) && err == EDQUOT) /* 1133 */
		return "-EDQUOT";
	return NULL;
}

/*
 * errname(EIO) -> "EIO"
 * errname(-EIO) -> "-EIO"
 */
const char *errname(int err)
{
	const char *name = __errname(abs(err));

	if (!name)
		return NULL;

	return err > 0 ? name + 1 : name;
}
EXPORT_SYMBOL(errname);
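/*
 * Editor's illustration (not part of lib/errname.c): a minimal sketch of
 * how a caller might consume errname(). The function name
 * report_probe_failure() and the pr_warn() messages are illustrative
 * assumptions; in current kernels the same symbolic output is normally
 * obtained via the %pe printk specifier, which is backed by errname().
 */
#if 0
#include <linux/errname.h>
#include <linux/err.h>
#include <linux/printk.h>

static void report_probe_failure(int err)	/* e.g. err = -EIO */
{
	/* errname(-EIO) returns "-EIO"; unknown values yield NULL */
	pr_warn("probe failed: %s (%d)\n",
		errname(err) ? errname(err) : "unknown", err);

	/* equivalent via the %pe extension with an ERR_PTR()-encoded value */
	pr_warn("probe failed: %pe\n", ERR_PTR(err));
}
#endif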
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/dcache.h> /* d_unlinked */
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/mm.h>

#include "inotify.h"

/*
 * Check if 2 events contain the same information.
 */
static bool event_compare(struct fsnotify_event *old_fsn,
			  struct fsnotify_event *new_fsn)
{
	struct inotify_event_info *old, *new;

	old = INOTIFY_E(old_fsn);
	new = INOTIFY_E(new_fsn);
	if (old->mask & FS_IN_IGNORED)
		return false;
	if ((old->mask == new->mask) &&
	    (old->wd == new->wd) &&
	    (old->name_len == new->name_len) &&
	    (!old->name_len || !strcmp(old->name, new->name)))
		return true;
	return false;
}

static int inotify_merge(struct fsnotify_group *group,
			 struct fsnotify_event *event)
{
	struct list_head *list = &group->notification_list;
	struct fsnotify_event *last_event;

	last_event = list_entry(list->prev, struct fsnotify_event, list);
	return event_compare(last_event, event);
}

int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
			       struct inode *inode, struct inode *dir,
			       const struct qstr *name, u32 cookie)
{
	struct inotify_inode_mark *i_mark;
	struct inotify_event_info *event;
	struct fsnotify_event *fsn_event;
	struct fsnotify_group *group = inode_mark->group;
	int ret;
	int len = 0, wd;
	int alloc_len = sizeof(struct inotify_event_info);
	struct mem_cgroup *old_memcg;

	if (name) {
		len = name->len;
		alloc_len += len + 1;
	}

	pr_debug("%s: group=%p mark=%p mask=%x\n", __func__, group, inode_mark,
		 mask);

	i_mark = container_of(inode_mark, struct inotify_inode_mark,
			      fsn_mark);

	/*
	 * We can be racing with mark being detached. Don't report event with
	 * invalid wd.
	 */
	wd = READ_ONCE(i_mark->wd);
	if (wd == -1)
		return 0;
	/*
	 * Whoever is interested in the event, pays for the allocation. Do not
	 * trigger OOM killer in the target monitoring memcg as it may have
	 * security repercussions.
	 */
	old_memcg = set_active_memcg(group->memcg);
	event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	set_active_memcg(old_memcg);

	if (unlikely(!event)) {
		/*
		 * Treat lost event due to ENOMEM the same way as queue
		 * overflow to let userspace know event was lost.
		 */
		fsnotify_queue_overflow(group);
		return -ENOMEM;
	}

	/*
	 * We now report FS_ISDIR flag with MOVE_SELF and DELETE_SELF events
	 * for fanotify. inotify never reported IN_ISDIR with those events.
	 * It looks like an oversight, but to avoid the risk of breaking
	 * existing inotify programs, mask the flag out from those events.
	 */
	if (mask & (IN_MOVE_SELF | IN_DELETE_SELF))
		mask &= ~IN_ISDIR;

	fsn_event = &event->fse;
	fsnotify_init_event(fsn_event);
	event->mask = mask;
	event->wd = wd;
	event->sync_cookie = cookie;
	event->name_len = len;
	if (len)
		strscpy(event->name, name->name, event->name_len + 1);

	ret = fsnotify_add_event(group, fsn_event, inotify_merge);
	if (ret) {
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);
	}

	if (inode_mark->flags & FSNOTIFY_MARK_FLAG_IN_ONESHOT)
		fsnotify_destroy_mark(inode_mark, group);

	return 0;
}

static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark,
				 struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(fsn_mark, group);
}

/*
 * This is NEVER supposed to be called. Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down. This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	static bool warned = false;

	if (warned)
		return 0;

	warned = true;
	fsn_mark = p;
	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
		"idr. Probably leaking memory\n", id, p, data);

	/*
	 * I'm taking the liberty of assuming that the mark in question is a
	 * valid address and I'm dereferencing it. This might help to figure
	 * out why we got here and the panic is no worse than the original
	 * BUG() that was here.
	 */
	if (fsn_mark)
		printk(KERN_WARNING "fsn_mark->group=%p wd=%d\n",
			fsn_mark->group, i_mark->wd);
	return 0;
}

static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the BUG in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_destroy(&group->inotify_data.idr);
	if (group->inotify_data.ucounts)
		dec_inotify_instances(group->inotify_data.ucounts);
}

static void inotify_free_event(struct fsnotify_group *group,
			       struct fsnotify_event *fsn_event)
{
	kfree(INOTIFY_E(fsn_event));
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}

const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_inode_event = inotify_handle_inode_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event = inotify_free_event,
	.freeing_mark = inotify_freeing_mark,
	.free_mark = inotify_free_mark,
};
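/*
 * Editor's illustration (not part of this file): the user-visible effect of
 * inotify_merge() above is that an event identical to the tail of the
 * notification queue is dropped instead of queued again. A minimal
 * userspace sketch; the watched path "/tmp/watched" is an assumption:
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init();
	int wd = inotify_add_watch(fd, "/tmp/watched", IN_MODIFY);

	/* two quick writes to the file typically surface as ONE queued
	 * IN_MODIFY event, because the second compares equal to the
	 * last event and is merged away */
	ssize_t n = read(fd, buf, sizeof(buf));

	printf("wd=%d, read %zd bytes of events\n", wd, n);
	return 0;
}
#endif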
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Etoms Et61x151 GPL Linux driver by Michel Xhaard (09/09/2004)
 *
 * V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "etoms"

#include "gspca.h"

MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("Etoms USB Camera Driver");
MODULE_LICENSE("GPL");

/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */

	unsigned char autogain;

	char sensor;
#define SENSOR_PAS106 0
#define SENSOR_TAS5130CXX 1
	signed char ag_cnt;
#define AG_CNT_START 13
};

static const struct v4l2_pix_format vga_mode[] = {
	{320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 1},
/*	{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 480,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0}, */
};

static const struct v4l2_pix_format sif_mode[] = {
	{176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
		.bytesperline = 176,
		.sizeimage = 176 * 144,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 1},
	{352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
		.bytesperline = 352,
		.sizeimage = 352 * 288,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0},
};

#define ETOMS_ALT_SIZE_1000 12

#define ET_GPIO_DIR_CTRL 0x04	/* Control IO bit[0..5] (0 in 1 out) */
#define ET_GPIO_OUT 0x05	/* Only IO data */
#define ET_GPIO_IN 0x06		/* Read Only IO data */
#define ET_RESET_ALL 0x03
#define ET_ClCK 0x01
#define ET_CTRL 0x02		/* enable i2c OutClck Powerdown mode */

#define ET_COMP 0x12		/* Compression register */
#define ET_MAXQt 0x13
#define ET_MINQt 0x14
#define ET_COMP_VAL0 0x02
#define ET_COMP_VAL1 0x03

#define ET_REG1d 0x1d
#define ET_REG1e 0x1e
#define ET_REG1f 0x1f
#define ET_REG20 0x20
#define ET_REG21 0x21
#define ET_REG22 0x22
#define ET_REG23 0x23
#define ET_REG24 0x24
#define ET_REG25 0x25
/* base registers for luma calculation */
#define ET_LUMA_CENTER 0x39

#define ET_G_RED 0x4d
#define ET_G_GREEN1 0x4e
#define ET_G_BLUE 0x4f
#define ET_G_GREEN2 0x50
#define ET_G_GR_H 0x51
#define ET_G_GB_H 0x52

#define ET_O_RED 0x34
#define ET_O_GREEN1 0x35
#define ET_O_BLUE 0x36
#define ET_O_GREEN2 0x37

#define ET_SYNCHRO 0x68
#define ET_STARTX 0x69
#define ET_STARTY 0x6a
#define ET_WIDTH_LOW 0x6b
#define ET_HEIGTH_LOW 0x6c
#define ET_W_H_HEIGTH 0x6d

#define ET_REG6e 0x6e		/* OBW */
#define ET_REG6f 0x6f		/* OBW */
#define ET_REG70 0x70		/* OBW_AWB */
#define ET_REG71 0x71		/* OBW_AWB */
#define ET_REG72 0x72		/* OBW_AWB */
#define ET_REG73 0x73		/* Clkdelay ns */
#define ET_REG74 0x74		/* test pattern */
#define ET_REG75 0x75		/* test pattern */

#define ET_I2C_CLK 0x8c
#define ET_PXL_CLK 0x60

#define ET_I2C_BASE 0x89
#define ET_I2C_COUNT 0x8a
#define ET_I2C_PREFETCH 0x8b
#define ET_I2C_REG 0x88
#define ET_I2C_DATA7 0x87
#define ET_I2C_DATA6 0x86
#define ET_I2C_DATA5 0x85
#define ET_I2C_DATA4 0x84
#define ET_I2C_DATA3 0x83
#define ET_I2C_DATA2 0x82
#define ET_I2C_DATA1 0x81
#define ET_I2C_DATA0 0x80

#define PAS106_REG2 0x02	/* pxlClk = systemClk/(reg2) */
#define PAS106_REG3 0x03	/* line/frame H [11..4] */
#define PAS106_REG4 0x04	/* line/frame L [3..0] */
#define PAS106_REG5 0x05	/* exposure time line offset(default 5) */
#define PAS106_REG6 0x06	/* exposure time pixel offset(default 6) */
#define PAS106_REG7 0x07	/* signbit Dac (default 0) */
#define PAS106_REG9 0x09
#define PAS106_REG0e 0x0e	/* global gain [4..0](default 0x0e) */
#define PAS106_REG13 0x13	/* end i2c write */

static const __u8 GainRGBG[] = { 0x80, 0x80, 0x80, 0x80, 0x00, 0x00 };

static const __u8 I2c2[] = { 0x08, 0x08, 0x08, 0x08, 0x0d };

static const __u8 I2c3[] = { 0x12, 0x05 };

static const __u8 I2c4[] = { 0x41, 0x08 };

/* read 'len' bytes to gspca_dev->usb_buf */
static void reg_r(struct gspca_dev *gspca_dev,
		  __u16 index,
		  __u16 len)
{
	struct usb_device *dev = gspca_dev->dev;

	if (len > USB_BUF_SZ) {
		gspca_err(gspca_dev, "reg_r: buffer overflow\n");
		return;
	}
	usb_control_msg(dev,
			usb_rcvctrlpipe(dev, 0),
			0,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			0,
			index, gspca_dev->usb_buf, len, 500);
	gspca_dbg(gspca_dev, D_USBI, "reg read [%02x] -> %02x ..\n",
		  index, gspca_dev->usb_buf[0]);
}

static void reg_w_val(struct gspca_dev *gspca_dev,
		      __u16 index,
		      __u8 val)
{
	struct usb_device *dev = gspca_dev->dev;

	gspca_dev->usb_buf[0] = val;
	usb_control_msg(dev,
			usb_sndctrlpipe(dev, 0),
			0,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			0,
			index, gspca_dev->usb_buf, 1, 500);
}

static void reg_w(struct gspca_dev *gspca_dev,
		  __u16 index,
		  const __u8 *buffer,
		  __u16 len)
{
	struct usb_device *dev = gspca_dev->dev;

	if (len > USB_BUF_SZ) {
		pr_err("reg_w: buffer overflow\n");
		return;
	}
	gspca_dbg(gspca_dev, D_USBO, "reg write [%02x] = %02x..\n",
		  index, *buffer);
	memcpy(gspca_dev->usb_buf, buffer, len);
	usb_control_msg(dev,
			usb_sndctrlpipe(dev, 0),
			0,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			0, index, gspca_dev->usb_buf, len, 500);
}

static int i2c_w(struct gspca_dev *gspca_dev,
		 __u8 reg,
		 const __u8 *buffer,
		 int len, __u8 mode)
{
	/* buffer should be [D0..D7] */
	__u8 ptchcount;

	/* set the base address */
	reg_w_val(gspca_dev, ET_I2C_BASE, 0x40);
					/* sensor base for the pas106 */
	/* set count and prefetch */
	ptchcount = ((len & 0x07) << 4) | (mode & 0x03);
	reg_w_val(gspca_dev, ET_I2C_COUNT, ptchcount);
	/* set the register base */
	reg_w_val(gspca_dev, ET_I2C_REG, reg);
	while (--len >= 0)
		reg_w_val(gspca_dev, ET_I2C_DATA0 + len, buffer[len]);
	return 0;
}

static int i2c_r(struct gspca_dev *gspca_dev,
		 __u8 reg)
{
	/* set the base address */
	reg_w_val(gspca_dev, ET_I2C_BASE, 0x40);
					/* sensor base for the pas106 */
	/* set count and prefetch (cnd: 4 bits - mode: 4 bits) */
	reg_w_val(gspca_dev, ET_I2C_COUNT, 0x11);
	reg_w_val(gspca_dev, ET_I2C_REG, reg);	/* set the register base */
	reg_w_val(gspca_dev, ET_I2C_PREFETCH, 0x02);	/* prefetch */
	reg_w_val(gspca_dev, ET_I2C_PREFETCH, 0x00);
	reg_r(gspca_dev, ET_I2C_DATA0, 1);	/* read one byte */
	return 0;
}

static int Et_WaitStatus(struct gspca_dev *gspca_dev)
{
	int retry = 10;

	while (retry--) {
		reg_r(gspca_dev, ET_ClCK, 1);
		if (gspca_dev->usb_buf[0] != 0)
			return 1;
	}
	return 0;
}

static int et_video(struct gspca_dev *gspca_dev,
		    int on)
{
	int ret;

	reg_w_val(gspca_dev, ET_GPIO_OUT,
		  on ? 0x10	/* startvideo - set Bit5 */
		     : 0);	/* stopvideo */
	ret = Et_WaitStatus(gspca_dev);
	if (ret != 0)
		gspca_err(gspca_dev, "timeout video on/off\n");
	return ret;
}

static void Et_init2(struct gspca_dev *gspca_dev)
{
	__u8 value;
	static const __u8 FormLine[] = { 0x84, 0x03, 0x14, 0xf4, 0x01, 0x05 };

	gspca_dbg(gspca_dev, D_STREAM, "Open Init2 ET\n");
	reg_w_val(gspca_dev, ET_GPIO_DIR_CTRL, 0x2f);
	reg_w_val(gspca_dev, ET_GPIO_OUT, 0x10);
	reg_r(gspca_dev, ET_GPIO_IN, 1);
	reg_w_val(gspca_dev, ET_ClCK, 0x14); /* 0x14 // 0x16 enabled pattern */
	reg_w_val(gspca_dev, ET_CTRL, 0x1b);

	/* compression and subsampling */
	if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv)
		value = ET_COMP_VAL1;	/* 320 */
	else
		value = ET_COMP_VAL0;	/* 640 */
	reg_w_val(gspca_dev, ET_COMP, value);
	reg_w_val(gspca_dev, ET_MAXQt, 0x1f);
	reg_w_val(gspca_dev, ET_MINQt, 0x04);
	/* undocumented registers */
	reg_w_val(gspca_dev, ET_REG1d, 0xff);
	reg_w_val(gspca_dev, ET_REG1e, 0xff);
	reg_w_val(gspca_dev, ET_REG1f, 0xff);
	reg_w_val(gspca_dev, ET_REG20, 0x35);
	reg_w_val(gspca_dev, ET_REG21, 0x01);
	reg_w_val(gspca_dev, ET_REG22, 0x00);
	reg_w_val(gspca_dev, ET_REG23, 0xff);
	reg_w_val(gspca_dev, ET_REG24, 0xff);
	reg_w_val(gspca_dev, ET_REG25, 0x0f);
	/* colors setting */
	reg_w_val(gspca_dev, 0x30, 0x11);		/* 0x30 */
	reg_w_val(gspca_dev, 0x31, 0x40);
	reg_w_val(gspca_dev, 0x32, 0x00);
	reg_w_val(gspca_dev, ET_O_RED, 0x00);		/* 0x34 */
	reg_w_val(gspca_dev, ET_O_GREEN1, 0x00);
	reg_w_val(gspca_dev, ET_O_BLUE, 0x00);
	reg_w_val(gspca_dev, ET_O_GREEN2, 0x00);
	/*************/
	reg_w_val(gspca_dev, ET_G_RED, 0x80);		/* 0x4d */
	reg_w_val(gspca_dev, ET_G_GREEN1, 0x80);
	reg_w_val(gspca_dev, ET_G_BLUE, 0x80);
	reg_w_val(gspca_dev, ET_G_GREEN2, 0x80);
	reg_w_val(gspca_dev, ET_G_GR_H, 0x00);
	reg_w_val(gspca_dev, ET_G_GB_H, 0x00);		/* 0x52 */
	/* Window control registers */
	reg_w_val(gspca_dev, 0x61, 0x80);	/* use cmc_out */
	reg_w_val(gspca_dev, 0x62, 0x02);
	reg_w_val(gspca_dev, 0x63, 0x03);
	reg_w_val(gspca_dev, 0x64, 0x14);
	reg_w_val(gspca_dev, 0x65, 0x0e);
	reg_w_val(gspca_dev, 0x66, 0x02);
	reg_w_val(gspca_dev, 0x67, 0x02);
	/**************************************/
	reg_w_val(gspca_dev, ET_SYNCHRO, 0x8f);		/* 0x68 */
	reg_w_val(gspca_dev, ET_STARTX, 0x69);		/* 0x6a //0x69 */
	reg_w_val(gspca_dev, ET_STARTY, 0x0d);		/* 0x0d //0x0c */
	reg_w_val(gspca_dev, ET_WIDTH_LOW, 0x80);
	reg_w_val(gspca_dev, ET_HEIGTH_LOW, 0xe0);
	reg_w_val(gspca_dev, ET_W_H_HEIGTH, 0x60);	/* 6d */
	reg_w_val(gspca_dev, ET_REG6e, 0x86);
	reg_w_val(gspca_dev, ET_REG6f, 0x01);
	reg_w_val(gspca_dev, ET_REG70, 0x26);
	reg_w_val(gspca_dev, ET_REG71, 0x7a);
	reg_w_val(gspca_dev, ET_REG72, 0x01);
	/* Clock Pattern registers ***************** */
	reg_w_val(gspca_dev, ET_REG73, 0x00);
	reg_w_val(gspca_dev, ET_REG74, 0x18);		/* 0x28 */
	reg_w_val(gspca_dev, ET_REG75, 0x0f);		/* 0x01 */
	/**********************************************/
	reg_w_val(gspca_dev, 0x8a, 0x20);
	reg_w_val(gspca_dev, 0x8d, 0x0f);
	reg_w_val(gspca_dev, 0x8e, 0x08);
	/**************************************/
	reg_w_val(gspca_dev, 0x03, 0x08);
	reg_w_val(gspca_dev, ET_PXL_CLK, 0x03);
	reg_w_val(gspca_dev, 0x81, 0xff);
	reg_w_val(gspca_dev, 0x80, 0x00);
	reg_w_val(gspca_dev, 0x81, 0xff);
	reg_w_val(gspca_dev, 0x80, 0x20);
	reg_w_val(gspca_dev, 0x03, 0x01);
	reg_w_val(gspca_dev, 0x03, 0x00);
	reg_w_val(gspca_dev, 0x03, 0x08);
	/********************************************/

/*	reg_r(gspca_dev, ET_I2C_BASE, 1);
					always 0x40 as the pas106 ??? */
	/* set the sensor */
	if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv)
		value = 0x04;		/* 320 */
	else				/* 640 */
		value = 0x1e;	/* 0x17
				 * setting PixelClock
				 * 0x03 means 24/(3+1) = 6 MHz
				 * 0x05 -> 24/(5+1) = 4 MHz
				 * 0x0b -> 24/(11+1) = 2 MHz
				 * 0x17 -> 24/(23+1) = 1 MHz
				 */
	reg_w_val(gspca_dev, ET_PXL_CLK, value);
	/* now set by fifo the FormatLine setting */
	reg_w(gspca_dev, 0x62, FormLine, 6);

	/* set exposure times [ 0..0x78] 0->longvalue 0x78->shortvalue */
	reg_w_val(gspca_dev, 0x81, 0x47);	/* 0x47; */
	reg_w_val(gspca_dev, 0x80, 0x40);	/* 0x40; */
	/* Pedro change */
	/* Brightness change: Bright+ decreases the value, */
	/* Bright- increases the value */
	/* original value = 0x70; */
	reg_w_val(gspca_dev, 0x81, 0x30);	/* 0x20; - set brightness */
	reg_w_val(gspca_dev, 0x80, 0x20);	/* 0x20; */
}

static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
	int i;

	for (i = 0; i < 4; i++)
		reg_w_val(gspca_dev, ET_O_RED + i, val);
}

static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
	__u8 RGBG[] = { 0x80, 0x80, 0x80, 0x80, 0x00, 0x00 };

	memset(RGBG, val, sizeof(RGBG) - 2);
	reg_w(gspca_dev, ET_G_RED, RGBG, 6);
}

static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	__u8 I2cc[] = { 0x05, 0x02, 0x02, 0x05, 0x0d };
	__u8 i2cflags = 0x01;
	/* __u8 green = 0; */

	I2cc[3] = val;			/* red */
	I2cc[0] = 15 - val;		/* blue */
	/* green = 15 - ((((7*I2cc[0]) >> 2) + I2cc[3]) >> 1); */
	/* I2cc[1] = I2cc[2] = green; */
	if (sd->sensor == SENSOR_PAS106) {
		i2c_w(gspca_dev, PAS106_REG13, &i2cflags, 1, 3);
		i2c_w(gspca_dev, PAS106_REG9, I2cc, sizeof I2cc, 1);
	}
}

static s32 getcolors(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->sensor == SENSOR_PAS106) {
/*		i2c_r(gspca_dev, PAS106_REG9);		 * blue */
		i2c_r(gspca_dev, PAS106_REG9 + 3);	/* red */
		return gspca_dev->usb_buf[0] & 0x0f;
	}
	return 0;
}

static void setautogain(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->autogain)
		sd->ag_cnt = AG_CNT_START;
	else
		sd->ag_cnt = -1;
}

static void Et_init1(struct gspca_dev *gspca_dev)
{
	__u8 value;
/*	__u8 I2c0[] = { 0x0a, 0x12, 0x05, 0x22, 0xac, 0x00, 0x01, 0x00 }; */
	__u8 I2c0[] = { 0x0a, 0x12, 0x05, 0x6d, 0xcd, 0x00, 0x01, 0x00 };
						/* try 1/120 0x6d 0xcd 0x40 */
/*	__u8 I2c0[] = { 0x0a, 0x12, 0x05, 0xfe, 0xfe, 0xc0, 0x01, 0x00 };
						 * 1/60000 hmm ?? */

	gspca_dbg(gspca_dev, D_STREAM, "Open Init1 ET\n\n");
	reg_w_val(gspca_dev, ET_GPIO_DIR_CTRL, 7);
	reg_r(gspca_dev, ET_GPIO_IN, 1);
	reg_w_val(gspca_dev, ET_RESET_ALL, 1);
	reg_w_val(gspca_dev, ET_RESET_ALL, 0);
	reg_w_val(gspca_dev, ET_ClCK, 0x10);
	reg_w_val(gspca_dev, ET_CTRL, 0x19);

	/* compression and subsampling */
	if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv)
		value = ET_COMP_VAL1;
	else
		value = ET_COMP_VAL0;
	gspca_dbg(gspca_dev, D_STREAM, "Open mode %d Compression %d\n",
		  gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv,
		  value);
	reg_w_val(gspca_dev, ET_COMP, value);
	reg_w_val(gspca_dev, ET_MAXQt, 0x1d);
	reg_w_val(gspca_dev, ET_MINQt, 0x02);
	/* undocumented registers */
	reg_w_val(gspca_dev, ET_REG1d, 0xff);
	reg_w_val(gspca_dev, ET_REG1e, 0xff);
	reg_w_val(gspca_dev, ET_REG1f, 0xff);
	reg_w_val(gspca_dev, ET_REG20, 0x35);
	reg_w_val(gspca_dev, ET_REG21, 0x01);
	reg_w_val(gspca_dev, ET_REG22, 0x00);
	reg_w_val(gspca_dev, ET_REG23, 0xf7);
	reg_w_val(gspca_dev, ET_REG24, 0xff);
	reg_w_val(gspca_dev, ET_REG25, 0x07);
	/* colors setting */
	reg_w_val(gspca_dev, ET_G_RED, 0x80);
	reg_w_val(gspca_dev, ET_G_GREEN1, 0x80);
	reg_w_val(gspca_dev, ET_G_BLUE, 0x80);
	reg_w_val(gspca_dev, ET_G_GREEN2, 0x80);
	reg_w_val(gspca_dev, ET_G_GR_H, 0x00);
	reg_w_val(gspca_dev, ET_G_GB_H, 0x00);
	/* Window control registers */
	reg_w_val(gspca_dev, ET_SYNCHRO, 0xf0);
	reg_w_val(gspca_dev, ET_STARTX, 0x56);		/* 0x56 */
	reg_w_val(gspca_dev, ET_STARTY, 0x05);		/* 0x04 */
	reg_w_val(gspca_dev, ET_WIDTH_LOW, 0x60);
	reg_w_val(gspca_dev, ET_HEIGTH_LOW, 0x20);
	reg_w_val(gspca_dev, ET_W_H_HEIGTH, 0x50);
	reg_w_val(gspca_dev, ET_REG6e, 0x86);
	reg_w_val(gspca_dev, ET_REG6f, 0x01);
	reg_w_val(gspca_dev, ET_REG70, 0x86);
	reg_w_val(gspca_dev, ET_REG71, 0x14);
	reg_w_val(gspca_dev, ET_REG72, 0x00);
	/* Clock Pattern registers */
	reg_w_val(gspca_dev, ET_REG73, 0x00);
	reg_w_val(gspca_dev, ET_REG74, 0x00);
	reg_w_val(gspca_dev, ET_REG75, 0x0a);
	reg_w_val(gspca_dev, ET_I2C_CLK, 0x04);
	reg_w_val(gspca_dev, ET_PXL_CLK, 0x01);
	/* set the sensor */
	if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
		I2c0[0] = 0x06;
		i2c_w(gspca_dev, PAS106_REG2, I2c0, sizeof I2c0, 1);
		i2c_w(gspca_dev, PAS106_REG9, I2c2, sizeof I2c2, 1);
		value = 0x06;
		i2c_w(gspca_dev, PAS106_REG2, &value, 1, 1);
		i2c_w(gspca_dev, PAS106_REG3, I2c3, sizeof I2c3, 1);
		/* value = 0x1f; */
		value = 0x04;
		i2c_w(gspca_dev, PAS106_REG0e, &value, 1, 1);
	} else {
		I2c0[0] = 0x0a;
		i2c_w(gspca_dev, PAS106_REG2, I2c0, sizeof I2c0, 1);
		i2c_w(gspca_dev, PAS106_REG9, I2c2, sizeof I2c2, 1);
		value = 0x0a;
		i2c_w(gspca_dev, PAS106_REG2, &value, 1, 1);
		i2c_w(gspca_dev, PAS106_REG3, I2c3, sizeof I2c3, 1);
		value = 0x04;
		/* value = 0x10; */
		i2c_w(gspca_dev, PAS106_REG0e, &value, 1, 1);
		/* bit 2 enable, bit 1:2 select 0 1 2 3
		   value = 0x07;   * curve 0 *
		   i2c_w(gspca_dev, PAS106_REG0f, &value, 1, 1);
		 */
	}
/*	value = 0x01; */
/*	value = 0x22; */
/*	i2c_w(gspca_dev, PAS106_REG5, &value, 1, 1); */
	/* magnitude and sign bit for DAC */
	i2c_w(gspca_dev, PAS106_REG7, I2c4, sizeof I2c4, 1);
	/* now set by fifo the whole colors setting */
	reg_w(gspca_dev, ET_G_RED, GainRGBG, 6);
	setcolors(gspca_dev, getcolors(gspca_dev));
}

/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam;

	cam = &gspca_dev->cam;
	sd->sensor = id->driver_info;
	if (sd->sensor == SENSOR_PAS106) {
		cam->cam_mode = sif_mode;
		cam->nmodes = ARRAY_SIZE(sif_mode);
	} else {
		cam->cam_mode = vga_mode;
		cam->nmodes = ARRAY_SIZE(vga_mode);
	}
	sd->ag_cnt = -1;
	return 0;
}

/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->sensor == SENSOR_PAS106)
		Et_init1(gspca_dev);
	else
		Et_init2(gspca_dev);
	reg_w_val(gspca_dev, ET_RESET_ALL, 0x08);
	et_video(gspca_dev, 0);		/* video off */
	return 0;
}

/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->sensor == SENSOR_PAS106)
		Et_init1(gspca_dev);
	else
		Et_init2(gspca_dev);

	setautogain(gspca_dev);

	reg_w_val(gspca_dev, ET_RESET_ALL, 0x08);
	et_video(gspca_dev, 1);		/* video on */
	return 0;
}

static void sd_stopN(struct gspca_dev *gspca_dev)
{
	et_video(gspca_dev, 0);		/* video off */
}

static __u8 Et_getgainG(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->sensor == SENSOR_PAS106) {
		i2c_r(gspca_dev, PAS106_REG0e);
		gspca_dbg(gspca_dev, D_CONF, "Etoms gain G %d\n",
			  gspca_dev->usb_buf[0]);
		return gspca_dev->usb_buf[0];
	}
	return 0x1f;
}

static void Et_setgainG(struct gspca_dev *gspca_dev, __u8 gain)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->sensor == SENSOR_PAS106) {
		__u8 i2cflags = 0x01;

		i2c_w(gspca_dev, PAS106_REG13, &i2cflags, 1, 3);
		i2c_w(gspca_dev, PAS106_REG0e, &gain, 1, 1);
	}
}

#define BLIMIT(bright) \
	(u8)((bright > 0x1f) ? 0x1f : ((bright < 4) ? 3 : bright))
#define LIMIT(color) \
	(u8)((color > 0xff) ? 0xff : ((color < 0) ? 0 : color))

static void do_autogain(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	__u8 luma;
	__u8 luma_mean = 128;
	__u8 luma_delta = 20;
	__u8 spring = 4;
	int Gbright;
	__u8 r, g, b;

	if (sd->ag_cnt < 0)
		return;
	if (--sd->ag_cnt >= 0)
		return;
	sd->ag_cnt = AG_CNT_START;

	Gbright = Et_getgainG(gspca_dev);
	reg_r(gspca_dev, ET_LUMA_CENTER, 4);
	g = (gspca_dev->usb_buf[0] + gspca_dev->usb_buf[3]) >> 1;
	r = gspca_dev->usb_buf[1];
	b = gspca_dev->usb_buf[2];
	r = ((r << 8) - (r << 4) - (r << 3)) >> 10;
	b = ((b << 7) >> 10);
	g = ((g << 9) + (g << 7) + (g << 5)) >> 10;
	luma = LIMIT(r + g + b);
	gspca_dbg(gspca_dev, D_FRAM, "Etoms luma G %d\n", luma);
	if (luma < luma_mean - luma_delta || luma > luma_mean + luma_delta) {
		Gbright += (luma_mean - luma) >> spring;
		Gbright = BLIMIT(Gbright);
		gspca_dbg(gspca_dev, D_FRAM, "Etoms Gbright %d\n", Gbright);
		Et_setgainG(gspca_dev, (__u8) Gbright);
	}
}

#undef BLIMIT
#undef LIMIT

static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,		/* isoc packet */
			int len)		/* iso packet length */
{
	int seqframe;

	seqframe = data[0] & 0x3f;
	len = (int) (((data[0] & 0xc0) << 2) | data[1]);
	if (seqframe == 0x3f) {
		gspca_dbg(gspca_dev, D_FRAM,
			  "header packet found datalength %d !!\n", len);
		gspca_dbg(gspca_dev, D_FRAM, "G %d R %d G %d B %d",
			  data[2], data[3], data[4], data[5]);
		data += 30;
		/* don't change datalength as the chips provided it */
		gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
		gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
		return;
	}
	if (len) {
		data += 8;
		gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
	} else {			/* Drop Packet */
		gspca_dev->last_packet_type = DISCARD_PACKET;
	}
}

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
	struct sd *sd = (struct sd *)gspca_dev;

	gspca_dev->usb_err = 0;

	if (!gspca_dev->streaming)
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		setbrightness(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_CONTRAST:
		setcontrast(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_SATURATION:
		setcolors(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_AUTOGAIN:
		sd->autogain = ctrl->val;
		setautogain(gspca_dev);
		break;
	}
	return gspca_dev->usb_err;
}

static const struct v4l2_ctrl_ops sd_ctrl_ops = {
	.s_ctrl = sd_s_ctrl,
};

static int sd_init_controls(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *)gspca_dev;
	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

	gspca_dev->vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 4);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, 1, 127, 1, 63);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			  V4L2_CID_CONTRAST, 0, 255, 1, 127);
	if (sd->sensor == SENSOR_PAS106)
		v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
				  V4L2_CID_SATURATION, 0, 15, 1, 7);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			  V4L2_CID_AUTOGAIN, 0, 1, 1, 1);

	if (hdl->error) {
		pr_err("Could not initialize controls\n");
		return hdl->error;
	}
	return 0;
}

/* sub-driver description */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.config = sd_config,
	.init = sd_init,
	.init_controls = sd_init_controls,
	.start = sd_start,
	.stopN = sd_stopN,
	.pkt_scan = sd_pkt_scan,
	.dq_callback = do_autogain,
};

/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106},
	{USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX},
	{}
};

MODULE_DEVICE_TABLE(usb, device_table);

/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
			       THIS_MODULE);
}

static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
	.reset_resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
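/*
 * Editor's illustration (not part of the driver): how i2c_w() above packs
 * the ET_I2C_COUNT register. The helper name et_i2c_count() is an
 * assumption for illustration only; the upper nibble carries the byte
 * count and the low bits the transfer mode.
 */
#if 0
static inline __u8 et_i2c_count(int len, __u8 mode)
{
	/* e.g. a 5-byte write in mode 1 programs 0x51 */
	return ((len & 0x07) << 4) | (mode & 0x03);
}
#endif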
| 13 29 6 5 54 5 14 20590 134 4 33 7 2658 128 120 11 377 4068 3887 4163 239 2526 146 249 4469 45 13 2610 32 11 33 144 554 108 1405 133 202 72 370 67 103 9 109 9571 1699 363 1426 104 274 201 662 105 153 492 34 958 9977 844 221 404 370 264 159 164 103 16 152 2157 2 40 2 73 61 23 5284 7900 10942 11810 2464 2323 105 93 256 53 20 6722 6294 360 930 13 1093 455 51 2 36 32 85 49 16 6 2 15 37 12 7 50 52 11 21 72 290 13 139 52 18 28 22 18 35 3 40 32 19 1 60 29 9030 13 2 6 9 12 10 5 99 1693 1575 162 830 796 174 203 11912 1508 74 43 1226 3202 143 11715 2 9310 2048 57 2 78 79 3 89 10 74 292 6 138 15 2 9 3 27 6 207 3 355 258 494 2 4046 241 1064 241 1616 247 3 6 42 14 363 71 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 | /* SPDX-License-Identifier: GPL-2.0 */ /* * Linux Security Module Hook declarations. * * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> * Copyright (C) 2001 James Morris <jmorris@intercode.com.au> * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) * Copyright (C) 2015 Intel Corporation. * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com> * Copyright (C) 2016 Mellanox Techonologies * Copyright (C) 2020 Google LLC. */ /* * The macro LSM_HOOK is used to define the data structures required by * the LSM framework using the pattern: * * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...) * * struct security_hook_heads { * #define LSM_HOOK(RET, DEFAULT, NAME, ...) 
struct hlist_head NAME; * #include <linux/lsm_hook_defs.h> * #undef LSM_HOOK * }; */ LSM_HOOK(int, 0, binder_set_context_mgr, const struct cred *mgr) LSM_HOOK(int, 0, binder_transaction, const struct cred *from, const struct cred *to) LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from, const struct cred *to) LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from, const struct cred *to, const struct file *file) LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child, unsigned int mode) LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent) LSM_HOOK(int, 0, capget, const struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted) LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts) LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, const struct super_block *sb) LSM_HOOK(int, 0, quota_on, struct dentry *dentry) LSM_HOOK(int, 0, syslog, int type) LSM_HOOK(int, 0, settime, const struct timespec64 *ts, const struct timezone *tz) LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages) LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm) LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file) LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, const struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, const struct linux_binprm *bprm) LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference) LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, struct fs_context *src_sc) LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, struct fs_parameter *param) LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_delete, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts) LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts) LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts) LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts) LSM_HOOK(int, 0, sb_kern_mount, const struct super_block *sb) LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb) LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry) LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) LSM_HOOK(int, 0, sb_umount, struct vfsmount *mnt, int flags) LSM_HOOK(int, 0, sb_pivotroot, const struct path *old_path, const struct path *new_path) LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags) LSM_HOOK(int, 0, move_mount, const struct path *from_path, const struct path *to_path) LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry, int mode, const struct qstr *name, const char **xattr_name, struct lsm_context *cp) LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct 
cred *new) #ifdef CONFIG_SECURITY_PATH LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry) LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry, umode_t mode) LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry) LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) LSM_HOOK(void, LSM_RET_VOID, path_post_mknod, struct mnt_idmap *idmap, struct dentry *dentry) LSM_HOOK(int, 0, path_truncate, const struct path *path) LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry, const char *old_name) LSM_HOOK(int, 0, path_link, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) LSM_HOOK(int, 0, path_rename, const struct path *old_dir, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry, unsigned int flags) LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode) LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid) LSM_HOOK(int, 0, path_chroot, const struct path *path) #endif /* CONFIG_SECURITY_PATH */ /* Needed for inode based security check */ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask, unsigned int obj_type) LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode) LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode) LSM_HOOK(void, LSM_RET_VOID, inode_free_security_rcu, void *inode_security) LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode, struct inode *dir, const struct qstr *qstr, struct xattr *xattrs, int *xattr_count) LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode, const struct qstr *name, const struct inode *context_inode) LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry, umode_t mode) LSM_HOOK(void, LSM_RET_VOID, inode_post_create_tmpfile, struct mnt_idmap *idmap, struct inode *inode) LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry) LSM_HOOK(int, 0, inode_symlink, struct inode *dir, struct dentry *dentry, const char *old_name) LSM_HOOK(int, 0, inode_mkdir, struct inode *dir, struct dentry *dentry, umode_t mode) LSM_HOOK(int, 0, inode_rmdir, struct inode *dir, struct dentry *dentry) LSM_HOOK(int, 0, inode_mknod, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) LSM_HOOK(int, 0, inode_rename, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry) LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode, bool rcu) LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask) LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap, struct dentry *dentry, int ia_valid) LSM_HOOK(int, 0, inode_getattr, const struct path *path) LSM_HOOK(int, 0, inode_xattr_skipcap, const char *name) LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name) LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry) 
LSM_HOOK(int, 0, inode_removexattr, struct mnt_idmap *idmap, struct dentry *dentry, const char *name) LSM_HOOK(void, LSM_RET_VOID, inode_post_removexattr, struct dentry *dentry, const char *name) LSM_HOOK(int, 0, inode_set_acl, struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) LSM_HOOK(void, LSM_RET_VOID, inode_post_set_acl, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) LSM_HOOK(int, 0, inode_get_acl, struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) LSM_HOOK(int, 0, inode_remove_acl, struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) LSM_HOOK(void, LSM_RET_VOID, inode_post_remove_acl, struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry) LSM_HOOK(int, 0, inode_killpriv, struct mnt_idmap *idmap, struct dentry *dentry) LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct mnt_idmap *idmap, struct inode *inode, const char *name, void **buffer, bool alloc) LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode, const char *name, const void *value, size_t size, int flags) LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, size_t buffer_size) LSM_HOOK(void, LSM_RET_VOID, inode_getlsmprop, struct inode *inode, struct lsm_prop *prop) LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src, const char *name) LSM_HOOK(int, 0, inode_setintegrity, const struct inode *inode, enum lsm_integrity_type type, const void *value, size_t size) LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir, struct kernfs_node *kn) LSM_HOOK(int, 0, file_permission, struct file *file, int mask) LSM_HOOK(int, 0, file_alloc_security, struct file *file) LSM_HOOK(void, LSM_RET_VOID, file_release, struct file *file) LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file) LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd, unsigned long arg) LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd, unsigned long arg) LSM_HOOK(int, 0, mmap_addr, unsigned long addr) LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd) LSM_HOOK(int, 0, file_fcntl, struct file *file, unsigned int cmd, unsigned long arg) LSM_HOOK(void, LSM_RET_VOID, file_set_fowner, struct file *file) LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk, struct fown_struct *fown, int sig) LSM_HOOK(int, 0, file_receive, struct file *file) LSM_HOOK(int, 0, file_open, struct file *file) LSM_HOOK(int, 0, file_post_open, struct file *file, int mask) LSM_HOOK(int, 0, file_truncate, struct file *file) LSM_HOOK(int, 0, task_alloc, struct task_struct *task, unsigned long clone_flags) LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task) LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp) LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred) LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old, gfp_t gfp) LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new, const struct cred *old) LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, cred_getlsmprop, const struct cred *c, struct lsm_prop 
*prop) LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid) LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode) LSM_HOOK(int, 0, kernel_module_request, char *kmod_name) LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id, bool contents) LSM_HOOK(int, 0, kernel_post_load_data, char *buf, loff_t size, enum kernel_load_data_id id, char *description) LSM_HOOK(int, 0, kernel_read_file, struct file *file, enum kernel_read_file_id id, bool contents) LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf, loff_t size, enum kernel_read_file_id id) LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old, int flags) LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old, int flags) LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred * old) LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) LSM_HOOK(int, 0, task_getsid, struct task_struct *p) LSM_HOOK(void, LSM_RET_VOID, current_getlsmprop_subj, struct lsm_prop *prop) LSM_HOOK(void, LSM_RET_VOID, task_getlsmprop_obj, struct task_struct *p, struct lsm_prop *prop) LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice) LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio) LSM_HOOK(int, 0, task_getioprio, struct task_struct *p) LSM_HOOK(int, 0, task_prlimit, const struct cred *cred, const struct cred *tcred, unsigned int flags) LSM_HOOK(int, 0, task_setrlimit, struct task_struct *p, unsigned int resource, struct rlimit *new_rlim) LSM_HOOK(int, 0, task_setscheduler, struct task_struct *p) LSM_HOOK(int, 0, task_getscheduler, struct task_struct *p) LSM_HOOK(int, 0, task_movememory, struct task_struct *p) LSM_HOOK(int, 0, task_kill, struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p, struct inode *inode) LSM_HOOK(int, 0, userns_create, const struct cred *cred) LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag) LSM_HOOK(void, LSM_RET_VOID, ipc_getlsmprop, struct kern_ipc_perm *ipcp, struct lsm_prop *prop) LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg) LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg) LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm) LSM_HOOK(void, LSM_RET_VOID, msg_queue_free_security, struct kern_ipc_perm *perm) LSM_HOOK(int, 0, msg_queue_associate, struct kern_ipc_perm *perm, int msqflg) LSM_HOOK(int, 0, msg_queue_msgctl, struct kern_ipc_perm *perm, int cmd) LSM_HOOK(int, 0, msg_queue_msgsnd, struct kern_ipc_perm *perm, struct msg_msg *msg, int msqflg) LSM_HOOK(int, 0, msg_queue_msgrcv, struct kern_ipc_perm *perm, struct msg_msg *msg, struct task_struct *target, long type, int mode) LSM_HOOK(int, 0, shm_alloc_security, struct kern_ipc_perm *perm) LSM_HOOK(void, LSM_RET_VOID, shm_free_security, struct kern_ipc_perm *perm) LSM_HOOK(int, 0, shm_associate, struct kern_ipc_perm *perm, int shmflg) LSM_HOOK(int, 0, shm_shmctl, struct kern_ipc_perm *perm, int cmd) LSM_HOOK(int, 0, shm_shmat, struct kern_ipc_perm *perm, char __user *shmaddr, int shmflg) LSM_HOOK(int, 0, sem_alloc_security, struct kern_ipc_perm *perm) LSM_HOOK(void, LSM_RET_VOID, sem_free_security, struct kern_ipc_perm *perm) LSM_HOOK(int, 0, sem_associate, struct 
kern_ipc_perm *perm, int semflg) LSM_HOOK(int, 0, sem_semctl, struct kern_ipc_perm *perm, int cmd) LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops, unsigned nsops, int alter) LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb) LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry, struct inode *inode) LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr, struct lsm_ctx __user *ctx, u32 *size, u32 flags) LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr, struct lsm_ctx *ctx, u32 size, u32 flags) LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name, char **value) LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) LSM_HOOK(int, 0, ismaclabel, const char *name) LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, struct lsm_context *cp) LSM_HOOK(int, -EOPNOTSUPP, lsmprop_to_secctx, struct lsm_prop *prop, struct lsm_context *cp) LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, release_secctx, struct lsm_context *cp) LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode) LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen) LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, struct lsm_context *cp) #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) LSM_HOOK(int, 0, post_notification, const struct cred *w_cred, const struct cred *cred, struct watch_notification *n) #endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */ #if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) LSM_HOOK(int, 0, watch_key, struct key *key) #endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */ #ifdef CONFIG_SECURITY_NETWORK LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other, struct sock *newsk) LSM_HOOK(int, 0, unix_may_send, struct socket *sock, struct socket *other) LSM_HOOK(int, 0, socket_create, int family, int type, int protocol, int kern) LSM_HOOK(int, 0, socket_post_create, struct socket *sock, int family, int type, int protocol, int kern) LSM_HOOK(int, 0, socket_socketpair, struct socket *socka, struct socket *sockb) LSM_HOOK(int, 0, socket_bind, struct socket *sock, struct sockaddr *address, int addrlen) LSM_HOOK(int, 0, socket_connect, struct socket *sock, struct sockaddr *address, int addrlen) LSM_HOOK(int, 0, socket_listen, struct socket *sock, int backlog) LSM_HOOK(int, 0, socket_accept, struct socket *sock, struct socket *newsock) LSM_HOOK(int, 0, socket_sendmsg, struct socket *sock, struct msghdr *msg, int size) LSM_HOOK(int, 0, socket_recvmsg, struct socket *sock, struct msghdr *msg, int size, int flags) LSM_HOOK(int, 0, socket_getsockname, struct socket *sock) LSM_HOOK(int, 0, socket_getpeername, struct socket *sock) LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how) LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb) LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock, sockptr_t optval, sockptr_t optlen, unsigned int len) LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock, struct sk_buff *skb, u32 *secid) LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority) LSM_HOOK(void, 
LSM_RET_VOID, sk_free_security, struct sock *sk) LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk, struct sock *newsk) LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, const struct sock *sk, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent) LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb, struct request_sock *req) LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk, const struct request_sock *req) LSM_HOOK(void, LSM_RET_VOID, inet_conn_established, struct sock *sk, struct sk_buff *skb) LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid) LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void) LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void) LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req, struct flowi_common *flic) LSM_HOOK(int, 0, tun_dev_alloc_security, void *security) LSM_HOOK(int, 0, tun_dev_create, void) LSM_HOOK(int, 0, tun_dev_attach_queue, void *security) LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security) LSM_HOOK(int, 0, tun_dev_open, void *security) LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc, struct sk_buff *skb) LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname, struct sockaddr *address, int addrlen) LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc, struct sock *sk, struct sock *newsk) LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc, struct sk_buff *skb) LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk) #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey) LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name, u8 port_num) LSM_HOOK(int, 0, ib_alloc_security, void *sec) #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM LSM_HOOK(int, 0, xfrm_policy_alloc_security, struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp) LSM_HOOK(int, 0, xfrm_policy_clone_security, struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx) LSM_HOOK(void, LSM_RET_VOID, xfrm_policy_free_security, struct xfrm_sec_ctx *ctx) LSM_HOOK(int, 0, xfrm_policy_delete_security, struct xfrm_sec_ctx *ctx) LSM_HOOK(int, 0, xfrm_state_alloc, struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x) LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x) LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid) LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi_common *flic) LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, int ckall) #endif /* CONFIG_SECURITY_NETWORK_XFRM */ /* key management security hooks */ #ifdef CONFIG_KEYS LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred, unsigned long flags) LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred, enum key_need_perm need_perm) LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer) LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring, struct key *key, const void *payload, size_t payload_len, unsigned long flags, bool create) #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT LSM_HOOK(int, 0, 
audit_rule_init, u32 field, u32 op, char *rulestr, void **lsmrule, gfp_t gfp) LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule) LSM_HOOK(int, 0, audit_rule_match, struct lsm_prop *prop, u32 field, u32 op, void *lsmrule) LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule) #endif /* CONFIG_AUDIT */ #ifdef CONFIG_BPF_SYSCALL LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode) LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog) LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr, struct bpf_token *token, bool kernel) LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map) LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr, struct bpf_token *token, bool kernel) LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog) LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr, const struct path *path) LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token) LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd) LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap) #endif /* CONFIG_BPF_SYSCALL */ LSM_HOOK(int, 0, locked_down, enum lockdown_reason what) #ifdef CONFIG_PERF_EVENTS LSM_HOOK(int, 0, perf_event_open, int type) LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event) LSM_HOOK(int, 0, perf_event_read, struct perf_event *event) LSM_HOOK(int, 0, perf_event_write, struct perf_event *event) #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_IO_URING LSM_HOOK(int, 0, uring_override_creds, const struct cred *new) LSM_HOOK(int, 0, uring_sqpoll, void) LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd) LSM_HOOK(int, 0, uring_allowed, void) #endif /* CONFIG_IO_URING */ LSM_HOOK(void, LSM_RET_VOID, initramfs_populated, void) LSM_HOOK(int, 0, bdev_alloc_security, struct block_device *bdev) LSM_HOOK(void, LSM_RET_VOID, bdev_free_security, struct block_device *bdev) LSM_HOOK(int, 0, bdev_setintegrity, struct block_device *bdev, enum lsm_integrity_type type, const void *value, size_t size) |
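/*
 * Illustrative aside (not part of the hook list above): lsm_hook_defs.h is
 * written in the X-macro style, where the includer supplies its own
 * definition of LSM_HOOK() before including the file, so a single list of
 * hooks can generate several different tables. The standalone sketch below
 * shows the idiom with a made-up two-entry list; MY_HOOK_LIST, hook_table
 * and the two example hooks are hypothetical names, not kernel API.
 */
#include <stdio.h>

#define MY_HOOK_LIST(HOOK)					\
	HOOK(int, 0, file_open, const char *path)		\
	HOOK(int, -1, task_kill, int pid, int sig)

/* Expansion 1: a table of function pointers, one member per hook. */
struct hook_table {
#define HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
	MY_HOOK_LIST(HOOK)
#undef HOOK
};

/* Expansion 2: the default return value of each hook, indexed by name. */
enum hook_id {
#define HOOK(RET, DEFAULT, NAME, ...) HOOK_ID_##NAME,
	MY_HOOK_LIST(HOOK)
#undef HOOK
	HOOK_ID_MAX
};

static const int hook_defaults[HOOK_ID_MAX] = {
#define HOOK(RET, DEFAULT, NAME, ...) [HOOK_ID_##NAME] = DEFAULT,
	MY_HOOK_LIST(HOOK)
#undef HOOK
};

int main(void)
{
	/* Prints "task_kill default: -1". */
	printf("task_kill default: %d\n", hook_defaults[HOOK_ID_task_kill]);
	return 0;
}
/* End of illustrative aside. */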
// SPDX-License-Identifier: GPL-2.0
/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 */

#include <linux/debugfs.h>
#include <linux/kstrtox.h>
#include <linux/workqueue.h>

#include <linux/greybus.h>

#define SVC_INTF_EJECT_TIMEOUT		9000
#define SVC_INTF_ACTIVATE_TIMEOUT	6000
#define SVC_INTF_RESUME_TIMEOUT		3000

struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};

static int gb_svc_queue_deferred_request(struct gb_operation *operation);

static ssize_t endo_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

// FIXME
// This is a hack, we need to do this "right" and clean the interface up
// properly, not just forcibly yank the thing out of the system and hope for the
// best.
But for now, people want their modules to come out without having to // throw the thing to the ground or get out a screwdriver. static ssize_t intf_eject_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct gb_svc *svc = to_gb_svc(dev); unsigned short intf_id; int ret; ret = kstrtou16(buf, 10, &intf_id); if (ret < 0) return ret; dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id); ret = gb_svc_intf_eject(svc, intf_id); if (ret < 0) return ret; return len; } static DEVICE_ATTR_WO(intf_eject); static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_svc *svc = to_gb_svc(dev); return sprintf(buf, "%s\n", gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled"); } static ssize_t watchdog_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct gb_svc *svc = to_gb_svc(dev); int retval; bool user_request; retval = kstrtobool(buf, &user_request); if (retval) return retval; if (user_request) retval = gb_svc_watchdog_enable(svc); else retval = gb_svc_watchdog_disable(svc); if (retval) return retval; return len; } static DEVICE_ATTR_RW(watchdog); static ssize_t watchdog_action_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_svc *svc = to_gb_svc(dev); if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) return sprintf(buf, "panic\n"); else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) return sprintf(buf, "reset\n"); return -EINVAL; } static ssize_t watchdog_action_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct gb_svc *svc = to_gb_svc(dev); if (sysfs_streq(buf, "panic")) svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL; else if (sysfs_streq(buf, "reset")) svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO; else return -EINVAL; return len; } static DEVICE_ATTR_RW(watchdog_action); static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value) { struct gb_svc_pwrmon_rail_count_get_response response; int ret; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0, &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to get rail count: %d\n", ret); return ret; } *value = response.rail_count; return 0; } static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc, struct gb_svc_pwrmon_rail_names_get_response *response, size_t bufsize) { int ret; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0, response, bufsize); if (ret) { dev_err(&svc->dev, "failed to get rail names: %d\n", ret); return ret; } if (response->status != GB_SVC_OP_SUCCESS) { dev_err(&svc->dev, "SVC error while getting rail names: %u\n", response->status); return -EREMOTEIO; } return 0; } static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id, u8 measurement_type, u32 *value) { struct gb_svc_pwrmon_sample_get_request request; struct gb_svc_pwrmon_sample_get_response response; int ret; request.rail_id = rail_id; request.measurement_type = measurement_type; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to get rail sample: %d\n", ret); return ret; } if (response.result) { dev_err(&svc->dev, "UniPro error while getting rail power sample (%d %d): %d\n", rail_id, measurement_type, response.result); switch (response.result) { case GB_SVC_PWRMON_GET_SAMPLE_INVAL: return -EINVAL; case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: 
return -ENOMSG; default: return -EREMOTEIO; } } *value = le32_to_cpu(response.measurement); return 0; } int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, u8 measurement_type, u32 *value) { struct gb_svc_pwrmon_intf_sample_get_request request; struct gb_svc_pwrmon_intf_sample_get_response response; int ret; request.intf_id = intf_id; request.measurement_type = measurement_type; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to get intf sample: %d\n", ret); return ret; } if (response.result) { dev_err(&svc->dev, "UniPro error while getting intf power sample (%d %d): %d\n", intf_id, measurement_type, response.result); switch (response.result) { case GB_SVC_PWRMON_GET_SAMPLE_INVAL: return -EINVAL; case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: return -ENOMSG; default: return -EREMOTEIO; } } *value = le32_to_cpu(response.measurement); return 0; } static struct attribute *svc_attrs[] = { &dev_attr_endo_id.attr, &dev_attr_ap_intf_id.attr, &dev_attr_intf_eject.attr, &dev_attr_watchdog.attr, &dev_attr_watchdog_action.attr, NULL, }; ATTRIBUTE_GROUPS(svc); int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id) { struct gb_svc_intf_device_id_request request; request.intf_id = intf_id; request.device_id = device_id; return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID, &request, sizeof(request), NULL, 0); } int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id) { struct gb_svc_intf_eject_request request; int ret; request.intf_id = intf_id; /* * The pulse width for module release in svc is long so we need to * increase the timeout so the operation will not return to soon. */ ret = gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_INTF_EJECT, &request, sizeof(request), NULL, 0, SVC_INTF_EJECT_TIMEOUT); if (ret) { dev_err(&svc->dev, "failed to eject interface %u\n", intf_id); return ret; } return 0; } int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable) { struct gb_svc_intf_vsys_request request; struct gb_svc_intf_vsys_response response; int type, ret; request.intf_id = intf_id; if (enable) type = GB_SVC_TYPE_INTF_VSYS_ENABLE; else type = GB_SVC_TYPE_INTF_VSYS_DISABLE; ret = gb_operation_sync(svc->connection, type, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) return ret; if (response.result_code != GB_SVC_INTF_VSYS_OK) return -EREMOTEIO; return 0; } int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable) { struct gb_svc_intf_refclk_request request; struct gb_svc_intf_refclk_response response; int type, ret; request.intf_id = intf_id; if (enable) type = GB_SVC_TYPE_INTF_REFCLK_ENABLE; else type = GB_SVC_TYPE_INTF_REFCLK_DISABLE; ret = gb_operation_sync(svc->connection, type, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) return ret; if (response.result_code != GB_SVC_INTF_REFCLK_OK) return -EREMOTEIO; return 0; } int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable) { struct gb_svc_intf_unipro_request request; struct gb_svc_intf_unipro_response response; int type, ret; request.intf_id = intf_id; if (enable) type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE; else type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE; ret = gb_operation_sync(svc->connection, type, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) return ret; if (response.result_code != GB_SVC_INTF_UNIPRO_OK) return -EREMOTEIO; return 0; } int gb_svc_intf_activate(struct gb_svc *svc, u8 
intf_id, u8 *intf_type) { struct gb_svc_intf_activate_request request; struct gb_svc_intf_activate_response response; int ret; request.intf_id = intf_id; ret = gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_INTF_ACTIVATE, &request, sizeof(request), &response, sizeof(response), SVC_INTF_ACTIVATE_TIMEOUT); if (ret < 0) return ret; if (response.status != GB_SVC_OP_SUCCESS) { dev_err(&svc->dev, "failed to activate interface %u: %u\n", intf_id, response.status); return -EREMOTEIO; } *intf_type = response.intf_type; return 0; } int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id) { struct gb_svc_intf_resume_request request; struct gb_svc_intf_resume_response response; int ret; request.intf_id = intf_id; ret = gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_INTF_RESUME, &request, sizeof(request), &response, sizeof(response), SVC_INTF_RESUME_TIMEOUT); if (ret < 0) { dev_err(&svc->dev, "failed to send interface resume %u: %d\n", intf_id, ret); return ret; } if (response.status != GB_SVC_OP_SUCCESS) { dev_err(&svc->dev, "failed to resume interface %u: %u\n", intf_id, response.status); return -EREMOTEIO; } return 0; } int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, u32 *value) { struct gb_svc_dme_peer_get_request request; struct gb_svc_dme_peer_get_response response; u16 result; int ret; request.intf_id = intf_id; request.attr = cpu_to_le16(attr); request.selector = cpu_to_le16(selector); ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n", intf_id, attr, selector, ret); return ret; } result = le16_to_cpu(response.result_code); if (result) { dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n", intf_id, attr, selector, result); return -EREMOTEIO; } if (value) *value = le32_to_cpu(response.attr_value); return 0; } int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, u32 value) { struct gb_svc_dme_peer_set_request request; struct gb_svc_dme_peer_set_response response; u16 result; int ret; request.intf_id = intf_id; request.attr = cpu_to_le16(attr); request.selector = cpu_to_le16(selector); request.value = cpu_to_le32(value); ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n", intf_id, attr, selector, value, ret); return ret; } result = le16_to_cpu(response.result_code); if (result) { dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n", intf_id, attr, selector, value, result); return -EREMOTEIO; } return 0; } int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, u8 intf2_id, u16 cport2_id, u8 cport_flags) { struct gb_svc_conn_create_request request; request.intf1_id = intf1_id; request.cport1_id = cpu_to_le16(cport1_id); request.intf2_id = intf2_id; request.cport2_id = cpu_to_le16(cport2_id); request.tc = 0; /* TC0 */ request.flags = cport_flags; return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE, &request, sizeof(request), NULL, 0); } void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, u8 intf2_id, u16 cport2_id) { struct gb_svc_conn_destroy_request request; struct gb_connection *connection = svc->connection; int ret; request.intf1_id = intf1_id; request.cport1_id = cpu_to_le16(cport1_id); 
request.intf2_id = intf2_id; request.cport2_id = cpu_to_le16(cport2_id); ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY, &request, sizeof(request), NULL, 0); if (ret) { dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n", intf1_id, cport1_id, intf2_id, cport2_id, ret); } } /* Creates bi-directional routes between the devices */ int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, u8 intf2_id, u8 dev2_id) { struct gb_svc_route_create_request request; request.intf1_id = intf1_id; request.dev1_id = dev1_id; request.intf2_id = intf2_id; request.dev2_id = dev2_id; return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE, &request, sizeof(request), NULL, 0); } /* Destroys bi-directional routes between the devices */ void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id) { struct gb_svc_route_destroy_request request; int ret; request.intf1_id = intf1_id; request.intf2_id = intf2_id; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY, &request, sizeof(request), NULL, 0); if (ret) { dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n", intf1_id, intf2_id, ret); } } int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, u8 tx_mode, u8 tx_gear, u8 tx_nlanes, u8 tx_amplitude, u8 tx_hs_equalizer, u8 rx_mode, u8 rx_gear, u8 rx_nlanes, u8 flags, u32 quirks, struct gb_svc_l2_timer_cfg *local, struct gb_svc_l2_timer_cfg *remote) { struct gb_svc_intf_set_pwrm_request request; struct gb_svc_intf_set_pwrm_response response; int ret; u16 result_code; memset(&request, 0, sizeof(request)); request.intf_id = intf_id; request.hs_series = hs_series; request.tx_mode = tx_mode; request.tx_gear = tx_gear; request.tx_nlanes = tx_nlanes; request.tx_amplitude = tx_amplitude; request.tx_hs_equalizer = tx_hs_equalizer; request.rx_mode = rx_mode; request.rx_gear = rx_gear; request.rx_nlanes = rx_nlanes; request.flags = flags; request.quirks = cpu_to_le32(quirks); if (local) request.local_l2timerdata = *local; if (remote) request.remote_l2timerdata = *remote; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) return ret; result_code = response.result_code; if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) { dev_err(&svc->dev, "set power mode = %d\n", result_code); return -EIO; } return 0; } EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode); int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id) { struct gb_svc_intf_set_pwrm_request request; struct gb_svc_intf_set_pwrm_response response; int ret; u16 result_code; memset(&request, 0, sizeof(request)); request.intf_id = intf_id; request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A; request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) { dev_err(&svc->dev, "failed to send set power mode operation to interface %u: %d\n", intf_id, ret); return ret; } result_code = response.result_code; if (result_code != GB_SVC_SETPWRM_PWR_OK) { dev_err(&svc->dev, "failed to hibernate the link for interface %u: %u\n", intf_id, result_code); return -EIO; } return 0; } int gb_svc_ping(struct gb_svc *svc) { return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING, NULL, 0, NULL, 0, GB_OPERATION_TIMEOUT_DEFAULT * 2); } static int gb_svc_version_request(struct gb_operation *op) { struct gb_connection *connection 
= op->connection; struct gb_svc *svc = gb_connection_get_data(connection); struct gb_svc_version_request *request; struct gb_svc_version_response *response; if (op->request->payload_size < sizeof(*request)) { dev_err(&svc->dev, "short version request (%zu < %zu)\n", op->request->payload_size, sizeof(*request)); return -EINVAL; } request = op->request->payload; if (request->major > GB_SVC_VERSION_MAJOR) { dev_warn(&svc->dev, "unsupported major version (%u > %u)\n", request->major, GB_SVC_VERSION_MAJOR); return -ENOTSUPP; } svc->protocol_major = request->major; svc->protocol_minor = request->minor; if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) return -ENOMEM; response = op->response->payload; response->major = svc->protocol_major; response->minor = svc->protocol_minor; return 0; } static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf, size_t len, loff_t *offset) { struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private; struct gb_svc *svc = pwrmon_rails->svc; int ret, desc; u32 value; char buff[16]; ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, GB_SVC_PWRMON_TYPE_VOL, &value); if (ret) { dev_err(&svc->dev, "failed to get voltage sample %u: %d\n", pwrmon_rails->id, ret); return ret; } desc = scnprintf(buff, sizeof(buff), "%u\n", value); return simple_read_from_buffer(buf, len, offset, buff, desc); } static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf, size_t len, loff_t *offset) { struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private; struct gb_svc *svc = pwrmon_rails->svc; int ret, desc; u32 value; char buff[16]; ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, GB_SVC_PWRMON_TYPE_CURR, &value); if (ret) { dev_err(&svc->dev, "failed to get current sample %u: %d\n", pwrmon_rails->id, ret); return ret; } desc = scnprintf(buff, sizeof(buff), "%u\n", value); return simple_read_from_buffer(buf, len, offset, buff, desc); } static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf, size_t len, loff_t *offset) { struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private; struct gb_svc *svc = pwrmon_rails->svc; int ret, desc; u32 value; char buff[16]; ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, GB_SVC_PWRMON_TYPE_PWR, &value); if (ret) { dev_err(&svc->dev, "failed to get power sample %u: %d\n", pwrmon_rails->id, ret); return ret; } desc = scnprintf(buff, sizeof(buff), "%u\n", value); return simple_read_from_buffer(buf, len, offset, buff, desc); } static const struct file_operations pwrmon_debugfs_voltage_fops = { .read = pwr_debugfs_voltage_read, }; static const struct file_operations pwrmon_debugfs_current_fops = { .read = pwr_debugfs_current_read, }; static const struct file_operations pwrmon_debugfs_power_fops = { .read = pwr_debugfs_power_read, }; static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc) { int i; size_t bufsize; struct dentry *dent; struct gb_svc_pwrmon_rail_names_get_response *rail_names; u8 rail_count; dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry); if (IS_ERR_OR_NULL(dent)) return; if (gb_svc_pwrmon_rail_count_get(svc, &rail_count)) goto err_pwrmon_debugfs; if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT) goto err_pwrmon_debugfs; bufsize = sizeof(*rail_names) + GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count; rail_names = kzalloc(bufsize, GFP_KERNEL); if (!rail_names) goto err_pwrmon_debugfs; svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails), GFP_KERNEL); if (!svc->pwrmon_rails) 
goto err_pwrmon_debugfs_free; if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize)) goto err_pwrmon_debugfs_free; for (i = 0; i < rail_count; i++) { struct dentry *dir; struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i]; char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; snprintf(fname, sizeof(fname), "%s", (char *)&rail_names->name[i]); rail->id = i; rail->svc = svc; dir = debugfs_create_dir(fname, dent); debugfs_create_file("voltage_now", 0444, dir, rail, &pwrmon_debugfs_voltage_fops); debugfs_create_file("current_now", 0444, dir, rail, &pwrmon_debugfs_current_fops); debugfs_create_file("power_now", 0444, dir, rail, &pwrmon_debugfs_power_fops); } kfree(rail_names); return; err_pwrmon_debugfs_free: kfree(rail_names); kfree(svc->pwrmon_rails); svc->pwrmon_rails = NULL; err_pwrmon_debugfs: debugfs_remove(dent); } static void gb_svc_debugfs_init(struct gb_svc *svc) { svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev), gb_debugfs_get()); gb_svc_pwrmon_debugfs_init(svc); } static void gb_svc_debugfs_exit(struct gb_svc *svc) { debugfs_remove_recursive(svc->debugfs_dentry); kfree(svc->pwrmon_rails); svc->pwrmon_rails = NULL; } static int gb_svc_hello(struct gb_operation *op) { struct gb_connection *connection = op->connection; struct gb_svc *svc = gb_connection_get_data(connection); struct gb_svc_hello_request *hello_request; int ret; if (op->request->payload_size < sizeof(*hello_request)) { dev_warn(&svc->dev, "short hello request (%zu < %zu)\n", op->request->payload_size, sizeof(*hello_request)); return -EINVAL; } hello_request = op->request->payload; svc->endo_id = le16_to_cpu(hello_request->endo_id); svc->ap_intf_id = hello_request->interface_id; ret = device_add(&svc->dev); if (ret) { dev_err(&svc->dev, "failed to register svc device: %d\n", ret); return ret; } ret = gb_svc_watchdog_create(svc); if (ret) { dev_err(&svc->dev, "failed to create watchdog: %d\n", ret); goto err_deregister_svc; } /* * FIXME: This is a temporary hack to reconfigure the link at HELLO * (which abuses the deferred request processing mechanism). */ ret = gb_svc_queue_deferred_request(op); if (ret) goto err_destroy_watchdog; gb_svc_debugfs_init(svc); return 0; err_destroy_watchdog: gb_svc_watchdog_destroy(svc); err_deregister_svc: device_del(&svc->dev); return ret; } static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc, u8 intf_id) { struct gb_host_device *hd = svc->hd; struct gb_module *module; size_t num_interfaces; u8 module_id; list_for_each_entry(module, &hd->modules, hd_node) { module_id = module->module_id; num_interfaces = module->num_interfaces; if (intf_id >= module_id && intf_id < module_id + num_interfaces) { return module->interfaces[intf_id - module_id]; } } return NULL; } static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id) { struct gb_host_device *hd = svc->hd; struct gb_module *module; list_for_each_entry(module, &hd->modules, hd_node) { if (module->module_id == module_id) return module; } return NULL; } static void gb_svc_process_hello_deferred(struct gb_operation *operation) { struct gb_connection *connection = operation->connection; struct gb_svc *svc = gb_connection_get_data(connection); int ret; /* * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged * module. * * The code should be removed once SW-2217, Heuristic for UniPro * Power Mode Changes is resolved. 
*/ ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id, GB_SVC_UNIPRO_HS_SERIES_A, GB_SVC_UNIPRO_SLOW_AUTO_MODE, 2, 1, GB_SVC_SMALL_AMPLITUDE, GB_SVC_NO_DE_EMPHASIS, GB_SVC_UNIPRO_SLOW_AUTO_MODE, 2, 1, 0, 0, NULL, NULL); if (ret) dev_warn(&svc->dev, "power mode change failed on AP to switch link: %d\n", ret); } static void gb_svc_process_module_inserted(struct gb_operation *operation) { struct gb_svc_module_inserted_request *request; struct gb_connection *connection = operation->connection; struct gb_svc *svc = gb_connection_get_data(connection); struct gb_host_device *hd = svc->hd; struct gb_module *module; size_t num_interfaces; u8 module_id; u16 flags; int ret; /* The request message size has already been verified. */ request = operation->request->payload; module_id = request->primary_intf_id; num_interfaces = request->intf_count; flags = le16_to_cpu(request->flags); dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n", __func__, module_id, num_interfaces, flags); if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) { dev_warn(&svc->dev, "no primary interface detected on module %u\n", module_id); } module = gb_svc_module_lookup(svc, module_id); if (module) { dev_warn(&svc->dev, "unexpected module-inserted event %u\n", module_id); return; } module = gb_module_create(hd, module_id, num_interfaces); if (!module) { dev_err(&svc->dev, "failed to create module\n"); return; } ret = gb_module_add(module); if (ret) { gb_module_put(module); return; } list_add(&module->hd_node, &hd->modules); } static void gb_svc_process_module_removed(struct gb_operation *operation) { struct gb_svc_module_removed_request *request; struct gb_connection *connection = operation->connection; struct gb_svc *svc = gb_connection_get_data(connection); struct gb_module *module; u8 module_id; /* The request message size has already been verified. */ request = operation->request->payload; module_id = request->primary_intf_id; dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id); module = gb_svc_module_lookup(svc, module_id); if (!module) { dev_warn(&svc->dev, "unexpected module-removed event %u\n", module_id); return; } module->disconnected = true; gb_module_del(module); list_del(&module->hd_node); gb_module_put(module); } static void gb_svc_process_intf_oops(struct gb_operation *operation) { struct gb_svc_intf_oops_request *request; struct gb_connection *connection = operation->connection; struct gb_svc *svc = gb_connection_get_data(connection); struct gb_interface *intf; u8 intf_id; u8 reason; /* The request message size has already been verified. */ request = operation->request->payload; intf_id = request->intf_id; reason = request->reason; intf = gb_svc_interface_lookup(svc, intf_id); if (!intf) { dev_warn(&svc->dev, "unexpected interface-oops event %u\n", intf_id); return; } dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n", intf_id, reason); mutex_lock(&intf->mutex); intf->disconnected = true; gb_interface_disable(intf); gb_interface_deactivate(intf); mutex_unlock(&intf->mutex); } static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation) { struct gb_svc_intf_mailbox_event_request *request; struct gb_connection *connection = operation->connection; struct gb_svc *svc = gb_connection_get_data(connection); struct gb_interface *intf; u8 intf_id; u16 result_code; u32 mailbox; /* The request message size has already been verified. 
*/ request = operation->request->payload; intf_id = request->intf_id; result_code = le16_to_cpu(request->result_code); mailbox = le32_to_cpu(request->mailbox); dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n", __func__, intf_id, result_code, mailbox); intf = gb_svc_interface_lookup(svc, intf_id); if (!intf) { dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id); return; } gb_interface_mailbox_event(intf, result_code, mailbox); } static void gb_svc_process_deferred_request(struct work_struct *work) { struct gb_svc_deferred_request *dr; struct gb_operation *operation; struct gb_svc *svc; u8 type; dr = container_of(work, struct gb_svc_deferred_request, work); operation = dr->operation; svc = gb_connection_get_data(operation->connection); type = operation->request->header->type; switch (type) { case GB_SVC_TYPE_SVC_HELLO: gb_svc_process_hello_deferred(operation); break; case GB_SVC_TYPE_MODULE_INSERTED: gb_svc_process_module_inserted(operation); break; case GB_SVC_TYPE_MODULE_REMOVED: gb_svc_process_module_removed(operation); break; case GB_SVC_TYPE_INTF_MAILBOX_EVENT: gb_svc_process_intf_mailbox_event(operation); break; case GB_SVC_TYPE_INTF_OOPS: gb_svc_process_intf_oops(operation); break; default: dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type); } gb_operation_put(operation); kfree(dr); } static int gb_svc_queue_deferred_request(struct gb_operation *operation) { struct gb_svc *svc = gb_connection_get_data(operation->connection); struct gb_svc_deferred_request *dr; dr = kmalloc(sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; gb_operation_get(operation); dr->operation = operation; INIT_WORK(&dr->work, gb_svc_process_deferred_request); queue_work(svc->wq, &dr->work); return 0; } static int gb_svc_intf_reset_recv(struct gb_operation *op) { struct gb_svc *svc = gb_connection_get_data(op->connection); struct gb_message *request = op->request; struct gb_svc_intf_reset_request *reset; if (request->payload_size < sizeof(*reset)) { dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n", request->payload_size, sizeof(*reset)); return -EINVAL; } reset = request->payload; /* FIXME Reset the interface here */ return 0; } static int gb_svc_module_inserted_recv(struct gb_operation *op) { struct gb_svc *svc = gb_connection_get_data(op->connection); struct gb_svc_module_inserted_request *request; if (op->request->payload_size < sizeof(*request)) { dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n", op->request->payload_size, sizeof(*request)); return -EINVAL; } request = op->request->payload; dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->primary_intf_id); return gb_svc_queue_deferred_request(op); } static int gb_svc_module_removed_recv(struct gb_operation *op) { struct gb_svc *svc = gb_connection_get_data(op->connection); struct gb_svc_module_removed_request *request; if (op->request->payload_size < sizeof(*request)) { dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n", op->request->payload_size, sizeof(*request)); return -EINVAL; } request = op->request->payload; dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->primary_intf_id); return gb_svc_queue_deferred_request(op); } static int gb_svc_intf_oops_recv(struct gb_operation *op) { struct gb_svc *svc = gb_connection_get_data(op->connection); struct gb_svc_intf_oops_request *request; if (op->request->payload_size < sizeof(*request)) { dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n", op->request->payload_size, 
sizeof(*request)); return -EINVAL; } return gb_svc_queue_deferred_request(op); } static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op) { struct gb_svc *svc = gb_connection_get_data(op->connection); struct gb_svc_intf_mailbox_event_request *request; if (op->request->payload_size < sizeof(*request)) { dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n", op->request->payload_size, sizeof(*request)); return -EINVAL; } request = op->request->payload; dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id); return gb_svc_queue_deferred_request(op); } static int gb_svc_request_handler(struct gb_operation *op) { struct gb_connection *connection = op->connection; struct gb_svc *svc = gb_connection_get_data(connection); u8 type = op->type; int ret = 0; /* * SVC requests need to follow a specific order (at least initially) and * below code takes care of enforcing that. The expected order is: * - PROTOCOL_VERSION * - SVC_HELLO * - Any other request, but the earlier two. * * Incoming requests are guaranteed to be serialized and so we don't * need to protect 'state' for any races. */ switch (type) { case GB_SVC_TYPE_PROTOCOL_VERSION: if (svc->state != GB_SVC_STATE_RESET) ret = -EINVAL; break; case GB_SVC_TYPE_SVC_HELLO: if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION) ret = -EINVAL; break; default: if (svc->state != GB_SVC_STATE_SVC_HELLO) ret = -EINVAL; break; } if (ret) { dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n", type, svc->state); return ret; } switch (type) { case GB_SVC_TYPE_PROTOCOL_VERSION: ret = gb_svc_version_request(op); if (!ret) svc->state = GB_SVC_STATE_PROTOCOL_VERSION; return ret; case GB_SVC_TYPE_SVC_HELLO: ret = gb_svc_hello(op); if (!ret) svc->state = GB_SVC_STATE_SVC_HELLO; return ret; case GB_SVC_TYPE_INTF_RESET: return gb_svc_intf_reset_recv(op); case GB_SVC_TYPE_MODULE_INSERTED: return gb_svc_module_inserted_recv(op); case GB_SVC_TYPE_MODULE_REMOVED: return gb_svc_module_removed_recv(op); case GB_SVC_TYPE_INTF_MAILBOX_EVENT: return gb_svc_intf_mailbox_event_recv(op); case GB_SVC_TYPE_INTF_OOPS: return gb_svc_intf_oops_recv(op); default: dev_warn(&svc->dev, "unsupported request 0x%02x\n", type); return -EINVAL; } } static void gb_svc_release(struct device *dev) { struct gb_svc *svc = to_gb_svc(dev); if (svc->connection) gb_connection_destroy(svc->connection); ida_destroy(&svc->device_id_map); destroy_workqueue(svc->wq); kfree(svc); } const struct device_type greybus_svc_type = { .name = "greybus_svc", .release = gb_svc_release, }; struct gb_svc *gb_svc_create(struct gb_host_device *hd) { struct gb_svc *svc; svc = kzalloc(sizeof(*svc), GFP_KERNEL); if (!svc) return NULL; svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev)); if (!svc->wq) { kfree(svc); return NULL; } svc->dev.parent = &hd->dev; svc->dev.bus = &greybus_bus_type; svc->dev.type = &greybus_svc_type; svc->dev.groups = svc_groups; svc->dev.dma_mask = svc->dev.parent->dma_mask; device_initialize(&svc->dev); dev_set_name(&svc->dev, "%d-svc", hd->bus_id); ida_init(&svc->device_id_map); svc->state = GB_SVC_STATE_RESET; svc->hd = hd; svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID, gb_svc_request_handler); if (IS_ERR(svc->connection)) { dev_err(&svc->dev, "failed to create connection: %ld\n", PTR_ERR(svc->connection)); goto err_put_device; } gb_connection_set_data(svc->connection, svc); return svc; err_put_device: put_device(&svc->dev); return NULL; } int gb_svc_add(struct gb_svc *svc) { int ret; /* * The SVC protocol is currently 
driven by the SVC, so the SVC device * is added from the connection request handler when enough * information has been received. */ ret = gb_connection_enable(svc->connection); if (ret) return ret; return 0; } static void gb_svc_remove_modules(struct gb_svc *svc) { struct gb_host_device *hd = svc->hd; struct gb_module *module, *tmp; list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) { gb_module_del(module); list_del(&module->hd_node); gb_module_put(module); } } void gb_svc_del(struct gb_svc *svc) { gb_connection_disable_rx(svc->connection); /* * The SVC device may have been registered from the request handler. */ if (device_is_registered(&svc->dev)) { gb_svc_debugfs_exit(svc); gb_svc_watchdog_destroy(svc); device_del(&svc->dev); } flush_workqueue(svc->wq); gb_svc_remove_modules(svc); gb_connection_disable(svc->connection); } void gb_svc_put(struct gb_svc *svc) { put_device(&svc->dev); } |
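/*
 * Illustrative aside (not part of the driver above): gb_svc_hello() and the
 * gb_svc_*_recv() handlers all funnel work through
 * gb_svc_queue_deferred_request(), a common kernel shape for moving request
 * processing out of the receive path: take a reference on the object, wrap
 * it with a work_struct, and let an ordered workqueue process it later. A
 * minimal standalone sketch of that shape follows; struct my_req and the
 * my_req_*() helpers are hypothetical stand-ins for a refcounted request
 * object, not Greybus API.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_req;				/* hypothetical refcounted request */
void my_req_get(struct my_req *req);	/* hypothetical: take a reference */
void my_req_put(struct my_req *req);	/* hypothetical: drop a reference */
void my_req_handle(struct my_req *req);	/* hypothetical: do the real work */

struct my_deferred {
	struct work_struct work;
	struct my_req *req;
};

static void my_process_deferred(struct work_struct *work)
{
	struct my_deferred *d = container_of(work, struct my_deferred, work);

	/* Runs in workqueue (process) context, serialized if wq is ordered. */
	my_req_handle(d->req);

	my_req_put(d->req);
	kfree(d);
}

static int my_queue_deferred(struct workqueue_struct *wq, struct my_req *req)
{
	struct my_deferred *d;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	/* Keep the request alive until the work item has run. */
	my_req_get(req);
	d->req = req;

	INIT_WORK(&d->work, my_process_deferred);
	queue_work(wq, &d->work);

	return 0;
}
/* End of illustrative aside. */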
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID driver for ELECOM devices:
 *  - BM084 Bluetooth Mouse
 *  - EX-G Trackballs (M-XT3DRBK, M-XT3URBK, M-XT4DRBK)
 *  - DEFT Trackballs (M-DT1DRBK, M-DT1URBK, M-DT2DRBK, M-DT2URBK)
 *  - HUGE Trackballs (M-HT1DRBK, M-HT1URBK)
 *
 *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
 *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
 *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
 *  Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
 *  Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
 *  Copyright (c) 2020 YOSHIOKA Takuma <lo48576@hard-wi.red>
 *  Copyright (c) 2022 Takahiro Fujii <fujii@xaxxi.net>
 */

/*
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/*
 * Certain ELECOM mice misreport their button count, meaning that they only
 * work correctly with the ELECOM mouse assistant software, which is
 * unavailable for Linux. Four extra INPUT reports and a FEATURE report are
 * described by the report descriptor, but it does not appear that these
 * enable software to control what the extra buttons map to. The only simple
 * and straightforward solution seems to involve fixing up the report
 * descriptor.
 */
#define MOUSE_BUTTONS_MAX 8
static void mouse_button_fixup(struct hid_device *hdev,
			       __u8 *rdesc, unsigned int rsize,
			       unsigned int button_bit_count,
			       unsigned int padding_bit,
			       unsigned int button_report_size,
			       unsigned int button_usage_maximum,
			       int nbuttons)
{
	if (rsize < 32 || rdesc[button_bit_count] != 0x95 ||
	    rdesc[button_report_size] != 0x75 ||
	    rdesc[button_report_size + 1] != 0x01 ||
	    rdesc[button_usage_maximum] != 0x29 ||
	    rdesc[padding_bit] != 0x75)
		return;
	hid_info(hdev, "Fixing up Elecom mouse button count\n");
	nbuttons = clamp(nbuttons, 0, MOUSE_BUTTONS_MAX);
	rdesc[button_bit_count + 1] = nbuttons;
	rdesc[button_usage_maximum + 1] = nbuttons;
	rdesc[padding_bit + 1] = MOUSE_BUTTONS_MAX - nbuttons;
}

static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				       unsigned int *rsize)
{
	switch (hdev->product) {
	case USB_DEVICE_ID_ELECOM_BM084:
		/* The BM084 Bluetooth mouse includes a non-existing horizontal
		 * wheel in the HID descriptor.
*/ if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); rdesc[47] = 0x00; } break; case USB_DEVICE_ID_ELECOM_M_XGL20DLBK: /* * Report descriptor format: * 20: button bit count * 28: padding bit count * 22: button report size * 14: button usage maximum */ mouse_button_fixup(hdev, rdesc, *rsize, 20, 28, 22, 14, 8); break; case USB_DEVICE_ID_ELECOM_M_XT3URBK: case USB_DEVICE_ID_ELECOM_M_XT3DRBK: case USB_DEVICE_ID_ELECOM_M_XT4DRBK: /* * Report descriptor format: * 12: button bit count * 30: padding bit count * 14: button report size * 20: button usage maximum */ mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 6); break; case USB_DEVICE_ID_ELECOM_M_DT1URBK: case USB_DEVICE_ID_ELECOM_M_DT1DRBK: case USB_DEVICE_ID_ELECOM_M_HT1URBK_010C: case USB_DEVICE_ID_ELECOM_M_HT1URBK_019B: case USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D: /* * Report descriptor format: * 12: button bit count * 30: padding bit count * 14: button report size * 20: button usage maximum */ mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8); break; case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C: /* * Report descriptor format: * 22: button bit count * 30: padding bit count * 24: button report size * 16: button usage maximum */ mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 8); break; } return rdesc; } static const struct hid_device_id elecom_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_010C) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_019B) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) }, { } }; MODULE_DEVICE_TABLE(hid, elecom_devices); static struct hid_driver elecom_driver = { .name = "elecom", .id_table = elecom_devices, .report_fixup = elecom_report_fixup }; module_hid_driver(elecom_driver); MODULE_DESCRIPTION("HID driver for ELECOM devices"); MODULE_LICENSE("GPL"); |
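/*
 * Illustrative aside (not part of the driver above): mouse_button_fixup()
 * patches three HID "short items", each a one-byte tag followed by one data
 * byte: Report Count (tag 0x95), Usage Maximum (tag 0x29) and the padding
 * Report Size (tag 0x75). The standalone program below applies the same
 * three writes to a representative, simplified button block; the fragment
 * and its offsets are made up for illustration (the driver uses per-device
 * offsets such as 20/28/22/14 into the full descriptor).
 */
#include <stdio.h>

int main(void)
{
	unsigned char frag[] = {
		0x05, 0x09,	/* Usage Page (Button)                 */
		0x19, 0x01,	/* Usage Minimum (1)                   */
		0x29, 0x05,	/* Usage Maximum (5)   <- patched      */
		0x15, 0x00,	/* Logical Minimum (0)                 */
		0x25, 0x01,	/* Logical Maximum (1)                 */
		0x95, 0x05,	/* Report Count (5)    <- patched      */
		0x75, 0x01,	/* Report Size (1)                     */
		0x75, 0x03,	/* Report Size (3), padding <- patched */
	};
	unsigned int i;
	int nbuttons = 8;	/* MOUSE_BUTTONS_MAX in the driver */

	/* The same three writes mouse_button_fixup() performs. */
	frag[11] = nbuttons;		/* Report Count  = 8 */
	frag[5] = nbuttons;		/* Usage Maximum = 8 */
	frag[15] = 8 - nbuttons;	/* padding bits  = 0 */

	for (i = 0; i < sizeof(frag); i++)
		printf("%02x ", frag[i]);
	printf("\n");
	return 0;
}
/* End of illustrative aside. */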
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID driver for some cypress "special" devices
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 *  Copyright (c) 2006-2007 Jiri Kosina
 *  Copyright (c) 2008 Jiri Slaby
 */

/*
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/module.h>

#include "hid-ids.h"

#define CP_RDESC_SWAPPED_MIN_MAX	0x01
#define CP_2WHEEL_MOUSE_HACK		0x02
#define CP_2WHEEL_MOUSE_HACK_ON		0x04
#define VA_INVAL_LOGICAL_BOUNDARY	0x08

/*
 * Some USB barcode readers from cypress have usage min and usage max in
 * the wrong order
 */
static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc,
			    unsigned int *rsize)
{
	unsigned int i;

	if (*rsize < 4)
		return rdesc;

	for (i = 0; i < *rsize - 4; i++)
		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
			rdesc[i] = 0x19;
			rdesc[i + 2] = 0x29;
			swap(rdesc[i + 3], rdesc[i + 1]);
		}

	return rdesc;
}

static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc,
				       unsigned int *rsize)
{
	/*
	 * Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly
	 * reports Logical Minimum of its Consumer Control device as 572
	 * (0x02 0x3c). Fix this by setting its Logical Minimum to zero.
*/ if (*rsize == 25 && rdesc[0] == 0x05 && rdesc[1] == 0x0c && rdesc[2] == 0x09 && rdesc[3] == 0x01 && rdesc[6] == 0x19 && rdesc[7] == 0x00 && rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) { hid_info(hdev, "fixing up varmilo VA104M consumer control report descriptor\n"); rdesc[12] = 0x00; rdesc[13] = 0x00; } return rdesc; } static const __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if (quirks & CP_RDESC_SWAPPED_MIN_MAX) rdesc = cp_rdesc_fixup(hdev, rdesc, rsize); if (quirks & VA_INVAL_LOGICAL_BOUNDARY) rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize); return rdesc; } static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if (!(quirks & CP_2WHEEL_MOUSE_HACK)) return 0; if (usage->type == EV_REL && usage->code == REL_WHEEL) set_bit(REL_HWHEEL, *bit); if (usage->hid == 0x00090005) return -1; return 0; } static int cp_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput || !usage->type || !(quirks & CP_2WHEEL_MOUSE_HACK)) return 0; if (usage->hid == 0x00090005) { if (value) quirks |= CP_2WHEEL_MOUSE_HACK_ON; else quirks &= ~CP_2WHEEL_MOUSE_HACK_ON; hid_set_drvdata(hdev, (void *)quirks); return 1; } if (usage->code == REL_WHEEL && (quirks & CP_2WHEEL_MOUSE_HACK_ON)) { struct input_dev *input = field->hidinput->input; input_event(input, usage->type, REL_HWHEEL, value); return 1; } return 0; } static int cp_probe(struct hid_device *hdev, const struct hid_device_id *id) { unsigned long quirks = id->driver_data; int ret; hid_set_drvdata(hdev, (void *)quirks); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } return 0; err_free: return ret; } static const struct hid_device_id cp_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1), .driver_data = CP_RDESC_SWAPPED_MIN_MAX }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2), .driver_data = CP_RDESC_SWAPPED_MIN_MAX }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3), .driver_data = CP_RDESC_SWAPPED_MIN_MAX }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4), .driver_data = CP_RDESC_SWAPPED_MIN_MAX }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE), .driver_data = CP_2WHEEL_MOUSE_HACK }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1), .driver_data = VA_INVAL_LOGICAL_BOUNDARY }, { } }; MODULE_DEVICE_TABLE(hid, cp_devices); static struct hid_driver cp_driver = { .name = "cypress", .id_table = cp_devices, .report_fixup = cp_report_fixup, .input_mapped = cp_input_mapped, .event = cp_event, .probe = cp_probe, }; module_hid_driver(cp_driver); MODULE_DESCRIPTION("HID driver for some cypress \"special\" devices"); MODULE_LICENSE("GPL"); |
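/*
 * Illustrative aside (not part of the driver above): the standalone program
 * below replays the swap that cp_rdesc_fixup() performs when a descriptor
 * carries Usage Maximum (tag 0x29) before Usage Minimum (tag 0x19): both
 * tags and their data bytes are exchanged so the minimum precedes the
 * maximum. The five-byte fragment is made up; 0xc0 (End Collection) is just
 * trailing filler so the match stays inside the driver's loop bound.
 */
#include <stdio.h>

int main(void)
{
	/* Usage Maximum (3) before Usage Minimum (1): the wrong order. */
	unsigned char rdesc[] = { 0x29, 0x03, 0x19, 0x01, 0xc0 };
	unsigned int i, rsize = sizeof(rdesc);

	for (i = 0; i < rsize - 4; i++)
		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
			unsigned char tmp = rdesc[i + 1];

			rdesc[i] = 0x19;
			rdesc[i + 2] = 0x29;
			rdesc[i + 1] = rdesc[i + 3];
			rdesc[i + 3] = tmp;
		}

	/* Prints "19 01 29 03 c0": Usage Minimum now comes first. */
	for (i = 0; i < rsize; i++)
		printf("%02x ", rdesc[i]);
	printf("\n");
	return 0;
}
/* End of illustrative aside. */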
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  LED state routines for driver control interface
 *  Copyright (c) 2021 by Jaroslav Kysela <perex@perex.cz>
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/leds.h>
#include <sound/core.h>
#include <sound/control.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("ALSA
control interface to LED trigger code."); MODULE_LICENSE("GPL"); #define MAX_LED (((SNDRV_CTL_ELEM_ACCESS_MIC_LED - SNDRV_CTL_ELEM_ACCESS_SPK_LED) \ >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) + 1) #define to_led_card_dev(_dev) \ container_of(_dev, struct snd_ctl_led_card, dev) enum snd_ctl_led_mode { MODE_FOLLOW_MUTE = 0, MODE_FOLLOW_ROUTE, MODE_OFF, MODE_ON, }; struct snd_ctl_led_card { struct device dev; int number; struct snd_ctl_led *led; }; struct snd_ctl_led { struct device dev; struct list_head controls; const char *name; unsigned int group; enum led_audio trigger_type; enum snd_ctl_led_mode mode; struct snd_ctl_led_card *cards[SNDRV_CARDS]; }; struct snd_ctl_led_ctl { struct list_head list; struct snd_card *card; unsigned int access; struct snd_kcontrol *kctl; unsigned int index_offset; }; static DEFINE_MUTEX(snd_ctl_led_mutex); static bool snd_ctl_led_card_valid[SNDRV_CARDS]; static struct led_trigger *snd_ctl_ledtrig_audio[NUM_AUDIO_LEDS]; static struct snd_ctl_led snd_ctl_leds[MAX_LED] = { { .name = "speaker", .group = (SNDRV_CTL_ELEM_ACCESS_SPK_LED >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) - 1, .trigger_type = LED_AUDIO_MUTE, .mode = MODE_FOLLOW_MUTE, }, { .name = "mic", .group = (SNDRV_CTL_ELEM_ACCESS_MIC_LED >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) - 1, .trigger_type = LED_AUDIO_MICMUTE, .mode = MODE_FOLLOW_MUTE, }, }; static void snd_ctl_led_sysfs_add(struct snd_card *card); static void snd_ctl_led_sysfs_remove(struct snd_card *card); #define UPDATE_ROUTE(route, cb) \ do { \ int route2 = (cb); \ if (route2 >= 0) \ route = route < 0 ? route2 : (route | route2); \ } while (0) static inline unsigned int access_to_group(unsigned int access) { return ((access & SNDRV_CTL_ELEM_ACCESS_LED_MASK) >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) - 1; } static inline unsigned int group_to_access(unsigned int group) { return (group + 1) << SNDRV_CTL_ELEM_ACCESS_LED_SHIFT; } static struct snd_ctl_led *snd_ctl_led_get_by_access(unsigned int access) { unsigned int group = access_to_group(access); if (group >= MAX_LED) return NULL; return &snd_ctl_leds[group]; } /* * A note for callers: * The two static variables info and value are protected using snd_ctl_led_mutex. 
*/ static int snd_ctl_led_get(struct snd_ctl_led_ctl *lctl) { static struct snd_ctl_elem_info info; static struct snd_ctl_elem_value value; struct snd_kcontrol *kctl = lctl->kctl; unsigned int i; int result; memset(&info, 0, sizeof(info)); info.id = kctl->id; info.id.index += lctl->index_offset; info.id.numid += lctl->index_offset; result = kctl->info(kctl, &info); if (result < 0) return -1; memset(&value, 0, sizeof(value)); value.id = info.id; result = kctl->get(kctl, &value); if (result < 0) return -1; if (info.type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || info.type == SNDRV_CTL_ELEM_TYPE_INTEGER) { for (i = 0; i < info.count; i++) if (value.value.integer.value[i] != info.value.integer.min) return 1; } else if (info.type == SNDRV_CTL_ELEM_TYPE_INTEGER64) { for (i = 0; i < info.count; i++) if (value.value.integer64.value[i] != info.value.integer64.min) return 1; } return 0; } static void snd_ctl_led_set_state(struct snd_card *card, unsigned int access, struct snd_kcontrol *kctl, unsigned int ioff) { struct snd_ctl_led *led; struct snd_ctl_led_ctl *lctl; int route; bool found; led = snd_ctl_led_get_by_access(access); if (!led) return; route = -1; found = false; scoped_guard(mutex, &snd_ctl_led_mutex) { /* the card may not be registered (active) at this point */ if (card && !snd_ctl_led_card_valid[card->number]) return; list_for_each_entry(lctl, &led->controls, list) { if (lctl->kctl == kctl && lctl->index_offset == ioff) found = true; UPDATE_ROUTE(route, snd_ctl_led_get(lctl)); } if (!found && kctl && card) { lctl = kzalloc(sizeof(*lctl), GFP_KERNEL); if (lctl) { lctl->card = card; lctl->access = access; lctl->kctl = kctl; lctl->index_offset = ioff; list_add(&lctl->list, &led->controls); UPDATE_ROUTE(route, snd_ctl_led_get(lctl)); } } } switch (led->mode) { case MODE_OFF: route = 1; break; case MODE_ON: route = 0; break; case MODE_FOLLOW_ROUTE: if (route >= 0) route ^= 1; break; case MODE_FOLLOW_MUTE: /* noop */ break; } if (route >= 0) { struct led_trigger *trig = snd_ctl_ledtrig_audio[led->trigger_type]; led_trigger_event(trig, route ? 
LED_OFF : LED_ON); } } static struct snd_ctl_led_ctl *snd_ctl_led_find(struct snd_kcontrol *kctl, unsigned int ioff) { struct list_head *controls; struct snd_ctl_led_ctl *lctl; unsigned int group; for (group = 0; group < MAX_LED; group++) { controls = &snd_ctl_leds[group].controls; list_for_each_entry(lctl, controls, list) if (lctl->kctl == kctl && lctl->index_offset == ioff) return lctl; } return NULL; } static unsigned int snd_ctl_led_remove(struct snd_kcontrol *kctl, unsigned int ioff, unsigned int access) { struct snd_ctl_led_ctl *lctl; unsigned int ret = 0; guard(mutex)(&snd_ctl_led_mutex); lctl = snd_ctl_led_find(kctl, ioff); if (lctl && (access == 0 || access != lctl->access)) { ret = lctl->access; list_del(&lctl->list); kfree(lctl); } return ret; } static void snd_ctl_led_notify(struct snd_card *card, unsigned int mask, struct snd_kcontrol *kctl, unsigned int ioff) { struct snd_kcontrol_volatile *vd; unsigned int access, access2; if (mask == SNDRV_CTL_EVENT_MASK_REMOVE) { access = snd_ctl_led_remove(kctl, ioff, 0); if (access) snd_ctl_led_set_state(card, access, NULL, 0); } else if (mask & SNDRV_CTL_EVENT_MASK_INFO) { vd = &kctl->vd[ioff]; access = vd->access & SNDRV_CTL_ELEM_ACCESS_LED_MASK; access2 = snd_ctl_led_remove(kctl, ioff, access); if (access2) snd_ctl_led_set_state(card, access2, NULL, 0); if (access) snd_ctl_led_set_state(card, access, kctl, ioff); } else if ((mask & (SNDRV_CTL_EVENT_MASK_ADD | SNDRV_CTL_EVENT_MASK_VALUE)) != 0) { vd = &kctl->vd[ioff]; access = vd->access & SNDRV_CTL_ELEM_ACCESS_LED_MASK; if (access) snd_ctl_led_set_state(card, access, kctl, ioff); } } DEFINE_FREE(snd_card_unref, struct snd_card *, if (_T) snd_card_unref(_T)) static int snd_ctl_led_set_id(int card_number, struct snd_ctl_elem_id *id, unsigned int group, bool set) { struct snd_card *card __free(snd_card_unref) = NULL; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int ioff, access, new_access; card = snd_card_ref(card_number); if (!card) return -ENXIO; guard(rwsem_write)(&card->controls_rwsem); kctl = snd_ctl_find_id(card, id); if (!kctl) return -ENOENT; ioff = snd_ctl_get_ioff(kctl, id); vd = &kctl->vd[ioff]; access = vd->access & SNDRV_CTL_ELEM_ACCESS_LED_MASK; if (access != 0 && access != group_to_access(group)) return -EXDEV; new_access = vd->access & ~SNDRV_CTL_ELEM_ACCESS_LED_MASK; if (set) new_access |= group_to_access(group); if (new_access != vd->access) { vd->access = new_access; snd_ctl_led_notify(card, SNDRV_CTL_EVENT_MASK_INFO, kctl, ioff); } return 0; } static void snd_ctl_led_refresh(void) { unsigned int group; for (group = 0; group < MAX_LED; group++) snd_ctl_led_set_state(NULL, group_to_access(group), NULL, 0); } static void snd_ctl_led_ctl_destroy(struct snd_ctl_led_ctl *lctl) { list_del(&lctl->list); kfree(lctl); } static void snd_ctl_led_clean(struct snd_card *card) { unsigned int group; struct snd_ctl_led_ctl *lctl, *_lctl; struct snd_ctl_led *led; for (group = 0; group < MAX_LED; group++) { led = &snd_ctl_leds[group]; list_for_each_entry_safe(lctl, _lctl, &led->controls, list) if (!card || lctl->card == card) snd_ctl_led_ctl_destroy(lctl); } } static int snd_ctl_led_reset(int card_number, unsigned int group) { struct snd_card *card __free(snd_card_unref) = NULL; struct snd_ctl_led_ctl *lctl, *_lctl; struct snd_ctl_led *led; struct snd_kcontrol_volatile *vd; bool change = false; card = snd_card_ref(card_number); if (!card) return -ENXIO; scoped_guard(mutex, &snd_ctl_led_mutex) { if (!snd_ctl_led_card_valid[card_number]) return -ENXIO; led = 
&snd_ctl_leds[group]; list_for_each_entry_safe(lctl, _lctl, &led->controls, list) if (lctl->card == card) { vd = &lctl->kctl->vd[lctl->index_offset]; vd->access &= ~group_to_access(group); snd_ctl_led_ctl_destroy(lctl); change = true; } } if (change) snd_ctl_led_set_state(NULL, group_to_access(group), NULL, 0); return 0; } static void snd_ctl_led_register(struct snd_card *card) { struct snd_kcontrol *kctl; unsigned int ioff; if (snd_BUG_ON(card->number < 0 || card->number >= ARRAY_SIZE(snd_ctl_led_card_valid))) return; scoped_guard(mutex, &snd_ctl_led_mutex) snd_ctl_led_card_valid[card->number] = true; /* the register callback is already called with held card->controls_rwsem */ list_for_each_entry(kctl, &card->controls, list) for (ioff = 0; ioff < kctl->count; ioff++) snd_ctl_led_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, kctl, ioff); snd_ctl_led_refresh(); snd_ctl_led_sysfs_add(card); } static void snd_ctl_led_disconnect(struct snd_card *card) { snd_ctl_led_sysfs_remove(card); scoped_guard(mutex, &snd_ctl_led_mutex) { snd_ctl_led_card_valid[card->number] = false; snd_ctl_led_clean(card); } snd_ctl_led_refresh(); } static void snd_ctl_led_card_release(struct device *dev) { struct snd_ctl_led_card *led_card = to_led_card_dev(dev); kfree(led_card); } static void snd_ctl_led_release(struct device *dev) { } static void snd_ctl_led_dev_release(struct device *dev) { } /* * sysfs */ static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_ctl_led *led = container_of(dev, struct snd_ctl_led, dev); const char *str = NULL; switch (led->mode) { case MODE_FOLLOW_MUTE: str = "follow-mute"; break; case MODE_FOLLOW_ROUTE: str = "follow-route"; break; case MODE_ON: str = "on"; break; case MODE_OFF: str = "off"; break; } return sysfs_emit(buf, "%s\n", str); } static ssize_t mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_ctl_led *led = container_of(dev, struct snd_ctl_led, dev); char _buf[16]; size_t l = min(count, sizeof(_buf) - 1); enum snd_ctl_led_mode mode; memcpy(_buf, buf, l); _buf[l] = '\0'; if (strstr(_buf, "mute")) mode = MODE_FOLLOW_MUTE; else if (strstr(_buf, "route")) mode = MODE_FOLLOW_ROUTE; else if (strncmp(_buf, "off", 3) == 0 || strncmp(_buf, "0", 1) == 0) mode = MODE_OFF; else if (strncmp(_buf, "on", 2) == 0 || strncmp(_buf, "1", 1) == 0) mode = MODE_ON; else return count; scoped_guard(mutex, &snd_ctl_led_mutex) led->mode = mode; snd_ctl_led_set_state(NULL, group_to_access(led->group), NULL, 0); return count; } static ssize_t brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_ctl_led *led = container_of(dev, struct snd_ctl_led, dev); struct led_trigger *trig = snd_ctl_ledtrig_audio[led->trigger_type]; return sysfs_emit(buf, "%u\n", led_trigger_get_brightness(trig)); } static DEVICE_ATTR_RW(mode); static DEVICE_ATTR_RO(brightness); static struct attribute *snd_ctl_led_dev_attrs[] = { &dev_attr_mode.attr, &dev_attr_brightness.attr, NULL, }; static const struct attribute_group snd_ctl_led_dev_attr_group = { .attrs = snd_ctl_led_dev_attrs, }; static const struct attribute_group *snd_ctl_led_dev_attr_groups[] = { &snd_ctl_led_dev_attr_group, NULL, }; static char *find_eos(char *s) { while (*s && *s != ',') s++; if (*s) s++; return s; } static char *parse_uint(char *s, unsigned int *val) { unsigned long long res; if (kstrtoull(s, 10, &res)) res = 0; *val = res; return find_eos(s); } static char *parse_string(char *s, char *val, size_t val_size) { if (*s == 
'"' || *s == '\'') { char c = *s; s++; while (*s && *s != c) { if (val_size > 1) { *val++ = *s; val_size--; } s++; } } else { while (*s && *s != ',') { if (val_size > 1) { *val++ = *s; val_size--; } s++; } } *val = '\0'; if (*s) s++; return s; } static char *parse_iface(char *s, snd_ctl_elem_iface_t *val) { if (!strncasecmp(s, "card", 4)) *val = SNDRV_CTL_ELEM_IFACE_CARD; else if (!strncasecmp(s, "mixer", 5)) *val = SNDRV_CTL_ELEM_IFACE_MIXER; return find_eos(s); } /* * These types of input strings are accepted: * * unsigned integer - numid (equivaled to numid=UINT) * string - basic mixer name (equivalent to iface=MIXER,name=STR) * numid=UINT * [iface=MIXER,][device=UINT,][subdevice=UINT,]name=STR[,index=UINT] */ static ssize_t set_led_id(struct snd_ctl_led_card *led_card, const char *buf, size_t count, bool attach) { char buf2[256], *s, *os; struct snd_ctl_elem_id id; int err; if (strscpy(buf2, buf, sizeof(buf2)) < 0) return -E2BIG; memset(&id, 0, sizeof(id)); id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; s = buf2; while (*s) { os = s; if (!strncasecmp(s, "numid=", 6)) { s = parse_uint(s + 6, &id.numid); } else if (!strncasecmp(s, "iface=", 6)) { s = parse_iface(s + 6, &id.iface); } else if (!strncasecmp(s, "device=", 7)) { s = parse_uint(s + 7, &id.device); } else if (!strncasecmp(s, "subdevice=", 10)) { s = parse_uint(s + 10, &id.subdevice); } else if (!strncasecmp(s, "name=", 5)) { s = parse_string(s + 5, id.name, sizeof(id.name)); } else if (!strncasecmp(s, "index=", 6)) { s = parse_uint(s + 6, &id.index); } else if (s == buf2) { while (*s) { if (*s < '0' || *s > '9') break; s++; } if (*s == '\0') parse_uint(buf2, &id.numid); else { for (; *s >= ' '; s++); *s = '\0'; strscpy(id.name, buf2, sizeof(id.name)); } break; } if (*s == ',') s++; if (s == os) break; } err = snd_ctl_led_set_id(led_card->number, &id, led_card->led->group, attach); if (err < 0) return err; return count; } static ssize_t attach_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_ctl_led_card *led_card = container_of(dev, struct snd_ctl_led_card, dev); return set_led_id(led_card, buf, count, true); } static ssize_t detach_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_ctl_led_card *led_card = container_of(dev, struct snd_ctl_led_card, dev); return set_led_id(led_card, buf, count, false); } static ssize_t reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_ctl_led_card *led_card = container_of(dev, struct snd_ctl_led_card, dev); int err; if (count > 0 && buf[0] == '1') { err = snd_ctl_led_reset(led_card->number, led_card->led->group); if (err < 0) return err; } return count; } static ssize_t list_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_ctl_led_card *led_card = container_of(dev, struct snd_ctl_led_card, dev); struct snd_card *card __free(snd_card_unref) = NULL; struct snd_ctl_led_ctl *lctl; size_t l = 0; card = snd_card_ref(led_card->number); if (!card) return -ENXIO; guard(rwsem_read)(&card->controls_rwsem); guard(mutex)(&snd_ctl_led_mutex); if (snd_ctl_led_card_valid[led_card->number]) { list_for_each_entry(lctl, &led_card->led->controls, list) { if (lctl->card != card) continue; if (l) l += sysfs_emit_at(buf, l, " "); l += sysfs_emit_at(buf, l, "%u", lctl->kctl->id.numid + lctl->index_offset); } } return l; } static DEVICE_ATTR_WO(attach); static DEVICE_ATTR_WO(detach); static DEVICE_ATTR_WO(reset); static 
DEVICE_ATTR_RO(list); static struct attribute *snd_ctl_led_card_attrs[] = { &dev_attr_attach.attr, &dev_attr_detach.attr, &dev_attr_reset.attr, &dev_attr_list.attr, NULL, }; static const struct attribute_group snd_ctl_led_card_attr_group = { .attrs = snd_ctl_led_card_attrs, }; static const struct attribute_group *snd_ctl_led_card_attr_groups[] = { &snd_ctl_led_card_attr_group, NULL, }; static struct device snd_ctl_led_dev; static void snd_ctl_led_sysfs_add(struct snd_card *card) { unsigned int group; struct snd_ctl_led_card *led_card; struct snd_ctl_led *led; char link_name[32]; for (group = 0; group < MAX_LED; group++) { led = &snd_ctl_leds[group]; led_card = kzalloc(sizeof(*led_card), GFP_KERNEL); if (!led_card) goto cerr2; led_card->number = card->number; led_card->led = led; device_initialize(&led_card->dev); led_card->dev.release = snd_ctl_led_card_release; if (dev_set_name(&led_card->dev, "card%d", card->number) < 0) goto cerr; led_card->dev.parent = &led->dev; led_card->dev.groups = snd_ctl_led_card_attr_groups; if (device_add(&led_card->dev)) goto cerr; led->cards[card->number] = led_card; snprintf(link_name, sizeof(link_name), "led-%s", led->name); if (sysfs_create_link(&card->ctl_dev->kobj, &led_card->dev.kobj, link_name)) dev_err(card->dev, "%s: can't create symlink to controlC%i device\n", __func__, card->number); if (sysfs_create_link(&led_card->dev.kobj, &card->card_dev.kobj, "card")) dev_err(card->dev, "%s: can't create symlink to card%i\n", __func__, card->number); continue; cerr: put_device(&led_card->dev); cerr2: dev_err(card->dev, "snd_ctl_led: unable to add card%d", card->number); } } static void snd_ctl_led_sysfs_remove(struct snd_card *card) { unsigned int group; struct snd_ctl_led_card *led_card; struct snd_ctl_led *led; char link_name[32]; for (group = 0; group < MAX_LED; group++) { led = &snd_ctl_leds[group]; led_card = led->cards[card->number]; if (!led_card) continue; snprintf(link_name, sizeof(link_name), "led-%s", led->name); sysfs_remove_link(&card->ctl_dev->kobj, link_name); sysfs_remove_link(&led_card->dev.kobj, "card"); device_unregister(&led_card->dev); led->cards[card->number] = NULL; } } /* * Control layer registration */ static struct snd_ctl_layer_ops snd_ctl_led_lops = { .module_name = SND_CTL_LAYER_MODULE_LED, .lregister = snd_ctl_led_register, .ldisconnect = snd_ctl_led_disconnect, .lnotify = snd_ctl_led_notify, }; static int __init snd_ctl_led_init(void) { struct snd_ctl_led *led; unsigned int group; led_trigger_register_simple("audio-mute", &snd_ctl_ledtrig_audio[LED_AUDIO_MUTE]); led_trigger_register_simple("audio-micmute", &snd_ctl_ledtrig_audio[LED_AUDIO_MICMUTE]); device_initialize(&snd_ctl_led_dev); snd_ctl_led_dev.class = &sound_class; snd_ctl_led_dev.release = snd_ctl_led_dev_release; dev_set_name(&snd_ctl_led_dev, "ctl-led"); if (device_add(&snd_ctl_led_dev)) { put_device(&snd_ctl_led_dev); return -ENOMEM; } for (group = 0; group < MAX_LED; group++) { led = &snd_ctl_leds[group]; INIT_LIST_HEAD(&led->controls); device_initialize(&led->dev); led->dev.parent = &snd_ctl_led_dev; led->dev.release = snd_ctl_led_release; led->dev.groups = snd_ctl_led_dev_attr_groups; dev_set_name(&led->dev, led->name); if (device_add(&led->dev)) { put_device(&led->dev); for (; group > 0; group--) { led = &snd_ctl_leds[group - 1]; device_unregister(&led->dev); } device_unregister(&snd_ctl_led_dev); return -ENOMEM; } } snd_ctl_register_layer(&snd_ctl_led_lops); return 0; } static void __exit snd_ctl_led_exit(void) { struct snd_ctl_led *led; struct snd_card *card; 
unsigned int group, card_number; snd_ctl_disconnect_layer(&snd_ctl_led_lops); for (card_number = 0; card_number < SNDRV_CARDS; card_number++) { if (!snd_ctl_led_card_valid[card_number]) continue; card = snd_card_ref(card_number); if (card) { snd_ctl_led_sysfs_remove(card); snd_card_unref(card); } } for (group = 0; group < MAX_LED; group++) { led = &snd_ctl_leds[group]; device_unregister(&led->dev); } device_unregister(&snd_ctl_led_dev); snd_ctl_led_clean(NULL); led_trigger_unregister_simple(snd_ctl_ledtrig_audio[LED_AUDIO_MUTE]); led_trigger_unregister_simple(snd_ctl_ledtrig_audio[LED_AUDIO_MICMUTE]); } module_init(snd_ctl_led_init) module_exit(snd_ctl_led_exit) MODULE_ALIAS("ledtrig:audio-mute"); MODULE_ALIAS("ledtrig:audio-micmute");
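/*
 * Illustrative userspace sketch (not part of the kernel sources): one way to
 * drive the sysfs interface created above. The paths are an assumption about
 * the default sound-class layout ("ctl-led" under /sys/class/sound, with
 * "speaker"/"mic" children and per-card "cardN" subdirectories); adjust them
 * for the target system. The attach string follows the grammar documented
 * above set_led_id().
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		fprintf(stderr, "%s: %s\n", path, strerror(errno));
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* mode_store() matches substrings: "mute" selects MODE_FOLLOW_MUTE */
	write_attr("/sys/class/sound/ctl-led/speaker/mode", "follow-mute");
	/* attach a control on card 0 by mixer name (iface=MIXER is default) */
	write_attr("/sys/class/sound/ctl-led/speaker/card0/attach",
		   "name='Master Playback Switch'");
	return 0;
}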
/* * linux/fs/nls/nls_iso8859-5.c * * Charset iso8859-5 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f, /* 0x90*/ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f, /* 0xa0*/ 0x00a0, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a, 0x040b, 0x040c, 0x00ad, 0x040e, 0x040f, /* 0xb0*/ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, /* 0xc0*/ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, /* 0xd0*/ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f, /* 0xe0*/ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f, /* 0xf0*/ 0x2116, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 
0x0456, 0x0457, 0x0458, 0x0459, 0x045a, 0x045b, 0x045c, 0x00a7, 0x045e, 0x045f, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */ }; static const unsigned char page04[256] = { 0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0x00, 0xae, 0xaf, /* 0x08-0x0f */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0x38-0x3f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */ 0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, 0xfe, 0xff, /* 0x58-0x5f */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x00, /* 0x10-0x17 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page21, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 
0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xa0-0xa7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xad, 0xfe, 0xff, /* 0xa8-0xaf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xb0-0xb7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xd0-0xd7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xf0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xf0-0xf7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xfd, 0xae, 0xaf, /* 0xf8-0xff */ }; static 
int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "iso8859-5", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_iso8859_5(void) { return register_nls(&table); } static void __exit exit_nls_iso8859_5(void) { unregister_nls(&table); } module_init(init_nls_iso8859_5) module_exit(exit_nls_iso8859_5) MODULE_DESCRIPTION("NLS ISO 8859-5 (Cyrillic)"); MODULE_LICENSE("Dual BSD/GPL");
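/*
 * Illustrative userspace sketch (not part of the kernel sources): the same
 * two-level uni2char() lookup as above, self-contained and compilable with
 * any C99 compiler. The high byte of the code point selects a 256-entry
 * page, the low byte indexes into it, and a zero entry means "no mapping".
 * The table contents here are a tiny demo subset, not the full ISO 8859-5
 * charset.
 */
#include <stdio.h>

static const unsigned char demo_page04[256] = {
	[0x10] = 0xb0,	/* U+0410 CYRILLIC CAPITAL LETTER A -> 0xb0 */
	[0x30] = 0xd0,	/* U+0430 CYRILLIC SMALL LETTER A   -> 0xd0 */
};

static const unsigned char *const demo_pages[256] = {
	[0x04] = demo_page04,
};

static int demo_uni2char(unsigned short uni, unsigned char *out)
{
	const unsigned char *page = demo_pages[uni >> 8];

	if (page && page[uni & 0xff]) {
		*out = page[uni & 0xff];
		return 1;
	}
	return -1;	/* the kernel version returns -EINVAL here */
}

int main(void)
{
	unsigned char c;

	if (demo_uni2char(0x0410, &c) == 1)
		printf("U+0410 -> 0x%02x\n", c);	/* prints 0xb0 */
	return 0;
}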
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UTSNAME_H #define _LINUX_UTSNAME_H #include <linux/sched.h> #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/err.h> #include <uapi/linux/utsname.h> enum uts_proc { UTS_PROC_ARCH, UTS_PROC_OSTYPE, UTS_PROC_OSRELEASE, UTS_PROC_VERSION, UTS_PROC_HOSTNAME, UTS_PROC_DOMAINNAME, }; struct user_namespace; extern struct user_namespace init_user_ns; struct uts_namespace { struct new_utsname name; struct user_namespace *user_ns; struct ucounts *ucounts; struct ns_common ns; } __randomize_layout; extern struct uts_namespace init_uts_ns; #ifdef CONFIG_UTS_NS static inline void get_uts_ns(struct uts_namespace *ns) { refcount_inc(&ns->ns.count); } extern struct uts_namespace *copy_utsname(unsigned long flags, struct user_namespace *user_ns, struct uts_namespace *old_ns); extern void free_uts_ns(struct uts_namespace *ns); static inline void put_uts_ns(struct uts_namespace *ns) { if (refcount_dec_and_test(&ns->ns.count)) free_uts_ns(ns); } void uts_ns_init(void); #else static inline void get_uts_ns(struct uts_namespace *ns) { } static inline void put_uts_ns(struct uts_namespace *ns) { } static inline struct uts_namespace *copy_utsname(unsigned long flags, struct user_namespace *user_ns, struct uts_namespace *old_ns) { if (flags & CLONE_NEWUTS) return ERR_PTR(-EINVAL); return old_ns; } static inline void uts_ns_init(void) { } #endif #ifdef CONFIG_PROC_SYSCTL extern void uts_proc_notify(enum uts_proc proc); #else static inline void uts_proc_notify(enum uts_proc proc) { } #endif static inline struct new_utsname *utsname(void) { return &current->nsproxy->uts_ns->name; } static inline struct new_utsname *init_utsname(void) { return &init_uts_ns.name; } extern struct rw_semaphore uts_sem; #endif /* _LINUX_UTSNAME_H */
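/*
 * Illustrative userspace sketch (not part of the kernel sources): what
 * copy_utsname() enables. With CONFIG_UTS_NS, unshare(CLONE_NEWUTS) gives
 * the caller a private struct uts_namespace, so sethostname() no longer
 * affects the parent namespace; without it, the stub above makes the same
 * call fail with -EINVAL. Needs CAP_SYS_ADMIN (e.g. run as root).
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname u;

	if (unshare(CLONE_NEWUTS)) {	/* EINVAL if !CONFIG_UTS_NS */
		perror("unshare");
		return 1;
	}
	sethostname("sandbox", strlen("sandbox"));
	uname(&u);	/* reads utsname() of the current namespace */
	printf("nodename in new UTS ns: %s\n", u.nodename);
	return 0;
}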
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef DRIVERS_PCI_H #define DRIVERS_PCI_H #include <linux/pci.h> struct pcie_tlp_log; /* Number of possible devfns: 0.0 to 1f.7 inclusive */ #define MAX_NR_DEVFNS 256 #define MAX_NR_LANES 16 #define PCI_FIND_CAP_TTL 48 #define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */ #define PCIE_LINK_RETRAIN_TIMEOUT_MS 1000 /* * Power stable to PERST# inactive. * * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol * "T_PVPERL". */ #define PCIE_T_PVPERL_MS 100 /* * REFCLK stable before PERST# inactive. * * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol * "T_PERST-CLK". */ #define PCIE_T_PERST_CLK_US 100 /* * End of conventional reset (PERST# de-asserted) to first configuration * request (device able to respond with a "Request Retry Status" completion), * from PCIe r6.0, sec 6.6.1. */ #define PCIE_T_RRS_READY_MS 100 /* * PCIe r6.0, sec 5.3.3.2.1 <PME Synchronization> * Recommends 1ms to 10ms timeout to check L2 ready. */ #define PCIE_PME_TO_L2_TIMEOUT_US 10000 /* * PCIe r6.0, sec 6.6.1 <Conventional Reset> * * - "With a Downstream Port that does not support Link speeds greater * than 5.0 GT/s, software must wait a minimum of 100 ms following exit * from a Conventional Reset before sending a Configuration Request to * the device immediately below that Port." * * - "With a Downstream Port that supports Link speeds greater than * 5.0 GT/s, software must wait a minimum of 100 ms after Link training * completes before sending a Configuration Request to the device * immediately below that Port." 
*/ #define PCIE_RESET_CONFIG_DEVICE_WAIT_MS 100 /* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */ #define PCIE_MSG_TYPE_R_RC 0 #define PCIE_MSG_TYPE_R_ADDR 1 #define PCIE_MSG_TYPE_R_ID 2 #define PCIE_MSG_TYPE_R_BC 3 #define PCIE_MSG_TYPE_R_LOCAL 4 #define PCIE_MSG_TYPE_R_GATHER 5 /* Power Management Messages; PCIe r6.0, sec 2.2.8.2 */ #define PCIE_MSG_CODE_PME_TURN_OFF 0x19 /* INTx Mechanism Messages; PCIe r6.0, sec 2.2.8.1 */ #define PCIE_MSG_CODE_ASSERT_INTA 0x20 #define PCIE_MSG_CODE_ASSERT_INTB 0x21 #define PCIE_MSG_CODE_ASSERT_INTC 0x22 #define PCIE_MSG_CODE_ASSERT_INTD 0x23 #define PCIE_MSG_CODE_DEASSERT_INTA 0x24 #define PCIE_MSG_CODE_DEASSERT_INTB 0x25 #define PCIE_MSG_CODE_DEASSERT_INTC 0x26 #define PCIE_MSG_CODE_DEASSERT_INTD 0x27 extern const unsigned char pcie_link_speed[]; extern bool pci_early_dump; bool pcie_cap_has_lnkctl(const struct pci_dev *dev); bool pcie_cap_has_lnkctl2(const struct pci_dev *dev); bool pcie_cap_has_rtctl(const struct pci_dev *dev); /* Functions internal to the PCI core code */ #ifdef CONFIG_DMI extern const struct attribute_group pci_dev_smbios_attr_group; #endif enum pci_mmap_api { PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */ }; int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai, enum pci_mmap_api mmap_api); bool pci_reset_supported(struct pci_dev *dev); void pci_init_reset_methods(struct pci_dev *dev); int pci_bridge_secondary_bus_reset(struct pci_dev *dev); int pci_bus_error_reset(struct pci_dev *dev); int __pci_reset_bus(struct pci_bus *bus); struct pci_cap_saved_data { u16 cap_nr; bool cap_extended; unsigned int size; u32 data[]; }; struct pci_cap_saved_state { struct hlist_node next; struct pci_cap_saved_data cap; }; void pci_allocate_cap_save_buffers(struct pci_dev *dev); void pci_free_cap_save_buffers(struct pci_dev *dev); int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size); int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size); struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap); #define PCI_PM_D2_DELAY 200 /* usec; see PCIe r4.0, sec 5.9.1 */ #define PCI_PM_D3HOT_WAIT 10 /* msec */ #define PCI_PM_D3COLD_WAIT 100 /* msec */ void pci_update_current_state(struct pci_dev *dev, pci_power_t state); void pci_refresh_power_state(struct pci_dev *dev); int pci_power_up(struct pci_dev *dev); void pci_disable_enabled_device(struct pci_dev *dev); int pci_finish_runtime_suspend(struct pci_dev *dev); void pcie_clear_device_status(struct pci_dev *dev); void pcie_clear_root_pme_status(struct pci_dev *dev); bool pci_check_pme_status(struct pci_dev *dev); void pci_pme_wakeup_bus(struct pci_bus *bus); void pci_pme_restore(struct pci_dev *dev); bool pci_dev_need_resume(struct pci_dev *dev); void pci_dev_adjust_pme(struct pci_dev *dev); void pci_dev_complete_resume(struct pci_dev *pci_dev); void pci_config_pm_runtime_get(struct pci_dev *dev); void pci_config_pm_runtime_put(struct pci_dev *dev); void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev); void pci_pm_init(struct pci_dev *dev); void pci_ea_init(struct pci_dev *dev); void pci_msi_init(struct pci_dev *dev); void pci_msix_init(struct pci_dev *dev); bool pci_bridge_d3_possible(struct pci_dev *dev); void pci_bridge_d3_update(struct pci_dev *dev); int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type); static inline 
bool pci_bus_rrs_vendor_id(u32 l) { return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG; } static inline void pci_wakeup_event(struct pci_dev *dev) { /* Wait 100 ms before the system can be put into a sleep state. */ pm_wakeup_event(&dev->dev, 100); } /** * pci_bar_index_is_valid - Check whether a BAR index is within valid range * @bar: BAR index * * Protects against overflowing &struct pci_dev.resource array. * * Return: true for valid index, false otherwise. */ static inline bool pci_bar_index_is_valid(int bar) { if (bar >= 0 && bar < PCI_NUM_RESOURCES) return true; return false; } static inline bool pci_has_subordinate(struct pci_dev *pci_dev) { return !!(pci_dev->subordinate); } static inline bool pci_power_manageable(struct pci_dev *pci_dev) { /* * Currently we allow normal PCI devices and PCI bridges transition * into D3 if their bridge_d3 is set. */ return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3; } static inline bool pcie_downstream_port(const struct pci_dev *dev) { int type = pci_pcie_type(dev); return type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_DOWNSTREAM || type == PCI_EXP_TYPE_PCIE_BRIDGE; } void pci_vpd_init(struct pci_dev *dev); extern const struct attribute_group pci_dev_vpd_attr_group; /* PCI Virtual Channel */ int pci_save_vc_state(struct pci_dev *dev); void pci_restore_vc_state(struct pci_dev *dev); void pci_allocate_vc_save_buffers(struct pci_dev *dev); /* PCI /proc functions */ #ifdef CONFIG_PROC_FS int pci_proc_attach_device(struct pci_dev *dev); int pci_proc_detach_device(struct pci_dev *dev); int pci_proc_detach_bus(struct pci_bus *bus); #else static inline int pci_proc_attach_device(struct pci_dev *dev) { return 0; } static inline int pci_proc_detach_device(struct pci_dev *dev) { return 0; } static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; } #endif /* Functions for PCI Hotplug drivers to use */ int pci_hp_add_bridge(struct pci_dev *dev); bool pci_hp_spurious_link_change(struct pci_dev *pdev); #if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY) void pci_create_legacy_files(struct pci_bus *bus); void pci_remove_legacy_files(struct pci_bus *bus); #else static inline void pci_create_legacy_files(struct pci_bus *bus) { } static inline void pci_remove_legacy_files(struct pci_bus *bus) { } #endif /* Lock for read/write access to pci device and bus lists */ extern struct rw_semaphore pci_bus_sem; extern struct mutex pci_slot_mutex; extern raw_spinlock_t pci_lock; extern unsigned int pci_pm_d3hot_delay; #ifdef CONFIG_PCI_MSI void pci_no_msi(void); #else static inline void pci_no_msi(void) { } #endif void pci_realloc_get_opt(char *); static inline int pci_no_d1d2(struct pci_dev *dev) { unsigned int parent_dstates = 0; if (dev->bus->self) parent_dstates = dev->bus->self->no_d1d2; return (dev->no_d1d2 || parent_dstates); } #ifdef CONFIG_SYSFS int pci_create_sysfs_dev_files(struct pci_dev *pdev); void pci_remove_sysfs_dev_files(struct pci_dev *pdev); extern const struct attribute_group *pci_dev_groups[]; extern const struct attribute_group *pci_dev_attr_groups[]; extern const struct attribute_group *pcibus_groups[]; extern const struct attribute_group *pci_bus_groups[]; extern const struct attribute_group pci_doe_sysfs_group; #else static inline int pci_create_sysfs_dev_files(struct pci_dev *pdev) { return 0; } static inline void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { } #define pci_dev_groups NULL #define pci_dev_attr_groups NULL #define pcibus_groups NULL #define pci_bus_groups NULL #endif extern unsigned long 
pci_hotplug_io_size; extern unsigned long pci_hotplug_mmio_size; extern unsigned long pci_hotplug_mmio_pref_size; extern unsigned long pci_hotplug_bus_size; extern unsigned long pci_cardbus_io_size; extern unsigned long pci_cardbus_mem_size; /** * pci_match_one_device - Tell if a PCI device structure has a matching * PCI device id structure * @id: single PCI device id structure to match * @dev: the PCI device structure to match against * * Returns the matching pci_device_id structure or %NULL if there is no match. */ static inline const struct pci_device_id * pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev) { if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) && (id->device == PCI_ANY_ID || id->device == dev->device) && (id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) && (id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device) && !((id->class ^ dev->class) & id->class_mask)) return id; return NULL; } /* PCI slot sysfs helper code */ #define to_pci_slot(s) container_of(s, struct pci_slot, kobj) extern struct kset *pci_slots_kset; struct pci_slot_attribute { struct attribute attr; ssize_t (*show)(struct pci_slot *, char *); ssize_t (*store)(struct pci_slot *, const char *, size_t); }; #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) enum pci_bar_type { pci_bar_unknown, /* Standard PCI BAR probe */ pci_bar_io, /* An I/O port BAR */ pci_bar_mem32, /* A 32-bit memory BAR */ pci_bar_mem64, /* A 64-bit memory BAR */ }; struct device *pci_get_host_bridge_device(struct pci_dev *dev); void pci_put_host_bridge_device(struct device *dev); unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge); int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type); int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); int pci_configure_extended_tags(struct pci_dev *dev, void *ign); bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout); bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout); int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout); int pci_setup_device(struct pci_dev *dev); void __pci_size_stdbars(struct pci_dev *dev, int count, unsigned int pos, u32 *sizes); int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int reg, u32 *sizes); void pci_configure_ari(struct pci_dev *dev); void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head); void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *realloc_head, struct list_head *fail_head); bool pci_bus_clip_resource(struct pci_dev *dev, int idx); void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata); const char *pci_resource_name(struct pci_dev *dev, unsigned int i); bool pci_resource_is_optional(const struct pci_dev *dev, int resno); /** * pci_resource_num - Reverse lookup resource number from device resources * @dev: PCI device * @res: Resource to lookup index for (MUST be a @dev's resource) * * Perform reverse lookup to determine the resource number for @res within * @dev resource array. NOTE: The caller is responsible for ensuring @res is * among @dev's resources! * * Returns: resource number. 
*/ static inline int pci_resource_num(const struct pci_dev *dev, const struct resource *res) { int resno = res - &dev->resource[0]; /* Passing a resource that is not among dev's resources? */ WARN_ON_ONCE(resno >= PCI_NUM_RESOURCES); return resno; } void pci_reassigndev_resource_alignment(struct pci_dev *dev); void pci_disable_bridge_window(struct pci_dev *dev); struct pci_bus *pci_bus_get(struct pci_bus *bus); void pci_bus_put(struct pci_bus *bus); #define PCIE_LNKCAP_SLS2SPEED(lnkcap) \ ({ \ ((lnkcap) == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ (lnkcap) == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \ (lnkcap) == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \ (lnkcap) == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \ (lnkcap) == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \ (lnkcap) == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ PCI_SPEED_UNKNOWN); \ }) /* PCIe link information from Link Capabilities 2 */ #define PCIE_LNKCAP2_SLS2SPEED(lnkcap2) \ ((lnkcap2) & PCI_EXP_LNKCAP2_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_32_0GB ? PCIE_SPEED_32_0GT : \ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_16_0GB ? PCIE_SPEED_16_0GT : \ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_8_0GB ? PCIE_SPEED_8_0GT : \ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_5_0GB ? PCIE_SPEED_5_0GT : \ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ PCI_SPEED_UNKNOWN) #define PCIE_LNKCTL2_TLS2SPEED(lnkctl2) \ ((lnkctl2) == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \ PCI_SPEED_UNKNOWN) /* PCIe speed to Mb/s reduced by encoding overhead */ #define PCIE_SPEED2MBS_ENC(speed) \ ((speed) == PCIE_SPEED_64_0GT ? 64000*1/1 : \ (speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \ (speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \ 0) static inline int pcie_dev_speed_mbps(enum pci_bus_speed speed) { switch (speed) { case PCIE_SPEED_2_5GT: return 2500; case PCIE_SPEED_5_0GT: return 5000; case PCIE_SPEED_8_0GT: return 8000; case PCIE_SPEED_16_0GT: return 16000; case PCIE_SPEED_32_0GT: return 32000; case PCIE_SPEED_64_0GT: return 64000; default: break; } return -EINVAL; } u8 pcie_get_supported_speeds(struct pci_dev *dev); const char *pci_speed_string(enum pci_bus_speed speed); void __pcie_print_link_status(struct pci_dev *dev, bool verbose); void pcie_report_downtraining(struct pci_dev *dev); static inline void __pcie_update_link_speed(struct pci_bus *bus, u16 linksta, u16 linksta2) { bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS]; bus->flit_mode = (linksta2 & PCI_EXP_LNKSTA2_FLIT) ? 
1 : 0; } void pcie_update_link_speed(struct pci_bus *bus); /* Single Root I/O Virtualization */ struct pci_sriov { int pos; /* Capability position */ int nres; /* Number of resources */ u32 cap; /* SR-IOV Capabilities */ u16 ctrl; /* SR-IOV Control */ u16 total_VFs; /* Total VFs associated with the PF */ u16 initial_VFs; /* Initial VFs associated with the PF */ u16 num_VFs; /* Number of VFs available */ u16 offset; /* First VF Routing ID offset */ u16 stride; /* Following VF stride */ u16 vf_device; /* VF device ID */ u32 pgsz; /* Page size for BAR alignment */ u8 link; /* Function Dependency Link */ u8 max_VF_buses; /* Max buses consumed by VFs */ u16 driver_max_VFs; /* Max num VFs driver supports */ struct pci_dev *dev; /* Lowest numbered PF */ struct pci_dev *self; /* This PF */ u32 class; /* VF device */ u8 hdr_type; /* VF header type */ u16 subsystem_vendor; /* VF subsystem vendor */ u16 subsystem_device; /* VF subsystem device */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ bool drivers_autoprobe; /* Auto probing of VFs by driver */ }; #ifdef CONFIG_PCI_DOE void pci_doe_init(struct pci_dev *pdev); void pci_doe_destroy(struct pci_dev *pdev); void pci_doe_disconnected(struct pci_dev *pdev); #else static inline void pci_doe_init(struct pci_dev *pdev) { } static inline void pci_doe_destroy(struct pci_dev *pdev) { } static inline void pci_doe_disconnected(struct pci_dev *pdev) { } #endif #ifdef CONFIG_PCI_NPEM void pci_npem_create(struct pci_dev *dev); void pci_npem_remove(struct pci_dev *dev); #else static inline void pci_npem_create(struct pci_dev *dev) { } static inline void pci_npem_remove(struct pci_dev *dev) { } #endif #if defined(CONFIG_PCI_DOE) && defined(CONFIG_SYSFS) void pci_doe_sysfs_init(struct pci_dev *pci_dev); void pci_doe_sysfs_teardown(struct pci_dev *pdev); #else static inline void pci_doe_sysfs_init(struct pci_dev *pdev) { } static inline void pci_doe_sysfs_teardown(struct pci_dev *pdev) { } #endif /** * pci_dev_set_io_state - Set the new error state if possible. * * @dev: PCI device to set new error_state * @new: the state we want dev to be in * * If the device is experiencing perm_failure, it has to remain in that state. * Any other transition is allowed. * * Returns true if state has been changed to the requested state. 
*/ static inline bool pci_dev_set_io_state(struct pci_dev *dev, pci_channel_state_t new) { pci_channel_state_t old; switch (new) { case pci_channel_io_perm_failure: xchg(&dev->error_state, pci_channel_io_perm_failure); return true; case pci_channel_io_frozen: old = cmpxchg(&dev->error_state, pci_channel_io_normal, pci_channel_io_frozen); return old != pci_channel_io_perm_failure; case pci_channel_io_normal: old = cmpxchg(&dev->error_state, pci_channel_io_frozen, pci_channel_io_normal); return old != pci_channel_io_perm_failure; default: return false; } } static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) { pci_dev_set_io_state(dev, pci_channel_io_perm_failure); pci_doe_disconnected(dev); return 0; } /* pci_dev priv_flags */ #define PCI_DEV_ADDED 0 #define PCI_DPC_RECOVERED 1 #define PCI_DPC_RECOVERING 2 #define PCI_DEV_REMOVED 3 #define PCI_LINK_CHANGED 4 #define PCI_LINK_CHANGING 5 #define PCI_LINK_LBMS_SEEN 6 #define PCI_DEV_ALLOW_BINDING 7 static inline void pci_dev_assign_added(struct pci_dev *dev) { smp_mb__before_atomic(); set_bit(PCI_DEV_ADDED, &dev->priv_flags); smp_mb__after_atomic(); } static inline bool pci_dev_test_and_clear_added(struct pci_dev *dev) { return test_and_clear_bit(PCI_DEV_ADDED, &dev->priv_flags); } static inline bool pci_dev_is_added(const struct pci_dev *dev) { return test_bit(PCI_DEV_ADDED, &dev->priv_flags); } static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev) { return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags); } static inline void pci_dev_allow_binding(struct pci_dev *dev) { set_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags); } static inline bool pci_dev_binding_disallowed(struct pci_dev *dev) { return !test_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags); } #ifdef CONFIG_PCIEAER #include <linux/aer.h> #define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */ struct aer_err_info { struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES]; int ratelimit_print[AER_MAX_MULTI_ERR_DEVICES]; int error_dev_num; const char *level; /* printk level */ unsigned int id:16; unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */ unsigned int root_ratelimit_print:1; /* 0=skip, 1=print */ unsigned int __pad1:4; unsigned int multi_error_valid:1; unsigned int first_error:5; unsigned int __pad2:2; unsigned int tlp_header_valid:1; unsigned int status; /* COR/UNCOR Error Status */ unsigned int mask; /* COR/UNCOR Error Mask */ struct pcie_tlp_log tlp; /* TLP Header */ }; int aer_get_device_error_info(struct aer_err_info *info, int i); void aer_print_error(struct aer_err_info *info, int i); int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2, unsigned int tlp_len, bool flit, struct pcie_tlp_log *log); unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc); void pcie_print_tlp_log(const struct pci_dev *dev, const struct pcie_tlp_log *log, const char *level, const char *pfx); #endif /* CONFIG_PCIEAER */ #ifdef CONFIG_PCIEPORTBUS /* Cached RCEC Endpoint Association */ struct rcec_ea { u8 nextbusn; u8 lastbusn; u32 bitmap; }; #endif #ifdef CONFIG_PCIE_DPC void pci_save_dpc_state(struct pci_dev *dev); void pci_restore_dpc_state(struct pci_dev *dev); void pci_dpc_init(struct pci_dev *pdev); void dpc_process_error(struct pci_dev *pdev); pci_ers_result_t dpc_reset_link(struct pci_dev *pdev); bool pci_dpc_recovered(struct pci_dev *pdev); unsigned int dpc_tlp_log_len(struct pci_dev *dev); #else static inline void pci_save_dpc_state(struct pci_dev *dev) { } static inline void pci_restore_dpc_state(struct pci_dev *dev) { } 
static inline void pci_dpc_init(struct pci_dev *pdev) { } static inline bool pci_dpc_recovered(struct pci_dev *pdev) { return false; } #endif #ifdef CONFIG_PCIEPORTBUS void pci_rcec_init(struct pci_dev *dev); void pci_rcec_exit(struct pci_dev *dev); void pcie_link_rcec(struct pci_dev *rcec); void pcie_walk_rcec(struct pci_dev *rcec, int (*cb)(struct pci_dev *, void *), void *userdata); #else static inline void pci_rcec_init(struct pci_dev *dev) { } static inline void pci_rcec_exit(struct pci_dev *dev) { } static inline void pcie_link_rcec(struct pci_dev *rcec) { } static inline void pcie_walk_rcec(struct pci_dev *rcec, int (*cb)(struct pci_dev *, void *), void *userdata) { } #endif #ifdef CONFIG_PCI_ATS /* Address Translation Service */ void pci_ats_init(struct pci_dev *dev); void pci_restore_ats_state(struct pci_dev *dev); #else static inline void pci_ats_init(struct pci_dev *d) { } static inline void pci_restore_ats_state(struct pci_dev *dev) { } #endif /* CONFIG_PCI_ATS */ #ifdef CONFIG_PCI_PRI void pci_pri_init(struct pci_dev *dev); void pci_restore_pri_state(struct pci_dev *pdev); #else static inline void pci_pri_init(struct pci_dev *dev) { } static inline void pci_restore_pri_state(struct pci_dev *pdev) { } #endif #ifdef CONFIG_PCI_PASID void pci_pasid_init(struct pci_dev *dev); void pci_restore_pasid_state(struct pci_dev *pdev); #else static inline void pci_pasid_init(struct pci_dev *dev) { } static inline void pci_restore_pasid_state(struct pci_dev *pdev) { } #endif #ifdef CONFIG_PCI_IOV int pci_iov_init(struct pci_dev *dev); void pci_iov_release(struct pci_dev *dev); void pci_iov_remove(struct pci_dev *dev); void pci_iov_update_resource(struct pci_dev *dev, int resno); resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); void pci_restore_iov_state(struct pci_dev *dev); int pci_iov_bus_range(struct pci_bus *bus); static inline bool pci_resource_is_iov(int resno) { return resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END; } extern const struct attribute_group sriov_pf_dev_attr_group; extern const struct attribute_group sriov_vf_dev_attr_group; #else static inline int pci_iov_init(struct pci_dev *dev) { return -ENODEV; } static inline void pci_iov_release(struct pci_dev *dev) { } static inline void pci_iov_remove(struct pci_dev *dev) { } static inline void pci_iov_update_resource(struct pci_dev *dev, int resno) { } static inline resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) { return 0; } static inline void pci_restore_iov_state(struct pci_dev *dev) { } static inline int pci_iov_bus_range(struct pci_bus *bus) { return 0; } static inline bool pci_resource_is_iov(int resno) { return false; } #endif /* CONFIG_PCI_IOV */ #ifdef CONFIG_PCIE_TPH void pci_restore_tph_state(struct pci_dev *dev); void pci_save_tph_state(struct pci_dev *dev); void pci_no_tph(void); void pci_tph_init(struct pci_dev *dev); #else static inline void pci_restore_tph_state(struct pci_dev *dev) { } static inline void pci_save_tph_state(struct pci_dev *dev) { } static inline void pci_no_tph(void) { } static inline void pci_tph_init(struct pci_dev *dev) { } #endif #ifdef CONFIG_PCIE_PTM void pci_ptm_init(struct pci_dev *dev); void pci_save_ptm_state(struct pci_dev *dev); void pci_restore_ptm_state(struct pci_dev *dev); void pci_suspend_ptm(struct pci_dev *dev); void pci_resume_ptm(struct pci_dev *dev); #else static inline void pci_ptm_init(struct pci_dev *dev) { } static inline void pci_save_ptm_state(struct pci_dev *dev) { } static inline void 
pci_restore_ptm_state(struct pci_dev *dev) { } static inline void pci_suspend_ptm(struct pci_dev *dev) { } static inline void pci_resume_ptm(struct pci_dev *dev) { } #endif unsigned long pci_cardbus_resource_alignment(struct resource *); static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, struct resource *res) { int resno = pci_resource_num(dev, res); if (pci_resource_is_iov(resno)) return pci_sriov_resource_alignment(dev, resno); if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS) return pci_cardbus_resource_alignment(res); return resource_alignment(res); } void pci_acs_init(struct pci_dev *dev); #ifdef CONFIG_PCI_QUIRKS int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); int pci_dev_specific_enable_acs(struct pci_dev *dev); int pci_dev_specific_disable_acs_redir(struct pci_dev *dev); int pcie_failed_link_retrain(struct pci_dev *dev); #else static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) { return -ENOTTY; } static inline int pci_dev_specific_enable_acs(struct pci_dev *dev) { return -ENOTTY; } static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) { return -ENOTTY; } static inline int pcie_failed_link_retrain(struct pci_dev *dev) { return -ENOTTY; } #endif /* PCI error reporting and recovery */ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, pci_channel_state_t state, pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev)); bool pcie_wait_for_link(struct pci_dev *pdev, bool active); int pcie_retrain_link(struct pci_dev *pdev, bool use_lt); /* ASPM-related functionality we need even without CONFIG_PCIEASPM */ void pci_save_ltr_state(struct pci_dev *dev); void pci_restore_ltr_state(struct pci_dev *dev); void pci_configure_aspm_l1ss(struct pci_dev *dev); void pci_save_aspm_l1ss_state(struct pci_dev *dev); void pci_restore_aspm_l1ss_state(struct pci_dev *dev); #ifdef CONFIG_PCIEASPM void pcie_aspm_init_link_state(struct pci_dev *pdev); void pcie_aspm_exit_link_state(struct pci_dev *pdev); void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked); void pcie_aspm_powersave_config_link(struct pci_dev *pdev); void pci_configure_ltr(struct pci_dev *pdev); void pci_bridge_reconfigure_ltr(struct pci_dev *pdev); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { } static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { } static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { } static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { } static inline void pci_configure_ltr(struct pci_dev *pdev) { } static inline void pci_bridge_reconfigure_ltr(struct pci_dev *pdev) { } #endif #ifdef CONFIG_PCIE_ECRC void pcie_set_ecrc_checking(struct pci_dev *dev); void pcie_ecrc_get_policy(char *str); #else static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } static inline void pcie_ecrc_get_policy(char *str) { } #endif #ifdef CONFIG_PCIEPORTBUS void pcie_reset_lbms(struct pci_dev *port); #else static inline void pcie_reset_lbms(struct pci_dev *port) {} #endif struct pci_dev_reset_methods { u16 vendor; u16 device; int (*reset)(struct pci_dev *dev, bool probe); }; struct pci_reset_fn_method { int (*reset_fn)(struct pci_dev *pdev, bool probe); char *name; }; extern const struct pci_reset_fn_method pci_reset_fn_methods[]; #ifdef CONFIG_PCI_QUIRKS int pci_dev_specific_reset(struct pci_dev *dev, bool probe); #else static inline int pci_dev_specific_reset(struct pci_dev *dev, bool probe) { return -ENOTTY; } #endif #if 
defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64) int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment, struct resource *res); #else static inline int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment, struct resource *res) { return -ENODEV; } #endif void pci_rebar_init(struct pci_dev *pdev); int pci_rebar_get_current_size(struct pci_dev *pdev, int bar); int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size); static inline u64 pci_rebar_size_to_bytes(int size) { return 1ULL << (size + 20); } struct device_node; #define PCI_EQ_RESV 0xff enum equalization_preset_type { EQ_PRESET_TYPE_8GTS, EQ_PRESET_TYPE_16GTS, EQ_PRESET_TYPE_32GTS, EQ_PRESET_TYPE_64GTS, EQ_PRESET_TYPE_MAX }; struct pci_eq_presets { u16 eq_presets_8gts[MAX_NR_LANES]; u8 eq_presets_Ngts[EQ_PRESET_TYPE_MAX - 1][MAX_NR_LANES]; }; #ifdef CONFIG_OF int of_get_pci_domain_nr(struct device_node *node); int of_pci_get_max_link_speed(struct device_node *node); u32 of_pci_get_slot_power_limit(struct device_node *node, u8 *slot_power_limit_value, u8 *slot_power_limit_scale); bool of_pci_preserve_config(struct device_node *node); int pci_set_of_node(struct pci_dev *dev); void pci_release_of_node(struct pci_dev *dev); void pci_set_bus_of_node(struct pci_bus *bus); void pci_release_bus_of_node(struct pci_bus *bus); int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge); bool of_pci_supply_present(struct device_node *np); int of_pci_get_equalization_presets(struct device *dev, struct pci_eq_presets *presets, int num_lanes); #else static inline int of_get_pci_domain_nr(struct device_node *node) { return -1; } static inline int of_pci_get_max_link_speed(struct device_node *node) { return -EINVAL; } static inline u32 of_pci_get_slot_power_limit(struct device_node *node, u8 *slot_power_limit_value, u8 *slot_power_limit_scale) { if (slot_power_limit_value) *slot_power_limit_value = 0; if (slot_power_limit_scale) *slot_power_limit_scale = 0; return 0; } static inline bool of_pci_preserve_config(struct device_node *node) { return false; } static inline int pci_set_of_node(struct pci_dev *dev) { return 0; } static inline void pci_release_of_node(struct pci_dev *dev) { } static inline void pci_set_bus_of_node(struct pci_bus *bus) { } static inline void pci_release_bus_of_node(struct pci_bus *bus) { } static inline int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge) { return 0; } static inline bool of_pci_supply_present(struct device_node *np) { return false; } static inline int of_pci_get_equalization_presets(struct device *dev, struct pci_eq_presets *presets, int num_lanes) { presets->eq_presets_8gts[0] = PCI_EQ_RESV; for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++) presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV; return 0; } #endif /* CONFIG_OF */ struct of_changeset; #ifdef CONFIG_PCI_DYNAMIC_OF_NODES void of_pci_make_dev_node(struct pci_dev *pdev); void of_pci_remove_node(struct pci_dev *pdev); int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np); void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge); void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge); int of_pci_add_host_bridge_properties(struct pci_host_bridge *bridge, struct of_changeset *ocs, struct device_node *np); #else static inline void of_pci_make_dev_node(struct pci_dev *pdev) { } static inline void of_pci_remove_node(struct pci_dev *pdev) { } static inline void of_pci_make_host_bridge_node(struct 
pci_host_bridge *bridge) { } static inline void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge) { } #endif #ifdef CONFIG_PCIEAER void pci_no_aer(void); void pci_aer_init(struct pci_dev *dev); void pci_aer_exit(struct pci_dev *dev); extern const struct attribute_group aer_stats_attr_group; extern const struct attribute_group aer_attr_group; void pci_aer_clear_fatal_status(struct pci_dev *dev); int pci_aer_clear_status(struct pci_dev *dev); int pci_aer_raw_clear_status(struct pci_dev *dev); void pci_save_aer_state(struct pci_dev *dev); void pci_restore_aer_state(struct pci_dev *dev); #else static inline void pci_no_aer(void) { } static inline void pci_aer_init(struct pci_dev *d) { } static inline void pci_aer_exit(struct pci_dev *d) { } static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { } static inline int pci_aer_clear_status(struct pci_dev *dev) { return -EINVAL; } static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL; } static inline void pci_save_aer_state(struct pci_dev *dev) { } static inline void pci_restore_aer_state(struct pci_dev *dev) { } #endif #ifdef CONFIG_ACPI bool pci_acpi_preserve_config(struct pci_host_bridge *bridge); int pci_acpi_program_hp_params(struct pci_dev *dev); extern const struct attribute_group pci_dev_acpi_attr_group; void pci_set_acpi_fwnode(struct pci_dev *dev); int pci_dev_acpi_reset(struct pci_dev *dev, bool probe); bool acpi_pci_power_manageable(struct pci_dev *dev); bool acpi_pci_bridge_d3(struct pci_dev *dev); int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t acpi_pci_get_power_state(struct pci_dev *dev); void acpi_pci_refresh_power_state(struct pci_dev *dev); int acpi_pci_wakeup(struct pci_dev *dev, bool enable); bool acpi_pci_need_resume(struct pci_dev *dev); pci_power_t acpi_pci_choose_state(struct pci_dev *pdev); #else static inline bool pci_acpi_preserve_config(struct pci_host_bridge *bridge) { return false; } static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe) { return -ENOTTY; } static inline void pci_set_acpi_fwnode(struct pci_dev *dev) { } static inline int pci_acpi_program_hp_params(struct pci_dev *dev) { return -ENODEV; } static inline bool acpi_pci_power_manageable(struct pci_dev *dev) { return false; } static inline bool acpi_pci_bridge_d3(struct pci_dev *dev) { return false; } static inline int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return -ENODEV; } static inline pci_power_t acpi_pci_get_power_state(struct pci_dev *dev) { return PCI_UNKNOWN; } static inline void acpi_pci_refresh_power_state(struct pci_dev *dev) { } static inline int acpi_pci_wakeup(struct pci_dev *dev, bool enable) { return -ENODEV; } static inline bool acpi_pci_need_resume(struct pci_dev *dev) { return false; } static inline pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) { return PCI_POWER_ERROR; } #endif #ifdef CONFIG_PCIEASPM extern const struct attribute_group aspm_ctrl_attr_group; #endif #ifdef CONFIG_X86_INTEL_MID bool pci_use_mid_pm(void); int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); pci_power_t mid_pci_get_power_state(struct pci_dev *pdev); #else static inline bool pci_use_mid_pm(void) { return false; } static inline int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state) { return -ENODEV; } static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev) { return PCI_UNKNOWN; } #endif #ifdef CONFIG_PCI_MSI int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, 
u16 tag);
#else
static inline int pci_msix_write_tph_tag(struct pci_dev *pdev,
					 unsigned int index, u16 tag)
{
	return -ENODEV;
}
#endif

/*
 * Config Address for PCI Configuration Mechanism #1
 *
 * See PCI Local Bus Specification, Revision 3.0,
 * Section 3.2.2.3.2, Figure 3-2, p. 50.
 */

#define PCI_CONF1_BUS_SHIFT	16 /* Bus number */
#define PCI_CONF1_DEV_SHIFT	11 /* Device number */
#define PCI_CONF1_FUNC_SHIFT	8  /* Function number */

#define PCI_CONF1_BUS_MASK	0xff
#define PCI_CONF1_DEV_MASK	0x1f
#define PCI_CONF1_FUNC_MASK	0x7
#define PCI_CONF1_REG_MASK	0xfc /* Limit aligned offset to a maximum of 256B */

#define PCI_CONF1_ENABLE	BIT(31)
#define PCI_CONF1_BUS(x)	(((x) & PCI_CONF1_BUS_MASK) << PCI_CONF1_BUS_SHIFT)
#define PCI_CONF1_DEV(x)	(((x) & PCI_CONF1_DEV_MASK) << PCI_CONF1_DEV_SHIFT)
#define PCI_CONF1_FUNC(x)	(((x) & PCI_CONF1_FUNC_MASK) << PCI_CONF1_FUNC_SHIFT)
#define PCI_CONF1_REG(x)	((x) & PCI_CONF1_REG_MASK)

#define PCI_CONF1_ADDRESS(bus, dev, func, reg) \
	(PCI_CONF1_ENABLE | \
	 PCI_CONF1_BUS(bus) | \
	 PCI_CONF1_DEV(dev) | \
	 PCI_CONF1_FUNC(func) | \
	 PCI_CONF1_REG(reg))

/*
 * Extension of PCI Config Address for accessing extended PCIe registers
 *
 * No standardized specification, but used on a lot of non-ECAM-compliant ARM
 * SoCs and on AMD Barcelona and newer CPUs. Reserved bits [27:24] of the PCI
 * Config Address are used for specifying the additional 4 high bits of the
 * PCI Express register number.
 */
#define PCI_CONF1_EXT_REG_SHIFT	16
#define PCI_CONF1_EXT_REG_MASK	0xf00
#define PCI_CONF1_EXT_REG(x)	(((x) & PCI_CONF1_EXT_REG_MASK) << PCI_CONF1_EXT_REG_SHIFT)

#define PCI_CONF1_EXT_ADDRESS(bus, dev, func, reg) \
	(PCI_CONF1_ADDRESS(bus, dev, func, reg) | \
	 PCI_CONF1_EXT_REG(reg))

#endif /* DRIVERS_PCI_H */
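/*
 * Editor's illustration (not part of the header above): a worked example of
 * how PCI_CONF1_ADDRESS() packs a bus/device/function/register tuple into
 * the 32-bit value written to I/O port 0xCF8 under Configuration Mechanism
 * #1. This is a standalone, hypothetical userspace sketch that re-derives
 * the macros; names such as conf1_address() do not exist in the kernel.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t conf1_address(uint32_t bus, uint32_t dev,
			      uint32_t func, uint32_t reg)
{
	return (1u << 31) |		/* PCI_CONF1_ENABLE */
	       ((bus & 0xff) << 16) |	/* PCI_CONF1_BUS()  */
	       ((dev & 0x1f) << 11) |	/* PCI_CONF1_DEV()  */
	       ((func & 0x7) << 8) |	/* PCI_CONF1_FUNC() */
	       (reg & 0xfc);		/* PCI_CONF1_REG()  */
}

int main(void)
{
	/* Device 03:04.1, config offset 0x10 (BAR0) -> prints 0x80032110 */
	printf("0x%08x\n", conf1_address(3, 4, 1, 0x10));
	return 0;
}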
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Randomness driver for virtio
 * Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
 */

#include <asm/barrier.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_rng.h>
#include <linux/module.h>
#include <linux/slab.h>

static DEFINE_IDA(rng_index_ida);

struct virtrng_info {
	struct hwrng hwrng;
	struct virtqueue *vq;
	char name[25];
	int index;
	bool hwrng_register_done;
	bool hwrng_removed;
	/* data transfer */
	struct completion have_data;
	unsigned int data_avail;
	unsigned int data_idx;
	/* minimal size returned by rng_buffer_size() */
#if SMP_CACHE_BYTES < 32
	u8 data[32];
#else
	u8 data[SMP_CACHE_BYTES];
#endif
};

static void random_recv_done(struct virtqueue *vq)
{
	struct virtrng_info *vi = vq->vdev->priv;
	unsigned int len;

	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
	if (!virtqueue_get_buf(vi->vq, &len))
		return;

	smp_store_release(&vi->data_avail, len);
	complete(&vi->have_data);
}

static void request_entropy(struct virtrng_info *vi)
{
	struct scatterlist sg;

	reinit_completion(&vi->have_data);
	vi->data_idx = 0;

	sg_init_one(&sg, vi->data, sizeof(vi->data));

	/* There should always be room for one buffer.
*/ virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL); virtqueue_kick(vi->vq); } static unsigned int copy_data(struct virtrng_info *vi, void *buf, unsigned int size) { size = min_t(unsigned int, size, vi->data_avail); memcpy(buf, vi->data + vi->data_idx, size); vi->data_idx += size; vi->data_avail -= size; if (vi->data_avail == 0) request_entropy(vi); return size; } static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) { int ret; struct virtrng_info *vi = (struct virtrng_info *)rng->priv; unsigned int chunk; size_t read; if (vi->hwrng_removed) return -ENODEV; read = 0; /* copy available data */ if (smp_load_acquire(&vi->data_avail)) { chunk = copy_data(vi, buf, size); size -= chunk; read += chunk; } if (!wait) return read; /* We have already copied available entropy, * so either size is 0 or data_avail is 0 */ while (size != 0) { /* data_avail is 0 but a request is pending */ ret = wait_for_completion_killable(&vi->have_data); if (ret < 0) return ret; /* if vi->data_avail is 0, we have been interrupted * by a cleanup, but buffer stays in the queue */ if (vi->data_avail == 0) return read; chunk = copy_data(vi, buf + read, size); size -= chunk; read += chunk; } return read; } static void virtio_cleanup(struct hwrng *rng) { struct virtrng_info *vi = (struct virtrng_info *)rng->priv; complete(&vi->have_data); } static int probe_common(struct virtio_device *vdev) { int err, index; struct virtrng_info *vi = NULL; vi = kzalloc(sizeof(struct virtrng_info), GFP_KERNEL); if (!vi) return -ENOMEM; vi->index = index = ida_alloc(&rng_index_ida, GFP_KERNEL); if (index < 0) { err = index; goto err_ida; } sprintf(vi->name, "virtio_rng.%d", index); init_completion(&vi->have_data); vi->hwrng = (struct hwrng) { .read = virtio_read, .cleanup = virtio_cleanup, .priv = (unsigned long)vi, .name = vi->name, }; vdev->priv = vi; /* We expect a single virtqueue. */ vi->vq = virtio_find_single_vq(vdev, random_recv_done, "input"); if (IS_ERR(vi->vq)) { err = PTR_ERR(vi->vq); goto err_find; } virtio_device_ready(vdev); /* we always have a pending entropy request */ request_entropy(vi); return 0; err_find: ida_free(&rng_index_ida, index); err_ida: kfree(vi); return err; } static void remove_common(struct virtio_device *vdev) { struct virtrng_info *vi = vdev->priv; vi->hwrng_removed = true; vi->data_avail = 0; vi->data_idx = 0; complete(&vi->have_data); if (vi->hwrng_register_done) hwrng_unregister(&vi->hwrng); virtio_reset_device(vdev); vdev->config->del_vqs(vdev); ida_free(&rng_index_ida, vi->index); kfree(vi); } static int virtrng_probe(struct virtio_device *vdev) { return probe_common(vdev); } static void virtrng_remove(struct virtio_device *vdev) { remove_common(vdev); } static void virtrng_scan(struct virtio_device *vdev) { struct virtrng_info *vi = vdev->priv; int err; err = hwrng_register(&vi->hwrng); if (!err) vi->hwrng_register_done = true; } static int virtrng_freeze(struct virtio_device *vdev) { remove_common(vdev); return 0; } static int virtrng_restore(struct virtio_device *vdev) { int err; err = probe_common(vdev); if (!err) { struct virtrng_info *vi = vdev->priv; /* * Set hwrng_removed to ensure that virtio_read() * does not block waiting for data before the * registration is complete. 
		 */
		vi->hwrng_removed = true;
		err = hwrng_register(&vi->hwrng);
		if (!err) {
			vi->hwrng_register_done = true;
			vi->hwrng_removed = false;
		}
	}

	return err;
}

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_rng_driver = {
	.driver.name	= KBUILD_MODNAME,
	.id_table	= id_table,
	.probe		= virtrng_probe,
	.remove		= virtrng_remove,
	.scan		= virtrng_scan,
	.freeze		= pm_sleep_ptr(virtrng_freeze),
	.restore	= pm_sleep_ptr(virtrng_restore),
};

module_virtio_driver(virtio_rng_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio random number driver");
MODULE_LICENSE("GPL");
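/*
 * Editor's usage sketch (not part of the driver): once virtrng_scan() has
 * registered the device with the hwrng core, the entropy source is
 * reachable from userspace via the core's /dev/hwrng character device
 * (subject to which rng the core currently selects). Assumes a Linux
 * guest with the virtio-rng device present.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/hwrng", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* may block until the host supplies entropy */
	for (ssize_t i = 0; i < n; i++)
		printf("%02x", buf[i]);
	putchar('\n');
	close(fd);
	return 0;
}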
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *  0: 0x8000000000000000	0x0000000000000000
 *  1: 0xfc00000000000000	0xf000000000000000
 * ...
 * 11: 0xfffffffffffffff8	0xfffffffffffffc00
 * 12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *  10 calls
 *   5 returns
 *   3 calls
 *   4 returns
 *   3 calls
 *   ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered that it obfuscates the problem enough to make exploitation
 * extremely difficult.
 */
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS				\
	incq PER_CPU_VAR(__x86_call_count);
# define CALL_THUNKS_DEBUG_INC_RETS				\
	incq PER_CPU_VAR(__x86_ret_count);
# define CALL_THUNKS_DEBUG_INC_STUFFS				\
	incq PER_CPU_VAR(__x86_stuffs_count);
# define CALL_THUNKS_DEBUG_INC_CTXSW				\
	incq PER_CPU_VAR(__x86_ctxsw_count);
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(__x86_call_depth);

#define RESET_CALL_DEPTH					\
	xor	%eax, %eax;					\
	bts	$63, %rax;					\
	movq	%rax, PER_CPU_VAR(__x86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL				\
	movb	$0xfc, %al;					\
	shl	$56, %rax;					\
	movq	%rax, PER_CPU_VAR(__x86_call_depth);		\
	CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH					\
	sarq	$5, PER_CPU_VAR(__x86_call_depth);		\
	CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#define INCREMENT_CALL_DEPTH
#endif

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RETPOLINE_THUNK_SIZE	32
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT			\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
	int3;					\
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr)			\
	mov	$(nr/2), reg;				\
771:							\
	__FILL_RETURN_SLOT				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	dec	reg;					\
	jnz	771b;					\
	/* barrier for jnz misprediction */		\
	lfence;						\
	CREDIT_CALL_DEPTH				\
	CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
*/ #define __FILL_RETURN_BUFFER(reg, nr) \ .rept nr; \ __FILL_RETURN_SLOT; \ .endr; \ add $(BITS_PER_LONG/8) * nr, %_ASM_SP; #endif /* * Stuff a single RSB slot. * * To mitigate Post-Barrier RSB speculation, one CALL instruction must be * forced to retire before letting a RET instruction execute. * * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed * before this point. */ #define __FILL_ONE_RETURN \ __FILL_RETURN_SLOT \ add $(BITS_PER_LONG/8), %_ASM_SP; \ lfence; #ifdef __ASSEMBLER__ /* * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions * vs RETBleed validation. */ #define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE /* * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should * eventually turn into its own annotation. */ .macro VALIDATE_UNRET_END #if defined(CONFIG_NOINSTR_VALIDATION) && \ (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)) ANNOTATE_RETPOLINE_SAFE nop #endif .endm /* * Emits a conditional CS prefix that is compatible with * -mindirect-branch-cs-prefix. */ .macro __CS_PREFIX reg:req .irp rs,r8,r9,r10,r11,r12,r13,r14,r15 .ifc \reg,\rs .byte 0x2e .endif .endr .endm /* * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple * indirect jmp/call which may be susceptible to the Spectre variant 2 * attack. * * NOTE: these do not take kCFI into account and are thus not comparable to C * indirect calls, take care when using. The target of these should be an ENDBR * instruction irrespective of kCFI. */ .macro JMP_NOSPEC reg:req #ifdef CONFIG_MITIGATION_RETPOLINE __CS_PREFIX \reg jmp __x86_indirect_thunk_\reg #else jmp *%\reg int3 #endif .endm .macro CALL_NOSPEC reg:req #ifdef CONFIG_MITIGATION_RETPOLINE __CS_PREFIX \reg call __x86_indirect_thunk_\reg #else call *%\reg #endif .endm /* * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP * monstrosity above, manually. */ .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS) ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \ __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \ __stringify(nop;nop;__FILL_ONE_RETURN), \ftr2 .Lskip_rsb_\@: .endm /* * The CALL to srso_alias_untrain_ret() must be patched in directly at * the spot where untraining must be done, ie., srso_alias_untrain_ret() * must be the target of a CALL instruction instead of indirectly * jumping to a wrapper which then calls it. Therefore, this macro is * called outside of __UNTRAIN_RET below, for the time being, before the * kernel can support nested alternatives with arbitrary nesting. */ .macro CALL_UNTRAIN_RET #if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO) ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \ "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS #endif .endm /* * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the * return thunk isn't mapped into the userspace tables (then again, AMD * typically has NO_MELTDOWN). * * While retbleed_untrain_ret() doesn't clobber anything but requires stack, * write_ibpb() will clobber AX, CX, DX. * * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point * where we have a stack but before any RET instruction. 
*/ .macro __UNTRAIN_RET ibpb_feature, call_depth_insns #if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY) VALIDATE_UNRET_END CALL_UNTRAIN_RET ALTERNATIVE_2 "", \ "call write_ibpb", \ibpb_feature, \ __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH #endif .endm #define UNTRAIN_RET \ __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH) #define UNTRAIN_RET_VM \ __UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH) #define UNTRAIN_RET_FROM_CALL \ __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL) .macro CALL_DEPTH_ACCOUNT #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING ALTERNATIVE "", \ __stringify(INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif .endm /* * Macro to execute VERW insns that mitigate transient data sampling * attacks such as MDS or TSA. On affected systems a microcode update * overloaded VERW insns to also clear the CPU buffers. VERW clobbers * CFLAGS.ZF. * Note: Only the memory operand variant of VERW clears the CPU buffers. */ .macro __CLEAR_CPU_BUFFERS feature #ifdef CONFIG_X86_64 ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature #else /* * In 32bit mode, the memory operand must be a %cs reference. The data * segments may not be usable (vm86 mode), and the stack segment may not * be flat (ESPFIX32). */ ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature #endif .endm #define CLEAR_CPU_BUFFERS \ __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF #define VM_CLEAR_CPU_BUFFERS \ __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM #ifdef CONFIG_X86_64 .macro CLEAR_BRANCH_HISTORY ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP .endm .macro CLEAR_BRANCH_HISTORY_VMEXIT ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_VMEXIT .endm #else #define CLEAR_BRANCH_HISTORY #define CLEAR_BRANCH_HISTORY_VMEXIT #endif #else /* __ASSEMBLER__ */ #define ITS_THUNK_SIZE 64 typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE]; typedef u8 its_thunk_t[ITS_THUNK_SIZE]; extern retpoline_thunk_t __x86_indirect_thunk_array[]; extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; extern its_thunk_t __x86_indirect_its_thunk_array[]; #ifdef CONFIG_MITIGATION_RETHUNK extern void __x86_return_thunk(void); #else static inline void __x86_return_thunk(void) {} #endif #ifdef CONFIG_MITIGATION_UNRET_ENTRY extern void retbleed_return_thunk(void); #else static inline void retbleed_return_thunk(void) {} #endif extern void srso_alias_untrain_ret(void); #ifdef CONFIG_MITIGATION_SRSO extern void srso_return_thunk(void); extern void srso_alias_return_thunk(void); #else static inline void srso_return_thunk(void) {} static inline void srso_alias_return_thunk(void) {} #endif #ifdef CONFIG_MITIGATION_ITS extern void its_return_thunk(void); #else static inline void its_return_thunk(void) {} #endif extern void retbleed_return_thunk(void); extern void srso_return_thunk(void); extern void srso_alias_return_thunk(void); extern void entry_untrain_ret(void); extern void write_ibpb(void); #ifdef CONFIG_X86_64 extern void clear_bhb_loop(void); #endif extern void (*x86_return_thunk)(void); extern void __warn_thunk(void); #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING extern void call_depth_return_thunk(void); #define CALL_DEPTH_ACCOUNT \ ALTERNATIVE("", \ __stringify(INCREMENT_CALL_DEPTH), \ X86_FEATURE_CALL_DEPTH) DECLARE_PER_CPU_CACHE_HOT(u64, __x86_call_depth); #ifdef CONFIG_CALL_THUNKS_DEBUG DECLARE_PER_CPU(u64, __x86_call_count); DECLARE_PER_CPU(u64, 
__x86_ret_count); DECLARE_PER_CPU(u64, __x86_stuffs_count); DECLARE_PER_CPU(u64, __x86_ctxsw_count); #endif #else /* !CONFIG_MITIGATION_CALL_DEPTH_TRACKING */ static inline void call_depth_return_thunk(void) {} #define CALL_DEPTH_ACCOUNT "" #endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */ #ifdef CONFIG_MITIGATION_RETPOLINE #define GEN(reg) \ extern retpoline_thunk_t __x86_indirect_thunk_ ## reg; #include <asm/GEN-for-each-reg.h> #undef GEN #define GEN(reg) \ extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg; #include <asm/GEN-for-each-reg.h> #undef GEN #define GEN(reg) \ extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg; #include <asm/GEN-for-each-reg.h> #undef GEN #ifdef CONFIG_X86_64 /* * Emits a conditional CS prefix that is compatible with * -mindirect-branch-cs-prefix. */ #define __CS_PREFIX(reg) \ ".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \ ".ifc \\rs," reg "\n" \ ".byte 0x2e\n" \ ".endif\n" \ ".endr\n" /* * Inline asm uses the %V modifier which is only in newer GCC * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined. */ #define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \ "call __x86_indirect_thunk_%V[thunk_target]\n" # define THUNK_TARGET(addr) [thunk_target] "r" (addr) #else /* CONFIG_X86_32 */ /* * For i386 we use the original ret-equivalent retpoline, because * otherwise we'll run out of registers. We don't care about CET * here, anyway. */ # define CALL_NOSPEC \ ALTERNATIVE_2( \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ " jmp 904f;\n" \ " .align 16\n" \ "901: call 903f;\n" \ "902: pause;\n" \ " lfence;\n" \ " jmp 902b;\n" \ " .align 16\n" \ "903: lea 4(%%esp), %%esp;\n" \ " pushl %[thunk_target];\n" \ " ret;\n" \ " .align 16\n" \ "904: call 901b;\n", \ X86_FEATURE_RETPOLINE, \ "lfence;\n" \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ X86_FEATURE_RETPOLINE_LFENCE) # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) #endif #else /* No retpoline for C / inline asm */ # define CALL_NOSPEC "call *%[thunk_target]\n" # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) #endif /* The Spectre V2 mitigation variants */ enum spectre_v2_mitigation { SPECTRE_V2_NONE, SPECTRE_V2_RETPOLINE, SPECTRE_V2_LFENCE, SPECTRE_V2_EIBRS, SPECTRE_V2_EIBRS_RETPOLINE, SPECTRE_V2_EIBRS_LFENCE, SPECTRE_V2_IBRS, }; /* The indirect branch speculation control variants */ enum spectre_v2_user_mitigation { SPECTRE_V2_USER_NONE, SPECTRE_V2_USER_STRICT, SPECTRE_V2_USER_STRICT_PREFERRED, SPECTRE_V2_USER_PRCTL, SPECTRE_V2_USER_SECCOMP, }; /* The Speculative Store Bypass disable variants */ enum ssb_mitigation { SPEC_STORE_BYPASS_NONE, SPEC_STORE_BYPASS_DISABLE, SPEC_STORE_BYPASS_PRCTL, SPEC_STORE_BYPASS_SECCOMP, }; static __always_inline void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) { asm volatile(ALTERNATIVE("", "wrmsr", %c[feature]) : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)), [feature] "i" (feature) : "memory"); } static inline void indirect_branch_prediction_barrier(void) { asm_inline volatile(ALTERNATIVE("", "call write_ibpb", X86_FEATURE_IBPB) : ASM_CALL_CONSTRAINT :: "rax", "rcx", "rdx", "memory"); } /* The Intel SPEC CTRL MSR base value cache */ extern u64 x86_spec_ctrl_base; DECLARE_PER_CPU(u64, x86_spec_ctrl_current); extern void update_spec_ctrl_cond(u64 val); extern u64 spec_ctrl_current(void); /* * With retpoline, we must use IBRS to restrict branch prediction * before calling into firmware. * * (Implemented as CPP macros due to header hell.) 
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);

DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);

extern u16 x86_verw_sel;

#include <asm/segment.h>

/**
 * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void x86_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
 * and TSA vulnerabilities.
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void x86_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&cpu_buf_idle_clear))
		x86_clear_cpu_buffers();
}

#endif /* __ASSEMBLER__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
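/*
 * Editor's sketch of how C code typically consumes CALL_NOSPEC and
 * THUNK_TARGET (modeled loosely on existing call sites such as the
 * Hyper-V hypercall path; call_fn_nospec() itself is hypothetical and
 * not a kernel function). The clobber list here is deliberately minimal
 * and assumes the callee preserves everything but %rax; real call sites
 * spell out every register the target may clobber.
 */
static int call_fn_nospec(int (*fn)(void))
{
	int ret;

	asm volatile(CALL_NOSPEC
		     : "=a" (ret), ASM_CALL_CONSTRAINT
		     : THUNK_TARGET(fn)
		     : "memory");
	return ret;
}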
// SPDX-License-Identifier: GPL-2.0
/*
 * io_uring opcode handling table
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include
<linux/io_uring/cmd.h> #include "io_uring.h" #include "opdef.h" #include "refs.h" #include "tctx.h" #include "sqpoll.h" #include "fdinfo.h" #include "kbuf.h" #include "rsrc.h" #include "xattr.h" #include "nop.h" #include "fs.h" #include "splice.h" #include "sync.h" #include "advise.h" #include "openclose.h" #include "uring_cmd.h" #include "epoll.h" #include "statx.h" #include "net.h" #include "msg_ring.h" #include "timeout.h" #include "poll.h" #include "cancel.h" #include "rw.h" #include "waitid.h" #include "futex.h" #include "truncate.h" #include "zcrx.h" static int io_no_issue(struct io_kiocb *req, unsigned int issue_flags) { WARN_ON_ONCE(1); return -ECANCELED; } static __maybe_unused int io_eopnotsupp_prep(struct io_kiocb *kiocb, const struct io_uring_sqe *sqe) { return -EOPNOTSUPP; } const struct io_issue_def io_issue_defs[] = { [IORING_OP_NOP] = { .audit_skip = 1, .iopoll = 1, .prep = io_nop_prep, .issue = io_nop, }, [IORING_OP_READV] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, .plug = 1, .audit_skip = 1, .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, .vectored = 1, .async_size = sizeof(struct io_async_rw), .prep = io_prep_readv, .issue = io_read, }, [IORING_OP_WRITEV] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, .plug = 1, .audit_skip = 1, .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, .vectored = 1, .async_size = sizeof(struct io_async_rw), .prep = io_prep_writev, .issue = io_write, }, [IORING_OP_FSYNC] = { .needs_file = 1, .audit_skip = 1, .prep = io_fsync_prep, .issue = io_fsync, }, [IORING_OP_READ_FIXED] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .plug = 1, .audit_skip = 1, .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, .async_size = sizeof(struct io_async_rw), .prep = io_prep_read_fixed, .issue = io_read_fixed, }, [IORING_OP_WRITE_FIXED] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, .plug = 1, .audit_skip = 1, .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, .async_size = sizeof(struct io_async_rw), .prep = io_prep_write_fixed, .issue = io_write_fixed, }, [IORING_OP_POLL_ADD] = { .needs_file = 1, .unbound_nonreg_file = 1, .audit_skip = 1, .prep = io_poll_add_prep, .issue = io_poll_add, }, [IORING_OP_POLL_REMOVE] = { .audit_skip = 1, .prep = io_poll_remove_prep, .issue = io_poll_remove, }, [IORING_OP_SYNC_FILE_RANGE] = { .needs_file = 1, .audit_skip = 1, .prep = io_sfr_prep, .issue = io_sync_file_range, }, [IORING_OP_SENDMSG] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, .ioprio = 1, #if defined(CONFIG_NET) .async_size = sizeof(struct io_async_msghdr), .prep = io_sendmsg_prep, .issue = io_sendmsg, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_RECVMSG] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, .ioprio = 1, #if defined(CONFIG_NET) .async_size = sizeof(struct io_async_msghdr), .prep = io_recvmsg_prep, .issue = io_recvmsg, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_TIMEOUT] = { .audit_skip = 1, .async_size = sizeof(struct io_timeout_data), .prep = io_timeout_prep, .issue = io_timeout, }, [IORING_OP_TIMEOUT_REMOVE] = { /* used by timeout updates' prep() */ .audit_skip = 1, .prep = io_timeout_remove_prep, .issue = io_timeout_remove, }, [IORING_OP_ACCEPT] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .poll_exclusive = 1, .ioprio = 1, /* used for flags */ #if defined(CONFIG_NET) .prep = io_accept_prep, .issue = io_accept, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_ASYNC_CANCEL] 
= { .audit_skip = 1, .prep = io_async_cancel_prep, .issue = io_async_cancel, }, [IORING_OP_LINK_TIMEOUT] = { .audit_skip = 1, .async_size = sizeof(struct io_timeout_data), .prep = io_link_timeout_prep, .issue = io_no_issue, }, [IORING_OP_CONNECT] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, #if defined(CONFIG_NET) .async_size = sizeof(struct io_async_msghdr), .prep = io_connect_prep, .issue = io_connect, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_FALLOCATE] = { .needs_file = 1, .hash_reg_file = 1, .prep = io_fallocate_prep, .issue = io_fallocate, }, [IORING_OP_OPENAT] = { .prep = io_openat_prep, .issue = io_openat, }, [IORING_OP_CLOSE] = { .prep = io_close_prep, .issue = io_close, }, [IORING_OP_FILES_UPDATE] = { .audit_skip = 1, .iopoll = 1, .prep = io_files_update_prep, .issue = io_files_update, }, [IORING_OP_STATX] = { .audit_skip = 1, .prep = io_statx_prep, .issue = io_statx, }, [IORING_OP_READ] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, .plug = 1, .audit_skip = 1, .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, .async_size = sizeof(struct io_async_rw), .prep = io_prep_read, .issue = io_read, }, [IORING_OP_WRITE] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, .plug = 1, .audit_skip = 1, .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, .async_size = sizeof(struct io_async_rw), .prep = io_prep_write, .issue = io_write, }, [IORING_OP_FADVISE] = { .needs_file = 1, .audit_skip = 1, .prep = io_fadvise_prep, .issue = io_fadvise, }, [IORING_OP_MADVISE] = { .audit_skip = 1, .prep = io_madvise_prep, .issue = io_madvise, }, [IORING_OP_SEND] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, .audit_skip = 1, .ioprio = 1, .buffer_select = 1, #if defined(CONFIG_NET) .async_size = sizeof(struct io_async_msghdr), .prep = io_sendmsg_prep, .issue = io_send, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_RECV] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, .audit_skip = 1, .ioprio = 1, #if defined(CONFIG_NET) .async_size = sizeof(struct io_async_msghdr), .prep = io_recvmsg_prep, .issue = io_recv, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_OPENAT2] = { .prep = io_openat2_prep, .issue = io_openat2, }, [IORING_OP_EPOLL_CTL] = { .unbound_nonreg_file = 1, .audit_skip = 1, #if defined(CONFIG_EPOLL) .prep = io_epoll_ctl_prep, .issue = io_epoll_ctl, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_SPLICE] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .audit_skip = 1, .prep = io_splice_prep, .issue = io_splice, }, [IORING_OP_PROVIDE_BUFFERS] = { .audit_skip = 1, .iopoll = 1, .prep = io_provide_buffers_prep, .issue = io_manage_buffers_legacy, }, [IORING_OP_REMOVE_BUFFERS] = { .audit_skip = 1, .iopoll = 1, .prep = io_remove_buffers_prep, .issue = io_manage_buffers_legacy, }, [IORING_OP_TEE] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .audit_skip = 1, .prep = io_tee_prep, .issue = io_tee, }, [IORING_OP_SHUTDOWN] = { .needs_file = 1, #if defined(CONFIG_NET) .prep = io_shutdown_prep, .issue = io_shutdown, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_RENAMEAT] = { .prep = io_renameat_prep, .issue = io_renameat, }, [IORING_OP_UNLINKAT] = { .prep = io_unlinkat_prep, .issue = io_unlinkat, }, [IORING_OP_MKDIRAT] = { .prep = io_mkdirat_prep, .issue = io_mkdirat, }, [IORING_OP_SYMLINKAT] = { .prep = io_symlinkat_prep, .issue = io_symlinkat, }, [IORING_OP_LINKAT] = { .prep = io_linkat_prep, .issue = io_linkat, }, 
[IORING_OP_MSG_RING] = { .needs_file = 1, .iopoll = 1, .prep = io_msg_ring_prep, .issue = io_msg_ring, }, [IORING_OP_FSETXATTR] = { .needs_file = 1, .prep = io_fsetxattr_prep, .issue = io_fsetxattr, }, [IORING_OP_SETXATTR] = { .prep = io_setxattr_prep, .issue = io_setxattr, }, [IORING_OP_FGETXATTR] = { .needs_file = 1, .prep = io_fgetxattr_prep, .issue = io_fgetxattr, }, [IORING_OP_GETXATTR] = { .prep = io_getxattr_prep, .issue = io_getxattr, }, [IORING_OP_SOCKET] = { .audit_skip = 1, #if defined(CONFIG_NET) .prep = io_socket_prep, .issue = io_socket, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_URING_CMD] = { .needs_file = 1, .plug = 1, .iopoll = 1, .iopoll_queue = 1, .async_size = sizeof(struct io_async_cmd), .prep = io_uring_cmd_prep, .issue = io_uring_cmd, }, [IORING_OP_SEND_ZC] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, .audit_skip = 1, .ioprio = 1, #if defined(CONFIG_NET) .async_size = sizeof(struct io_async_msghdr), .prep = io_send_zc_prep, .issue = io_send_zc, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_SENDMSG_ZC] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, .ioprio = 1, #if defined(CONFIG_NET) .async_size = sizeof(struct io_async_msghdr), .prep = io_send_zc_prep, .issue = io_sendmsg_zc, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_READ_MULTISHOT] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, .audit_skip = 1, .async_size = sizeof(struct io_async_rw), .prep = io_read_mshot_prep, .issue = io_read_mshot, }, [IORING_OP_WAITID] = { .async_size = sizeof(struct io_waitid_async), .prep = io_waitid_prep, .issue = io_waitid, }, [IORING_OP_FUTEX_WAIT] = { #if defined(CONFIG_FUTEX) .prep = io_futex_prep, .issue = io_futex_wait, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_FUTEX_WAKE] = { #if defined(CONFIG_FUTEX) .prep = io_futex_prep, .issue = io_futex_wake, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_FUTEX_WAITV] = { #if defined(CONFIG_FUTEX) .prep = io_futexv_prep, .issue = io_futexv_wait, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_FIXED_FD_INSTALL] = { .needs_file = 1, .prep = io_install_fixed_fd_prep, .issue = io_install_fixed_fd, }, [IORING_OP_FTRUNCATE] = { .needs_file = 1, .hash_reg_file = 1, .prep = io_ftruncate_prep, .issue = io_ftruncate, }, [IORING_OP_BIND] = { #if defined(CONFIG_NET) .needs_file = 1, .prep = io_bind_prep, .issue = io_bind, .async_size = sizeof(struct io_async_msghdr), #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_LISTEN] = { #if defined(CONFIG_NET) .needs_file = 1, .prep = io_listen_prep, .issue = io_listen, .async_size = sizeof(struct io_async_msghdr), #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_RECV_ZC] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .ioprio = 1, #if defined(CONFIG_NET) .prep = io_recvzc_prep, .issue = io_recvzc, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_EPOLL_WAIT] = { .needs_file = 1, .audit_skip = 1, .pollin = 1, #if defined(CONFIG_EPOLL) .prep = io_epoll_wait_prep, .issue = io_epoll_wait, #else .prep = io_eopnotsupp_prep, #endif }, [IORING_OP_READV_FIXED] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .plug = 1, .audit_skip = 1, .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, .vectored = 1, .async_size = sizeof(struct io_async_rw), .prep = io_prep_readv_fixed, .issue = io_read, }, [IORING_OP_WRITEV_FIXED] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, .plug = 1, .audit_skip = 1, .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, 
.vectored = 1, .async_size = sizeof(struct io_async_rw), .prep = io_prep_writev_fixed, .issue = io_write, }, [IORING_OP_PIPE] = { .prep = io_pipe_prep, .issue = io_pipe, }, }; const struct io_cold_def io_cold_defs[] = { [IORING_OP_NOP] = { .name = "NOP", }, [IORING_OP_READV] = { .name = "READV", .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, [IORING_OP_WRITEV] = { .name = "WRITEV", .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, [IORING_OP_FSYNC] = { .name = "FSYNC", }, [IORING_OP_READ_FIXED] = { .name = "READ_FIXED", .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, [IORING_OP_WRITE_FIXED] = { .name = "WRITE_FIXED", .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, [IORING_OP_POLL_ADD] = { .name = "POLL_ADD", }, [IORING_OP_POLL_REMOVE] = { .name = "POLL_REMOVE", }, [IORING_OP_SYNC_FILE_RANGE] = { .name = "SYNC_FILE_RANGE", }, [IORING_OP_SENDMSG] = { .name = "SENDMSG", #if defined(CONFIG_NET) .cleanup = io_sendmsg_recvmsg_cleanup, .fail = io_sendrecv_fail, #endif }, [IORING_OP_RECVMSG] = { .name = "RECVMSG", #if defined(CONFIG_NET) .cleanup = io_sendmsg_recvmsg_cleanup, .fail = io_sendrecv_fail, #endif }, [IORING_OP_TIMEOUT] = { .name = "TIMEOUT", }, [IORING_OP_TIMEOUT_REMOVE] = { .name = "TIMEOUT_REMOVE", }, [IORING_OP_ACCEPT] = { .name = "ACCEPT", }, [IORING_OP_ASYNC_CANCEL] = { .name = "ASYNC_CANCEL", }, [IORING_OP_LINK_TIMEOUT] = { .name = "LINK_TIMEOUT", }, [IORING_OP_CONNECT] = { .name = "CONNECT", }, [IORING_OP_FALLOCATE] = { .name = "FALLOCATE", }, [IORING_OP_OPENAT] = { .name = "OPENAT", .cleanup = io_open_cleanup, }, [IORING_OP_CLOSE] = { .name = "CLOSE", }, [IORING_OP_FILES_UPDATE] = { .name = "FILES_UPDATE", }, [IORING_OP_STATX] = { .name = "STATX", .cleanup = io_statx_cleanup, }, [IORING_OP_READ] = { .name = "READ", .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, [IORING_OP_WRITE] = { .name = "WRITE", .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, [IORING_OP_FADVISE] = { .name = "FADVISE", }, [IORING_OP_MADVISE] = { .name = "MADVISE", }, [IORING_OP_SEND] = { .name = "SEND", #if defined(CONFIG_NET) .cleanup = io_sendmsg_recvmsg_cleanup, .fail = io_sendrecv_fail, #endif }, [IORING_OP_RECV] = { .name = "RECV", #if defined(CONFIG_NET) .cleanup = io_sendmsg_recvmsg_cleanup, .fail = io_sendrecv_fail, #endif }, [IORING_OP_OPENAT2] = { .name = "OPENAT2", .cleanup = io_open_cleanup, }, [IORING_OP_EPOLL_CTL] = { .name = "EPOLL", }, [IORING_OP_SPLICE] = { .name = "SPLICE", .cleanup = io_splice_cleanup, }, [IORING_OP_PROVIDE_BUFFERS] = { .name = "PROVIDE_BUFFERS", }, [IORING_OP_REMOVE_BUFFERS] = { .name = "REMOVE_BUFFERS", }, [IORING_OP_TEE] = { .name = "TEE", .cleanup = io_splice_cleanup, }, [IORING_OP_SHUTDOWN] = { .name = "SHUTDOWN", }, [IORING_OP_RENAMEAT] = { .name = "RENAMEAT", .cleanup = io_renameat_cleanup, }, [IORING_OP_UNLINKAT] = { .name = "UNLINKAT", .cleanup = io_unlinkat_cleanup, }, [IORING_OP_MKDIRAT] = { .name = "MKDIRAT", .cleanup = io_mkdirat_cleanup, }, [IORING_OP_SYMLINKAT] = { .name = "SYMLINKAT", .cleanup = io_link_cleanup, }, [IORING_OP_LINKAT] = { .name = "LINKAT", .cleanup = io_link_cleanup, }, [IORING_OP_MSG_RING] = { .name = "MSG_RING", .cleanup = io_msg_ring_cleanup, }, [IORING_OP_FSETXATTR] = { .name = "FSETXATTR", .cleanup = io_xattr_cleanup, }, [IORING_OP_SETXATTR] = { .name = "SETXATTR", .cleanup = io_xattr_cleanup, }, [IORING_OP_FGETXATTR] = { .name = "FGETXATTR", .cleanup = io_xattr_cleanup, }, [IORING_OP_GETXATTR] = { .name = "GETXATTR", .cleanup = io_xattr_cleanup, }, [IORING_OP_SOCKET] = { 
.name = "SOCKET", }, [IORING_OP_URING_CMD] = { .name = "URING_CMD", .cleanup = io_uring_cmd_cleanup, }, [IORING_OP_SEND_ZC] = { .name = "SEND_ZC", #if defined(CONFIG_NET) .cleanup = io_send_zc_cleanup, .fail = io_sendrecv_fail, #endif }, [IORING_OP_SENDMSG_ZC] = { .name = "SENDMSG_ZC", #if defined(CONFIG_NET) .cleanup = io_send_zc_cleanup, .fail = io_sendrecv_fail, #endif }, [IORING_OP_READ_MULTISHOT] = { .name = "READ_MULTISHOT", .cleanup = io_readv_writev_cleanup, }, [IORING_OP_WAITID] = { .name = "WAITID", }, [IORING_OP_FUTEX_WAIT] = { .name = "FUTEX_WAIT", }, [IORING_OP_FUTEX_WAKE] = { .name = "FUTEX_WAKE", }, [IORING_OP_FUTEX_WAITV] = { .name = "FUTEX_WAITV", }, [IORING_OP_FIXED_FD_INSTALL] = { .name = "FIXED_FD_INSTALL", }, [IORING_OP_FTRUNCATE] = { .name = "FTRUNCATE", }, [IORING_OP_BIND] = { .name = "BIND", }, [IORING_OP_LISTEN] = { .name = "LISTEN", }, [IORING_OP_RECV_ZC] = { .name = "RECV_ZC", }, [IORING_OP_EPOLL_WAIT] = { .name = "EPOLL_WAIT", }, [IORING_OP_READV_FIXED] = { .name = "READV_FIXED", .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, [IORING_OP_WRITEV_FIXED] = { .name = "WRITEV_FIXED", .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, [IORING_OP_PIPE] = { .name = "PIPE", }, }; const char *io_uring_get_opcode(u8 opcode) { if (opcode < IORING_OP_LAST) return io_cold_defs[opcode].name; return "INVALID"; } bool io_uring_op_supported(u8 opcode) { if (opcode < IORING_OP_LAST && io_issue_defs[opcode].prep != io_eopnotsupp_prep) return true; return false; } void __init io_uring_optable_init(void) { int i; BUILD_BUG_ON(ARRAY_SIZE(io_cold_defs) != IORING_OP_LAST); BUILD_BUG_ON(ARRAY_SIZE(io_issue_defs) != IORING_OP_LAST); for (i = 0; i < ARRAY_SIZE(io_issue_defs); i++) { BUG_ON(!io_issue_defs[i].prep); if (io_issue_defs[i].prep != io_eopnotsupp_prep) BUG_ON(!io_issue_defs[i].issue); WARN_ON_ONCE(!io_cold_defs[i].name); } } |
/* * Ext4 orphan inode handling */ #include <linux/fs.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include "ext4.h" #include "ext4_jbd2.h" static int ext4_orphan_file_add(handle_t *handle, struct inode *inode) { int i, j, start; struct ext4_orphan_info *oi = &EXT4_SB(inode->i_sb)->s_orphan_info; int ret = 0; bool found = false; __le32 *bdata; int inodes_per_ob = ext4_inodes_per_orphan_block(inode->i_sb); int looped = 0; /* * Find block with free orphan entry.
Use CPU number for a naive hash * for a search start in the orphan file */ start = raw_smp_processor_id()*13 % oi->of_blocks; i = start; do { if (atomic_dec_if_positive(&oi->of_binfo[i].ob_free_entries) >= 0) { found = true; break; } if (++i >= oi->of_blocks) i = 0; } while (i != start); if (!found) { /* * For now we don't grow or shrink orphan file. We just use * whatever was allocated at mke2fs time. The additional * credits we would have to reserve for each orphan inode * operation just don't seem worth it. */ return -ENOSPC; } ret = ext4_journal_get_write_access(handle, inode->i_sb, oi->of_binfo[i].ob_bh, EXT4_JTR_ORPHAN_FILE); if (ret) { atomic_inc(&oi->of_binfo[i].ob_free_entries); return ret; } bdata = (__le32 *)(oi->of_binfo[i].ob_bh->b_data); /* Find empty slot in a block */ j = 0; do { if (looped) { /* * Did we walk through the block several times without * finding a free entry? It is theoretically possible * if entries get constantly allocated and freed or * if the block is corrupted. Avoid indefinite looping * and bail. We'll use the orphan list instead. */ if (looped > 3) { atomic_inc(&oi->of_binfo[i].ob_free_entries); return -ENOSPC; } cond_resched(); } while (bdata[j]) { if (++j >= inodes_per_ob) { j = 0; looped++; } } } while (cmpxchg(&bdata[j], (__le32)0, cpu_to_le32(inode->i_ino)) != (__le32)0); EXT4_I(inode)->i_orphan_idx = i * inodes_per_ob + j; ext4_set_inode_state(inode, EXT4_STATE_ORPHAN_FILE); return ext4_handle_dirty_metadata(handle, NULL, oi->of_binfo[i].ob_bh); } /* * ext4_orphan_add() links an unlinked or truncated inode into a list of * such inodes, starting at the superblock, in case we crash before the * file is closed/deleted, or in case the inode truncate spans multiple * transactions and the last transaction is not recovered after a crash. * * At filesystem recovery time, we walk this list deleting unlinked * inodes and truncating linked inodes in ext4_orphan_cleanup(). * * Orphan list manipulation functions must be called under i_rwsem unless * we are just creating the inode or deleting it. */ int ext4_orphan_add(handle_t *handle, struct inode *inode) { struct super_block *sb = inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_iloc iloc; int err = 0, rc; bool dirty = false; if (!sbi->s_journal || is_bad_inode(inode)) return 0; WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) && !inode_is_locked(inode)); /* * Inode orphaned in orphan file or in orphan list? */ if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) || !list_empty(&EXT4_I(inode)->i_orphan)) return 0; /* * Orphan handling is only valid for files with data blocks * being truncated, or files being unlinked. Note that we either * hold i_rwsem, or the inode can not be referenced from outside, * so i_nlink should not be bumped due to a race */ ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); if (sbi->s_orphan_info.of_blocks) { err = ext4_orphan_file_add(handle, inode); /* * Fall back to the normal orphan list if the orphan file is * out of space */ if (err != -ENOSPC) return err; } BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, EXT4_JTR_NONE); if (err) goto out; err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) goto out; mutex_lock(&sbi->s_orphan_lock); /* * Due to previous errors the inode may already be a part of the * on-disk orphan list. If so, skip the on-disk list modification.
*/ if (!NEXT_ORPHAN(inode) || NEXT_ORPHAN(inode) > (le32_to_cpu(sbi->s_es->s_inodes_count))) { /* Insert this inode at the head of the on-disk orphan list */ NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan); lock_buffer(sbi->s_sbh); sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); ext4_superblock_csum_set(sb); unlock_buffer(sbi->s_sbh); dirty = true; } list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan); mutex_unlock(&sbi->s_orphan_lock); if (dirty) { err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); rc = ext4_mark_iloc_dirty(handle, inode, &iloc); if (!err) err = rc; if (err) { /* * We have to remove inode from in-memory list if * addition to on disk orphan list failed. Stray orphan * list entries can cause panics at unmount time. */ mutex_lock(&sbi->s_orphan_lock); list_del_init(&EXT4_I(inode)->i_orphan); mutex_unlock(&sbi->s_orphan_lock); } } else brelse(iloc.bh); ext4_debug("superblock will point to %lu\n", inode->i_ino); ext4_debug("orphan inode %lu will point to %d\n", inode->i_ino, NEXT_ORPHAN(inode)); out: ext4_std_error(sb, err); return err; } static int ext4_orphan_file_del(handle_t *handle, struct inode *inode) { struct ext4_orphan_info *oi = &EXT4_SB(inode->i_sb)->s_orphan_info; __le32 *bdata; int blk, off; int inodes_per_ob = ext4_inodes_per_orphan_block(inode->i_sb); int ret = 0; if (!handle) goto out; blk = EXT4_I(inode)->i_orphan_idx / inodes_per_ob; off = EXT4_I(inode)->i_orphan_idx % inodes_per_ob; if (WARN_ON_ONCE(blk >= oi->of_blocks)) goto out; ret = ext4_journal_get_write_access(handle, inode->i_sb, oi->of_binfo[blk].ob_bh, EXT4_JTR_ORPHAN_FILE); if (ret) goto out; bdata = (__le32 *)(oi->of_binfo[blk].ob_bh->b_data); bdata[off] = 0; atomic_inc(&oi->of_binfo[blk].ob_free_entries); ret = ext4_handle_dirty_metadata(handle, NULL, oi->of_binfo[blk].ob_bh); out: ext4_clear_inode_state(inode, EXT4_STATE_ORPHAN_FILE); INIT_LIST_HEAD(&EXT4_I(inode)->i_orphan); return ret; } /* * ext4_orphan_del() removes an unlinked or truncated inode from the list * of such inodes stored on disk, because it is finally being cleaned up. */ int ext4_orphan_del(handle_t *handle, struct inode *inode) { struct list_head *prev; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 ino_next; struct ext4_iloc iloc; int err = 0; if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS)) return 0; WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) && !inode_is_locked(inode)); if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE)) return ext4_orphan_file_del(handle, inode); /* Do this quick check before taking global s_orphan_lock. */ if (list_empty(&ei->i_orphan)) return 0; if (handle) { /* Grab inode buffer early before taking global s_orphan_lock */ err = ext4_reserve_inode_write(handle, inode, &iloc); } mutex_lock(&sbi->s_orphan_lock); ext4_debug("remove inode %lu from orphan list\n", inode->i_ino); prev = ei->i_orphan.prev; list_del_init(&ei->i_orphan); /* If we're on an error path, we may not have a valid * transaction handle with which to update the orphan list on * disk, but we still need to remove the inode from the linked * list in memory. 
*/ if (!handle || err) { mutex_unlock(&sbi->s_orphan_lock); goto out_err; } ino_next = NEXT_ORPHAN(inode); if (prev == &sbi->s_orphan) { ext4_debug("superblock will point to %u\n", ino_next); BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, inode->i_sb, sbi->s_sbh, EXT4_JTR_NONE); if (err) { mutex_unlock(&sbi->s_orphan_lock); goto out_brelse; } lock_buffer(sbi->s_sbh); sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); ext4_superblock_csum_set(inode->i_sb); unlock_buffer(sbi->s_sbh); mutex_unlock(&sbi->s_orphan_lock); err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); } else { struct ext4_iloc iloc2; struct inode *i_prev = &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode; ext4_debug("orphan inode %lu will point to %u\n", i_prev->i_ino, ino_next); err = ext4_reserve_inode_write(handle, i_prev, &iloc2); if (err) { mutex_unlock(&sbi->s_orphan_lock); goto out_brelse; } NEXT_ORPHAN(i_prev) = ino_next; err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2); mutex_unlock(&sbi->s_orphan_lock); } if (err) goto out_brelse; NEXT_ORPHAN(inode) = 0; err = ext4_mark_iloc_dirty(handle, inode, &iloc); out_err: ext4_std_error(inode->i_sb, err); return err; out_brelse: brelse(iloc.bh); goto out_err; } #ifdef CONFIG_QUOTA static int ext4_quota_on_mount(struct super_block *sb, int type) { return dquot_quota_on_mount(sb, rcu_dereference_protected(EXT4_SB(sb)->s_qf_names[type], lockdep_is_held(&sb->s_umount)), EXT4_SB(sb)->s_jquota_fmt, type); } #endif static void ext4_process_orphan(struct inode *inode, int *nr_truncates, int *nr_orphans) { struct super_block *sb = inode->i_sb; int ret; dquot_initialize(inode); if (inode->i_nlink) { if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "%s: truncating inode %lu to %lld bytes", __func__, inode->i_ino, inode->i_size); ext4_debug("truncating inode %lu to %lld bytes\n", inode->i_ino, inode->i_size); inode_lock(inode); truncate_inode_pages(inode->i_mapping, inode->i_size); ret = ext4_truncate(inode); if (ret) { /* * We need to clean up the in-core orphan list * manually if ext4_truncate() failed to get a * transaction handle. */ ext4_orphan_del(NULL, inode); ext4_std_error(inode->i_sb, ret); } inode_unlock(inode); (*nr_truncates)++; } else { if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "%s: deleting unreferenced inode %lu", __func__, inode->i_ino); ext4_debug("deleting unreferenced inode %lu\n", inode->i_ino); (*nr_orphans)++; } iput(inode); /* The delete magic happens here! */ } /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at * the superblock) which were deleted from all directories, but held open by * a process at the time of a crash. We walk the list and try to delete these * inodes at recovery time (only with a read-write filesystem). * * In order to keep the orphan inode chain consistent during traversal (in * case of crash during recovery), we link each inode into the superblock * orphan list_head and handle it the same way as an inode deletion during * normal operation (which journals the operations for us). * * We only do an iget() and an iput() on each inode, which is very safe if we * accidentally point at an in-use or already deleted inode. The worst that * can happen in this case is that we get a "bit already cleared" message from * ext4_free_inode(). The only reason we would point at a wrong inode is if * e2fsck was run on this filesystem, and it must have already done the orphan * inode cleanup for us, so we can safely abort without any further action. 
*/ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es) { unsigned int s_flags = sb->s_flags; int nr_orphans = 0, nr_truncates = 0; struct inode *inode; int i, j; #ifdef CONFIG_QUOTA int quota_update = 0; #endif __le32 *bdata; struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info; int inodes_per_ob = ext4_inodes_per_orphan_block(sb); if (!es->s_last_orphan && !oi->of_blocks) { ext4_debug("no orphan inodes to clean up\n"); return; } if (bdev_read_only(sb->s_bdev)) { ext4_msg(sb, KERN_ERR, "write access " "unavailable, skipping orphan cleanup"); return; } /* Check if feature set would not allow a r/w mount */ if (!ext4_feature_set_ok(sb, 0)) { ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " "unknown ROCOMPAT features"); return; } if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { /* don't clear list on RO mount w/ errors */ if (es->s_last_orphan && !(s_flags & SB_RDONLY)) { ext4_msg(sb, KERN_INFO, "Errors on filesystem, " "clearing orphan list."); es->s_last_orphan = 0; } ext4_debug("Skipping orphan recovery on fs with errors.\n"); return; } if (s_flags & SB_RDONLY) { ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); sb->s_flags &= ~SB_RDONLY; } #ifdef CONFIG_QUOTA /* * Turn on quotas which were not enabled for read-only mounts if * filesystem has quota feature, so that they are updated correctly. */ if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) { int ret = ext4_enable_quotas(sb); if (!ret) quota_update = 1; else ext4_msg(sb, KERN_ERR, "Cannot turn on quotas: error %d", ret); } /* Turn on journaled quotas used for old style */ for (i = 0; i < EXT4_MAXQUOTAS; i++) { if (EXT4_SB(sb)->s_qf_names[i]) { int ret = ext4_quota_on_mount(sb, i); if (!ret) quota_update = 1; else ext4_msg(sb, KERN_ERR, "Cannot turn on journaled " "quota: type %d: error %d", i, ret); } } #endif while (es->s_last_orphan) { /* * We may have encountered an error during cleanup; if * so, skip the rest. */ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { ext4_debug("Skipping orphan recovery on fs with errors.\n"); es->s_last_orphan = 0; break; } inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); if (IS_ERR(inode)) { es->s_last_orphan = 0; break; } list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); ext4_process_orphan(inode, &nr_truncates, &nr_orphans); } for (i = 0; i < oi->of_blocks; i++) { bdata = (__le32 *)(oi->of_binfo[i].ob_bh->b_data); for (j = 0; j < inodes_per_ob; j++) { if (!bdata[j]) continue; inode = ext4_orphan_get(sb, le32_to_cpu(bdata[j])); if (IS_ERR(inode)) continue; ext4_set_inode_state(inode, EXT4_STATE_ORPHAN_FILE); EXT4_I(inode)->i_orphan_idx = i * inodes_per_ob + j; ext4_process_orphan(inode, &nr_truncates, &nr_orphans); } } #define PLURAL(x) (x), ((x) == 1) ?
"" : "s" if (nr_orphans) ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted", PLURAL(nr_orphans)); if (nr_truncates) ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up", PLURAL(nr_truncates)); #ifdef CONFIG_QUOTA /* Turn off quotas if they were enabled for orphan cleanup */ if (quota_update) { for (i = 0; i < EXT4_MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) dquot_quota_off(sb, i); } } #endif sb->s_flags = s_flags; /* Restore SB_RDONLY status */ } void ext4_release_orphan_info(struct super_block *sb) { int i; struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info; if (!oi->of_blocks) return; for (i = 0; i < oi->of_blocks; i++) brelse(oi->of_binfo[i].ob_bh); kfree(oi->of_binfo); } static struct ext4_orphan_block_tail *ext4_orphan_block_tail( struct super_block *sb, struct buffer_head *bh) { return (struct ext4_orphan_block_tail *)(bh->b_data + sb->s_blocksize - sizeof(struct ext4_orphan_block_tail)); } static int ext4_orphan_file_block_csum_verify(struct super_block *sb, struct buffer_head *bh) { __u32 calculated; int inodes_per_ob = ext4_inodes_per_orphan_block(sb); struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info; struct ext4_orphan_block_tail *ot; __le64 dsk_block_nr = cpu_to_le64(bh->b_blocknr); if (!ext4_has_feature_metadata_csum(sb)) return 1; ot = ext4_orphan_block_tail(sb, bh); calculated = ext4_chksum(oi->of_csum_seed, (__u8 *)&dsk_block_nr, sizeof(dsk_block_nr)); calculated = ext4_chksum(calculated, (__u8 *)bh->b_data, inodes_per_ob * sizeof(__u32)); return le32_to_cpu(ot->ob_checksum) == calculated; } /* This gets called only when checksumming is enabled */ void ext4_orphan_file_block_trigger(struct jbd2_buffer_trigger_type *triggers, struct buffer_head *bh, void *data, size_t size) { struct super_block *sb = EXT4_TRIGGER(triggers)->sb; __u32 csum; int inodes_per_ob = ext4_inodes_per_orphan_block(sb); struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info; struct ext4_orphan_block_tail *ot; __le64 dsk_block_nr = cpu_to_le64(bh->b_blocknr); csum = ext4_chksum(oi->of_csum_seed, (__u8 *)&dsk_block_nr, sizeof(dsk_block_nr)); csum = ext4_chksum(csum, (__u8 *)data, inodes_per_ob * sizeof(__u32)); ot = ext4_orphan_block_tail(sb, bh); ot->ob_checksum = cpu_to_le32(csum); } int ext4_init_orphan_info(struct super_block *sb) { struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info; struct inode *inode; int i, j; int ret; int free; __le32 *bdata; int inodes_per_ob = ext4_inodes_per_orphan_block(sb); struct ext4_orphan_block_tail *ot; ino_t orphan_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_orphan_file_inum); if (!ext4_has_feature_orphan_file(sb)) return 0; inode = ext4_iget(sb, orphan_ino, EXT4_IGET_SPECIAL); if (IS_ERR(inode)) { ext4_msg(sb, KERN_ERR, "get orphan inode failed"); return PTR_ERR(inode); } oi->of_blocks = inode->i_size >> sb->s_blocksize_bits; oi->of_csum_seed = EXT4_I(inode)->i_csum_seed; oi->of_binfo = kmalloc(oi->of_blocks*sizeof(struct ext4_orphan_block), GFP_KERNEL); if (!oi->of_binfo) { ret = -ENOMEM; goto out_put; } for (i = 0; i < oi->of_blocks; i++) { oi->of_binfo[i].ob_bh = ext4_bread(NULL, inode, i, 0); if (IS_ERR(oi->of_binfo[i].ob_bh)) { ret = PTR_ERR(oi->of_binfo[i].ob_bh); goto out_free; } if (!oi->of_binfo[i].ob_bh) { ret = -EIO; goto out_free; } ot = ext4_orphan_block_tail(sb, oi->of_binfo[i].ob_bh); if (le32_to_cpu(ot->ob_magic) != EXT4_ORPHAN_BLOCK_MAGIC) { ext4_error(sb, "orphan file block %d: bad magic", i); ret = -EIO; goto out_free; } if (!ext4_orphan_file_block_csum_verify(sb, oi->of_binfo[i].ob_bh)) { ext4_error(sb, "orphan file block %d: 
bad checksum", i); ret = -EIO; goto out_free; } bdata = (__le32 *)(oi->of_binfo[i].ob_bh->b_data); free = 0; for (j = 0; j < inodes_per_ob; j++) if (bdata[j] == 0) free++; atomic_set(&oi->of_binfo[i].ob_free_entries, free); } iput(inode); return 0; out_free: for (i--; i >= 0; i--) brelse(oi->of_binfo[i].ob_bh); kfree(oi->of_binfo); out_put: iput(inode); return ret; } int ext4_orphan_file_empty(struct super_block *sb) { struct ext4_orphan_info *oi = &EXT4_SB(sb)->s_orphan_info; int i; int inodes_per_ob = ext4_inodes_per_orphan_block(sb); if (!ext4_has_feature_orphan_file(sb)) return 1; for (i = 0; i < oi->of_blocks; i++) if (atomic_read(&oi->of_binfo[i].ob_free_entries) != inodes_per_ob) return 0; return 1; } |
// SPDX-License-Identifier: GPL-2.0-only /* * AppArmor security module * * This file contains AppArmor ipc mediation * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2017 Canonical Ltd. */ #include <linux/gfp.h> #include "include/audit.h" #include "include/capability.h" #include "include/cred.h" #include "include/policy.h" #include "include/ipc.h" #include "include/sig_names.h" static inline int map_signal_num(int sig) { if (sig > SIGRTMAX) return SIGUNKNOWN; else if (sig >= SIGRTMIN) return sig - SIGRTMIN + SIGRT_BASE; else if (sig < MAXMAPPED_SIG) return sig_map[sig]; return SIGUNKNOWN; } /** * audit_signal_mask - convert mask to permission string * @mask: permission mask to convert * * Returns: pointer to static string */ static const char *audit_signal_mask(u32 mask) { if (mask & MAY_READ) return "receive"; if (mask & MAY_WRITE) return "send"; return ""; } /** * audit_signal_cb() - callback for signal specific audit fields * @ab: audit_buffer (NOT NULL) * @va: audit struct to audit values of (NOT NULL) */ static void audit_signal_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; struct apparmor_audit_data *ad = aad(sa); if (ad->request & AA_SIGNAL_PERM_MASK) { audit_log_format(ab, " requested_mask=\"%s\"", audit_signal_mask(ad->request)); if (ad->denied & AA_SIGNAL_PERM_MASK) { audit_log_format(ab, " denied_mask=\"%s\"", audit_signal_mask(ad->denied)); } } if (ad->signal == SIGUNKNOWN) audit_log_format(ab, " signal=unknown(%d)", ad->unmappedsig); else if (ad->signal < MAXMAPPED_SIGNAME) audit_log_format(ab, " signal=%s", sig_names[ad->signal]); else audit_log_format(ab, " signal=rtmin+%d", ad->signal - SIGRT_BASE); audit_log_format(ab, " peer="); aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer, FLAGS_NONE, GFP_ATOMIC); } static int profile_signal_perm(const struct cred *cred, struct aa_profile *profile, struct aa_label *peer, u32 request, struct apparmor_audit_data *ad) { struct aa_ruleset *rules = list_first_entry(&profile->rules, typeof(*rules), list); struct aa_perms perms; aa_state_t state; if (profile_unconfined(profile) || !ANY_RULE_MEDIATES(&profile->rules, AA_CLASS_SIGNAL)) return 0; ad->subj_cred = cred; ad->peer = peer; /* TODO: secondary cache check <profile, profile, perm> */ state = aa_dfa_next(rules->policy->dfa, rules->policy->start[AA_CLASS_SIGNAL], ad->signal); aa_label_match(profile, rules, peer, state, false, request, &perms); aa_apply_modes_to_perms(profile, &perms); return aa_check_perms(profile, &perms, request, ad, audit_signal_cb); } int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender, const struct cred *target_cred, struct aa_label *target, int sig) { struct aa_profile *profile; DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL); ad.signal = map_signal_num(sig); ad.unmappedsig = sig; return xcheck_labels(sender, target, profile, profile_signal_perm(subj_cred, profile, target, MAY_WRITE, &ad), profile_signal_perm(target_cred, profile, sender, MAY_READ, &ad)); }
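Editor's note: map_signal_num() above folds the sparse signal number space into a dense index: low signals go through the sig_map[] table, realtime signals are rebased onto a contiguous range starting at SIGRT_BASE, and everything else collapses into SIGUNKNOWN, which is what lets audit_signal_cb() print either a table name or "rtmin+N". Here is a small compilable userspace sketch of the same three-way fold; the table size and base constants are assumptions for illustration, not AppArmor's actual values, and the sig_map[] indirection is left out.

#include <signal.h>
#include <stdio.h>

#define MAXMAPPED 32		/* size of the fixed name table; assumed value */
#define RT_BASE   MAXMAPPED	/* where rebased realtime signals start; assumed */
#define SIG_UNKNOWN (RT_BASE + 64)	/* single bucket for everything else */

/* Same three-way fold as map_signal_num() above, minus the sig_map[] table */
static int map_sig(int sig)
{
	if (sig > SIGRTMAX)
		return SIG_UNKNOWN;
	if (sig >= SIGRTMIN)
		return sig - SIGRTMIN + RT_BASE;	/* prints as "rtmin+N" */
	if (sig < MAXMAPPED)
		return sig;	/* the kernel indirects through sig_map[] here */
	return SIG_UNKNOWN;
}

int main(void)
{
	int m = map_sig(SIGRTMIN + 3);

	printf("SIGKILL maps to %d\n", map_sig(SIGKILL));
	printf("SIGRTMIN+3 maps to %d (rtmin+%d)\n", m, m - RT_BASE);
	return 0;
}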
// SPDX-License-Identifier: GPL-2.0-or-later /* * sch_plug.c Queue traffic until an explicit release command * * There are two ways to use this qdisc: * 1. A simple "instantaneous" plug/unplug operation, by issuing an alternating * sequence of TCQ_PLUG_BUFFER & TCQ_PLUG_RELEASE_INDEFINITE commands. * * 2. For network output buffering (a.k.a. output commit) functionality. * The output commit property is commonly used by applications using checkpoint- * based fault-tolerance to ensure that the checkpoint from which a system * is being restored is consistent w.r.t. the outside world. * * Consider, for example, Remus - a Virtual Machine checkpointing system, * wherein a VM is checkpointed, say, every 50ms. The checkpoint is replicated * asynchronously to the backup host, while the VM continues executing the * next epoch speculatively. * * The following is a typical sequence of output buffer operations: * 1. At epoch i, start_buffer(i) * 2. At end of epoch i (i.e. after 50ms): * 2.1 Stop VM and take checkpoint(i). * 2.2 start_buffer(i+1) and resume VM * 3. While speculatively executing epoch(i+1), asynchronously replicate * checkpoint(i) to backup host. * 4. When checkpoint_ack(i) is received from backup, release_buffer(i) * Thus, this Qdisc would receive the following sequence of commands: * TCQ_PLUG_BUFFER (epoch i) * .. TCQ_PLUG_BUFFER (epoch i+1) * ....TCQ_PLUG_RELEASE_ONE (epoch i) * ......TCQ_PLUG_BUFFER (epoch i+2) * ........ */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> /* * State of the queue, when used for network output buffering: * * plug(i+1) plug(i) head * ------------------+--------------------+----------------> * | | * | | * pkts_current_epoch| pkts_last_epoch |pkts_to_release * ----------------->|<--------+--------->|+---------------> * v v * */ struct plug_sched_data { /* If true, the dequeue function releases all packets * from head to end of the queue. The queue turns into * a pass-through queue for newly arriving packets. */ bool unplug_indefinite; bool throttled; /* Queue Limit in bytes */ u32 limit; /* Number of packets (output) from the current speculatively * executing epoch. */ u32 pkts_current_epoch; /* Number of packets corresponding to the recently finished * epoch. These will be released when we receive a * TCQ_PLUG_RELEASE_ONE command. This command is typically * issued after committing a checkpoint at the target. */ u32 pkts_last_epoch; /* * Number of packets from the head of the queue that can * be released (committed checkpoint).
*/ u32 pkts_to_release; }; static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct plug_sched_data *q = qdisc_priv(sch); if (likely(sch->qstats.backlog + skb->len <= q->limit)) { if (!q->unplug_indefinite) q->pkts_current_epoch++; return qdisc_enqueue_tail(skb, sch); } return qdisc_drop(skb, sch, to_free); } static struct sk_buff *plug_dequeue(struct Qdisc *sch) { struct plug_sched_data *q = qdisc_priv(sch); if (q->throttled) return NULL; if (!q->unplug_indefinite) { if (!q->pkts_to_release) { /* No more packets to dequeue. Block the queue * and wait for the next release command. */ q->throttled = true; return NULL; } q->pkts_to_release--; } return qdisc_dequeue_head(sch); } static int plug_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct plug_sched_data *q = qdisc_priv(sch); q->pkts_current_epoch = 0; q->pkts_last_epoch = 0; q->pkts_to_release = 0; q->unplug_indefinite = false; if (opt == NULL) { q->limit = qdisc_dev(sch)->tx_queue_len * psched_mtu(qdisc_dev(sch)); } else { struct tc_plug_qopt *ctl = nla_data(opt); if (nla_len(opt) < sizeof(*ctl)) return -EINVAL; q->limit = ctl->limit; } q->throttled = true; return 0; } /* Receives 4 types of messages: * TCQ_PLUG_BUFFER: Insert a plug into the queue and * buffer any incoming packets * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head * to the beginning of the next plug. * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. * Stop buffering packets until the next TCQ_PLUG_BUFFER * command is received (just act as a pass-through queue). * TCQ_PLUG_LIMIT: Increase/decrease queue size */ static int plug_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct plug_sched_data *q = qdisc_priv(sch); struct tc_plug_qopt *msg; msg = nla_data(opt); if (nla_len(opt) < sizeof(*msg)) return -EINVAL; switch (msg->action) { case TCQ_PLUG_BUFFER: /* Save size of the current buffer */ q->pkts_last_epoch = q->pkts_current_epoch; q->pkts_current_epoch = 0; if (q->unplug_indefinite) q->throttled = true; q->unplug_indefinite = false; break; case TCQ_PLUG_RELEASE_ONE: /* Add packets from the last complete buffer to the * packets to be released set. */ q->pkts_to_release += q->pkts_last_epoch; q->pkts_last_epoch = 0; q->throttled = false; netif_schedule_queue(sch->dev_queue); break; case TCQ_PLUG_RELEASE_INDEFINITE: q->unplug_indefinite = true; q->pkts_to_release = 0; q->pkts_last_epoch = 0; q->pkts_current_epoch = 0; q->throttled = false; netif_schedule_queue(sch->dev_queue); break; case TCQ_PLUG_LIMIT: /* Limit is supplied in bytes */ q->limit = msg->limit; break; default: return -EINVAL; } return 0; } static struct Qdisc_ops plug_qdisc_ops __read_mostly = { .id = "plug", .priv_size = sizeof(struct plug_sched_data), .enqueue = plug_enqueue, .dequeue = plug_dequeue, .peek = qdisc_peek_dequeued, .init = plug_init, .change = plug_change, .reset = qdisc_reset_queue, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("plug"); static int __init plug_module_init(void) { return register_qdisc(&plug_qdisc_ops); } static void __exit plug_module_exit(void) { unregister_qdisc(&plug_qdisc_ops); } module_init(plug_module_init) module_exit(plug_module_exit) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Qdisc to plug and unplug traffic via netlink control");
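Editor's note: the buffering protocol implemented by plug_change() is easiest to follow as bookkeeping across three counters. The standalone sketch below replays the command sequence from the header comment (plug epoch i, plug epoch i+1, release epoch i) and shows packets migrating from pkts_current_epoch to pkts_last_epoch to pkts_to_release; it models only the counters, not the skb queue, byte limit, or throttling.

#include <stdio.h>

struct plug {
	unsigned int cur;	/* pkts_current_epoch */
	unsigned int last;	/* pkts_last_epoch */
	unsigned int release;	/* pkts_to_release */
};

static void enqueue(struct plug *q, unsigned int n) { q->cur += n; }

/* TCQ_PLUG_BUFFER: close the current epoch and start buffering a new one */
static void plug_buffer(struct plug *q)
{
	q->last = q->cur;
	q->cur = 0;
}

/* TCQ_PLUG_RELEASE_ONE: the last closed epoch is committed; allow its dequeue */
static void release_one(struct plug *q)
{
	q->release += q->last;
	q->last = 0;
}

static void show(const struct plug *q, const char *when)
{
	printf("%-14s cur=%u last=%u to_release=%u\n",
	       when, q->cur, q->last, q->release);
}

int main(void)
{
	struct plug q = { 0, 0, 0 };

	plug_buffer(&q);	/* start_buffer(i) */
	enqueue(&q, 5);		/* VM output during epoch i */
	show(&q, "epoch i");
	plug_buffer(&q);	/* checkpoint(i) taken; start_buffer(i+1) */
	enqueue(&q, 3);
	show(&q, "epoch i+1");
	release_one(&q);	/* checkpoint_ack(i): release_buffer(i) */
	show(&q, "after ack(i)");
	return 0;
}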
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UIDGID_H #define _LINUX_UIDGID_H /* * A set of types for the internal kernel types representing uids and gids. * * The types defined in this header allow distinguishing which uids and gids in * the kernel are values used by userspace and which uid and gid values are * the internal kernel values. With the addition of user namespaces the values * can be different. Using the type system makes it possible for the compiler * to detect when we overlook these differences. * */ #include <linux/uidgid_types.h> #include <linux/highuid.h> struct user_namespace; extern struct user_namespace init_user_ns; struct uid_gid_map; #define KUIDT_INIT(value) (kuid_t){ value } #define KGIDT_INIT(value) (kgid_t){ value } #ifdef CONFIG_MULTIUSER static inline uid_t __kuid_val(kuid_t uid) { return uid.val; } static inline gid_t __kgid_val(kgid_t gid) { return gid.val; } #else static inline uid_t __kuid_val(kuid_t uid) { return 0; } static inline gid_t __kgid_val(kgid_t gid) { return 0; } #endif #define GLOBAL_ROOT_UID KUIDT_INIT(0) #define GLOBAL_ROOT_GID KGIDT_INIT(0) #define INVALID_UID KUIDT_INIT(-1) #define INVALID_GID KGIDT_INIT(-1) static inline bool uid_eq(kuid_t left, kuid_t right) { return __kuid_val(left) == __kuid_val(right); } static inline bool gid_eq(kgid_t left, kgid_t right) { return __kgid_val(left) == __kgid_val(right); } static inline bool uid_gt(kuid_t left, kuid_t right) { return __kuid_val(left) > __kuid_val(right); } static inline bool gid_gt(kgid_t left, kgid_t right) { return __kgid_val(left) > __kgid_val(right); } static inline bool uid_gte(kuid_t left, kuid_t right) { return __kuid_val(left) >= __kuid_val(right); } static inline bool gid_gte(kgid_t left, kgid_t right) { return __kgid_val(left) >= __kgid_val(right); } static inline bool uid_lt(kuid_t left, kuid_t right) { return __kuid_val(left) < __kuid_val(right); } static inline bool gid_lt(kgid_t left, kgid_t right) { return __kgid_val(left) < __kgid_val(right); } static inline bool uid_lte(kuid_t left, kuid_t right) { return __kuid_val(left) <= __kuid_val(right); } static inline bool gid_lte(kgid_t left, kgid_t right) { return __kgid_val(left) <= __kgid_val(right); } static inline bool uid_valid(kuid_t uid) { return __kuid_val(uid) != (uid_t) -1; } static inline bool gid_valid(kgid_t gid) { return __kgid_val(gid) != (gid_t) -1; } #ifdef CONFIG_USER_NS extern kuid_t make_kuid(struct user_namespace *from, uid_t uid); extern kgid_t make_kgid(struct user_namespace *from, gid_t gid); extern uid_t from_kuid(struct user_namespace *to, kuid_t uid); extern gid_t from_kgid(struct user_namespace *to, kgid_t gid); extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid); extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid); static inline bool kuid_has_mapping(struct
user_namespace *ns, kuid_t uid) { return from_kuid(ns, uid) != (uid_t) -1; } static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) { return from_kgid(ns, gid) != (gid_t) -1; } u32 map_id_down(struct uid_gid_map *map, u32 id); u32 map_id_up(struct uid_gid_map *map, u32 id); u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count); #else static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid) { return KUIDT_INIT(uid); } static inline kgid_t make_kgid(struct user_namespace *from, gid_t gid) { return KGIDT_INIT(gid); } static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid) { return __kuid_val(kuid); } static inline gid_t from_kgid(struct user_namespace *to, kgid_t kgid) { return __kgid_val(kgid); } static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid) { uid_t uid = from_kuid(to, kuid); if (uid == (uid_t)-1) uid = overflowuid; return uid; } static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid) { gid_t gid = from_kgid(to, kgid); if (gid == (gid_t)-1) gid = overflowgid; return gid; } static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid) { return uid_valid(uid); } static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) { return gid_valid(gid); } static inline u32 map_id_down(struct uid_gid_map *map, u32 id) { return id; } static inline u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count) { return id; } static inline u32 map_id_up(struct uid_gid_map *map, u32 id) { return id; } #endif /* CONFIG_USER_NS */ #endif /* _LINUX_UIDGID_H */
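Editor's note: the whole point of kuid_t/kgid_t above is that they are single-member structs rather than bare integers, so the compiler rejects accidental mixing of userspace-visible ids and kernel-internal ids; every conversion has to go through make_kuid()/from_kuid(). A compilable userspace sketch of the same trick follows; the offset-based "namespace mapping" is a stand-in for illustration only, not the real uid_gid_map lookup, and the demo names avoid clashing with the system's uid_t.

#include <stdio.h>

typedef unsigned int xuid_t;			/* what userspace sees */
typedef struct { xuid_t val; } kxuid_t;		/* kernel-internal value */

#define KXUID_INIT(v) ((kxuid_t){ v })

static xuid_t kxuid_val(kxuid_t k) { return k.val; }

/* Stand-in for make_kuid(): apply a per-"namespace" base offset */
static kxuid_t make_kxuid(xuid_t ns_base, xuid_t uid)
{
	return KXUID_INIT(ns_base + uid);
}

/* Stand-in for from_kuid(): invert the mapping, or fail with -1 */
static xuid_t from_kxuid(xuid_t ns_base, kxuid_t k)
{
	if (k.val < ns_base)
		return (xuid_t)-1;	/* no mapping in this "namespace" */
	return k.val - ns_base;
}

int main(void)
{
	kxuid_t k = make_kxuid(100000, 0);	/* "container root" */

	printf("kernel-internal: %u, in-namespace: %u\n",
	       kxuid_val(k), from_kxuid(100000, k));
	/* k + 1; or k == 0; would not compile: kxuid_t is not an integer */
	return 0;
}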
// SPDX-License-Identifier: GPL-2.0-only /* * linux/mm/swap.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ /* * This file contains the default values for the operation of the * Linux VM subsystem. Fine-tuning documentation can be found in * Documentation/admin-guide/sysctl/vm.rst. * Started 18.12.91 * Swap aging added 23.2.95, Stephen Tweedie. * Buffermem limits added 12.3.98, Rik van Riel. */ #include <linux/mm.h> #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/init.h> #include <linux/export.h> #include <linux/mm_inline.h> #include <linux/percpu_counter.h> #include <linux/memremap.h> #include <linux/percpu.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/backing-dev.h> #include <linux/memcontrol.h> #include <linux/gfp.h> #include <linux/uio.h> #include <linux/hugetlb.h> #include <linux/page_idle.h> #include <linux/local_lock.h> #include <linux/buffer_head.h> #include "internal.h" #define CREATE_TRACE_POINTS #include <trace/events/pagemap.h> /* How many pages do we try to swap or page in/out together? As a power of 2 */ int page_cluster; static const int page_cluster_max = 31; struct cpu_fbatches { /* * The following folio batches are grouped together because they are protected * by disabling preemption (and interrupts remain enabled).
*/ local_lock_t lock; struct folio_batch lru_add; struct folio_batch lru_deactivate_file; struct folio_batch lru_deactivate; struct folio_batch lru_lazyfree; #ifdef CONFIG_SMP struct folio_batch lru_activate; #endif /* Protecting the following batches which require disabling interrupts */ local_lock_t lock_irq; struct folio_batch lru_move_tail; }; static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = { .lock = INIT_LOCAL_LOCK(lock), .lock_irq = INIT_LOCAL_LOCK(lock_irq), }; static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp, unsigned long *flagsp) { if (folio_test_lru(folio)) { folio_lruvec_relock_irqsave(folio, lruvecp, flagsp); lruvec_del_folio(*lruvecp, folio); __folio_clear_lru_flags(folio); } } /* * This path almost never happens for VM activity - pages are normally freed * in batches. But it gets used by networking - and for compound pages. */ static void page_cache_release(struct folio *folio) { struct lruvec *lruvec = NULL; unsigned long flags; __page_cache_release(folio, &lruvec, &flags); if (lruvec) unlock_page_lruvec_irqrestore(lruvec, flags); } void __folio_put(struct folio *folio) { if (unlikely(folio_is_zone_device(folio))) { free_zone_device_folio(folio); return; } if (folio_test_hugetlb(folio)) { free_huge_folio(folio); return; } page_cache_release(folio); folio_unqueue_deferred_split(folio); mem_cgroup_uncharge(folio); free_frozen_pages(&folio->page, folio_order(folio)); } EXPORT_SYMBOL(__folio_put); typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio); static void lru_add(struct lruvec *lruvec, struct folio *folio) { int was_unevictable = folio_test_clear_unevictable(folio); long nr_pages = folio_nr_pages(folio); VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); /* * Is an smp_mb__after_atomic() still required here, before * folio_evictable() tests the mlocked flag, to rule out the possibility * of stranding an evictable folio on an unevictable LRU? I think * not, because __munlock_folio() only clears the mlocked flag * while the LRU lock is held. * * (That is not true of __page_cache_release(), and not necessarily * true of folios_put(): but those only clear the mlocked flag after * folio_put_testzero() has excluded any other users of the folio.) */ if (folio_evictable(folio)) { if (was_unevictable) __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages); } else { folio_clear_active(folio); folio_set_unevictable(folio); /* * folio->mlock_count = !!folio_test_mlocked(folio)? * But that leaves __mlock_folio() in doubt whether another * actor has already counted the mlock or not. Err on the * safe side, underestimate, let page reclaim fix it, rather * than leaving a page on the unevictable LRU indefinitely. 
*/ folio->mlock_count = 0; if (!was_unevictable) __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages); } lruvec_add_folio(lruvec, folio); trace_mm_lru_insertion(folio); } static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) { int i; struct lruvec *lruvec = NULL; unsigned long flags = 0; for (i = 0; i < folio_batch_count(fbatch); i++) { struct folio *folio = fbatch->folios[i]; folio_lruvec_relock_irqsave(folio, &lruvec, &flags); move_fn(lruvec, folio); folio_set_lru(folio); } if (lruvec) unlock_page_lruvec_irqrestore(lruvec, flags); folios_put(fbatch); } static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch, struct folio *folio, move_fn_t move_fn, bool on_lru, bool disable_irq) { unsigned long flags; if (on_lru && !folio_test_clear_lru(folio)) return; folio_get(folio); if (disable_irq) local_lock_irqsave(&cpu_fbatches.lock_irq, flags); else local_lock(&cpu_fbatches.lock); if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) || lru_cache_disabled()) folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn); if (disable_irq) local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags); else local_unlock(&cpu_fbatches.lock); } #define folio_batch_add_and_move(folio, op, on_lru) \ __folio_batch_add_and_move( \ &cpu_fbatches.op, \ folio, \ op, \ on_lru, \ offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \ ) static void lru_move_tail(struct lruvec *lruvec, struct folio *folio) { if (folio_test_unevictable(folio)) return; lruvec_del_folio(lruvec, folio); folio_clear_active(folio); lruvec_add_folio_tail(lruvec, folio); __count_vm_events(PGROTATED, folio_nr_pages(folio)); } /* * Writeback is about to end against a folio which has been marked for * immediate reclaim. If it still appears to be reclaimable, move it * to the tail of the inactive list. * * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races. */ void folio_rotate_reclaimable(struct folio *folio) { if (folio_test_locked(folio) || folio_test_dirty(folio) || folio_test_unevictable(folio)) return; folio_batch_add_and_move(folio, lru_move_tail, true); } void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_io, unsigned int nr_rotated) { unsigned long cost; /* * Reflect the relative cost of incurring IO and spending CPU * time on rotations. This doesn't attempt to make a precise * comparison, it just says: if reloads are about comparable * between the LRU lists, or rotations are overwhelmingly * different between them, adjust scan balance for CPU work. */ cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated; do { unsigned long lrusize; /* * Holding lruvec->lru_lock is safe here, since we are either * 1) coming from reclaim, which already pins the lruvec, or * 2) coming from a pre-LRU page during refault (which also holds the * rcu lock, so it would be safe even if the page was on the LRU * and could move simultaneously to a new lruvec). */ spin_lock_irq(&lruvec->lru_lock); /* Record cost event */ if (file) lruvec->file_cost += cost; else lruvec->anon_cost += cost; /* * Decay previous events * * Because workloads change over time (and to avoid * overflow) we keep these statistics as a floating * average, which ends up weighing recent refaults * more than old ones.
*/ lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) + lruvec_page_state(lruvec, NR_ACTIVE_ANON) + lruvec_page_state(lruvec, NR_INACTIVE_FILE) + lruvec_page_state(lruvec, NR_ACTIVE_FILE); if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) { lruvec->file_cost /= 2; lruvec->anon_cost /= 2; } spin_unlock_irq(&lruvec->lru_lock); } while ((lruvec = parent_lruvec(lruvec))); } void lru_note_cost_refault(struct folio *folio) { lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio), folio_nr_pages(folio), 0); } static void lru_activate(struct lruvec *lruvec, struct folio *folio) { long nr_pages = folio_nr_pages(folio); if (folio_test_active(folio) || folio_test_unevictable(folio)) return; lruvec_del_folio(lruvec, folio); folio_set_active(folio); lruvec_add_folio(lruvec, folio); trace_mm_lru_activate(folio); __count_vm_events(PGACTIVATE, nr_pages); count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages); } #ifdef CONFIG_SMP static void folio_activate_drain(int cpu) { struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu); if (folio_batch_count(fbatch)) folio_batch_move_lru(fbatch, lru_activate); } void folio_activate(struct folio *folio) { if (folio_test_active(folio) || folio_test_unevictable(folio)) return; folio_batch_add_and_move(folio, lru_activate, true); } #else static inline void folio_activate_drain(int cpu) { } void folio_activate(struct folio *folio) { struct lruvec *lruvec; if (!folio_test_clear_lru(folio)) return; lruvec = folio_lruvec_lock_irq(folio); lru_activate(lruvec, folio); unlock_page_lruvec_irq(lruvec); folio_set_lru(folio); } #endif static void __lru_cache_activate_folio(struct folio *folio) { struct folio_batch *fbatch; int i; local_lock(&cpu_fbatches.lock); fbatch = this_cpu_ptr(&cpu_fbatches.lru_add); /* * Search backwards on the optimistic assumption that the folio being * activated has just been added to this batch. Note that only * the local batch is examined as a !LRU folio could be in the * process of being released, reclaimed, migrated or on a remote * batch that is currently being drained. Furthermore, marking * a remote batch's folio active potentially hits a race where * a folio is marked active just after it is added to the inactive * list causing accounting errors and BUG_ON checks to trigger. 
*/ for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) { struct folio *batch_folio = fbatch->folios[i]; if (batch_folio == folio) { folio_set_active(folio); break; } } local_unlock(&cpu_fbatches.lock); } #ifdef CONFIG_LRU_GEN static void lru_gen_inc_refs(struct folio *folio) { unsigned long new_flags, old_flags = READ_ONCE(folio->flags); if (folio_test_unevictable(folio)) return; /* see the comment on LRU_REFS_FLAGS */ if (!folio_test_referenced(folio)) { set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); return; } do { if ((old_flags & LRU_REFS_MASK) == LRU_REFS_MASK) { if (!folio_test_workingset(folio)) folio_set_workingset(folio); return; } new_flags = old_flags + BIT(LRU_REFS_PGOFF); } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); } static bool lru_gen_clear_refs(struct folio *folio) { struct lru_gen_folio *lrugen; int gen = folio_lru_gen(folio); int type = folio_is_file_lru(folio); if (gen < 0) return true; set_mask_bits(&folio->flags, LRU_REFS_FLAGS | BIT(PG_workingset), 0); lrugen = &folio_lruvec(folio)->lrugen; /* whether we can do without shuffling under the LRU lock */ return gen == lru_gen_from_seq(READ_ONCE(lrugen->min_seq[type])); } #else /* !CONFIG_LRU_GEN */ static void lru_gen_inc_refs(struct folio *folio) { } static bool lru_gen_clear_refs(struct folio *folio) { return false; } #endif /* CONFIG_LRU_GEN */ /** * folio_mark_accessed - Mark a folio as having seen activity. * @folio: The folio to mark. * * This function will perform one of the following transitions: * * * inactive,unreferenced -> inactive,referenced * * inactive,referenced -> active,unreferenced * * active,unreferenced -> active,referenced * * When a newly allocated folio is not yet visible, so safe for non-atomic ops, * __folio_set_referenced() may be substituted for folio_mark_accessed(). */ void folio_mark_accessed(struct folio *folio) { if (folio_test_dropbehind(folio)) return; if (lru_gen_enabled()) { lru_gen_inc_refs(folio); return; } if (!folio_test_referenced(folio)) { folio_set_referenced(folio); } else if (folio_test_unevictable(folio)) { /* * Unevictable pages are on the "LRU_UNEVICTABLE" list. But, * this list is never rotated or maintained, so marking an * unevictable page accessed has no effect. */ } else if (!folio_test_active(folio)) { /* * If the folio is on the LRU, queue it for activation via * cpu_fbatches.lru_activate. Otherwise, assume the folio is in a * folio_batch, mark it active and it'll be moved to the active * LRU on the next drain. */ if (folio_test_lru(folio)) folio_activate(folio); else __lru_cache_activate_folio(folio); folio_clear_referenced(folio); workingset_activation(folio); } if (folio_test_idle(folio)) folio_clear_idle(folio); } EXPORT_SYMBOL(folio_mark_accessed); /** * folio_add_lru - Add a folio to an LRU list. * @folio: The folio to be added to the LRU. * * Queue the folio for addition to the LRU. The decision on whether * to add the page to the [in]active [file|anon] list is deferred until the * folio_batch is drained. This gives a chance for the caller of folio_add_lru() * to have the folio added to the active list using folio_mark_accessed().
*/ void folio_add_lru(struct folio *folio) { VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio); VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); /* see the comment in lru_gen_folio_seq() */ if (lru_gen_enabled() && !folio_test_unevictable(folio) && lru_gen_in_fault() && !(current->flags & PF_MEMALLOC)) folio_set_active(folio); folio_batch_add_and_move(folio, lru_add, false); } EXPORT_SYMBOL(folio_add_lru); /** * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA. * @folio: The folio to be added to the LRU. * @vma: VMA in which the folio is mapped. * * If the VMA is mlocked, @folio is added to the unevictable list. * Otherwise, it is treated the same way as folio_add_lru(). */ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma) { VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED)) mlock_new_folio(folio); else folio_add_lru(folio); } /* * If the folio cannot be invalidated, it is moved to the * inactive list to speed up its reclaim. It is moved to the * head of the list, rather than the tail, to give the flusher * threads some time to write it out, as this is much more * effective than the single-page writeout from reclaim. * * If the folio isn't mapped and dirty/writeback, the folio * could be reclaimed asap using the reclaim flag. * * 1. active, mapped folio -> none * 2. active, dirty/writeback folio -> inactive, head, reclaim * 3. inactive, mapped folio -> none * 4. inactive, dirty/writeback folio -> inactive, head, reclaim * 5. inactive, clean -> inactive, tail * 6. Others -> none * * In 4, it moves to the head of the inactive list so the folio is * written out by flusher threads as this is much more efficient * than the single-page writeout from reclaim. */ static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio) { bool active = folio_test_active(folio) || lru_gen_enabled(); long nr_pages = folio_nr_pages(folio); if (folio_test_unevictable(folio)) return; /* Some processes are using the folio */ if (folio_mapped(folio)) return; lruvec_del_folio(lruvec, folio); folio_clear_active(folio); folio_clear_referenced(folio); if (folio_test_writeback(folio) || folio_test_dirty(folio)) { /* * Setting the reclaim flag could race with * folio_end_writeback() and confuse readahead. But the * race window is _really_ small and it's not a critical * problem. */ lruvec_add_folio(lruvec, folio); folio_set_reclaim(folio); } else { /* * The folio's writeback ended while it was in the batch. * We move that folio to the tail of the inactive list.
*/ lruvec_add_folio_tail(lruvec, folio); __count_vm_events(PGROTATED, nr_pages); } if (active) { __count_vm_events(PGDEACTIVATE, nr_pages); count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages); } } static void lru_deactivate(struct lruvec *lruvec, struct folio *folio) { long nr_pages = folio_nr_pages(folio); if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled())) return; lruvec_del_folio(lruvec, folio); folio_clear_active(folio); folio_clear_referenced(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(PGDEACTIVATE, nr_pages); count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages); } static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio) { long nr_pages = folio_nr_pages(folio); if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) || folio_test_swapcache(folio) || folio_test_unevictable(folio)) return; lruvec_del_folio(lruvec, folio); folio_clear_active(folio); if (lru_gen_enabled()) lru_gen_clear_refs(folio); else folio_clear_referenced(folio); /* * Lazyfree folios are clean anonymous folios. They have * the swapbacked flag cleared, to distinguish them from normal * anonymous folios */ folio_clear_swapbacked(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(PGLAZYFREE, nr_pages); count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages); } /* * Drain pages out of the cpu's folio_batch. * Either "cpu" is the current CPU, and preemption has already been * disabled; or "cpu" is being hot-unplugged, and is already dead. */ void lru_add_drain_cpu(int cpu) { struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu); struct folio_batch *fbatch = &fbatches->lru_add; if (folio_batch_count(fbatch)) folio_batch_move_lru(fbatch, lru_add); fbatch = &fbatches->lru_move_tail; /* Disabling interrupts below acts as a compiler barrier. */ if (data_race(folio_batch_count(fbatch))) { unsigned long flags; /* No harm done if a racing interrupt already did this */ local_lock_irqsave(&cpu_fbatches.lock_irq, flags); folio_batch_move_lru(fbatch, lru_move_tail); local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags); } fbatch = &fbatches->lru_deactivate_file; if (folio_batch_count(fbatch)) folio_batch_move_lru(fbatch, lru_deactivate_file); fbatch = &fbatches->lru_deactivate; if (folio_batch_count(fbatch)) folio_batch_move_lru(fbatch, lru_deactivate); fbatch = &fbatches->lru_lazyfree; if (folio_batch_count(fbatch)) folio_batch_move_lru(fbatch, lru_lazyfree); folio_activate_drain(cpu); } /** * deactivate_file_folio() - Deactivate a file folio. * @folio: Folio to deactivate. * * This function hints to the VM that @folio is a good reclaim candidate, * for example if its invalidation fails due to the folio being dirty * or under writeback. * * Context: Caller holds a reference on the folio. */ void deactivate_file_folio(struct folio *folio) { /* Deactivating an unevictable folio will not accelerate reclaim */ if (folio_test_unevictable(folio)) return; if (lru_gen_enabled() && lru_gen_clear_refs(folio)) return; folio_batch_add_and_move(folio, lru_deactivate_file, true); } /* * folio_deactivate - deactivate a folio * @folio: folio to deactivate * * folio_deactivate() moves @folio to the inactive list if @folio was on the * active list and was not unevictable. This is done to accelerate the * reclaim of @folio. */ void folio_deactivate(struct folio *folio) { if (folio_test_unevictable(folio)) return; if (lru_gen_enabled() ? 
lru_gen_clear_refs(folio) : !folio_test_active(folio)) return; folio_batch_add_and_move(folio, lru_deactivate, true); } /** * folio_mark_lazyfree - make an anon folio lazyfree * @folio: folio to deactivate * * folio_mark_lazyfree() moves @folio to the inactive file list. * This is done to accelerate the reclaim of @folio. */ void folio_mark_lazyfree(struct folio *folio) { if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) || folio_test_swapcache(folio) || folio_test_unevictable(folio)) return; folio_batch_add_and_move(folio, lru_lazyfree, true); } void lru_add_drain(void) { local_lock(&cpu_fbatches.lock); lru_add_drain_cpu(smp_processor_id()); local_unlock(&cpu_fbatches.lock); mlock_drain_local(); } /* * It's called from per-cpu workqueue context in SMP case so * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on * the same cpu. It shouldn't be a problem in !SMP case since * the core is only one and the locks will disable preemption. */ static void lru_add_and_bh_lrus_drain(void) { local_lock(&cpu_fbatches.lock); lru_add_drain_cpu(smp_processor_id()); local_unlock(&cpu_fbatches.lock); invalidate_bh_lrus_cpu(); mlock_drain_local(); } void lru_add_drain_cpu_zone(struct zone *zone) { local_lock(&cpu_fbatches.lock); lru_add_drain_cpu(smp_processor_id()); drain_local_pages(zone); local_unlock(&cpu_fbatches.lock); mlock_drain_local(); } #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); static void lru_add_drain_per_cpu(struct work_struct *dummy) { lru_add_and_bh_lrus_drain(); } static bool cpu_needs_drain(unsigned int cpu) { struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu); /* Check these in order of likelihood that they're not zero */ return folio_batch_count(&fbatches->lru_add) || folio_batch_count(&fbatches->lru_move_tail) || folio_batch_count(&fbatches->lru_deactivate_file) || folio_batch_count(&fbatches->lru_deactivate) || folio_batch_count(&fbatches->lru_lazyfree) || folio_batch_count(&fbatches->lru_activate) || need_mlock_drain(cpu) || has_bh_in_lru(cpu, NULL); } /* * Doesn't need any cpu hotplug locking because we do rely on per-cpu * kworkers being shut down before our page_alloc_cpu_dead callback is * executed on the offlined cpu. * Calling this function with cpu hotplug locks held can actually lead * to obscure indirect dependencies via WQ context. */ static inline void __lru_add_drain_all(bool force_all_cpus) { /* * lru_drain_gen - Global pages generation number * * (A) Definition: global lru_drain_gen = x implies that all generations * 0 < n <= x are already *scheduled* for draining. * * This is an optimization for the highly-contended use case where a * user space workload keeps constantly generating a flow of pages for * each CPU. */ static unsigned int lru_drain_gen; static struct cpumask has_work; static DEFINE_MUTEX(lock); unsigned cpu, this_gen; /* * Make sure nobody triggers this path before mm_percpu_wq is fully * initialized. */ if (WARN_ON(!mm_percpu_wq)) return; /* * Guarantee folio_batch counter stores visible by this CPU * are visible to other CPUs before loading the current drain * generation. */ smp_mb(); /* * (B) Locally cache global LRU draining generation number * * The read barrier ensures that the counter is loaded before the mutex * is taken. It pairs with smp_mb() inside the mutex critical section * at (D). */ this_gen = smp_load_acquire(&lru_drain_gen); mutex_lock(&lock); /* * (C) Exit the draining operation if a newer generation, from another * lru_add_drain_all(), was already scheduled for draining. 
Check (A). */ if (unlikely(this_gen != lru_drain_gen && !force_all_cpus)) goto done; /* * (D) Increment global generation number * * Pairs with smp_load_acquire() at (B), outside of the critical * section. Use a full memory barrier to guarantee that the * new global drain generation number is stored before loading * folio_batch counters. * * This pairing must be done here, before the for_each_online_cpu loop * below which drains the page vectors. * * Let x, y, and z represent some system CPU numbers, where x < y < z. * Assume CPU #z is in the middle of the for_each_online_cpu loop * below and has already reached CPU #y's per-cpu data. CPU #x comes * along, adds some pages to its per-cpu vectors, then calls * lru_add_drain_all(). * * If the paired barrier is done at any later step, e.g. after the * loop, CPU #x will just exit at (C) and miss flushing out all of its * added pages. */ WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1); smp_mb(); cpumask_clear(&has_work); for_each_online_cpu(cpu) { struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); if (cpu_needs_drain(cpu)) { INIT_WORK(work, lru_add_drain_per_cpu); queue_work_on(cpu, mm_percpu_wq, work); __cpumask_set_cpu(cpu, &has_work); } } for_each_cpu(cpu, &has_work) flush_work(&per_cpu(lru_add_drain_work, cpu)); done: mutex_unlock(&lock); } void lru_add_drain_all(void) { __lru_add_drain_all(false); } #else void lru_add_drain_all(void) { lru_add_drain(); } #endif /* CONFIG_SMP */ atomic_t lru_disable_count = ATOMIC_INIT(0); /* * lru_cache_disable() needs to be called before we start compiling * a list of folios to be migrated using folio_isolate_lru(). * It drains folios on LRU cache and then disable on all cpus until * lru_cache_enable is called. * * Must be paired with a call to lru_cache_enable(). */ void lru_cache_disable(void) { atomic_inc(&lru_disable_count); /* * Readers of lru_disable_count are protected by either disabling * preemption or rcu_read_lock: * * preempt_disable, local_irq_disable [bh_lru_lock()] * rcu_read_lock [rt_spin_lock CONFIG_PREEMPT_RT] * preempt_disable [local_lock !CONFIG_PREEMPT_RT] * * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on * preempt_disable() regions of code. So any CPU which sees * lru_disable_count = 0 will have exited the critical * section when synchronize_rcu() returns. */ synchronize_rcu_expedited(); #ifdef CONFIG_SMP __lru_add_drain_all(true); #else lru_add_and_bh_lrus_drain(); #endif } /** * folios_put_refs - Reduce the reference count on a batch of folios. * @folios: The folios. * @refs: The number of refs to subtract from each folio. * * Like folio_put(), but for a batch of folios. This is more efficient * than writing the loop yourself as it will optimise the locks which need * to be taken if the folios are freed. The folios batch is returned * empty and ready to be reused for another batch; there is no need * to reinitialise it. If @refs is NULL, we subtract one from each * folio refcount. * * Context: May be called in process or interrupt context, but not in NMI * context. May be called while holding a spinlock. */ void folios_put_refs(struct folio_batch *folios, unsigned int *refs) { int i, j; struct lruvec *lruvec = NULL; unsigned long flags = 0; for (i = 0, j = 0; i < folios->nr; i++) { struct folio *folio = folios->folios[i]; unsigned int nr_refs = refs ? 
refs[i] : 1; if (is_huge_zero_folio(folio)) continue; if (folio_is_zone_device(folio)) { if (lruvec) { unlock_page_lruvec_irqrestore(lruvec, flags); lruvec = NULL; } if (folio_ref_sub_and_test(folio, nr_refs)) free_zone_device_folio(folio); continue; } if (!folio_ref_sub_and_test(folio, nr_refs)) continue; /* hugetlb has its own memcg */ if (folio_test_hugetlb(folio)) { if (lruvec) { unlock_page_lruvec_irqrestore(lruvec, flags); lruvec = NULL; } free_huge_folio(folio); continue; } folio_unqueue_deferred_split(folio); __page_cache_release(folio, &lruvec, &flags); if (j != i) folios->folios[j] = folio; j++; } if (lruvec) unlock_page_lruvec_irqrestore(lruvec, flags); if (!j) { folio_batch_reinit(folios); return; } folios->nr = j; mem_cgroup_uncharge_folios(folios); free_unref_folios(folios); } EXPORT_SYMBOL(folios_put_refs); /** * release_pages - batched put_page() * @arg: array of pages to release * @nr: number of pages * * Decrement the reference count on all the pages in @arg. If it * fell to zero, remove the page from the LRU and free it. * * Note that the argument can be an array of pages, encoded pages, * or folio pointers. We ignore any encoded bits, and turn any of * them into just a folio that gets free'd. */ void release_pages(release_pages_arg arg, int nr) { struct folio_batch fbatch; int refs[PAGEVEC_SIZE]; struct encoded_page **encoded = arg.encoded_pages; int i; folio_batch_init(&fbatch); for (i = 0; i < nr; i++) { /* Turn any of the argument types into a folio */ struct folio *folio = page_folio(encoded_page_ptr(encoded[i])); /* Is our next entry actually "nr_pages" -> "nr_refs" ? */ refs[fbatch.nr] = 1; if (unlikely(encoded_page_flags(encoded[i]) & ENCODED_PAGE_BIT_NR_PAGES_NEXT)) refs[fbatch.nr] = encoded_nr_pages(encoded[++i]); if (folio_batch_add(&fbatch, folio) > 0) continue; folios_put_refs(&fbatch, refs); } if (fbatch.nr) folios_put_refs(&fbatch, refs); } EXPORT_SYMBOL(release_pages); /* * The folios which we're about to release may be in the deferred lru-addition * queues. That would prevent them from really being freed right now. That's * OK from a correctness point of view but is inefficient - those folios may be * cache-warm and we want to give them back to the page allocator ASAP. * * So __folio_batch_release() will drain those queues here. * folio_batch_move_lru() calls folios_put() directly to avoid * mutual recursion. */ void __folio_batch_release(struct folio_batch *fbatch) { if (!fbatch->percpu_pvec_drained) { lru_add_drain(); fbatch->percpu_pvec_drained = true; } folios_put(fbatch); } EXPORT_SYMBOL(__folio_batch_release); /** * folio_batch_remove_exceptionals() - Prune non-folios from a batch. * @fbatch: The batch to prune * * find_get_entries() fills a batch with both folios and shadow/swap/DAX * entries. This function prunes all the non-folio entries from @fbatch * without leaving holes, so that it can be passed on to folio-only batch * operations. 
 */
void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];
		if (!xa_is_value(folio))
			fbatch->folios[j++] = folio;
	}
	fbatch->nr = j;
}

static const struct ctl_table swap_sysctl_table[] = {
	{
		.procname	= "page-cluster",
		.data		= &page_cluster,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= (void *)&page_cluster_max,
	}
};

/*
 * Perform any setup for the swap system.
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;

	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */

	register_sysctl_init("vm", swap_sysctl_table);
}
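/*
 * Illustrative sketch (not part of mm/swap.c): the comment above
 * lru_cache_disable() says it must be called before isolating folios for
 * migration and must be paired with lru_cache_enable().  A hypothetical
 * caller could look like this; my_isolate_folios() is a made-up helper
 * standing in for the caller's own folio_isolate_lru() loop.
 */
static int example_isolate_for_migration(struct list_head *folio_list)
{
	int ret;

	lru_cache_disable();			/* drain per-CPU batches, keep them disabled */
	ret = my_isolate_folios(folio_list);	/* hypothetical: calls folio_isolate_lru() */
	lru_cache_enable();			/* always pair with the disable above */

	return ret;
}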
/*
 * linux/fs/hfs/trans.c
 *
 * Copyright (C) 1995-1997  Paul H. Hargrove
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains routines for converting between the Macintosh
 * character set and various other encodings.  This includes dealing
 * with ':' vs. '/' as the path-element separator.
 */

#include <linux/types.h>
#include <linux/nls.h>

#include "hfs_fs.h"

/*================ Global functions ================*/

/*
 * hfs_mac2asc()
 *
 * Given a 'Pascal String' (a string preceded by a length byte) in
 * the Macintosh character set produce the corresponding filename using
 * the 'trivial' name-mangling scheme, returning the length of the
 * mangled filename.  Note that the output string is not NULL
 * terminated.
 *
 * The name-mangling works as follows:
 * The character '/', which is illegal in Linux filenames, is replaced
 * by ':', which never appears in HFS filenames.  All other characters
 * are passed unchanged from input to output.
 */
int hfs_mac2asc(struct super_block *sb, char *out, const struct hfs_name *in)
{
	struct nls_table *nls_disk = HFS_SB(sb)->nls_disk;
	struct nls_table *nls_io = HFS_SB(sb)->nls_io;
	const char *src;
	char *dst;
	int srclen, dstlen, size;

	src = in->name;
	srclen = in->len;
	if (srclen > HFS_NAMELEN)
		srclen = HFS_NAMELEN;
	dst = out;
	dstlen = HFS_MAX_NAMELEN;
	if (nls_io) {
		wchar_t ch;

		while (srclen > 0) {
			if (nls_disk) {
				size = nls_disk->char2uni(src, srclen, &ch);
				if (size <= 0) {
					ch = '?';
					size = 1;
				}
				src += size;
				srclen -= size;
			} else {
				ch = *src++;
				srclen--;
			}
			if (ch == '/')
				ch = ':';
			size = nls_io->uni2char(ch, dst, dstlen);
			if (size < 0) {
				if (size == -ENAMETOOLONG)
					goto out;
				*dst = '?';
				size = 1;
			}
			dst += size;
			dstlen -= size;
		}
	} else {
		char ch;

		while (--srclen >= 0)
			*dst++ = (ch = *src++) == '/' ? ':' : ch;
	}
out:
	return dst - out;
}

/*
 * hfs_asc2mac()
 *
 * Given an ASCII string (not null-terminated) and its length,
 * generate the corresponding filename in the Macintosh character set
 * using the 'trivial' name-mangling scheme, returning the length of
 * the mangled filename.  Note that the output string is not NULL
 * terminated.
 *
 * This routine is the inverse of hfs_mac2asc():
 * a ':' is replaced by a '/'.
 */
void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr *in)
{
	struct nls_table *nls_disk = HFS_SB(sb)->nls_disk;
	struct nls_table *nls_io = HFS_SB(sb)->nls_io;
	const char *src;
	char *dst;
	int srclen, dstlen, size;

	src = in->name;
	srclen = in->len;
	dst = out->name;
	dstlen = HFS_NAMELEN;
	if (nls_io) {
		wchar_t ch;

		while (srclen > 0 && dstlen > 0) {
			size = nls_io->char2uni(src, srclen, &ch);
			if (size < 0) {
				ch = '?';
				size = 1;
			}
			src += size;
			srclen -= size;
			if (ch == ':')
				ch = '/';
			if (nls_disk) {
				size = nls_disk->uni2char(ch, dst, dstlen);
				if (size < 0) {
					if (size == -ENAMETOOLONG)
						goto out;
					*dst = '?';
					size = 1;
				}
				dst += size;
				dstlen -= size;
			} else {
				*dst++ = ch > 0xff ? '?' : ch;
				dstlen--;
			}
		}
	} else {
		char ch;

		if (dstlen > srclen)
			dstlen = srclen;
		while (--dstlen >= 0)
			*dst++ = (ch = *src++) == ':' ? '/' : ch;
	}
out:
	out->len = dst - (char *)out->name;
	dstlen = HFS_NAMELEN - out->len;
	while (--dstlen >= 0)
		*dst++ = 0;
}
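/*
 * Illustrative sketch (not part of fs/hfs/trans.c): with the NLS tables
 * out of the picture, the 'trivial' name-mangling used above is a plain
 * character swap between '/' and ':'.  This hypothetical helper shows the
 * transform in both directions.
 */
static void example_trivial_mangle(char *name, int len, int mac_to_linux)
{
	char from = mac_to_linux ? '/' : ':';	/* '/' is illegal on Linux */
	char to   = mac_to_linux ? ':' : '/';	/* ':' is the Mac separator */
	int i;

	for (i = 0; i < len; i++)
		if (name[i] == from)
			name[i] = to;
}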
// SPDX-License-Identifier: GPL-2.0+
/*
 * ext4_jbd2.h
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
 *
 * Ext4-specific journaling extensions.
 */

#ifndef _EXT4_JBD2_H
#define _EXT4_JBD2_H

#include <linux/fs.h>
#include <linux/jbd2.h>
#include "ext4.h"

#define EXT4_JOURNAL(inode)	(EXT4_SB((inode)->i_sb)->s_journal)

/* Define the number of blocks we need to account to a transaction to
 * modify one block of data.
 *
 * We may have to touch one inode, one bitmap buffer, up to three
 * indirection blocks, the group and superblock summaries, and the data
 * block to complete the transaction.
 *
 * For an extents-enabled fs we may have to allocate and modify up to
 * 5 levels of tree and a data block (for each of these we need a bitmap +
 * group summary update), plus the root, which is stored in the inode, and
 * the superblock. */
#define EXT4_SINGLEDATA_TRANS_BLOCKS(sb)				\
	(ext4_has_feature_extents(sb) ? 20U : 8U)

/* Extended attribute operations touch at most two data buffers,
 * two bitmap buffers, and two group summaries, in addition to the inode
 * and the superblock, which are already accounted for. */
#define EXT4_XATTR_TRANS_BLOCKS		6U

/* Define the minimum size for a transaction which modifies data.  This
 * needs to take into account the fact that we may end up modifying two
 * quota files too (one for the group, one for the user quota).  The
 * superblock only gets updated once, of course, so don't bother
 * counting that again for the quota updates. */
#define EXT4_DATA_TRANS_BLOCKS(sb)	(EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \
					 EXT4_XATTR_TRANS_BLOCKS - 2 + \
					 EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))

/*
 * Define the number of metadata blocks we need to account to modify data.
 *
 * This includes the superblock, the inode block, quota blocks and xattr blocks.
 */
#define EXT4_META_TRANS_BLOCKS(sb)	(EXT4_XATTR_TRANS_BLOCKS + \
					EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))

/* Define an arbitrary limit for the amount of data we will anticipate
 * writing to any given transaction.  For unbounded transactions such as
 * write(2) and truncate(2) we can write more than this, but we always
 * start off at the maximum transaction size and grow the transaction
 * optimistically as we go. */
#define EXT4_MAX_TRANS_DATA		64U

/* We break up a large truncate or write transaction once the handle's
 * buffer credits get this low; we then need either to extend the
 * transaction or to start a new one.  Reserve enough space here for
 * inode, bitmap, superblock, group and indirection updates for at least
 * one block, plus two quota updates.  Quota allocations are not
 * needed. */
#define EXT4_RESERVE_TRANS_BLOCKS	12U

/*
 * Number of credits needed if we need to insert an entry into a
 * directory.  For each new index block, we need 4 blocks (old index
 * block, new index block, bitmap block, bg summary).  For normal
 * htree directories there are 2 levels; if the largedir feature is
 * enabled, it's 3 levels.
 */
#define EXT4_INDEX_EXTRA_TRANS_BLOCKS	12U

#ifdef CONFIG_QUOTA
/* Number of blocks needed for a quota update - we know that the structure
 * was allocated so we need to update only the data block */
#define EXT4_QUOTA_TRANS_BLOCKS(sb) ((ext4_quota_capable(sb)) ? 1 : 0)
/* Number of blocks needed for a quota insert/delete - we do some block
 * writes but the inode, sb and group updates are done only once */
#define EXT4_QUOTA_INIT_BLOCKS(sb) ((ext4_quota_capable(sb)) ?\
		(DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
		 +3+DQUOT_INIT_REWRITE) : 0)

#define EXT4_QUOTA_DEL_BLOCKS(sb) ((ext4_quota_capable(sb)) ?\
		(DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
		 +3+DQUOT_DEL_REWRITE) : 0)
#else
#define EXT4_QUOTA_TRANS_BLOCKS(sb) 0
#define EXT4_QUOTA_INIT_BLOCKS(sb) 0
#define EXT4_QUOTA_DEL_BLOCKS(sb) 0
#endif
#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))

/*
 * Ext4 handle operation types -- for logging purposes
 */
#define EXT4_HT_MISC             0
#define EXT4_HT_INODE            1
#define EXT4_HT_WRITE_PAGE       2
#define EXT4_HT_MAP_BLOCKS       3
#define EXT4_HT_DIR              4
#define EXT4_HT_TRUNCATE         5
#define EXT4_HT_QUOTA            6
#define EXT4_HT_RESIZE           7
#define EXT4_HT_MIGRATE          8
#define EXT4_HT_MOVE_EXTENTS     9
#define EXT4_HT_XATTR           10
#define EXT4_HT_EXT_CONVERT     11
#define EXT4_HT_MAX             12

int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode,
			 struct ext4_iloc *iloc);

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			struct ext4_iloc *iloc);

#define ext4_mark_inode_dirty(__h, __i)					\
		__ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__)
int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
				const char *func, unsigned int line);

int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc);
/*
 * Wrapper functions with which ext4 calls into JBD.
*/ int __ext4_journal_get_write_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type); int __ext4_forget(const char *where, unsigned int line, handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t blocknr); int __ext4_journal_get_create_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type); int __ext4_handle_dirty_metadata(const char *where, unsigned int line, handle_t *handle, struct inode *inode, struct buffer_head *bh); #define ext4_journal_get_write_access(handle, sb, bh, trigger_type) \ __ext4_journal_get_write_access(__func__, __LINE__, (handle), (sb), \ (bh), (trigger_type)) #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \ __ext4_forget(__func__, __LINE__, (handle), (is_metadata), (inode), \ (bh), (block_nr)) #define ext4_journal_get_create_access(handle, sb, bh, trigger_type) \ __ext4_journal_get_create_access(__func__, __LINE__, (handle), (sb), \ (bh), (trigger_type)) #define ext4_handle_dirty_metadata(handle, inode, bh) \ __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \ (bh)) handle_t *__ext4_journal_start_sb(struct inode *inode, struct super_block *sb, unsigned int line, int type, int blocks, int rsv_blocks, int revoke_creds); int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle); #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096) /* Note: Do not use this for NULL handles. This is only to determine if * a properly allocated handle is using a journal or not. */ static inline int ext4_handle_valid(handle_t *handle) { if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT) return 0; return 1; } static inline void ext4_handle_sync(handle_t *handle) { if (ext4_handle_valid(handle)) handle->h_sync = 1; } static inline int ext4_handle_is_aborted(handle_t *handle) { if (ext4_handle_valid(handle)) return is_handle_aborted(handle); return 0; } static inline int ext4_free_metadata_revoke_credits(struct super_block *sb, int blocks) { /* Freeing each metadata block can result in freeing one cluster */ return blocks * EXT4_SB(sb)->s_cluster_ratio; } static inline int ext4_trans_default_revoke_credits(struct super_block *sb) { return ext4_free_metadata_revoke_credits(sb, 8); } #define ext4_journal_start_sb(sb, type, nblocks) \ __ext4_journal_start_sb(NULL, (sb), __LINE__, (type), (nblocks), 0,\ ext4_trans_default_revoke_credits(sb)) #define ext4_journal_start(inode, type, nblocks) \ __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0, \ ext4_trans_default_revoke_credits((inode)->i_sb)) #define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks)\ __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks),\ ext4_trans_default_revoke_credits((inode)->i_sb)) #define ext4_journal_start_with_revoke(inode, type, blocks, revoke_creds) \ __ext4_journal_start((inode), __LINE__, (type), (blocks), 0, \ (revoke_creds)) static inline handle_t *__ext4_journal_start(struct inode *inode, unsigned int line, int type, int blocks, int rsv_blocks, int revoke_creds) { return __ext4_journal_start_sb(inode, inode->i_sb, line, type, blocks, rsv_blocks, revoke_creds); } #define ext4_journal_stop(handle) \ __ext4_journal_stop(__func__, __LINE__, (handle)) #define ext4_journal_start_reserved(handle, type) \ __ext4_journal_start_reserved((handle), __LINE__, (type)) handle_t 
 *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
					int type);

static inline handle_t *ext4_journal_current_handle(void)
{
	return journal_current_handle();
}

static inline int ext4_journal_extend(handle_t *handle, int nblocks, int revoke)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_extend(handle, nblocks, revoke);
	return 0;
}

static inline int ext4_journal_restart(handle_t *handle, int nblocks,
				       int revoke)
{
	if (ext4_handle_valid(handle))
		return jbd2__journal_restart(handle, nblocks, revoke, GFP_NOFS);
	return 0;
}

int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
				  int extend_cred, int revoke_cred);

/*
 * Ensure that @handle has at least @check_cred credits available.  If not,
 * the transaction will be extended or restarted to contain at least
 * @extend_cred credits.  Before restarting the transaction, @fn is executed
 * to allow for cleanup before the transaction is restarted.
 *
 * The return value is < 0 in case of error, 0 in case the handle has enough
 * credits or transaction extension succeeded, 1 in case the transaction had
 * to be restarted.
 */
#define ext4_journal_ensure_credits_fn(handle, check_cred, extend_cred, \
				       revoke_cred, fn) \
({									\
	__label__ __ensure_end;						\
	int err = __ext4_journal_ensure_credits((handle), (check_cred),	\
						(extend_cred), (revoke_cred));	\
									\
	if (err <= 0)							\
		goto __ensure_end;					\
	err = (fn);							\
	if (err < 0)							\
		goto __ensure_end;					\
	err = ext4_journal_restart((handle), (extend_cred), (revoke_cred)); \
	if (err == 0)							\
		err = 1;						\
__ensure_end:								\
	err;								\
})

/*
 * Ensure that the given handle has at least the requested amount of credits
 * available, possibly restarting the transaction if needed.  We also make
 * sure the transaction has space for at least
 * ext4_trans_default_revoke_credits(sb) revoke records, as freeing one or
 * two blocks is a very common pattern and requesting this is very cheap.
*/ static inline int ext4_journal_ensure_credits(handle_t *handle, int credits, int revoke_creds) { return ext4_journal_ensure_credits_fn(handle, credits, credits, revoke_creds, 0); } static inline int ext4_journal_blocks_per_folio(struct inode *inode) { if (EXT4_JOURNAL(inode) != NULL) return jbd2_journal_blocks_per_folio(inode); return 0; } static inline int ext4_journal_force_commit(journal_t *journal) { if (journal) return jbd2_journal_force_commit(journal); return 0; } static inline int ext4_jbd2_inode_add_write(handle_t *handle, struct inode *inode, loff_t start_byte, loff_t length) { if (ext4_handle_valid(handle)) return jbd2_journal_inode_ranged_write(handle, EXT4_I(inode)->jinode, start_byte, length); return 0; } static inline int ext4_jbd2_inode_add_wait(handle_t *handle, struct inode *inode, loff_t start_byte, loff_t length) { if (ext4_handle_valid(handle)) return jbd2_journal_inode_ranged_wait(handle, EXT4_I(inode)->jinode, start_byte, length); return 0; } static inline void ext4_update_inode_fsync_trans(handle_t *handle, struct inode *inode, int datasync) { struct ext4_inode_info *ei = EXT4_I(inode); if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) { ei->i_sync_tid = handle->h_transaction->t_tid; if (datasync) ei->i_datasync_tid = handle->h_transaction->t_tid; } } /* super.c */ int ext4_force_commit(struct super_block *sb); /* * Ext4 inode journal modes */ #define EXT4_INODE_JOURNAL_DATA_MODE 0x01 /* journal data mode */ #define EXT4_INODE_ORDERED_DATA_MODE 0x02 /* ordered data mode */ #define EXT4_INODE_WRITEBACK_DATA_MODE 0x04 /* writeback data mode */ int ext4_inode_journal_mode(struct inode *inode); static inline int ext4_should_journal_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE; } static inline int ext4_should_order_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE; } static inline int ext4_should_writeback_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE; } static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks) { if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) return 0; if (!ext4_should_journal_data(inode)) return 0; /* * Data blocks in one extent are contiguous, just account for partial * clusters at extent boundaries */ return blocks + 2*(EXT4_SB(inode->i_sb)->s_cluster_ratio - 1); } /* * This function controls whether or not we should try to go down the * dioread_nolock code paths, which makes it safe to avoid taking * i_rwsem for direct I/O reads. This only works for extent-based * files, and it doesn't work if data journaling is enabled, since the * dioread_nolock code uses b_private to pass information back to the * I/O completion handler, and this conflicts with the jbd's use of * b_private. */ static inline int ext4_should_dioread_nolock(struct inode *inode) { if (!test_opt(inode->i_sb, DIOREAD_NOLOCK)) return 0; if (!S_ISREG(inode->i_mode)) return 0; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return 0; if (ext4_should_journal_data(inode)) return 0; /* temporary fix to prevent generic/422 test failures */ if (!test_opt(inode->i_sb, DELALLOC)) return 0; return 1; } /* * Pass journal explicitly as it may not be cached in the sbi->s_journal in some * cases */ static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal) { int err = 0; /* * At this point only two things can be operating on the journal. 
 * the JBD2 thread performing transaction commit, and s_sb_upd_work
 * issuing sb updates through the journal.  Once we set
 * EXT4_MF_JOURNAL_DESTROY, new ext4_handle_error() calls will not
 * queue s_sb_upd_work, and ext4_force_commit() makes sure any
 * ext4_handle_error() calls from the running transaction commit are
 * finished.  Hence no new s_sb_upd_work can be queued after we
 * flush it here.
 */
	ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY);

	ext4_force_commit(sbi->s_sb);
	flush_work(&sbi->s_sb_upd_work);

	err = jbd2_journal_destroy(journal);
	sbi->s_journal = NULL;

	return err;
}

#endif	/* _EXT4_JBD2_H */
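/*
 * Illustrative sketch (not part of ext4_jbd2.h): a typical user of the
 * wrappers above starts a handle with an estimated credit count, takes
 * write access to a buffer, dirties it through the journal, and stops the
 * handle.  my_modify_block() is a made-up stand-in for the actual change;
 * EXT4_JTR_NONE is assumed to be the "no trigger" value from ext4.h.
 */
static int example_journalled_update(struct inode *inode,
				     struct buffer_head *bh)
{
	handle_t *handle;
	int err, ret;

	handle = ext4_journal_start(inode, EXT4_HT_MISC,
				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
					    EXT4_JTR_NONE);
	if (!err) {
		my_modify_block(bh);	/* hypothetical buffer modification */
		err = ext4_handle_dirty_metadata(handle, inode, bh);
	}

	/* the handle must be stopped even if an earlier step failed */
	ret = ext4_journal_stop(handle);
	return err ?: ret;
}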
/* SPDX-License-Identifier: GPL-2.0 */
/* atmdev.h - ATM device driver declarations and various related items */
#ifndef LINUX_ATMDEV_H
#define LINUX_ATMDEV_H

#include <linux/wait.h> /* wait_queue_head_t */
#include <linux/time.h> /* struct timeval */
#include <linux/net.h>
#include <linux/bug.h>
#include <linux/skbuff.h> /* struct sk_buff */
#include <linux/uio.h>
#include <net/sock.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <uapi/linux/atmdev.h>

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>

extern struct proc_dir_entry *atm_proc_root;
#endif

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
struct compat_atm_iobuf {
	int length;
	compat_uptr_t buffer;
};
#endif

struct k_atm_aal_stats {
#define __HANDLE_ITEM(i) atomic_t i
	__AAL_STAT_ITEMS
#undef __HANDLE_ITEM
};

struct k_atm_dev_stats {
	struct k_atm_aal_stats aal0;
	struct k_atm_aal_stats aal34;
	struct k_atm_aal_stats aal5;
};

struct device;

enum {
	ATM_VF_ADDR,		/* Address is in use. Set by anybody, cleared
				   by device driver. */
	ATM_VF_READY,		/* VC is ready to transfer data. Set by device
				   driver, cleared by anybody. */
	ATM_VF_PARTIAL,		/* resources are bound to PVC (partial PVC
				   setup), controlled by socket layer */
	ATM_VF_REGIS,		/* registered with the daemon, controlled by
				   SVC socket layer */
	ATM_VF_BOUND,		/* local SAP is set, controlled by SVC socket
				   layer */
	ATM_VF_RELEASED,	/* the daemon has indicated/requested release,
				   controlled by SVC socket layer */
	ATM_VF_HASQOS,		/* QOS parameters have been set */
	ATM_VF_LISTEN,		/* socket is used for listening */
	ATM_VF_META,		/* SVC socket isn't used for normal data
				   traffic and doesn't depend on signaling
				   to be available */
	ATM_VF_SESSION,		/* VCC is p2mp session control descriptor */
	ATM_VF_HASSAP,		/* SAP has been set */
	ATM_VF_CLOSE,		/* asynchronous close - treat like VF_RELEASED */
	ATM_VF_WAITING,		/* waiting for reply from sigd */
	ATM_VF_IS_CLIP,		/* in use by CLIP protocol */
};

#define ATM_VF2VS(flags) \
	(test_bit(ATM_VF_READY,&(flags)) ? ATM_VS_CONNECTED : \
	 test_bit(ATM_VF_RELEASED,&(flags)) ? ATM_VS_CLOSING : \
	 test_bit(ATM_VF_LISTEN,&(flags)) ? ATM_VS_LISTEN : \
	 test_bit(ATM_VF_REGIS,&(flags)) ? ATM_VS_INUSE : \
	 test_bit(ATM_VF_BOUND,&(flags)) ? \
ATM_VS_BOUND : ATM_VS_IDLE) enum { ATM_DF_REMOVED, /* device was removed from atm_devs list */ }; #define ATM_PHY_SIG_LOST 0 /* no carrier/light */ #define ATM_PHY_SIG_UNKNOWN 1 /* carrier/light status is unknown */ #define ATM_PHY_SIG_FOUND 2 /* carrier/light okay */ #define ATM_ATMOPT_CLP 1 /* set CLP bit */ struct atm_vcc { /* struct sock has to be the first member of atm_vcc */ struct sock sk; unsigned long flags; /* VCC flags (ATM_VF_*) */ short vpi; /* VPI and VCI (types must be equal */ /* with sockaddr) */ int vci; unsigned long aal_options; /* AAL layer options */ unsigned long atm_options; /* ATM layer options */ struct atm_dev *dev; /* device back pointer */ struct atm_qos qos; /* QOS */ struct atm_sap sap; /* SAP */ void (*release_cb)(struct atm_vcc *vcc); /* release_sock callback */ void (*push)(struct atm_vcc *vcc,struct sk_buff *skb); void (*pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* optional */ int (*push_oam)(struct atm_vcc *vcc,void *cell); int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); void *dev_data; /* per-device data */ void *proto_data; /* per-protocol data */ struct k_atm_aal_stats *stats; /* pointer to AAL stats group */ struct module *owner; /* owner of ->push function */ /* SVC part --- may move later ------------------------------------- */ short itf; /* interface number */ struct sockaddr_atmsvc local; struct sockaddr_atmsvc remote; /* Multipoint part ------------------------------------------------- */ struct atm_vcc *session; /* session VCC descriptor */ /* Other stuff ----------------------------------------------------- */ void *user_back; /* user backlink - not touched by */ /* native ATM stack. Currently used */ /* by CLIP and sch_atm. */ }; static inline struct atm_vcc *atm_sk(struct sock *sk) { return (struct atm_vcc *)sk; } static inline struct atm_vcc *ATM_SD(struct socket *sock) { return atm_sk(sock->sk); } static inline struct sock *sk_atm(struct atm_vcc *vcc) { return (struct sock *)vcc; } struct atm_dev_addr { struct sockaddr_atmsvc addr; /* ATM address */ struct list_head entry; /* next address */ }; enum atm_addr_type_t { ATM_ADDR_LOCAL, ATM_ADDR_LECS }; struct atm_dev { const struct atmdev_ops *ops; /* device operations; NULL if unused */ const struct atmphy_ops *phy; /* PHY operations, may be undefined */ /* (NULL) */ const char *type; /* device type name */ int number; /* device index */ void *dev_data; /* per-device data */ void *phy_data; /* private PHY data */ unsigned long flags; /* device flags (ATM_DF_*) */ struct list_head local; /* local ATM addresses */ struct list_head lecs; /* LECS ATM addresses learned via ILMI */ unsigned char esi[ESI_LEN]; /* ESI ("MAC" addr) */ struct atm_cirange ci_range; /* VPI/VCI range */ struct k_atm_dev_stats stats; /* statistics */ char signal; /* signal status (ATM_PHY_SIG_*) */ int link_rate; /* link rate (default: OC3) */ refcount_t refcnt; /* reference count */ spinlock_t lock; /* protect internal members */ #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_entry; /* proc entry */ char *proc_name; /* proc entry name */ #endif struct device class_dev; /* sysfs device */ struct list_head dev_list; /* linkage */ }; /* OF: send_Oam Flags */ #define ATM_OF_IMMED 1 /* Attempt immediate delivery */ #define ATM_OF_INRATE 2 /* Attempt in-rate delivery */ struct atmdev_ops { /* only send is required */ void (*dev_close)(struct atm_dev *dev); int (*open)(struct atm_vcc *vcc); void (*close)(struct atm_vcc *vcc); int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg); #ifdef 
CONFIG_COMPAT int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd, void __user *arg); #endif int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb); int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); void (*phy_put)(struct atm_dev *dev,unsigned char value, unsigned long addr); unsigned char (*phy_get)(struct atm_dev *dev,unsigned long addr); int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags); int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page); struct module *owner; }; struct atmphy_ops { int (*start)(struct atm_dev *dev); int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg); void (*interrupt)(struct atm_dev *dev); int (*stop)(struct atm_dev *dev); }; struct atm_skb_data { struct atm_vcc *vcc; /* ATM VCC */ unsigned long atm_options; /* ATM layer options */ unsigned int acct_truesize; /* truesize accounted to vcc */ } __packed; #define VCC_HTABLE_SIZE 32 extern struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; extern rwlock_t vcc_sklist_lock; #define ATM_SKB(skb) (((struct atm_skb_data *) (skb)->cb)) struct atm_dev *atm_dev_register(const char *type, struct device *parent, const struct atmdev_ops *ops, int number, /* -1 == pick first available */ unsigned long *flags); struct atm_dev *atm_dev_lookup(int number); void atm_dev_deregister(struct atm_dev *dev); /* atm_dev_signal_change * * Propagate lower layer signal change in atm_dev->signal to netdevice. * The event will be sent via a notifier call chain. */ void atm_dev_signal_change(struct atm_dev *dev, char signal); void vcc_insert_socket(struct sock *sk); void atm_dev_release_vccs(struct atm_dev *dev); static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb) { /* * Because ATM skbs may not belong to a sock (and we don't * necessarily want to), skb->truesize may be adjusted, * escaping the hack in pskb_expand_head() which avoids * doing so for some cases. So stash the value of truesize * at the time we accounted it, and atm_pop_raw() can use * that value later, in case it changes. */ refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); ATM_SKB(skb)->acct_truesize = skb->truesize; ATM_SKB(skb)->atm_options = vcc->atm_options; } static inline void atm_return_tx(struct atm_vcc *vcc, struct sk_buff *skb) { WARN_ON_ONCE(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk_atm(vcc)->sk_wmem_alloc)); } static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) { atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc); } static inline void atm_return(struct atm_vcc *vcc,int truesize) { atomic_sub(truesize, &sk_atm(vcc)->sk_rmem_alloc); } static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size) { return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) < sk_atm(vcc)->sk_sndbuf; } static inline void atm_dev_hold(struct atm_dev *dev) { refcount_inc(&dev->refcnt); } static inline void atm_dev_put(struct atm_dev *dev) { if (refcount_dec_and_test(&dev->refcnt)) { BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); if (dev->ops->dev_close) dev->ops->dev_close(dev); put_device(&dev->class_dev); } } int atm_charge(struct atm_vcc *vcc,int truesize); struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, gfp_t gfp_flags); int atm_pcr_goal(const struct atm_trafprm *tp); void vcc_release_async(struct atm_vcc *vcc, int reply); struct atm_ioctl { struct module *owner; /* A module reference is kept if appropriate over this call. * Return -ENOIOCTLCMD if you don't handle it. 
 */
	int (*ioctl)(struct socket *, unsigned int cmd, unsigned long arg);
	struct list_head list;
};

/**
 * register_atm_ioctl - register handler for ioctl operations
 *
 * Special (non-device) handlers of ioctls should
 * register here.  If you're a normal device, you should
 * set .ioctl in your atmdev_ops instead.
 */
void register_atm_ioctl(struct atm_ioctl *);

/**
 * deregister_atm_ioctl - remove the ioctl handler
 */
void deregister_atm_ioctl(struct atm_ioctl *);


/* register_atmdevice_notifier - register atm_dev notify events
 *
 * Clients like br2684 will register notify events.
 * Currently we notify of signal found/lost.
 */
int register_atmdevice_notifier(struct notifier_block *nb);
void unregister_atmdevice_notifier(struct notifier_block *nb);

#endif
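/*
 * Illustrative sketch (not part of atmdev.h): per the comment above, a
 * non-device ioctl handler hooks in through register_atm_ioctl().  The
 * handler and module hooks below are made up for the example (module.h
 * provides THIS_MODULE).
 */
static int example_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{
	/* decline unknown commands so other handlers get a chance */
	return -ENOIOCTLCMD;
}

static struct atm_ioctl example_atm_ioctl = {
	.owner	= THIS_MODULE,
	.ioctl	= example_ioctl,
};

static int __init example_init(void)
{
	register_atm_ioctl(&example_atm_ioctl);
	return 0;
}

static void __exit example_exit(void)
{
	deregister_atm_ioctl(&example_atm_ioctl);
}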
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000,2002-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BMAP_BTREE_H__
#define __XFS_BMAP_BTREE_H__

struct xfs_btree_cur;
struct xfs_btree_block;
struct xfs_mount;
struct xfs_inode;
struct xfs_trans;
struct xbtree_ifakeroot;

/*
 * Maximum number of bmap btree levels.
 */
#define XFS_BM_MAXLEVELS(mp,w)		((mp)->m_bm_maxlevels[(w)])

/*
 * Prototypes for xfs_bmap.c to call.
 */
extern void xfs_bmdr_to_bmbt(struct xfs_inode *, xfs_bmdr_block_t *, int,
			struct xfs_btree_block *, int);

void xfs_bmbt_disk_set_all(struct xfs_bmbt_rec *r, struct xfs_bmbt_irec *s);
extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(const struct xfs_bmbt_rec *r);
extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(const struct xfs_bmbt_rec *r);
void xfs_bmbt_disk_get_all(const struct xfs_bmbt_rec *r,
		struct xfs_bmbt_irec *s);

extern void xfs_bmbt_to_bmdr(struct xfs_mount *, struct xfs_btree_block *, int,
			xfs_bmdr_block_t *, int);
extern int xfs_bmbt_get_maxrecs(struct xfs_btree_cur *, int level);
extern int xfs_bmdr_maxrecs(int blocklen, int leaf);
unsigned int xfs_bmbt_maxrecs(struct xfs_mount *mp, unsigned int blocklen,
		bool leaf);

extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
				 int whichfork, xfs_ino_t new_owner,
				 struct list_head *buffer_list);

extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
		struct xfs_trans *, struct xfs_inode *, int);

void xfs_bmbt_commit_staged_btree(struct xfs_btree_cur *cur,
		struct xfs_trans *tp, int whichfork);

extern unsigned long long xfs_bmbt_calc_size(struct xfs_mount *mp,
		unsigned long long len);

unsigned int xfs_bmbt_maxlevels_ondisk(void);

int __init xfs_bmbt_init_cur_cache(void);
void xfs_bmbt_destroy_cur_cache(void);

void xfs_bmbt_init_block(struct xfs_inode *ip, struct xfs_btree_block *buf,
		struct xfs_buf *bp, __u16 level, __u16 numrecs);

/*
 * Btree block header size depends on a superblock flag.
 */
static inline size_t
xfs_bmbt_block_len(struct xfs_mount *mp)
{
	return xfs_has_crc(mp) ?
			XFS_BTREE_LBLOCK_CRC_LEN : XFS_BTREE_LBLOCK_LEN;
}

/* Addresses of keys, pointers, and records within an incore bmbt block.
 */

static inline struct xfs_bmbt_rec *
xfs_bmbt_rec_addr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	unsigned int		index)
{
	return (struct xfs_bmbt_rec *)
		((char *)block + xfs_bmbt_block_len(mp) +
		 (index - 1) * sizeof(struct xfs_bmbt_rec));
}

static inline struct xfs_bmbt_key *
xfs_bmbt_key_addr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	unsigned int		index)
{
	return (struct xfs_bmbt_key *)
		((char *)block + xfs_bmbt_block_len(mp) +
		 (index - 1) * sizeof(struct xfs_bmbt_key));
}

static inline xfs_bmbt_ptr_t *
xfs_bmbt_ptr_addr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	unsigned int		index,
	unsigned int		maxrecs)
{
	return (xfs_bmbt_ptr_t *)
		((char *)block + xfs_bmbt_block_len(mp) +
		 maxrecs * sizeof(struct xfs_bmbt_key) +
		 (index - 1) * sizeof(xfs_bmbt_ptr_t));
}

/* Addresses of keys, pointers, and records within an ondisk bmbt block. */

static inline struct xfs_bmbt_rec *
xfs_bmdr_rec_addr(
	struct xfs_bmdr_block	*block,
	unsigned int		index)
{
	return (struct xfs_bmbt_rec *)
		((char *)(block + 1) +
		 (index - 1) * sizeof(struct xfs_bmbt_rec));
}

static inline struct xfs_bmbt_key *
xfs_bmdr_key_addr(
	struct xfs_bmdr_block	*block,
	unsigned int		index)
{
	return (struct xfs_bmbt_key *)
		((char *)(block + 1) +
		 (index - 1) * sizeof(struct xfs_bmbt_key));
}

static inline xfs_bmbt_ptr_t *
xfs_bmdr_ptr_addr(
	struct xfs_bmdr_block	*block,
	unsigned int		index,
	unsigned int		maxrecs)
{
	return (xfs_bmbt_ptr_t *)
		((char *)(block + 1) +
		 maxrecs * sizeof(struct xfs_bmbt_key) +
		 (index - 1) * sizeof(xfs_bmbt_ptr_t));
}

/*
 * Address of pointers within the incore btree root.
 *
 * These are to be used when we know the size of the block and
 * we don't have a cursor.
 */
static inline xfs_bmbt_ptr_t *
xfs_bmap_broot_ptr_addr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*bb,
	unsigned int		i,
	unsigned int		sz)
{
	return xfs_bmbt_ptr_addr(mp, bb, i, xfs_bmbt_maxrecs(mp, sz, false));
}

/*
 * Compute the space required for the incore btree root containing the given
 * number of records.
 */
static inline size_t
xfs_bmap_broot_space_calc(
	struct xfs_mount	*mp,
	unsigned int		nrecs)
{
	return xfs_bmbt_block_len(mp) +
	       (nrecs * (sizeof(struct xfs_bmbt_key) + sizeof(xfs_bmbt_ptr_t)));
}

/*
 * Compute the space required for the incore btree root given the ondisk
 * btree root block.
 */
static inline size_t
xfs_bmap_broot_space(
	struct xfs_mount	*mp,
	struct xfs_bmdr_block	*bb)
{
	return xfs_bmap_broot_space_calc(mp, be16_to_cpu(bb->bb_numrecs));
}

/* Compute the space required for the ondisk root block. */
static inline size_t
xfs_bmdr_space_calc(unsigned int nrecs)
{
	return sizeof(struct xfs_bmdr_block) +
	       (nrecs * (sizeof(struct xfs_bmbt_key) + sizeof(xfs_bmbt_ptr_t)));
}

/*
 * Compute the space required for the ondisk root block given an incore root
 * block.
 */
static inline size_t
xfs_bmap_bmdr_space(struct xfs_btree_block *bb)
{
	return xfs_bmdr_space_calc(be16_to_cpu(bb->bb_numrecs));
}

struct xfs_btree_block *xfs_bmap_broot_realloc(struct xfs_inode *ip,
		int whichfork, unsigned int new_numrecs);

#endif	/* __XFS_BMAP_BTREE_H__ */
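/*
 * Illustrative sketch (not part of this header): within a bmbt block the
 * header is followed by records (leaf blocks) or by all keys and then all
 * pointers (node blocks), which is why xfs_bmbt_ptr_addr() needs maxrecs
 * to locate the start of the pointer array.  A hypothetical walk over a
 * node block, using the 1-based indexing the helpers above expect:
 */
static inline void
example_walk_bmbt_node(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	unsigned int		numrecs,
	unsigned int		maxrecs)
{
	unsigned int		i;

	for (i = 1; i <= numrecs; i++) {
		struct xfs_bmbt_key	*key = xfs_bmbt_key_addr(mp, block, i);
		xfs_bmbt_ptr_t		*ptr = xfs_bmbt_ptr_addr(mp, block, i,
								 maxrecs);

		/* both the key and the pointer are big-endian on disk */
		(void)key;
		(void)ptr;
	}
}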
// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "chardev.h"
#include "dirent.h"
#include "fs.h"
#include "fs-ioctl.h"
#include "namei.h"
#include "quota.h"

#include <linux/compat.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/writeback.h>

#define FS_IOC_GOINGDOWN	     _IOR('X', 125, __u32)
#define FSOP_GOING_FLAGS_DEFAULT	0x0	/* going down */
#define FSOP_GOING_FLAGS_LOGFLUSH	0x1	/* flush log but not data */
#define FSOP_GOING_FLAGS_NOLOGFLUSH	0x2	/* don't flush log nor data */

static int bch2_reinherit_attrs_fn(struct btree_trans *trans,
				   struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   void *p)
{
	struct bch_inode_info *dir = p;

	return !bch2_reinherit_attrs(bi, &dir->ei_inode);
}

static int bch2_ioc_reinherit_attrs(struct bch_fs *c,
				    struct file *file,
				    struct bch_inode_info *src,
				    const char __user *name)
{
	struct bch_hash_info hash = bch2_hash_info_init(c, &src->ei_inode);
	struct bch_inode_info *dst;
	struct inode *vinode = NULL;
	char *kname = NULL;
	struct qstr qstr;
	int ret = 0;
	subvol_inum inum;

	kname = kmalloc(BCH_NAME_MAX, GFP_KERNEL);
	if (!kname)
		return -ENOMEM;

	ret = strncpy_from_user(kname, name, BCH_NAME_MAX);
	if (unlikely(ret < 0))
		goto err1;

	qstr.len	= ret;
	qstr.name	= kname;

	ret = bch2_dirent_lookup(c, inode_inum(src), &hash, &qstr, &inum);
	if (ret)
		goto err1;

	vinode = bch2_vfs_inode_get(c, inum);
	ret = PTR_ERR_OR_ZERO(vinode);
	if (ret)
		goto err1;

	dst = to_bch_ei(vinode);

	ret = mnt_want_write_file(file);
	if (ret)
		goto err2;

	bch2_lock_inodes(INODE_UPDATE_LOCK, src, dst);

	if (inode_attr_changing(src, dst, Inode_opt_project)) {
		ret = bch2_fs_quota_transfer(c, dst,
					     src->ei_qid,
					     1 << QTYP_PRJ,
KEY_TYPE_QUOTA_PREALLOC); if (ret) goto err3; } ret = bch2_write_inode(c, dst, bch2_reinherit_attrs_fn, src, 0); err3: bch2_unlock_inodes(INODE_UPDATE_LOCK, src, dst); /* return true if we did work */ if (ret >= 0) ret = !ret; mnt_drop_write_file(file); err2: iput(vinode); err1: kfree(kname); return ret; } static int bch2_ioc_getversion(struct bch_inode_info *inode, u32 __user *arg) { return put_user(inode->v.i_generation, arg); } static int bch2_ioc_getlabel(struct bch_fs *c, char __user *user_label) { int ret; size_t len; char label[BCH_SB_LABEL_SIZE]; BUILD_BUG_ON(BCH_SB_LABEL_SIZE >= FSLABEL_MAX); mutex_lock(&c->sb_lock); memcpy(label, c->disk_sb.sb->label, BCH_SB_LABEL_SIZE); mutex_unlock(&c->sb_lock); len = strnlen(label, BCH_SB_LABEL_SIZE); if (len == BCH_SB_LABEL_SIZE) { bch_warn(c, "label is too long, return the first %zu bytes", --len); } ret = copy_to_user(user_label, label, len); return ret ? -EFAULT : 0; } static int bch2_ioc_setlabel(struct bch_fs *c, struct file *file, struct bch_inode_info *inode, const char __user *user_label) { int ret; char label[BCH_SB_LABEL_SIZE]; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(label, user_label, sizeof(label))) return -EFAULT; if (strnlen(label, BCH_SB_LABEL_SIZE) == BCH_SB_LABEL_SIZE) { bch_err(c, "unable to set label with more than %d bytes", BCH_SB_LABEL_SIZE - 1); return -EINVAL; } ret = mnt_want_write_file(file); if (ret) return ret; mutex_lock(&c->sb_lock); strscpy(c->disk_sb.sb->label, label, BCH_SB_LABEL_SIZE); ret = bch2_write_super(c); mutex_unlock(&c->sb_lock); mnt_drop_write_file(file); return ret; } static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg) { u32 flags; int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(flags, arg)) return -EFAULT; struct printbuf buf = PRINTBUF; bch2_log_msg_start(c, &buf); prt_printf(&buf, "shutdown by ioctl type %u", flags); switch (flags) { case FSOP_GOING_FLAGS_DEFAULT: ret = bdev_freeze(c->vfs_sb->s_bdev); if (ret) break; bch2_journal_flush(&c->journal); bch2_fs_emergency_read_only2(c, &buf); bdev_thaw(c->vfs_sb->s_bdev); break; case FSOP_GOING_FLAGS_LOGFLUSH: bch2_journal_flush(&c->journal); fallthrough; case FSOP_GOING_FLAGS_NOLOGFLUSH: bch2_fs_emergency_read_only2(c, &buf); break; default: ret = -EINVAL; goto noprint; } bch2_print_str(c, KERN_ERR, buf.buf); noprint: printbuf_exit(&buf); return ret; } static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, struct bch_ioctl_subvolume arg) { struct inode *dir; struct bch_inode_info *inode; struct user_namespace *s_user_ns; struct dentry *dst_dentry; struct path src_path, dst_path; int how = LOOKUP_FOLLOW; int error; subvol_inum snapshot_src = { 0 }; unsigned lookup_flags = 0; unsigned create_flags = BCH_CREATE_SUBVOL; if (arg.flags & ~(BCH_SUBVOL_SNAPSHOT_CREATE| BCH_SUBVOL_SNAPSHOT_RO)) return -EINVAL; if (!(arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) && (arg.src_ptr || (arg.flags & BCH_SUBVOL_SNAPSHOT_RO))) return -EINVAL; if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) create_flags |= BCH_CREATE_SNAPSHOT; if (arg.flags & BCH_SUBVOL_SNAPSHOT_RO) create_flags |= BCH_CREATE_SNAPSHOT_RO; if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) { /* sync_inodes_sb enforce s_umount is locked */ down_read(&c->vfs_sb->s_umount); sync_inodes_sb(c->vfs_sb); up_read(&c->vfs_sb->s_umount); } if (arg.src_ptr) { error = user_path_at(arg.dirfd, (const char __user *)(unsigned long)arg.src_ptr, how, &src_path); if (error) goto err1; if (src_path.dentry->d_sb->s_fs_info != c) { path_put(&src_path); error = 
-EXDEV; goto err1; } snapshot_src = inode_inum(to_bch_ei(src_path.dentry->d_inode)); } dst_dentry = user_path_create(arg.dirfd, (const char __user *)(unsigned long)arg.dst_ptr, &dst_path, lookup_flags); error = PTR_ERR_OR_ZERO(dst_dentry); if (error) goto err2; if (dst_dentry->d_sb->s_fs_info != c) { error = -EXDEV; goto err3; } if (dst_dentry->d_inode) { error = bch_err_throw(c, EEXIST_subvolume_create); goto err3; } dir = dst_path.dentry->d_inode; if (IS_DEADDIR(dir)) { error = bch_err_throw(c, ENOENT_directory_dead); goto err3; } s_user_ns = dir->i_sb->s_user_ns; if (!kuid_has_mapping(s_user_ns, current_fsuid()) || !kgid_has_mapping(s_user_ns, current_fsgid())) { error = -EOVERFLOW; goto err3; } error = inode_permission(file_mnt_idmap(filp), dir, MAY_WRITE | MAY_EXEC); if (error) goto err3; if (!IS_POSIXACL(dir)) arg.mode &= ~current_umask(); error = security_path_mkdir(&dst_path, dst_dentry, arg.mode); if (error) goto err3; if ((arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) && !arg.src_ptr) snapshot_src.subvol = inode_inum(to_bch_ei(dir)).subvol; down_write(&c->snapshot_create_lock); inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir), dst_dentry, arg.mode|S_IFDIR, 0, snapshot_src, create_flags); up_write(&c->snapshot_create_lock); error = PTR_ERR_OR_ZERO(inode); if (error) goto err3; d_instantiate(dst_dentry, &inode->v); fsnotify_mkdir(dir, dst_dentry); err3: done_path_create(&dst_path, dst_dentry); err2: if (arg.src_ptr) path_put(&src_path); err1: return error; } static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp, struct bch_ioctl_subvolume arg) { const char __user *name = (void __user *)(unsigned long)arg.dst_ptr; struct path path; struct inode *dir; struct dentry *victim; int ret = 0; if (arg.flags) return -EINVAL; victim = user_path_locked_at(arg.dirfd, name, &path); if (IS_ERR(victim)) return PTR_ERR(victim); dir = d_inode(path.dentry); if (victim->d_sb->s_fs_info != c) { ret = -EXDEV; goto err; } ret = inode_permission(file_mnt_idmap(filp), d_inode(victim), MAY_WRITE) ?: __bch2_unlink(dir, victim, true); if (!ret) { fsnotify_rmdir(dir, victim); d_invalidate(victim); } err: inode_unlock(dir); dput(victim); path_put(&path); return ret; } long bch2_fs_file_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct bch_inode_info *inode = file_bch_inode(file); struct bch_fs *c = inode->v.i_sb->s_fs_info; long ret; switch (cmd) { case BCHFS_IOC_REINHERIT_ATTRS: ret = bch2_ioc_reinherit_attrs(c, file, inode, (void __user *) arg); break; case FS_IOC_GETVERSION: ret = bch2_ioc_getversion(inode, (u32 __user *) arg); break; case FS_IOC_SETVERSION: ret = -ENOTTY; break; case FS_IOC_GETFSLABEL: ret = bch2_ioc_getlabel(c, (void __user *) arg); break; case FS_IOC_SETFSLABEL: ret = bch2_ioc_setlabel(c, file, inode, (const void __user *) arg); break; case FS_IOC_GOINGDOWN: ret = bch2_ioc_goingdown(c, (u32 __user *) arg); break; case BCH_IOCTL_SUBVOLUME_CREATE: { struct bch_ioctl_subvolume i; ret = copy_from_user(&i, (void __user *) arg, sizeof(i)) ? -EFAULT : bch2_ioctl_subvolume_create(c, file, i); break; } case BCH_IOCTL_SUBVOLUME_DESTROY: { struct bch_ioctl_subvolume i; ret = copy_from_user(&i, (void __user *) arg, sizeof(i)) ? 
-EFAULT : bch2_ioctl_subvolume_destroy(c, file, i); break; } default: ret = bch2_fs_ioctl(c, cmd, (void __user *) arg); break; } return bch2_err_class(ret); } #ifdef CONFIG_COMPAT long bch2_compat_fs_ioctl(struct file *file, unsigned cmd, unsigned long arg) { /* These are just misnamed; they actually get/put an int from/to userspace */ switch (cmd) { case FS_IOC32_GETFLAGS: cmd = FS_IOC_GETFLAGS; break; case FS_IOC32_SETFLAGS: cmd = FS_IOC_SETFLAGS; break; case FS_IOC32_GETVERSION: cmd = FS_IOC_GETVERSION; break; case FS_IOC_GETFSLABEL: case FS_IOC_SETFSLABEL: break; default: return -ENOIOCTLCMD; } return bch2_fs_file_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); } #endif #endif /* NO_BCACHEFS_FS */
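/*
 * Illustrative userspace sketch (editor's addition, not part of the kernel
 * sources above): exercising the generic label ioctl that
 * bch2_fs_file_ioctl() dispatches to bch2_ioc_getlabel(). FS_IOC_GETFSLABEL
 * and FSLABEL_MAX come from <linux/fs.h>; any fd on the mounted filesystem
 * works. Error handling is kept minimal.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* zero-init: the kernel copies only the label bytes, not a NUL */
	char label[FSLABEL_MAX] = "";
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFSLABEL, label) < 0) {
		perror("FS_IOC_GETFSLABEL");
		return 1;
	}
	printf("label: %s\n", label);
	close(fd);
	return 0;
}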
// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for "8 devices" USB2CAN converter * * Copyright (C) 2012 Bernd Krumboeck (krumboeck@universalnet.at) * * This driver is inspired by the 3.2.0 version of drivers/net/can/usb/ems_usb.c * and drivers/net/can/usb/esd_usb2.c * * Many thanks to Gerhard Bertelsmann (info@gerhard-bertelsmann.de) * for testing and fixing this driver. Also many thanks to "8 devices", * who were very cooperative and answered my questions. */ #include <linux/ethtool.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> /* driver constants */ #define MAX_RX_URBS 20 #define MAX_TX_URBS 20 #define RX_BUFFER_SIZE 64 /* vendor and product id */ #define USB_8DEV_VENDOR_ID 0x0483 #define USB_8DEV_PRODUCT_ID 0x1234 /* endpoints */ enum usb_8dev_endpoint { USB_8DEV_ENDP_DATA_RX = 1, USB_8DEV_ENDP_DATA_TX, USB_8DEV_ENDP_CMD_RX, USB_8DEV_ENDP_CMD_TX }; /* device CAN clock */ #define USB_8DEV_ABP_CLOCK 32000000 /* setup flags */ #define USB_8DEV_SILENT 0x01 #define USB_8DEV_LOOPBACK 0x02 #define USB_8DEV_DISABLE_AUTO_RESTRANS 0x04 #define USB_8DEV_STATUS_FRAME 0x08 /* commands */ enum usb_8dev_cmd { USB_8DEV_RESET = 1, USB_8DEV_OPEN, USB_8DEV_CLOSE, USB_8DEV_SET_SPEED, USB_8DEV_SET_MASK_FILTER, USB_8DEV_GET_STATUS, USB_8DEV_GET_STATISTICS, USB_8DEV_GET_SERIAL, USB_8DEV_GET_SOFTW_VER, USB_8DEV_GET_HARDW_VER, USB_8DEV_RESET_TIMESTAMP, USB_8DEV_GET_SOFTW_HARDW_VER }; /* command options */ #define USB_8DEV_BAUD_MANUAL 0x09 #define USB_8DEV_CMD_START 0x11 #define USB_8DEV_CMD_END 0x22 #define USB_8DEV_CMD_SUCCESS 0 #define USB_8DEV_CMD_ERROR 255 #define USB_8DEV_CMD_TIMEOUT 1000 /* frames */ #define USB_8DEV_DATA_START 0x55 #define USB_8DEV_DATA_END 0xAA #define USB_8DEV_TYPE_CAN_FRAME 0 #define USB_8DEV_TYPE_ERROR_FRAME 3 #define USB_8DEV_EXTID 0x01 #define USB_8DEV_RTR 0x02 #define USB_8DEV_ERR_FLAG 0x04 /* status */ #define USB_8DEV_STATUSMSG_OK 0x00 /* Normal condition. */ #define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occurred when sending */ #define USB_8DEV_STATUSMSG_BUSLIGHT 0x02 /* Error counter has reached 96 */ #define USB_8DEV_STATUSMSG_BUSHEAVY 0x03 /* Error counter
has reached 128 */ #define USB_8DEV_STATUSMSG_BUSOFF 0x04 /* Device is in BUSOFF */ #define USB_8DEV_STATUSMSG_STUFF 0x20 /* Stuff Error */ #define USB_8DEV_STATUSMSG_FORM 0x21 /* Form Error */ #define USB_8DEV_STATUSMSG_ACK 0x23 /* Ack Error */ #define USB_8DEV_STATUSMSG_BIT0 0x24 /* Bit1 Error */ #define USB_8DEV_STATUSMSG_BIT1 0x25 /* Bit0 Error */ #define USB_8DEV_STATUSMSG_CRC 0x27 /* CRC Error */ #define USB_8DEV_RP_MASK 0x7F /* Mask for Receive Error Bit */ /* table of devices that work with this driver */ static const struct usb_device_id usb_8dev_table[] = { { USB_DEVICE(USB_8DEV_VENDOR_ID, USB_8DEV_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_8dev_table); struct usb_8dev_tx_urb_context { struct usb_8dev_priv *priv; u32 echo_index; }; /* Structure to hold all of our device specific stuff */ struct usb_8dev_priv { struct can_priv can; /* must be the first member */ struct usb_device *udev; struct net_device *netdev; atomic_t active_tx_urbs; struct usb_anchor tx_submitted; struct usb_8dev_tx_urb_context tx_contexts[MAX_TX_URBS]; struct usb_anchor rx_submitted; struct can_berr_counter bec; u8 *cmd_msg_buffer; struct mutex usb_8dev_cmd_lock; void *rxbuf[MAX_RX_URBS]; dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; /* tx frame */ struct __packed usb_8dev_tx_msg { u8 begin; u8 flags; /* RTR and EXT_ID flag */ __be32 id; /* upper 3 bits not used */ u8 dlc; /* data length code 0-8 bytes */ u8 data[8]; /* 64-bit data */ u8 end; }; /* rx frame */ struct __packed usb_8dev_rx_msg { u8 begin; u8 type; /* frame type */ u8 flags; /* RTR and EXT_ID flag */ __be32 id; /* upper 3 bits not used */ u8 dlc; /* data length code 0-8 bytes */ u8 data[8]; /* 64-bit data */ __be32 timestamp; /* 32-bit timestamp */ u8 end; }; /* command frame */ struct __packed usb_8dev_cmd_msg { u8 begin; u8 channel; /* unknown - always 0 */ u8 command; /* command to execute */ u8 opt1; /* optional parameter / return value */ u8 opt2; /* optional parameter 2 */ u8 data[10]; /* optional parameter and data */ u8 end; }; static int usb_8dev_send_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size) { int actual_length; return usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_TX), msg, size, &actual_length, USB_8DEV_CMD_TIMEOUT); } static int usb_8dev_wait_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size, int *actual_length) { return usb_bulk_msg(priv->udev, usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_RX), msg, size, actual_length, USB_8DEV_CMD_TIMEOUT); } /* Send command to device and receive result. * Command was successful when opt1 = 0. 
*/ static int usb_8dev_send_cmd(struct usb_8dev_priv *priv, struct usb_8dev_cmd_msg *out, struct usb_8dev_cmd_msg *in) { int err; int num_bytes_read; struct net_device *netdev; netdev = priv->netdev; out->begin = USB_8DEV_CMD_START; out->end = USB_8DEV_CMD_END; mutex_lock(&priv->usb_8dev_cmd_lock); memcpy(priv->cmd_msg_buffer, out, sizeof(struct usb_8dev_cmd_msg)); err = usb_8dev_send_cmd_msg(priv, priv->cmd_msg_buffer, sizeof(struct usb_8dev_cmd_msg)); if (err < 0) { netdev_err(netdev, "sending command message failed\n"); goto failed; } err = usb_8dev_wait_cmd_msg(priv, priv->cmd_msg_buffer, sizeof(struct usb_8dev_cmd_msg), &num_bytes_read); if (err < 0) { netdev_err(netdev, "no command message answer\n"); goto failed; } memcpy(in, priv->cmd_msg_buffer, sizeof(struct usb_8dev_cmd_msg)); if (in->begin != USB_8DEV_CMD_START || in->end != USB_8DEV_CMD_END || num_bytes_read != 16 || in->opt1 != 0) err = -EPROTO; failed: mutex_unlock(&priv->usb_8dev_cmd_lock); return err; } /* Send open command to device */ static int usb_8dev_cmd_open(struct usb_8dev_priv *priv) { struct can_bittiming *bt = &priv->can.bittiming; struct usb_8dev_cmd_msg outmsg; struct usb_8dev_cmd_msg inmsg; u32 ctrlmode = priv->can.ctrlmode; u32 flags = USB_8DEV_STATUS_FRAME; __be32 beflags; __be16 bebrp; memset(&outmsg, 0, sizeof(outmsg)); outmsg.command = USB_8DEV_OPEN; outmsg.opt1 = USB_8DEV_BAUD_MANUAL; outmsg.data[0] = bt->prop_seg + bt->phase_seg1; outmsg.data[1] = bt->phase_seg2; outmsg.data[2] = bt->sjw; /* BRP */ bebrp = cpu_to_be16((u16)bt->brp); memcpy(&outmsg.data[3], &bebrp, sizeof(bebrp)); /* flags */ if (ctrlmode & CAN_CTRLMODE_LOOPBACK) flags |= USB_8DEV_LOOPBACK; if (ctrlmode & CAN_CTRLMODE_LISTENONLY) flags |= USB_8DEV_SILENT; if (ctrlmode & CAN_CTRLMODE_ONE_SHOT) flags |= USB_8DEV_DISABLE_AUTO_RESTRANS; beflags = cpu_to_be32(flags); memcpy(&outmsg.data[5], &beflags, sizeof(beflags)); return usb_8dev_send_cmd(priv, &outmsg, &inmsg); } /* Send close command to device */ static int usb_8dev_cmd_close(struct usb_8dev_priv *priv) { struct usb_8dev_cmd_msg inmsg; struct usb_8dev_cmd_msg outmsg = { .channel = 0, .command = USB_8DEV_CLOSE, .opt1 = 0, .opt2 = 0 }; return usb_8dev_send_cmd(priv, &outmsg, &inmsg); } /* Get firmware and hardware version */ static int usb_8dev_cmd_version(struct usb_8dev_priv *priv, u32 *res) { struct usb_8dev_cmd_msg inmsg; struct usb_8dev_cmd_msg outmsg = { .channel = 0, .command = USB_8DEV_GET_SOFTW_HARDW_VER, .opt1 = 0, .opt2 = 0 }; int err = usb_8dev_send_cmd(priv, &outmsg, &inmsg); if (err) return err; *res = be32_to_cpup((__be32 *)inmsg.data); return err; } /* Set network device mode * * Maybe we should leave this function empty, because the device * sets the mode variable with the open command.
*/ static int usb_8dev_set_mode(struct net_device *netdev, enum can_mode mode) { struct usb_8dev_priv *priv = netdev_priv(netdev); int err = 0; switch (mode) { case CAN_MODE_START: err = usb_8dev_cmd_open(priv); if (err) netdev_warn(netdev, "couldn't start device"); break; default: return -EOPNOTSUPP; } return err; } /* Read error/status frames */ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, struct usb_8dev_rx_msg *msg) { struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &priv->netdev->stats; /* Error message: * byte 0: Status * byte 1: bit 7: Receive Passive * byte 1: bit 0-6: Receive Error Counter * byte 2: Transmit Error Counter * byte 3: Always 0 (maybe reserved for future use) */ u8 state = msg->data[0]; u8 rxerr = msg->data[1] & USB_8DEV_RP_MASK; u8 txerr = msg->data[2]; int rx_errors = 0; int tx_errors = 0; skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) return; switch (state) { case USB_8DEV_STATUSMSG_OK: priv->can.state = CAN_STATE_ERROR_ACTIVE; cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; break; case USB_8DEV_STATUSMSG_BUSOFF: priv->can.state = CAN_STATE_BUS_OFF; cf->can_id |= CAN_ERR_BUSOFF; priv->can.can_stats.bus_off++; can_bus_off(priv->netdev); break; case USB_8DEV_STATUSMSG_OVERRUN: case USB_8DEV_STATUSMSG_BUSLIGHT: case USB_8DEV_STATUSMSG_BUSHEAVY: cf->can_id |= CAN_ERR_CRTL; break; default: priv->can.state = CAN_STATE_ERROR_WARNING; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; priv->can.can_stats.bus_error++; break; } switch (state) { case USB_8DEV_STATUSMSG_OK: case USB_8DEV_STATUSMSG_BUSOFF: break; case USB_8DEV_STATUSMSG_ACK: cf->can_id |= CAN_ERR_ACK; tx_errors = 1; break; case USB_8DEV_STATUSMSG_CRC: cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; rx_errors = 1; break; case USB_8DEV_STATUSMSG_BIT0: cf->data[2] |= CAN_ERR_PROT_BIT0; tx_errors = 1; break; case USB_8DEV_STATUSMSG_BIT1: cf->data[2] |= CAN_ERR_PROT_BIT1; tx_errors = 1; break; case USB_8DEV_STATUSMSG_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; rx_errors = 1; break; case USB_8DEV_STATUSMSG_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; rx_errors = 1; break; case USB_8DEV_STATUSMSG_OVERRUN: cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; rx_errors = 1; break; case USB_8DEV_STATUSMSG_BUSLIGHT: priv->can.state = CAN_STATE_ERROR_WARNING; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; priv->can.can_stats.error_warning++; break; case USB_8DEV_STATUSMSG_BUSHEAVY: priv->can.state = CAN_STATE_ERROR_PASSIVE; cf->data[1] = (txerr > rxerr) ? 
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; priv->can.can_stats.error_passive++; break; default: netdev_warn(priv->netdev, "Unknown status/error message (%d)\n", state); break; } if (tx_errors) { cf->data[2] |= CAN_ERR_PROT_TX; stats->tx_errors++; } if (rx_errors) stats->rx_errors++; if (priv->can.state != CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; netif_rx(skb); } /* Read data and status frames */ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, struct usb_8dev_rx_msg *msg) { struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &priv->netdev->stats; if (msg->type == USB_8DEV_TYPE_ERROR_FRAME && msg->flags == USB_8DEV_ERR_FLAG) { usb_8dev_rx_err_msg(priv, msg); } else if (msg->type == USB_8DEV_TYPE_CAN_FRAME) { skb = alloc_can_skb(priv->netdev, &cf); if (!skb) return; cf->can_id = be32_to_cpu(msg->id); can_frame_set_cc_len(cf, msg->dlc & 0xF, priv->can.ctrlmode); if (msg->flags & USB_8DEV_EXTID) cf->can_id |= CAN_EFF_FLAG; if (msg->flags & USB_8DEV_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { memcpy(cf->data, msg->data, cf->len); stats->rx_bytes += cf->len; } stats->rx_packets++; netif_rx(skb); } else { netdev_warn(priv->netdev, "frame type %d unknown", msg->type); } } /* Callback for reading data from device * * Check urb status, call read function and resubmit urb read operation. */ static void usb_8dev_read_bulk_callback(struct urb *urb) { struct usb_8dev_priv *priv = urb->context; struct net_device *netdev; int retval; int pos = 0; netdev = priv->netdev; if (!netif_device_present(netdev)) return; switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status); goto resubmit_urb; } while (pos < urb->actual_length) { struct usb_8dev_rx_msg *msg; if (pos + sizeof(struct usb_8dev_rx_msg) > urb->actual_length) { netdev_err(priv->netdev, "format error\n"); break; } msg = (struct usb_8dev_rx_msg *)(urb->transfer_buffer + pos); usb_8dev_rx_can_msg(priv, msg); pos += sizeof(struct usb_8dev_rx_msg); } resubmit_urb: usb_fill_bulk_urb(urb, priv->udev, usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX), urb->transfer_buffer, RX_BUFFER_SIZE, usb_8dev_read_bulk_callback, priv); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval == -ENODEV) netif_device_detach(netdev); else if (retval) netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", retval); } /* Callback handler for write operations * * Free allocated buffers, check transmit status and * calculate statistic. 
*/ static void usb_8dev_write_bulk_callback(struct urb *urb) { struct usb_8dev_tx_urb_context *context = urb->context; struct usb_8dev_priv *priv; struct net_device *netdev; BUG_ON(!context); priv = context->priv; netdev = priv->netdev; /* free up our allocated buffer */ usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); atomic_dec(&priv->active_tx_urbs); if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); netdev->stats.tx_packets++; netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL); /* Release context */ context->echo_index = MAX_TX_URBS; netif_wake_queue(netdev); } /* Send data to device */ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct usb_8dev_priv *priv = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; struct can_frame *cf = (struct can_frame *) skb->data; struct usb_8dev_tx_msg *msg; struct urb *urb; struct usb_8dev_tx_urb_context *context = NULL; u8 *buf; int i, err; size_t size = sizeof(struct usb_8dev_tx_msg); if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) goto nomem; buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); goto nomembuf; } memset(buf, 0, size); msg = (struct usb_8dev_tx_msg *)buf; msg->begin = USB_8DEV_DATA_START; msg->flags = 0x00; if (cf->can_id & CAN_RTR_FLAG) msg->flags |= USB_8DEV_RTR; if (cf->can_id & CAN_EFF_FLAG) msg->flags |= USB_8DEV_EXTID; msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK); msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); memcpy(msg->data, cf->data, cf->len); msg->end = USB_8DEV_DATA_END; for (i = 0; i < MAX_TX_URBS; i++) { if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { context = &priv->tx_contexts[i]; break; } } /* Should never happen: it would mean we had more URBs in flight than * allowed (MAX_TX_URBS). */ if (!context) goto nofreecontext; context->priv = priv; context->echo_index = i; usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX), buf, size, usb_8dev_write_bulk_callback, context); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->tx_submitted); can_put_echo_skb(skb, netdev, context->echo_index, 0); atomic_inc(&priv->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); atomic_dec(&priv->active_tx_urbs); if (err == -ENODEV) netif_device_detach(netdev); else netdev_warn(netdev, "failed tx_urb %d\n", err); stats->tx_dropped++; } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) /* Slow down tx path */ netif_stop_queue(netdev); /* Release our reference to this URB, the USB core will eventually free * it entirely.
*/ usb_free_urb(urb); return NETDEV_TX_OK; nofreecontext: usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); usb_free_urb(urb); netdev_warn(netdev, "couldn't find free context"); return NETDEV_TX_BUSY; nomembuf: usb_free_urb(urb); nomem: dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; } static int usb_8dev_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct usb_8dev_priv *priv = netdev_priv(netdev); bec->txerr = priv->bec.txerr; bec->rxerr = priv->bec.rxerr; return 0; } /* Start USB device */ static int usb_8dev_start(struct usb_8dev_priv *priv) { struct net_device *netdev = priv->netdev; int err, i; for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf; dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { err = -ENOMEM; break; } buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL, &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } urb->transfer_dma = buf_dma; usb_fill_bulk_urb(urb, priv->udev, usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX), buf, RX_BUFFER_SIZE, usb_8dev_read_bulk_callback, priv); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { usb_unanchor_urb(urb); usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf, urb->transfer_dma); usb_free_urb(urb); break; } priv->rxbuf[i] = buf; priv->rxbuf_dma[i] = buf_dma; /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } /* Did we submit any URBs? */ if (i == 0) { netdev_warn(netdev, "couldn't setup read URBs\n"); return err; } /* Warn if we couldn't submit all the URBs */ if (i < MAX_RX_URBS) netdev_warn(netdev, "rx performance may be slow\n"); err = usb_8dev_cmd_open(priv); if (err) goto failed; priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; failed: if (err == -ENODEV) netif_device_detach(priv->netdev); netdev_warn(netdev, "couldn't submit control: %d\n", err); return err; } /* Open USB device */ static int usb_8dev_open(struct net_device *netdev) { struct usb_8dev_priv *priv = netdev_priv(netdev); int err; /* common open */ err = open_candev(netdev); if (err) return err; /* finally start device */ err = usb_8dev_start(priv); if (err) { if (err == -ENODEV) netif_device_detach(priv->netdev); netdev_warn(netdev, "couldn't start device: %d\n", err); close_candev(netdev); return err; } netif_start_queue(netdev); return 0; } static void unlink_all_urbs(struct usb_8dev_priv *priv) { int i; usb_kill_anchored_urbs(&priv->rx_submitted); for (i = 0; i < MAX_RX_URBS; ++i) usb_free_coherent(priv->udev, RX_BUFFER_SIZE, priv->rxbuf[i], priv->rxbuf_dma[i]); usb_kill_anchored_urbs(&priv->tx_submitted); atomic_set(&priv->active_tx_urbs, 0); for (i = 0; i < MAX_TX_URBS; i++) priv->tx_contexts[i].echo_index = MAX_TX_URBS; } /* Close USB device */ static int usb_8dev_close(struct net_device *netdev) { struct usb_8dev_priv *priv = netdev_priv(netdev); int err = 0; /* Send CLOSE command to CAN controller */ err = usb_8dev_cmd_close(priv); if (err) netdev_warn(netdev, "couldn't stop device"); priv->can.state = CAN_STATE_STOPPED; netif_stop_queue(netdev); /* Stop polling */ unlink_all_urbs(priv); close_candev(netdev); return err; } static const struct net_device_ops usb_8dev_netdev_ops = { .ndo_open = usb_8dev_open, .ndo_stop = usb_8dev_close, .ndo_start_xmit = usb_8dev_start_xmit, .ndo_change_mtu = can_change_mtu, }; static
const struct ethtool_ops usb_8dev_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct can_bittiming_const usb_8dev_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; /* Probe USB device * * Check device and firmware. * Set supported modes and bittiming constants. * Allocate some memory. */ static int usb_8dev_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct net_device *netdev; struct usb_8dev_priv *priv; int i, err = -ENOMEM; u32 version; char buf[18]; struct usb_device *usbdev = interface_to_usbdev(intf); /* the product id is generic, so also check the iProduct string */ if (usb_string(usbdev, usbdev->descriptor.iProduct, buf, sizeof(buf)) > 0 && strcmp(buf, "USB2CAN converter")) { dev_info(&usbdev->dev, "ignoring: not a USB2CAN converter\n"); return -ENODEV; } netdev = alloc_candev(sizeof(struct usb_8dev_priv), MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "Couldn't alloc candev\n"); return -ENOMEM; } priv = netdev_priv(netdev); priv->udev = usbdev; priv->netdev = netdev; priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = USB_8DEV_ABP_CLOCK; priv->can.bittiming_const = &usb_8dev_bittiming_const; priv->can.do_set_mode = usb_8dev_set_mode; priv->can.do_get_berr_counter = usb_8dev_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC; netdev->netdev_ops = &usb_8dev_netdev_ops; netdev->ethtool_ops = &usb_8dev_ethtool_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ init_usb_anchor(&priv->rx_submitted); init_usb_anchor(&priv->tx_submitted); atomic_set(&priv->active_tx_urbs, 0); for (i = 0; i < MAX_TX_URBS; i++) priv->tx_contexts[i].echo_index = MAX_TX_URBS; priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg), GFP_KERNEL); if (!priv->cmd_msg_buffer) goto cleanup_candev; usb_set_intfdata(intf, priv); SET_NETDEV_DEV(netdev, &intf->dev); mutex_init(&priv->usb_8dev_cmd_lock); err = register_candev(netdev); if (err) { netdev_err(netdev, "couldn't register CAN device: %d\n", err); goto cleanup_candev; } err = usb_8dev_cmd_version(priv, &version); if (err) { netdev_err(netdev, "can't get firmware version\n"); goto cleanup_unregister_candev; } else { netdev_info(netdev, "firmware: %d.%d, hardware: %d.%d\n", (version>>24) & 0xff, (version>>16) & 0xff, (version>>8) & 0xff, version & 0xff); } return 0; cleanup_unregister_candev: unregister_netdev(priv->netdev); cleanup_candev: free_candev(netdev); return err; } /* Called by the usb core when driver is unloaded or device is removed */ static void usb_8dev_disconnect(struct usb_interface *intf) { struct usb_8dev_priv *priv = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (priv) { netdev_info(priv->netdev, "device disconnected\n"); unregister_netdev(priv->netdev); unlink_all_urbs(priv); free_candev(priv->netdev); } } static struct usb_driver usb_8dev_driver = { .name = KBUILD_MODNAME, .probe = usb_8dev_probe, .disconnect = usb_8dev_disconnect, .id_table = usb_8dev_table, }; module_usb_driver(usb_8dev_driver); MODULE_AUTHOR("Bernd Krumboeck <krumboeck@universalnet.at>"); MODULE_DESCRIPTION("CAN driver for 8 devices USB2CAN interfaces"); MODULE_LICENSE("GPL v2");
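/*
 * Illustrative host-side sketch (editor's addition, not part of the driver):
 * packing one classic CAN frame into the bulk format that
 * usb_8dev_start_xmit() produces, to make the wire layout of
 * struct usb_8dev_tx_msg concrete. The constants mirror the #defines above;
 * demo_tx_msg is a stand-in name, and this builds as a plain userspace
 * program rather than kernel code.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_tx_msg {		/* mirrors struct usb_8dev_tx_msg */
	uint8_t begin;		/* USB_8DEV_DATA_START, 0x55 */
	uint8_t flags;		/* USB_8DEV_EXTID / USB_8DEV_RTR */
	uint8_t id[4];		/* CAN id, big endian on the wire */
	uint8_t dlc;		/* 0-8 data bytes */
	uint8_t data[8];
	uint8_t end;		/* USB_8DEV_DATA_END, 0xAA */
} __attribute__((packed));

int main(void)
{
	struct demo_tx_msg m = { .begin = 0x55, .end = 0xAA };
	uint32_t can_id = 0x123;	/* 11-bit id: no EXTID flag needed */
	uint8_t payload[2] = { 0xde, 0xad };

	m.id[0] = can_id >> 24;
	m.id[1] = can_id >> 16;
	m.id[2] = can_id >> 8;
	m.id[3] = can_id;
	m.dlc = sizeof(payload);
	memcpy(m.data, payload, m.dlc);

	/* prints: 55 00 00 00 01 23 02 de ad 00 00 00 00 00 00 aa */
	for (size_t i = 0; i < sizeof(m); i++)
		printf("%02x ", ((const uint8_t *)&m)[i]);
	printf("\n");
	return 0;
}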
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * NetLabel NETLINK Interface * * This file defines the NETLINK interface for the NetLabel system. The * NetLabel system manages static and dynamic label mappings for network * protocols such as CIPSO and RIPSO. * * Author: Paul Moore <paul@paul-moore.com> */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 */ #ifndef _NETLABEL_USER_H #define _NETLABEL_USER_H #include <linux/types.h> #include <linux/skbuff.h> #include <linux/capability.h> #include <linux/audit.h> #include <net/netlink.h> #include <net/genetlink.h> #include <net/netlabel.h> /* NetLabel NETLINK helper functions */ /** * netlbl_netlink_auditinfo - Fetch the audit information from a NETLINK msg * @audit_info: NetLabel audit information */ static inline void netlbl_netlink_auditinfo(struct netlbl_audit *audit_info) { security_current_getlsmprop_subj(&audit_info->prop); audit_info->loginuid = audit_get_loginuid(current); audit_info->sessionid = audit_get_sessionid(current); } /* NetLabel NETLINK I/O functions */ int netlbl_netlink_init(void); /* NetLabel Audit Functions */ struct audit_buffer *netlbl_audit_start_common(int type, struct netlbl_audit *audit_info); #endif
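/*
 * Illustrative sketch (editor's addition, hypothetical handler, not from the
 * kernel tree): the intended call pattern for the helpers above - fill a
 * struct netlbl_audit from the current task, open a common audit record,
 * append subsystem-specific fields, then close it. AUDIT_MAC_MAP_ADD and the
 * audit_log_*() calls are the regular kernel audit API; the handler name and
 * the "nlbl_domain" field are made up for the example.
 */
static int example_netlbl_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct netlbl_audit audit_info;
	struct audit_buffer *audit_buf;

	netlbl_netlink_auditinfo(&audit_info);

	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, &audit_info);
	if (audit_buf) {
		audit_log_format(audit_buf, " nlbl_domain=%s", "example");
		audit_log_end(audit_buf);
	}
	return 0;
}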
// SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" #include "acl.h" #include "bkey_methods.h" #include "btree_update.h" #include "extents.h" #include "fs.h" #include "rebalance.h" #include "str_hash.h" #include "xattr.h" #include <linux/dcache.h> #include <linux/posix_acl_xattr.h> #include <linux/xattr.h> static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned); static u64 bch2_xattr_hash(const struct bch_hash_info *info, const struct xattr_search_key *key) { struct bch_str_hash_ctx ctx; bch2_str_hash_init(&ctx, info); bch2_str_hash_update(&ctx, info, &key->type, sizeof(key->type)); bch2_str_hash_update(&ctx, info, key->name.name, key->name.len); return bch2_str_hash_end(&ctx, info); } static u64 xattr_hash_key(const struct bch_hash_info *info, const void *key) { return bch2_xattr_hash(info, key); } static u64 xattr_hash_bkey(const
struct bch_hash_info *info, struct bkey_s_c k) { struct bkey_s_c_xattr x = bkey_s_c_to_xattr(k); return bch2_xattr_hash(info, &X_SEARCH(x.v->x_type, x.v->x_name_and_value, x.v->x_name_len)); } static bool xattr_cmp_key(struct bkey_s_c _l, const void *_r) { struct bkey_s_c_xattr l = bkey_s_c_to_xattr(_l); const struct xattr_search_key *r = _r; return l.v->x_type != r->type || l.v->x_name_len != r->name.len || memcmp(l.v->x_name_and_value, r->name.name, r->name.len); } static bool xattr_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r) { struct bkey_s_c_xattr l = bkey_s_c_to_xattr(_l); struct bkey_s_c_xattr r = bkey_s_c_to_xattr(_r); return l.v->x_type != r.v->x_type || l.v->x_name_len != r.v->x_name_len || memcmp(l.v->x_name_and_value, r.v->x_name_and_value, r.v->x_name_len); } const struct bch_hash_desc bch2_xattr_hash_desc = { .btree_id = BTREE_ID_xattrs, .key_type = KEY_TYPE_xattr, .hash_key = xattr_hash_key, .hash_bkey = xattr_hash_bkey, .cmp_key = xattr_cmp_key, .cmp_bkey = xattr_cmp_bkey, }; int bch2_xattr_validate(struct bch_fs *c, struct bkey_s_c k, struct bkey_validate_context from) { struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k); unsigned val_u64s = xattr_val_u64s(xattr.v->x_name_len, le16_to_cpu(xattr.v->x_val_len)); int ret = 0; bkey_fsck_err_on(bkey_val_u64s(k.k) < val_u64s, c, xattr_val_size_too_small, "value too small (%zu < %u)", bkey_val_u64s(k.k), val_u64s); /* XXX why +4 ? */ val_u64s = xattr_val_u64s(xattr.v->x_name_len, le16_to_cpu(xattr.v->x_val_len) + 4); bkey_fsck_err_on(bkey_val_u64s(k.k) > val_u64s, c, xattr_val_size_too_big, "value too big (%zu > %u)", bkey_val_u64s(k.k), val_u64s); bkey_fsck_err_on(!bch2_xattr_type_to_handler(xattr.v->x_type), c, xattr_invalid_type, "invalid type (%u)", xattr.v->x_type); bkey_fsck_err_on(memchr(xattr.v->x_name_and_value, '\0', xattr.v->x_name_len), c, xattr_name_invalid_chars, "xattr name has invalid characters"); fsck_err: return ret; } void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { const struct xattr_handler *handler; struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k); handler = bch2_xattr_type_to_handler(xattr.v->x_type); if (handler && handler->prefix) prt_printf(out, "%s", handler->prefix); else if (handler) prt_printf(out, "(type %u)", xattr.v->x_type); else prt_printf(out, "(unknown type %u)", xattr.v->x_type); unsigned name_len = xattr.v->x_name_len; unsigned val_len = le16_to_cpu(xattr.v->x_val_len); unsigned max_name_val_bytes = bkey_val_bytes(xattr.k) - offsetof(struct bch_xattr, x_name_and_value); val_len = min_t(int, val_len, max_name_val_bytes - name_len); name_len = min(name_len, max_name_val_bytes); prt_printf(out, "%.*s:%.*s", name_len, xattr.v->x_name_and_value, val_len, (char *) xattr_val(xattr.v)); if (xattr.v->x_type == KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS || xattr.v->x_type == KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT) { prt_char(out, ' '); bch2_acl_to_text(out, xattr_val(xattr.v), le16_to_cpu(xattr.v->x_val_len)); } } static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info *inode, const char *name, void *buffer, size_t size, int type) { struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode); struct xattr_search_key search = X_SEARCH(type, name, strlen(name)); struct btree_iter iter; struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash, inode_inum(inode), &search, 0); int ret = bkey_err(k); if (ret) return ret; struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k); ret = 
le16_to_cpu(xattr.v->x_val_len); if (buffer) { if (ret > size) ret = -ERANGE; else memcpy(buffer, xattr_val(xattr.v), ret); } bch2_trans_iter_exit(trans, &iter); return ret; } int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum, struct bch_inode_unpacked *inode_u, const struct bch_hash_info *hash_info, const char *name, const void *value, size_t size, int type, int flags) { struct bch_fs *c = trans->c; struct btree_iter inode_iter = {}; int ret; ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?: bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_intent); if (ret) return ret; /* * Besides the ctime update, extents, dirents and xattrs updates require * that an inode update also happens - to ensure that if a key exists in * one of those btrees with a given snapshot ID an inode is also present */ inode_u->bi_ctime = bch2_current_time(c); ret = bch2_inode_write(trans, &inode_iter, inode_u); bch2_trans_iter_exit(trans, &inode_iter); if (ret) return ret; if (value) { struct bkey_i_xattr *xattr; unsigned namelen = strlen(name); unsigned u64s = BKEY_U64s + xattr_val_u64s(namelen, size); if (u64s > U8_MAX) return -ERANGE; xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64)); if (IS_ERR(xattr)) return PTR_ERR(xattr); bkey_xattr_init(&xattr->k_i); xattr->k.u64s = u64s; xattr->v.x_type = type; xattr->v.x_name_len = namelen; xattr->v.x_val_len = cpu_to_le16(size); memcpy(xattr->v.x_name_and_value, name, namelen); memcpy(xattr_val(&xattr->v), value, size); ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info, inum, &xattr->k_i, (flags & XATTR_CREATE ? STR_HASH_must_create : 0)| (flags & XATTR_REPLACE ? STR_HASH_must_replace : 0)); } else { struct xattr_search_key search = X_SEARCH(type, name, strlen(name)); ret = bch2_hash_delete(trans, bch2_xattr_hash_desc, hash_info, inum, &search); } if (bch2_err_matches(ret, ENOENT)) ret = flags & XATTR_REPLACE ? -ENODATA : 0; return ret; } struct xattr_buf { char *buf; size_t len; size_t used; }; static int __bch2_xattr_emit(const char *prefix, const char *name, size_t name_len, struct xattr_buf *buf) { const size_t prefix_len = strlen(prefix); const size_t total_len = prefix_len + name_len + 1; if (buf->buf) { if (buf->used + total_len > buf->len) return -ERANGE; memcpy(buf->buf + buf->used, prefix, prefix_len); memcpy(buf->buf + buf->used + prefix_len, name, name_len); buf->buf[buf->used + prefix_len + name_len] = '\0'; } buf->used += total_len; return 0; } static inline const char *bch2_xattr_prefix(unsigned type, struct dentry *dentry) { const struct xattr_handler *handler = bch2_xattr_type_to_handler(type); if (!xattr_handler_can_list(handler, dentry)) return NULL; return xattr_prefix(handler); } static int bch2_xattr_emit(struct dentry *dentry, const struct bch_xattr *xattr, struct xattr_buf *buf) { const char *prefix; prefix = bch2_xattr_prefix(xattr->x_type, dentry); if (!prefix) return 0; return __bch2_xattr_emit(prefix, xattr->x_name_and_value, xattr->x_name_len, buf); } static int bch2_xattr_list_bcachefs(struct bch_fs *c, struct bch_inode_unpacked *inode, struct xattr_buf *buf, bool all) { const char *prefix = all ? "bcachefs_effective." 
: "bcachefs."; unsigned id; int ret = 0; u64 v; for (id = 0; id < Inode_opt_nr; id++) { v = bch2_inode_opt_get(inode, id); if (!v) continue; if (!all && !(inode->bi_fields_set & (1 << id))) continue; ret = __bch2_xattr_emit(prefix, bch2_inode_opts[id], strlen(bch2_inode_opts[id]), buf); if (ret) break; } return ret; } ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct bch_fs *c = dentry->d_sb->s_fs_info; struct bch_inode_info *inode = to_bch_ei(dentry->d_inode); struct xattr_buf buf = { .buf = buffer, .len = buffer_size }; u64 offset = 0, inum = inode->ei_inode.bi_inum; int ret = bch2_trans_run(c, for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_xattrs, POS(inum, offset), POS(inum, U64_MAX), inode->ei_inum.subvol, 0, k, ({ if (k.k->type != KEY_TYPE_xattr) continue; bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf); }))) ?: bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, false) ?: bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, true); return ret ? bch2_err_class(ret) : buf.used; } static int bch2_xattr_get_handler(const struct xattr_handler *handler, struct dentry *dentry, struct inode *vinode, const char *name, void *buffer, size_t size) { struct bch_inode_info *inode = to_bch_ei(vinode); struct bch_fs *c = inode->v.i_sb->s_fs_info; int ret = bch2_trans_do(c, bch2_xattr_get_trans(trans, inode, name, buffer, size, handler->flags)); if (ret < 0 && bch2_err_matches(ret, ENOENT)) ret = -ENODATA; return bch2_err_class(ret); } static int bch2_xattr_set_handler(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *vinode, const char *name, const void *value, size_t size, int flags) { struct bch_inode_info *inode = to_bch_ei(vinode); struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode); struct bch_inode_unpacked inode_u; int ret; ret = bch2_trans_run(c, commit_do(trans, NULL, NULL, 0, bch2_xattr_set(trans, inode_inum(inode), &inode_u, &hash, name, value, size, handler->flags, flags)) ?: (bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME), 0)); return bch2_err_class(ret); } static const struct xattr_handler bch_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .get = bch2_xattr_get_handler, .set = bch2_xattr_set_handler, .flags = KEY_TYPE_XATTR_INDEX_USER, }; static bool bch2_xattr_trusted_list(struct dentry *dentry) { return capable(CAP_SYS_ADMIN); } static const struct xattr_handler bch_xattr_trusted_handler = { .prefix = XATTR_TRUSTED_PREFIX, .list = bch2_xattr_trusted_list, .get = bch2_xattr_get_handler, .set = bch2_xattr_set_handler, .flags = KEY_TYPE_XATTR_INDEX_TRUSTED, }; static const struct xattr_handler bch_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, .get = bch2_xattr_get_handler, .set = bch2_xattr_set_handler, .flags = KEY_TYPE_XATTR_INDEX_SECURITY, }; #ifndef NO_BCACHEFS_FS static int opt_to_inode_opt(int id) { switch (id) { #define x(name, ...) 
\ case Opt_##name: return Inode_opt_##name; BCH_INODE_OPTS() #undef x default: return -1; } } static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *vinode, const char *name, void *buffer, size_t size, bool all) { struct bch_inode_info *inode = to_bch_ei(vinode); struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_opts opts = bch2_inode_opts_to_opts(&inode->ei_inode); const struct bch_option *opt; int id, inode_opt_id; struct printbuf out = PRINTBUF; int ret; u64 v; id = bch2_opt_lookup(name); if (id < 0 || !bch2_opt_is_inode_opt(id)) return -EINVAL; inode_opt_id = opt_to_inode_opt(id); if (inode_opt_id < 0) return -EINVAL; opt = bch2_opt_table + id; if (!bch2_opt_defined_by_id(&opts, id)) return -ENODATA; if (!all && !(inode->ei_inode.bi_fields_set & (1 << inode_opt_id))) return -ENODATA; v = bch2_opt_get_by_id(&opts, id); bch2_opt_to_text(&out, c, c->disk_sb.sb, opt, v, 0); ret = out.pos; if (out.allocation_failure) { ret = -ENOMEM; } else if (buffer) { if (out.pos > size) ret = -ERANGE; else memcpy(buffer, out.buf, out.pos); } printbuf_exit(&out); return ret; } static int bch2_xattr_bcachefs_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *vinode, const char *name, void *buffer, size_t size) { return __bch2_xattr_bcachefs_get(handler, dentry, vinode, name, buffer, size, false); } struct inode_opt_set { int id; u64 v; bool defined; }; static int inode_opt_set_fn(struct btree_trans *trans, struct bch_inode_info *inode, struct bch_inode_unpacked *bi, void *p) { struct inode_opt_set *s = p; if (s->id == Inode_opt_casefold) { int ret = bch2_inode_set_casefold(trans, inode_inum(inode), bi, s->v); if (ret) return ret; } if (s->defined) bi->bi_fields_set |= 1U << s->id; else bi->bi_fields_set &= ~(1U << s->id); bch2_inode_opt_set(bi, s->id, s->v); return 0; } static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *vinode, const char *name, const void *value, size_t size, int flags) { struct bch_inode_info *inode = to_bch_ei(vinode); struct bch_fs *c = inode->v.i_sb->s_fs_info; const struct bch_option *opt; char *buf; struct inode_opt_set s; int opt_id, inode_opt_id, ret; opt_id = bch2_opt_lookup(name); if (opt_id < 0) return -EINVAL; opt = bch2_opt_table + opt_id; inode_opt_id = opt_to_inode_opt(opt_id); if (inode_opt_id < 0) return -EINVAL; s.id = inode_opt_id; if (value) { u64 v = 0; buf = kmalloc(size + 1, GFP_KERNEL); if (!buf) return -ENOMEM; memcpy(buf, value, size); buf[size] = '\0'; ret = bch2_opt_parse(c, opt, buf, &v, NULL); kfree(buf); if (ret < 0) goto err_class_exit; ret = bch2_opt_hook_pre_set(c, NULL, opt_id, v); if (ret < 0) goto err_class_exit; s.v = v + 1; s.defined = true; } else { /* * Check if this option was set on the parent - if so, switch * back to inheriting from the parent: * * rename() also has to deal with keeping inherited options up * to date - see bch2_reinherit_attrs() */ spin_lock(&dentry->d_lock); if (!IS_ROOT(dentry)) { struct bch_inode_info *dir = to_bch_ei(d_inode(dentry->d_parent)); s.v = bch2_inode_opt_get(&dir->ei_inode, inode_opt_id); } else { s.v = 0; } spin_unlock(&dentry->d_lock); s.defined = false; } mutex_lock(&inode->ei_update_lock); if (inode_opt_id == Inode_opt_project) { /* * inode fields accessible via the xattr interface are stored * with a +1 bias, so that 0 means unset: */ ret = bch2_set_projid(c, inode, s.v ?
s.v - 1 : 0); if (ret) goto err; } ret = bch2_write_inode(c, inode, inode_opt_set_fn, &s, 0); err: mutex_unlock(&inode->ei_update_lock); err_class_exit: return bch2_err_class(ret); } static const struct xattr_handler bch_xattr_bcachefs_handler = { .prefix = "bcachefs.", .get = bch2_xattr_bcachefs_get, .set = bch2_xattr_bcachefs_set, }; static int bch2_xattr_bcachefs_get_effective( const struct xattr_handler *handler, struct dentry *dentry, struct inode *vinode, const char *name, void *buffer, size_t size) { return __bch2_xattr_bcachefs_get(handler, dentry, vinode, name, buffer, size, true); } /* Noop - xattrs in the bcachefs_effective namespace are inherited */ static int bch2_xattr_bcachefs_set_effective(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *vinode, const char *name, const void *value, size_t size, int flags) { return 0; } static const struct xattr_handler bch_xattr_bcachefs_effective_handler = { .prefix = "bcachefs_effective.", .get = bch2_xattr_bcachefs_get_effective, .set = bch2_xattr_bcachefs_set_effective, }; #endif /* NO_BCACHEFS_FS */ const struct xattr_handler * const bch2_xattr_handlers[] = { &bch_xattr_user_handler, &bch_xattr_trusted_handler, &bch_xattr_security_handler, #ifndef NO_BCACHEFS_FS &bch_xattr_bcachefs_handler, &bch_xattr_bcachefs_effective_handler, #endif NULL }; static const struct xattr_handler *bch_xattr_handler_map[] = { [KEY_TYPE_XATTR_INDEX_USER] = &bch_xattr_user_handler, [KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access, [KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default, [KEY_TYPE_XATTR_INDEX_TRUSTED] = &bch_xattr_trusted_handler, [KEY_TYPE_XATTR_INDEX_SECURITY] = &bch_xattr_security_handler, }; static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned type) { return type < ARRAY_SIZE(bch_xattr_handler_map) ? bch_xattr_handler_map[type] : NULL; }
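/*
 * Illustrative userspace sketch (editor's addition, not part of the sources
 * above): the "bcachefs." and "bcachefs_effective." namespaces implemented by
 * the handlers above are reachable with the ordinary xattr syscalls.
 * "compression" is used as an example option name; any per-inode option
 * accepted by bch2_opt_lookup() follows the same pattern, and getxattr() can
 * fail with ENODATA when the option is unset.
 */
#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	char buf[64];
	ssize_t n;

	/* set the per-inode option, like "setfattr -n bcachefs.compression" */
	if (setxattr(path, "bcachefs.compression", "zstd", 4, 0))
		perror("setxattr");

	/* read back the effective (possibly inherited) value */
	n = getxattr(path, "bcachefs_effective.compression", buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("getxattr");
		return 1;
	}
	buf[n] = '\0';
	printf("compression: %s\n", buf);
	return 0;
}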
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _KBD_KERN_H #define _KBD_KERN_H #include <linux/tty.h> #include <linux/interrupt.h> #include <linux/keyboard.h> extern char *func_table[MAX_NR_FUNC]; /* * kbd->xxx contains the VC-local things (flag settings etc..) * * Note: externally visible are LED_SCR, LED_NUM, LED_CAP defined in kd.h * The code in KDGETLED / KDSETLED depends on the internal and * external order being the same. * * Note: lockstate is used as index in the array key_map. */ struct kbd_struct { unsigned char lockstate; /* 8 modifiers - the names do not have any meaning at all; they can be associated to arbitrarily chosen keys */ #define VC_SHIFTLOCK KG_SHIFT /* shift lock mode */ #define VC_ALTGRLOCK KG_ALTGR /* altgr lock mode */ #define VC_CTRLLOCK KG_CTRL /* control lock mode */ #define VC_ALTLOCK KG_ALT /* alt lock mode */ #define VC_SHIFTLLOCK KG_SHIFTL /* shiftl lock mode */ #define VC_SHIFTRLOCK KG_SHIFTR /* shiftr lock mode */ #define VC_CTRLLLOCK KG_CTRLL /* ctrll lock mode */ #define VC_CTRLRLOCK KG_CTRLR /* ctrlr lock mode */ unsigned char slockstate; /* for `sticky' Shift, Ctrl, etc. */ unsigned char ledmode:1; #define LED_SHOW_FLAGS 0 /* traditional state */ #define LED_SHOW_IOCTL 1 /* only change leds upon ioctl */ unsigned char ledflagstate:4; /* flags, not lights */ unsigned char default_ledflagstate:4; #define VC_SCROLLOCK 0 /* scroll-lock mode */ #define VC_NUMLOCK 1 /* numeric lock mode */ #define VC_CAPSLOCK 2 /* capslock mode */ #define VC_KANALOCK 3 /* kanalock mode */ unsigned char kbdmode:3; /* one 3-bit value */ #define VC_XLATE 0 /* translate keycodes using keymap */ #define VC_MEDIUMRAW 1 /* medium raw (keycode) mode */ #define VC_RAW 2 /* raw (scancode) mode */ #define VC_UNICODE 3 /* Unicode mode */ #define VC_OFF 4 /* disabled mode */ unsigned char modeflags:5; #define VC_APPLIC 0 /* application key mode */ #define VC_CKMODE 1 /* cursor key mode */ #define VC_REPEAT 2 /* keyboard repeat */ #define VC_CRLF 3 /* 0 - enter sends CR, 1 - enter sends CRLF */ #define VC_META 4 /* 0 - meta, 1 - meta=prefix with ESC */ }; extern int kbd_init(void); extern void setledstate(struct kbd_struct *kbd, unsigned int led); extern int do_poke_blanked_console; extern void (*kbd_ledfunc)(unsigned int led); extern int set_console(int nr); extern void schedule_console_callback(void); static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag) { return ((kbd->modeflags >> flag) & 1); } static inline int vc_kbd_led(struct kbd_struct * kbd, int flag) { return ((kbd->ledflagstate >> flag) & 1); } static inline void set_vc_kbd_mode(struct kbd_struct * kbd, int flag) { kbd->modeflags |= 1 << flag; } static inline void set_vc_kbd_led(struct kbd_struct * kbd, int flag) { kbd->ledflagstate |= 1 << flag; } static inline void clr_vc_kbd_mode(struct kbd_struct * kbd, int flag) { kbd->modeflags &= ~(1 << flag); } static inline void clr_vc_kbd_led(struct kbd_struct * kbd, int flag) { kbd->ledflagstate &= ~(1 << flag); } static inline void chg_vc_kbd_lock(struct kbd_struct * kbd, int flag) { kbd->lockstate ^= 1 << flag; } static inline
void chg_vc_kbd_slock(struct kbd_struct * kbd, int flag) { kbd->slockstate ^= 1 << flag; } static inline void chg_vc_kbd_mode(struct kbd_struct * kbd, int flag) { kbd->modeflags ^= 1 << flag; } static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag) { kbd->ledflagstate ^= 1 << flag; } #define U(x) ((x) ^ 0xf000) #define BRL_UC_ROW 0x2800 /* keyboard.c */ struct console; void vt_set_leds_compute_shiftstate(void); /* defkeymap.c */ extern unsigned int keymap_count; #endif
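/*
 * Illustrative sketch (editor's addition, hypothetical caller): how the bit
 * helpers above compose. Each flag indexes a single bit, so test, set, clear
 * and toggle are symmetric across modeflags/ledflagstate/lockstate.
 */
static inline int example_toggle_numlock(struct kbd_struct *kbd)
{
	chg_vc_kbd_led(kbd, VC_NUMLOCK);	/* flip, as a keypress would */
	return vc_kbd_led(kbd, VC_NUMLOCK);	/* 1 if the LED bit is now set */
}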
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
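 *
 *   As a hedged illustration of the first kind (a userspace sketch, not
 *   part of the original file; the CID and port values are made up):
 *
 *     int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *     struct sockaddr_vm addr = {
 *             .svm_family = AF_VSOCK,
 *             .svm_cid    = 3,
 *             .svm_port   = 1234,
 *     };
 *     connect(fd, (struct sockaddr *)&addr, sizeof(addr));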
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them. Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets, and SOCK_DGRAM sockets will always remain in
 * that list. The bound table is used solely for lookup of sockets when packets
 * are received, and that's not necessary for SOCK_DGRAM sockets since we create
 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
 * sockets out of the bound hash buckets will reduce the chance of collisions
 * when looking for SOCK_STREAM sockets and prevent us from having to check the
 * socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state. When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket. These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is one that has an
 * existing pending connection. If so, we process the packet for the
 * pending socket. When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue. Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue. If the socket cannot be accepted
 * for some reason then it is marked rejected. Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls to that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request. Because of this, we must schedule a cleanup function to run in the
 * future, after enough time has passed that a connection should have been
 * established. This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is cleaned
 * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
 * function will also clean up rejected sockets, those that reach the connected
 * state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked. Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
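 *
 *   Roughly, as a sketch of the paths named above, the teardown chain
 *   for a user-created socket is:
 *
 *     close(2)
 *       -> vsock_release()
 *            -> __vsock_release()    (off lists, shutdown, purge queues)
 *                 -> sock_put()      (drop the final reference)
 *                      -> sk_free() -> vsock_sk_destruct()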
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed. Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) ensures a
 * reference. When we defer work until process context and pass a socket as our
 * argument, we must increase the reference count so that the socket isn't
 * freed before the function is run; the deferred function will then drop the
 * reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/errqueue.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>
#include <uapi/linux/vm_sockets.h>
#include <uapi/asm-generic/ioctls.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
static void vsock_close(struct sock *sk, long timeout);

/* Protocol family. */
struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
	.close = vsock_close,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot = vsock_bpf_update_proto,
#endif
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define VSOCK_DEFAULT_BUFFER_SIZE     (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128

/* Transport used for host->guest communication */
static const struct vsock_transport *transport_h2g;

/* Transport used for guest->host communication */
static const struct vsock_transport *transport_g2h;

/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;

/* Transport used for local communication */
static const struct vsock_transport *transport_local;

static DEFINE_MUTEX(vsock_register_mutex);

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
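 *
 * Worked example (assuming VSOCK_HASH_SIZE is 251, its value in current
 * af_vsock.h): a socket bound to port 5000 hashes to bucket
 * 5000 % 251 == 231 and so lives on &vsock_bind_table[231], while every
 * unbound socket sits on &vsock_bind_table[251], past the last bucket.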
*/ #define MAX_PORT_RETRIES 24 #define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE) #define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)]) #define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE]) /* XXX This can probably be implemented in a better way. */ #define VSOCK_CONN_HASH(src, dst) \ (((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE) #define vsock_connected_sockets(src, dst) \ (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)]) #define vsock_connected_sockets_vsk(vsk) \ vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr) struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1]; EXPORT_SYMBOL_GPL(vsock_bind_table); struct list_head vsock_connected_table[VSOCK_HASH_SIZE]; EXPORT_SYMBOL_GPL(vsock_connected_table); DEFINE_SPINLOCK(vsock_table_lock); EXPORT_SYMBOL_GPL(vsock_table_lock); /* Autobind this socket to the local address if necessary. */ static int vsock_auto_bind(struct vsock_sock *vsk) { struct sock *sk = sk_vsock(vsk); struct sockaddr_vm local_addr; if (vsock_addr_bound(&vsk->local_addr)) return 0; vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); return __vsock_bind(sk, &local_addr); } static void vsock_init_tables(void) { int i; for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++) INIT_LIST_HEAD(&vsock_bind_table[i]); for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) INIT_LIST_HEAD(&vsock_connected_table[i]); } static void __vsock_insert_bound(struct list_head *list, struct vsock_sock *vsk) { sock_hold(&vsk->sk); list_add(&vsk->bound_table, list); } static void __vsock_insert_connected(struct list_head *list, struct vsock_sock *vsk) { sock_hold(&vsk->sk); list_add(&vsk->connected_table, list); } static void __vsock_remove_bound(struct vsock_sock *vsk) { list_del_init(&vsk->bound_table); sock_put(&vsk->sk); } static void __vsock_remove_connected(struct vsock_sock *vsk) { list_del_init(&vsk->connected_table); sock_put(&vsk->sk); } static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr) { struct vsock_sock *vsk; list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) { if (vsock_addr_equals_addr(addr, &vsk->local_addr)) return sk_vsock(vsk); if (addr->svm_port == vsk->local_addr.svm_port && (vsk->local_addr.svm_cid == VMADDR_CID_ANY || addr->svm_cid == VMADDR_CID_ANY)) return sk_vsock(vsk); } return NULL; } static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src, struct sockaddr_vm *dst) { struct vsock_sock *vsk; list_for_each_entry(vsk, vsock_connected_sockets(src, dst), connected_table) { if (vsock_addr_equals_addr(src, &vsk->remote_addr) && dst->svm_port == vsk->local_addr.svm_port) { return sk_vsock(vsk); } } return NULL; } static void vsock_insert_unbound(struct vsock_sock *vsk) { spin_lock_bh(&vsock_table_lock); __vsock_insert_bound(vsock_unbound_sockets, vsk); spin_unlock_bh(&vsock_table_lock); } void vsock_insert_connected(struct vsock_sock *vsk) { struct list_head *list = vsock_connected_sockets( &vsk->remote_addr, &vsk->local_addr); spin_lock_bh(&vsock_table_lock); __vsock_insert_connected(list, vsk); spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_insert_connected); void vsock_remove_bound(struct vsock_sock *vsk) { spin_lock_bh(&vsock_table_lock); if (__vsock_in_bound_table(vsk)) __vsock_remove_bound(vsk); spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_remove_bound); void vsock_remove_connected(struct vsock_sock *vsk) { spin_lock_bh(&vsock_table_lock); if (__vsock_in_connected_table(vsk)) __vsock_remove_connected(vsk); 
spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_remove_connected); struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr) { struct sock *sk; spin_lock_bh(&vsock_table_lock); sk = __vsock_find_bound_socket(addr); if (sk) sock_hold(sk); spin_unlock_bh(&vsock_table_lock); return sk; } EXPORT_SYMBOL_GPL(vsock_find_bound_socket); struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, struct sockaddr_vm *dst) { struct sock *sk; spin_lock_bh(&vsock_table_lock); sk = __vsock_find_connected_socket(src, dst); if (sk) sock_hold(sk); spin_unlock_bh(&vsock_table_lock); return sk; } EXPORT_SYMBOL_GPL(vsock_find_connected_socket); void vsock_remove_sock(struct vsock_sock *vsk) { /* Transport reassignment must not remove the binding. */ if (sock_flag(sk_vsock(vsk), SOCK_DEAD)) vsock_remove_bound(vsk); vsock_remove_connected(vsk); } EXPORT_SYMBOL_GPL(vsock_remove_sock); void vsock_for_each_connected_socket(struct vsock_transport *transport, void (*fn)(struct sock *sk)) { int i; spin_lock_bh(&vsock_table_lock); for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { struct vsock_sock *vsk; list_for_each_entry(vsk, &vsock_connected_table[i], connected_table) { if (vsk->transport != transport) continue; fn(sk_vsock(vsk)); } } spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket); void vsock_add_pending(struct sock *listener, struct sock *pending) { struct vsock_sock *vlistener; struct vsock_sock *vpending; vlistener = vsock_sk(listener); vpending = vsock_sk(pending); sock_hold(pending); sock_hold(listener); list_add_tail(&vpending->pending_links, &vlistener->pending_links); } EXPORT_SYMBOL_GPL(vsock_add_pending); void vsock_remove_pending(struct sock *listener, struct sock *pending) { struct vsock_sock *vpending = vsock_sk(pending); list_del_init(&vpending->pending_links); sock_put(listener); sock_put(pending); } EXPORT_SYMBOL_GPL(vsock_remove_pending); void vsock_enqueue_accept(struct sock *listener, struct sock *connected) { struct vsock_sock *vlistener; struct vsock_sock *vconnected; vlistener = vsock_sk(listener); vconnected = vsock_sk(connected); sock_hold(connected); sock_hold(listener); list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue); } EXPORT_SYMBOL_GPL(vsock_enqueue_accept); static bool vsock_use_local_transport(unsigned int remote_cid) { lockdep_assert_held(&vsock_register_mutex); if (!transport_local) return false; if (remote_cid == VMADDR_CID_LOCAL) return true; if (transport_g2h) { return remote_cid == transport_g2h->get_local_cid(); } else { return remote_cid == VMADDR_CID_HOST; } } static void vsock_deassign_transport(struct vsock_sock *vsk) { if (!vsk->transport) return; vsk->transport->destruct(vsk); module_put(vsk->transport->module); vsk->transport = NULL; } /* Assign a transport to a socket and call the .init transport callback. * * Note: for connection oriented socket this must be called when vsk->remote_addr * is set (e.g. during the connect() or when a connection request on a listener * socket is received). 
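 *
 * A typical caller, sketched from vsock_connect() below (error handling
 * elided), fills in the remote address first:
 *
 *   memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
 *   err = vsock_assign_transport(vsk, NULL);
 *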
* The vsk->remote_addr is used to decide which transport to use: * - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if * g2h is not loaded, will use local transport; * - remote CID <= VMADDR_CID_HOST or h2g is not loaded or remote flags field * includes VMADDR_FLAG_TO_HOST flag value, will use guest->host transport; * - remote CID > VMADDR_CID_HOST will use host->guest transport; */ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) { const struct vsock_transport *new_transport; struct sock *sk = sk_vsock(vsk); unsigned int remote_cid = vsk->remote_addr.svm_cid; __u8 remote_flags; int ret; /* If the packet is coming with the source and destination CIDs higher * than VMADDR_CID_HOST, then a vsock channel where all the packets are * forwarded to the host should be established. Then the host will * need to forward the packets to the guest. * * The flag is set on the (listen) receive path (psk is not NULL). On * the connect path the flag can be set by the user space application. */ if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST && vsk->remote_addr.svm_cid > VMADDR_CID_HOST) vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST; remote_flags = vsk->remote_addr.svm_flags; mutex_lock(&vsock_register_mutex); switch (sk->sk_type) { case SOCK_DGRAM: new_transport = transport_dgram; break; case SOCK_STREAM: case SOCK_SEQPACKET: if (vsock_use_local_transport(remote_cid)) new_transport = transport_local; else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g || (remote_flags & VMADDR_FLAG_TO_HOST)) new_transport = transport_g2h; else new_transport = transport_h2g; break; default: ret = -ESOCKTNOSUPPORT; goto err; } if (vsk->transport) { if (vsk->transport == new_transport) { ret = 0; goto err; } /* transport->release() must be called with sock lock acquired. * This path can only be taken during vsock_connect(), where we * have already held the sock lock. In the other cases, this * function is called on a new socket which is not assigned to * any transport. */ vsk->transport->release(vsk); vsock_deassign_transport(vsk); /* transport's release() and destruct() can touch some socket * state, since we are reassigning the socket to a new transport * during vsock_connect(), let's reset these fields to have a * clean state. */ sock_reset_flag(sk, SOCK_DONE); sk->sk_state = TCP_CLOSE; vsk->peer_shutdown = 0; } /* We increase the module refcnt to prevent the transport unloading * while there are open sockets assigned to it. */ if (!new_transport || !try_module_get(new_transport->module)) { ret = -ENODEV; goto err; } /* It's safe to release the mutex after a successful try_module_get(). * Whichever transport `new_transport` points at, it won't go away until * the last module_put() below or in vsock_deassign_transport(). */ mutex_unlock(&vsock_register_mutex); if (sk->sk_type == SOCK_SEQPACKET) { if (!new_transport->seqpacket_allow || !new_transport->seqpacket_allow(remote_cid)) { module_put(new_transport->module); return -ESOCKTNOSUPPORT; } } ret = new_transport->init(vsk, psk); if (ret) { module_put(new_transport->module); return ret; } vsk->transport = new_transport; return 0; err: mutex_unlock(&vsock_register_mutex); return ret; } EXPORT_SYMBOL_GPL(vsock_assign_transport); /* * Provide safe access to static transport_{h2g,g2h,dgram,local} callbacks. * Otherwise we may race with module removal. Do not use on `vsk->transport`. 
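 *
 * A minimal usage sketch (mirroring vsock_find_cid() below): query the
 * g2h transport's CID and treat VMADDR_CID_ANY as "not registered":
 *
 *   u32 cid = vsock_registered_transport_cid(&transport_g2h);
 *   if (cid != VMADDR_CID_ANY)
 *           (a g2h transport is registered and cid is usable)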
*/ static u32 vsock_registered_transport_cid(const struct vsock_transport **transport) { u32 cid = VMADDR_CID_ANY; mutex_lock(&vsock_register_mutex); if (*transport) cid = (*transport)->get_local_cid(); mutex_unlock(&vsock_register_mutex); return cid; } bool vsock_find_cid(unsigned int cid) { if (cid == vsock_registered_transport_cid(&transport_g2h)) return true; if (transport_h2g && cid == VMADDR_CID_HOST) return true; if (transport_local && cid == VMADDR_CID_LOCAL) return true; return false; } EXPORT_SYMBOL_GPL(vsock_find_cid); static struct sock *vsock_dequeue_accept(struct sock *listener) { struct vsock_sock *vlistener; struct vsock_sock *vconnected; vlistener = vsock_sk(listener); if (list_empty(&vlistener->accept_queue)) return NULL; vconnected = list_entry(vlistener->accept_queue.next, struct vsock_sock, accept_queue); list_del_init(&vconnected->accept_queue); sock_put(listener); /* The caller will need a reference on the connected socket so we let * it call sock_put(). */ return sk_vsock(vconnected); } static bool vsock_is_accept_queue_empty(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); return list_empty(&vsk->accept_queue); } static bool vsock_is_pending(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); return !list_empty(&vsk->pending_links); } static int vsock_send_shutdown(struct sock *sk, int mode) { struct vsock_sock *vsk = vsock_sk(sk); if (!vsk->transport) return -ENODEV; return vsk->transport->shutdown(vsk, mode); } static void vsock_pending_work(struct work_struct *work) { struct sock *sk; struct sock *listener; struct vsock_sock *vsk; bool cleanup; vsk = container_of(work, struct vsock_sock, pending_work.work); sk = sk_vsock(vsk); listener = vsk->listener; cleanup = true; lock_sock(listener); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (vsock_is_pending(sk)) { vsock_remove_pending(listener, sk); sk_acceptq_removed(listener); } else if (!vsk->rejected) { /* We are not on the pending list and accept() did not reject * us, so we must have been accepted by our user process. We * just need to drop our references to the sockets and be on * our way. */ cleanup = false; goto out; } /* We need to remove ourself from the global connected sockets list so * incoming packets can't find this socket, and to reduce the reference * count. */ vsock_remove_connected(vsk); sk->sk_state = TCP_CLOSE; out: release_sock(sk); release_sock(listener); if (cleanup) sock_put(sk); sock_put(sk); sock_put(listener); } /**** SOCKET OPERATIONS ****/ static int __vsock_bind_connectible(struct vsock_sock *vsk, struct sockaddr_vm *addr) { static u32 port; struct sockaddr_vm new_addr; if (!port) port = get_random_u32_above(LAST_RESERVED_PORT); vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port); if (addr->svm_port == VMADDR_PORT_ANY) { bool found = false; unsigned int i; for (i = 0; i < MAX_PORT_RETRIES; i++) { if (port <= LAST_RESERVED_PORT) port = LAST_RESERVED_PORT + 1; new_addr.svm_port = port++; if (!__vsock_find_bound_socket(&new_addr)) { found = true; break; } } if (!found) return -EADDRNOTAVAIL; } else { /* If port is in reserved range, ensure caller * has necessary privileges. */ if (addr->svm_port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE)) { return -EACCES; } if (__vsock_find_bound_socket(&new_addr)) return -EADDRINUSE; } vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port); /* Remove connection oriented sockets from the unbound list and add them * to the hash table for easy lookup by its address. 
The unbound list * is simply an extra entry at the end of the hash table, a trick used * by AF_UNIX. */ __vsock_remove_bound(vsk); __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk); return 0; } static int __vsock_bind_dgram(struct vsock_sock *vsk, struct sockaddr_vm *addr) { return vsk->transport->dgram_bind(vsk, addr); } static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr) { struct vsock_sock *vsk = vsock_sk(sk); int retval; /* First ensure this socket isn't already bound. */ if (vsock_addr_bound(&vsk->local_addr)) return -EINVAL; /* Now bind to the provided address or select appropriate values if * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that * like AF_INET prevents binding to a non-local IP address (in most * cases), we only allow binding to a local CID. */ if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid)) return -EADDRNOTAVAIL; switch (sk->sk_socket->type) { case SOCK_STREAM: case SOCK_SEQPACKET: spin_lock_bh(&vsock_table_lock); retval = __vsock_bind_connectible(vsk, addr); spin_unlock_bh(&vsock_table_lock); break; case SOCK_DGRAM: retval = __vsock_bind_dgram(vsk, addr); break; default: retval = -EINVAL; break; } return retval; } static void vsock_connect_timeout(struct work_struct *work); static struct sock *__vsock_create(struct net *net, struct socket *sock, struct sock *parent, gfp_t priority, unsigned short type, int kern) { struct sock *sk; struct vsock_sock *psk; struct vsock_sock *vsk; sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern); if (!sk) return NULL; sock_init_data(sock, sk); /* sk->sk_type is normally set in sock_init_data, but only if sock is * non-NULL. We make sure that our sockets always have a type by * setting it here if needed. */ if (!sock) sk->sk_type = type; vsk = vsock_sk(sk); vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); sk->sk_destruct = vsock_sk_destruct; sk->sk_backlog_rcv = vsock_queue_rcv_skb; sock_reset_flag(sk, SOCK_DONE); INIT_LIST_HEAD(&vsk->bound_table); INIT_LIST_HEAD(&vsk->connected_table); vsk->listener = NULL; INIT_LIST_HEAD(&vsk->pending_links); INIT_LIST_HEAD(&vsk->accept_queue); vsk->rejected = false; vsk->sent_request = false; vsk->ignore_connecting_rst = false; vsk->peer_shutdown = 0; INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout); INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work); psk = parent ? vsock_sk(parent) : NULL; if (parent) { vsk->trusted = psk->trusted; vsk->owner = get_cred(psk->owner); vsk->connect_timeout = psk->connect_timeout; vsk->buffer_size = psk->buffer_size; vsk->buffer_min_size = psk->buffer_min_size; vsk->buffer_max_size = psk->buffer_max_size; security_sk_clone(parent, sk); } else { vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN); vsk->owner = get_current_cred(); vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE; vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE; vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE; } return sk; } static bool sock_type_connectible(u16 type) { return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET); } static void __vsock_release(struct sock *sk, int level) { struct vsock_sock *vsk; struct sock *pending; vsk = vsock_sk(sk); pending = NULL; /* Compiler warning. */ /* When "level" is SINGLE_DEPTH_NESTING, use the nested * version to avoid the warning "possible recursive locking * detected". 
When "level" is 0, lock_sock_nested(sk, level) * is the same as lock_sock(sk). */ lock_sock_nested(sk, level); /* Indicate to vsock_remove_sock() that the socket is being released and * can be removed from the bound_table. Unlike transport reassignment * case, where the socket must remain bound despite vsock_remove_sock() * being called from the transport release() callback. */ sock_set_flag(sk, SOCK_DEAD); if (vsk->transport) vsk->transport->release(vsk); else if (sock_type_connectible(sk->sk_type)) vsock_remove_sock(vsk); sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; skb_queue_purge(&sk->sk_receive_queue); /* Clean up any sockets that never were accepted. */ while ((pending = vsock_dequeue_accept(sk)) != NULL) { __vsock_release(pending, SINGLE_DEPTH_NESTING); sock_put(pending); } release_sock(sk); sock_put(sk); } static void vsock_sk_destruct(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); /* Flush MSG_ZEROCOPY leftovers. */ __skb_queue_purge(&sk->sk_error_queue); vsock_deassign_transport(vsk); /* When clearing these addresses, there's no need to set the family and * possibly register the address family with the kernel. */ vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); put_cred(vsk->owner); } static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; err = sock_queue_rcv_skb(sk, skb); if (err) kfree_skb(skb); return err; } struct sock *vsock_create_connected(struct sock *parent) { return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL, parent->sk_type, 0); } EXPORT_SYMBOL_GPL(vsock_create_connected); s64 vsock_stream_has_data(struct vsock_sock *vsk) { if (WARN_ON(!vsk->transport)) return 0; return vsk->transport->stream_has_data(vsk); } EXPORT_SYMBOL_GPL(vsock_stream_has_data); s64 vsock_connectible_has_data(struct vsock_sock *vsk) { struct sock *sk = sk_vsock(vsk); if (WARN_ON(!vsk->transport)) return 0; if (sk->sk_type == SOCK_SEQPACKET) return vsk->transport->seqpacket_has_data(vsk); else return vsock_stream_has_data(vsk); } EXPORT_SYMBOL_GPL(vsock_connectible_has_data); s64 vsock_stream_has_space(struct vsock_sock *vsk) { if (WARN_ON(!vsk->transport)) return 0; return vsk->transport->stream_has_space(vsk); } EXPORT_SYMBOL_GPL(vsock_stream_has_space); void vsock_data_ready(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); if (vsock_stream_has_data(vsk) >= sk->sk_rcvlowat || sock_flag(sk, SOCK_DONE)) sk->sk_data_ready(sk); } EXPORT_SYMBOL_GPL(vsock_data_ready); /* Dummy callback required by sockmap. * See unconditional call of saved_close() in sock_map_close(). 
*/ static void vsock_close(struct sock *sk, long timeout) { } static int vsock_release(struct socket *sock) { struct sock *sk = sock->sk; if (!sk) return 0; sk->sk_prot->close(sk, 0); __vsock_release(sk, 0); sock->sk = NULL; sock->state = SS_FREE; return 0; } static int vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { int err; struct sock *sk; struct sockaddr_vm *vm_addr; sk = sock->sk; if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0) return -EINVAL; lock_sock(sk); err = __vsock_bind(sk, vm_addr); release_sock(sk); return err; } static int vsock_getname(struct socket *sock, struct sockaddr *addr, int peer) { int err; struct sock *sk; struct vsock_sock *vsk; struct sockaddr_vm *vm_addr; sk = sock->sk; vsk = vsock_sk(sk); err = 0; lock_sock(sk); if (peer) { if (sock->state != SS_CONNECTED) { err = -ENOTCONN; goto out; } vm_addr = &vsk->remote_addr; } else { vm_addr = &vsk->local_addr; } if (!vm_addr) { err = -EINVAL; goto out; } /* sys_getsockname() and sys_getpeername() pass us a * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately * that macro is defined in socket.c instead of .h, so we hardcode its * value here. */ BUILD_BUG_ON(sizeof(*vm_addr) > 128); memcpy(addr, vm_addr, sizeof(*vm_addr)); err = sizeof(*vm_addr); out: release_sock(sk); return err; } void vsock_linger(struct sock *sk) { DEFINE_WAIT_FUNC(wait, woken_wake_function); ssize_t (*unsent)(struct vsock_sock *vsk); struct vsock_sock *vsk = vsock_sk(sk); long timeout; if (!sock_flag(sk, SOCK_LINGER)) return; timeout = sk->sk_lingertime; if (!timeout) return; /* Transports must implement `unsent_bytes` if they want to support * SOCK_LINGER through `vsock_linger()` since we use it to check when * the socket can be closed. */ unsent = vsk->transport->unsent_bytes; if (!unsent) return; add_wait_queue(sk_sleep(sk), &wait); do { if (sk_wait_event(sk, &timeout, unsent(vsk) == 0, &wait)) break; } while (!signal_pending(current) && timeout); remove_wait_queue(sk_sleep(sk), &wait); } EXPORT_SYMBOL_GPL(vsock_linger); static int vsock_shutdown(struct socket *sock, int mode) { int err; struct sock *sk; /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode * here like the other address families do. Note also that the * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3), * which is what we want. */ mode++; if ((mode & ~SHUTDOWN_MASK) || !mode) return -EINVAL; /* If this is a connection oriented socket and it is not connected then * bail out immediately. If it is a DGRAM socket then we must first * kick the socket so that it wakes up from any sleeping calls, for * example recv(), and then afterwards return the error. */ sk = sock->sk; lock_sock(sk); if (sock->state == SS_UNCONNECTED) { err = -ENOTCONN; if (sock_type_connectible(sk->sk_type)) goto out; } else { sock->state = SS_DISCONNECTING; err = 0; } /* Receive and send shutdowns are treated alike. 
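 *
 * Complementing the SHUT_RDWR note earlier in this function, a second
 * worked example: shutdown(fd, SHUT_RD) arrives as mode == 0, the
 * increment turns it into RCV_SHUTDOWN (1), and the mask below leaves
 * only the receive bit set.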
*/ mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN); if (mode) { sk->sk_shutdown |= mode; sk->sk_state_change(sk); if (sock_type_connectible(sk->sk_type)) { sock_reset_flag(sk, SOCK_DONE); vsock_send_shutdown(sk, mode); } } out: release_sock(sk); return err; } static __poll_t vsock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk; __poll_t mask; struct vsock_sock *vsk; sk = sock->sk; vsk = vsock_sk(sk); poll_wait(file, sk_sleep(sk), wait); mask = 0; if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) /* Signify that there has been an error on this socket. */ mask |= EPOLLERR; /* INET sockets treat local write shutdown and peer write shutdown as a * case of EPOLLHUP set. */ if ((sk->sk_shutdown == SHUTDOWN_MASK) || ((sk->sk_shutdown & SEND_SHUTDOWN) && (vsk->peer_shutdown & SEND_SHUTDOWN))) { mask |= EPOLLHUP; } if (sk->sk_shutdown & RCV_SHUTDOWN || vsk->peer_shutdown & SEND_SHUTDOWN) { mask |= EPOLLRDHUP; } if (sk_is_readable(sk)) mask |= EPOLLIN | EPOLLRDNORM; if (sock->type == SOCK_DGRAM) { /* For datagram sockets we can read if there is something in * the queue and write as long as the socket isn't shutdown for * sending. */ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || (sk->sk_shutdown & RCV_SHUTDOWN)) { mask |= EPOLLIN | EPOLLRDNORM; } if (!(sk->sk_shutdown & SEND_SHUTDOWN)) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; } else if (sock_type_connectible(sk->sk_type)) { const struct vsock_transport *transport; lock_sock(sk); transport = vsk->transport; /* Listening sockets that have connections in their accept * queue can be read. */ if (sk->sk_state == TCP_LISTEN && !vsock_is_accept_queue_empty(sk)) mask |= EPOLLIN | EPOLLRDNORM; /* If there is something in the queue then we can read. */ if (transport && transport->stream_is_active(vsk) && !(sk->sk_shutdown & RCV_SHUTDOWN)) { bool data_ready_now = false; int target = sock_rcvlowat(sk, 0, INT_MAX); int ret = transport->notify_poll_in( vsk, target, &data_ready_now); if (ret < 0) { mask |= EPOLLERR; } else { if (data_ready_now) mask |= EPOLLIN | EPOLLRDNORM; } } /* Sockets whose connections have been closed, reset, or * terminated should also be considered read, and we check the * shutdown flag for that. */ if (sk->sk_shutdown & RCV_SHUTDOWN || vsk->peer_shutdown & SEND_SHUTDOWN) { mask |= EPOLLIN | EPOLLRDNORM; } /* Connected sockets that can produce data can be written. */ if (transport && sk->sk_state == TCP_ESTABLISHED) { if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { bool space_avail_now = false; int ret = transport->notify_poll_out( vsk, 1, &space_avail_now); if (ret < 0) { mask |= EPOLLERR; } else { if (space_avail_now) /* Remove EPOLLWRBAND since INET * sockets are not setting it. */ mask |= EPOLLOUT | EPOLLWRNORM; } } } /* Simulate INET socket poll behaviors, which sets * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read, * but local send is not shutdown. 
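 *
 * Illustrative outcome (a sketch, not normative): once the peer has
 * closed and the receive queue is drained, the shutdown checks above
 * yield EPOLLIN | EPOLLRDNORM | EPOLLRDHUP, and the branch below adds
 * EPOLLOUT | EPOLLWRNORM while local send is still open.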
 */ if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) { if (!(sk->sk_shutdown & SEND_SHUTDOWN)) mask |= EPOLLOUT | EPOLLWRNORM; } release_sock(sk); } return mask; } static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor) { struct vsock_sock *vsk = vsock_sk(sk); if (WARN_ON_ONCE(!vsk->transport)) return -ENODEV; return vsk->transport->read_skb(vsk, read_actor); } static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { int err; struct sock *sk; struct vsock_sock *vsk; struct sockaddr_vm *remote_addr; const struct vsock_transport *transport; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* For now, MSG_DONTWAIT is always assumed... */ err = 0; sk = sock->sk; vsk = vsock_sk(sk); lock_sock(sk); transport = vsk->transport; err = vsock_auto_bind(vsk); if (err) goto out; /* If the provided message contains an address, use that. Otherwise * fall back on the socket's remote handle (if it has been connected). */ if (msg->msg_name && vsock_addr_cast(msg->msg_name, msg->msg_namelen, &remote_addr) == 0) { /* Ensure this address is of the right type and is a valid * destination. */ if (remote_addr->svm_cid == VMADDR_CID_ANY) remote_addr->svm_cid = transport->get_local_cid(); if (!vsock_addr_bound(remote_addr)) { err = -EINVAL; goto out; } } else if (sock->state == SS_CONNECTED) { remote_addr = &vsk->remote_addr; if (remote_addr->svm_cid == VMADDR_CID_ANY) remote_addr->svm_cid = transport->get_local_cid(); /* XXX Should connect() or this function ensure remote_addr is * bound? */ if (!vsock_addr_bound(&vsk->remote_addr)) { err = -EINVAL; goto out; } } else { err = -EINVAL; goto out; } if (!transport->dgram_allow(remote_addr->svm_cid, remote_addr->svm_port)) { err = -EINVAL; goto out; } err = transport->dgram_enqueue(vsk, remote_addr, msg, len); out: release_sock(sk); return err; } static int vsock_dgram_connect(struct socket *sock, struct sockaddr *addr, int addr_len, int flags) { int err; struct sock *sk; struct vsock_sock *vsk; struct sockaddr_vm *remote_addr; sk = sock->sk; vsk = vsock_sk(sk); err = vsock_addr_cast(addr, addr_len, &remote_addr); if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) { lock_sock(sk); vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); sock->state = SS_UNCONNECTED; release_sock(sk); return 0; } else if (err != 0) return -EINVAL; lock_sock(sk); err = vsock_auto_bind(vsk); if (err) goto out; if (!vsk->transport->dgram_allow(remote_addr->svm_cid, remote_addr->svm_port)) { err = -EINVAL; goto out; } memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr)); sock->state = SS_CONNECTED; /* sock map disallows redirection of non-TCP sockets with sk_state != * TCP_ESTABLISHED (see sock_map_redirect_allowed()), so we set * TCP_ESTABLISHED here to allow redirection of connected vsock dgrams. * * This doesn't seem to be an abnormal state for datagram sockets, as the * same approach can be seen in other datagram socket types as well * (such as unix sockets).
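 *
 * Userspace-visible effect (illustrative): after a successful
 * connect(fd, ...) on a SOCK_DGRAM vsock, a plain send(fd, buf, len, 0)
 * with no destination address works, taking the SS_CONNECTED branch in
 * vsock_dgram_sendmsg() above.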
*/ sk->sk_state = TCP_ESTABLISHED; out: release_sock(sk); return err; } int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct vsock_sock *vsk = vsock_sk(sk); return vsk->transport->dgram_dequeue(vsk, msg, len, flags); } int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { #ifdef CONFIG_BPF_SYSCALL struct sock *sk = sock->sk; const struct proto *prot; prot = READ_ONCE(sk->sk_prot); if (prot != &vsock_proto) return prot->recvmsg(sk, msg, len, flags, NULL); #endif return __vsock_dgram_recvmsg(sock, msg, len, flags); } EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg); static int vsock_do_ioctl(struct socket *sock, unsigned int cmd, int __user *arg) { struct sock *sk = sock->sk; struct vsock_sock *vsk; int ret; vsk = vsock_sk(sk); switch (cmd) { case SIOCOUTQ: { ssize_t n_bytes; if (!vsk->transport || !vsk->transport->unsent_bytes) { ret = -EOPNOTSUPP; break; } if (sock_type_connectible(sk->sk_type) && sk->sk_state == TCP_LISTEN) { ret = -EINVAL; break; } n_bytes = vsk->transport->unsent_bytes(vsk); if (n_bytes < 0) { ret = n_bytes; break; } ret = put_user(n_bytes, arg); break; } default: ret = -ENOIOCTLCMD; } return ret; } static int vsock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int ret; lock_sock(sock->sk); ret = vsock_do_ioctl(sock, cmd, (int __user *)arg); release_sock(sock->sk); return ret; } static const struct proto_ops vsock_dgram_ops = { .family = PF_VSOCK, .owner = THIS_MODULE, .release = vsock_release, .bind = vsock_bind, .connect = vsock_dgram_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = vsock_getname, .poll = vsock_poll, .ioctl = vsock_ioctl, .listen = sock_no_listen, .shutdown = vsock_shutdown, .sendmsg = vsock_dgram_sendmsg, .recvmsg = vsock_dgram_recvmsg, .mmap = sock_no_mmap, .read_skb = vsock_read_skb, }; static int vsock_transport_cancel_pkt(struct vsock_sock *vsk) { const struct vsock_transport *transport = vsk->transport; if (!transport || !transport->cancel_pkt) return -EOPNOTSUPP; return transport->cancel_pkt(vsk); } static void vsock_connect_timeout(struct work_struct *work) { struct sock *sk; struct vsock_sock *vsk; vsk = container_of(work, struct vsock_sock, connect_work.work); sk = sk_vsock(vsk); lock_sock(sk); if (sk->sk_state == TCP_SYN_SENT && (sk->sk_shutdown != SHUTDOWN_MASK)) { sk->sk_state = TCP_CLOSE; sk->sk_socket->state = SS_UNCONNECTED; sk->sk_err = ETIMEDOUT; sk_error_report(sk); vsock_transport_cancel_pkt(vsk); } release_sock(sk); sock_put(sk); } static int vsock_connect(struct socket *sock, struct sockaddr *addr, int addr_len, int flags) { int err; struct sock *sk; struct vsock_sock *vsk; const struct vsock_transport *transport; struct sockaddr_vm *remote_addr; long timeout; DEFINE_WAIT(wait); err = 0; sk = sock->sk; vsk = vsock_sk(sk); lock_sock(sk); /* XXX AF_UNSPEC should make us disconnect like AF_INET. */ switch (sock->state) { case SS_CONNECTED: err = -EISCONN; goto out; case SS_DISCONNECTING: err = -EINVAL; goto out; case SS_CONNECTING: /* This continues on so we can move sock into the SS_CONNECTED * state once the connection has completed (at which point err * will be set to zero also). Otherwise, we will either wait * for the connection or return -EALREADY should this be a * non-blocking call. 
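 *
 * Assumed userspace pattern for the non-blocking case (a sketch, with
 * error handling elided):
 *
 *   fcntl(fd, F_SETFL, O_NONBLOCK);
 *   connect(fd, ...);            (fails with errno EINPROGRESS)
 *   (wait for POLLOUT, then)
 *   connect(fd, ...);            (a repeat call that arrives while the
 *                                 handshake is still pending lands here
 *                                 and sees EALREADY)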
*/ err = -EALREADY; if (flags & O_NONBLOCK) goto out; break; default: if ((sk->sk_state == TCP_LISTEN) || vsock_addr_cast(addr, addr_len, &remote_addr) != 0) { err = -EINVAL; goto out; } /* Set the remote address that we are connecting to. */ memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr)); err = vsock_assign_transport(vsk, NULL); if (err) goto out; transport = vsk->transport; /* The hypervisor and well-known contexts do not have socket * endpoints. */ if (!transport || !transport->stream_allow(remote_addr->svm_cid, remote_addr->svm_port)) { err = -ENETUNREACH; goto out; } if (vsock_msgzerocopy_allow(transport)) { set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); } else if (sock_flag(sk, SOCK_ZEROCOPY)) { /* If this option was set before 'connect()', * when transport was unknown, check that this * feature is supported here. */ err = -EOPNOTSUPP; goto out; } err = vsock_auto_bind(vsk); if (err) goto out; sk->sk_state = TCP_SYN_SENT; err = transport->connect(vsk); if (err < 0) goto out; /* sk_err might have been set as a result of an earlier * (failed) connect attempt. */ sk->sk_err = 0; /* Mark sock as connecting and set the error code to in * progress in case this is a non-blocking connect. */ sock->state = SS_CONNECTING; err = -EINPROGRESS; } /* The receive path will handle all communication until we are able to * enter the connected state. Here we wait for the connection to be * completed or a notification of an error. */ timeout = vsk->connect_timeout; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); /* If the socket is already closing or it is in an error state, there * is no point in waiting. */ while (sk->sk_state != TCP_ESTABLISHED && sk->sk_state != TCP_CLOSING && sk->sk_err == 0) { if (flags & O_NONBLOCK) { /* If we're not going to block, we schedule a timeout * function to generate a timeout on the connection * attempt, in case the peer doesn't respond in a * timely manner. We hold on to the socket until the * timeout fires. */ sock_hold(sk); /* If the timeout function is already scheduled, * reschedule it, then ungrab the socket refcount to * keep it balanced. */ if (mod_delayed_work(system_wq, &vsk->connect_work, timeout)) sock_put(sk); /* Skip ahead to preserve error code set above. */ goto out_wait; } release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? 
TCP_CLOSING : TCP_CLOSE; sock->state = SS_UNCONNECTED; vsock_transport_cancel_pkt(vsk); vsock_remove_connected(vsk); goto out_wait; } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) { err = -ETIMEDOUT; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; vsock_transport_cancel_pkt(vsk); goto out_wait; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } if (sk->sk_err) { err = -sk->sk_err; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; } else { err = 0; } out_wait: finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; } static int vsock_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { struct sock *listener; int err; struct sock *connected; struct vsock_sock *vconnected; long timeout; DEFINE_WAIT(wait); err = 0; listener = sock->sk; lock_sock(listener); if (!sock_type_connectible(sock->type)) { err = -EOPNOTSUPP; goto out; } if (listener->sk_state != TCP_LISTEN) { err = -EINVAL; goto out; } /* Wait for children sockets to appear; these are the new sockets * created upon connection establishment. */ timeout = sock_rcvtimeo(listener, arg->flags & O_NONBLOCK); prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); while ((connected = vsock_dequeue_accept(listener)) == NULL && listener->sk_err == 0) { release_sock(listener); timeout = schedule_timeout(timeout); finish_wait(sk_sleep(listener), &wait); lock_sock(listener); if (signal_pending(current)) { err = sock_intr_errno(timeout); goto out; } else if (timeout == 0) { err = -EAGAIN; goto out; } prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(listener), &wait); if (listener->sk_err) err = -listener->sk_err; if (connected) { sk_acceptq_removed(listener); lock_sock_nested(connected, SINGLE_DEPTH_NESTING); vconnected = vsock_sk(connected); /* If the listener socket has received an error, then we should * reject this socket and return. Note that we simply mark the * socket rejected, drop our reference, and let the cleanup * function handle the cleanup; the fact that we found it in * the listener's accept queue guarantees that the cleanup * function hasn't run yet. 
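 *
 * (The cleanup function referred to here is vsock_pending_work(),
 * scheduled through vsk->pending_work; it checks vsk->rejected to
 * decide whether the remaining references must be dropped.)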
*/ if (err) { vconnected->rejected = true; } else { newsock->state = SS_CONNECTED; sock_graft(connected, newsock); if (vsock_msgzerocopy_allow(vconnected->transport)) set_bit(SOCK_SUPPORT_ZC, &connected->sk_socket->flags); } release_sock(connected); sock_put(connected); } out: release_sock(listener); return err; } static int vsock_listen(struct socket *sock, int backlog) { int err; struct sock *sk; struct vsock_sock *vsk; sk = sock->sk; lock_sock(sk); if (!sock_type_connectible(sk->sk_type)) { err = -EOPNOTSUPP; goto out; } if (sock->state != SS_UNCONNECTED) { err = -EINVAL; goto out; } vsk = vsock_sk(sk); if (!vsock_addr_bound(&vsk->local_addr)) { err = -EINVAL; goto out; } sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; err = 0; out: release_sock(sk); return err; } static void vsock_update_buffer_size(struct vsock_sock *vsk, const struct vsock_transport *transport, u64 val) { if (val > vsk->buffer_max_size) val = vsk->buffer_max_size; if (val < vsk->buffer_min_size) val = vsk->buffer_min_size; if (val != vsk->buffer_size && transport && transport->notify_buffer_size) transport->notify_buffer_size(vsk, &val); vsk->buffer_size = val; } static int vsock_connectible_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { int err; struct sock *sk; struct vsock_sock *vsk; const struct vsock_transport *transport; u64 val; if (level != AF_VSOCK && level != SOL_SOCKET) return -ENOPROTOOPT; #define COPY_IN(_v) \ do { \ if (optlen < sizeof(_v)) { \ err = -EINVAL; \ goto exit; \ } \ if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) { \ err = -EFAULT; \ goto exit; \ } \ } while (0) err = 0; sk = sock->sk; vsk = vsock_sk(sk); lock_sock(sk); transport = vsk->transport; if (level == SOL_SOCKET) { int zerocopy; if (optname != SO_ZEROCOPY) { release_sock(sk); return sock_setsockopt(sock, level, optname, optval, optlen); } /* Use 'int' type here, because variable to * set this option usually has this type. 
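 *
 * Illustrative userspace counterpart (assumed typical usage): enable
 * zerocopy before connect(), while the transport is still unknown:
 *
 *   int one = 1;
 *   setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));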
*/ COPY_IN(zerocopy); if (zerocopy < 0 || zerocopy > 1) { err = -EINVAL; goto exit; } if (transport && !vsock_msgzerocopy_allow(transport)) { err = -EOPNOTSUPP; goto exit; } sock_valbool_flag(sk, SOCK_ZEROCOPY, zerocopy); goto exit; } switch (optname) { case SO_VM_SOCKETS_BUFFER_SIZE: COPY_IN(val); vsock_update_buffer_size(vsk, transport, val); break; case SO_VM_SOCKETS_BUFFER_MAX_SIZE: COPY_IN(val); vsk->buffer_max_size = val; vsock_update_buffer_size(vsk, transport, vsk->buffer_size); break; case SO_VM_SOCKETS_BUFFER_MIN_SIZE: COPY_IN(val); vsk->buffer_min_size = val; vsock_update_buffer_size(vsk, transport, vsk->buffer_size); break; case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW: case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: { struct __kernel_sock_timeval tv; err = sock_copy_user_timeval(&tv, optval, optlen, optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD); if (err) break; if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC && tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) { vsk->connect_timeout = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ)); if (vsk->connect_timeout == 0) vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; } else { err = -ERANGE; } break; } default: err = -ENOPROTOOPT; break; } #undef COPY_IN exit: release_sock(sk); return err; } static int vsock_connectible_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct vsock_sock *vsk = vsock_sk(sk); union { u64 val64; struct old_timeval32 tm32; struct __kernel_old_timeval tm; struct __kernel_sock_timeval stm; } v; int lv = sizeof(v.val64); int len; if (level != AF_VSOCK) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; memset(&v, 0, sizeof(v)); switch (optname) { case SO_VM_SOCKETS_BUFFER_SIZE: v.val64 = vsk->buffer_size; break; case SO_VM_SOCKETS_BUFFER_MAX_SIZE: v.val64 = vsk->buffer_max_size; break; case SO_VM_SOCKETS_BUFFER_MIN_SIZE: v.val64 = vsk->buffer_min_size; break; case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW: case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: lv = sock_get_timeout(vsk->connect_timeout, &v, optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD); break; default: return -ENOPROTOOPT; } if (len < lv) return -EINVAL; if (len > lv) len = lv; if (copy_to_user(optval, &v, len)) return -EFAULT; if (put_user(len, optlen)) return -EFAULT; return 0; } static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk; struct vsock_sock *vsk; const struct vsock_transport *transport; ssize_t total_written; long timeout; int err; struct vsock_transport_send_notify_data send_data; DEFINE_WAIT_FUNC(wait, woken_wake_function); sk = sock->sk; vsk = vsock_sk(sk); total_written = 0; err = 0; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); transport = vsk->transport; /* Callers should not provide a destination with connection oriented * sockets. */ if (msg->msg_namelen) { err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; goto out; } /* Send data only if both sides are not shutdown in the direction. */ if (sk->sk_shutdown & SEND_SHUTDOWN || vsk->peer_shutdown & RCV_SHUTDOWN) { err = -EPIPE; goto out; } if (!transport || sk->sk_state != TCP_ESTABLISHED || !vsock_addr_bound(&vsk->local_addr)) { err = -ENOTCONN; goto out; } if (!vsock_addr_bound(&vsk->remote_addr)) { err = -EDESTADDRREQ; goto out; } if (msg->msg_flags & MSG_ZEROCOPY && !vsock_msgzerocopy_allow(transport)) { err = -EOPNOTSUPP; goto out; } /* Wait for room in the produce queue to enqueue our user's data. 
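 *
 * Hedged example of the loop below: an 8 KiB write against 4 KiB of
 * free peer buffer space may see stream_enqueue() accept only 4096
 * bytes on the first pass; we then sleep until space opens up and, for
 * SOCK_STREAM, eventually report the full byte count at out_err (or a
 * short count if interrupted by a signal or timeout).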
*/ timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); err = transport->notify_send_init(vsk, &send_data); if (err < 0) goto out; while (total_written < len) { ssize_t written; add_wait_queue(sk_sleep(sk), &wait); while (vsock_stream_has_space(vsk) == 0 && sk->sk_err == 0 && !(sk->sk_shutdown & SEND_SHUTDOWN) && !(vsk->peer_shutdown & RCV_SHUTDOWN)) { /* Don't wait for non-blocking sockets. */ if (timeout == 0) { err = -EAGAIN; remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } err = transport->notify_send_pre_block(vsk, &send_data); if (err < 0) { remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } release_sock(sk); timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } else if (timeout == 0) { err = -EAGAIN; remove_wait_queue(sk_sleep(sk), &wait); goto out_err; } } remove_wait_queue(sk_sleep(sk), &wait); /* These checks occur both as part of and after the loop * conditional since we need to check before and after * sleeping. */ if (sk->sk_err) { err = -sk->sk_err; goto out_err; } else if ((sk->sk_shutdown & SEND_SHUTDOWN) || (vsk->peer_shutdown & RCV_SHUTDOWN)) { err = -EPIPE; goto out_err; } err = transport->notify_send_pre_enqueue(vsk, &send_data); if (err < 0) goto out_err; /* Note that enqueue will only write as many bytes as are free * in the produce queue, so we don't need to ensure len is * smaller than the queue size. It is the caller's * responsibility to check how many bytes we were able to send. */ if (sk->sk_type == SOCK_SEQPACKET) { written = transport->seqpacket_enqueue(vsk, msg, len - total_written); } else { written = transport->stream_enqueue(vsk, msg, len - total_written); } if (written < 0) { err = written; goto out_err; } total_written += written; err = transport->notify_send_post_enqueue( vsk, written, &send_data); if (err < 0) goto out_err; } out_err: if (total_written > 0) { /* Return number of written bytes only if: * 1) SOCK_STREAM socket. * 2) SOCK_SEQPACKET socket when whole buffer is sent. */ if (sk->sk_type == SOCK_STREAM || total_written == len) err = total_written; } out: if (sk->sk_type == SOCK_STREAM) err = sk_stream_error(sk, msg->msg_flags, err); release_sock(sk); return err; } static int vsock_connectible_wait_data(struct sock *sk, struct wait_queue_entry *wait, long timeout, struct vsock_transport_recv_notify_data *recv_data, size_t target) { const struct vsock_transport *transport; struct vsock_sock *vsk; s64 data; int err; vsk = vsock_sk(sk); err = 0; transport = vsk->transport; while (1) { prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE); data = vsock_connectible_has_data(vsk); if (data != 0) break; if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN) || (vsk->peer_shutdown & SEND_SHUTDOWN)) { break; } /* Don't wait for non-blocking sockets. */ if (timeout == 0) { err = -EAGAIN; break; } if (recv_data) { err = transport->notify_recv_pre_block(vsk, target, recv_data); if (err < 0) break; } release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); break; } else if (timeout == 0) { err = -EAGAIN; break; } } finish_wait(sk_sleep(sk), wait); if (err) return err; /* Internal transport error when checking for available * data. XXX This should be changed to a connection * reset in a later change. 
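 *
 * Return contract as implemented: a positive value is the amount of
 * data available, 0 means shutdown/EOF with nothing queued, and a
 * negative value is an error; callers such as __vsock_stream_recvmsg()
 * therefore stop their read loop whenever this returns <= 0.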
*/ if (data < 0) return -ENOMEM; return data; } static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags) { struct vsock_transport_recv_notify_data recv_data; const struct vsock_transport *transport; struct vsock_sock *vsk; ssize_t copied; size_t target; long timeout; int err; DEFINE_WAIT(wait); vsk = vsock_sk(sk); transport = vsk->transport; /* We must not copy less than target bytes into the user's buffer * before returning successfully, so we wait for the consume queue to * have that much data to consume before dequeueing. Note that this * makes it impossible to handle cases where target is greater than the * queue size. */ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); if (target >= transport->stream_rcvhiwat(vsk)) { err = -ENOMEM; goto out; } timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); copied = 0; err = transport->notify_recv_init(vsk, target, &recv_data); if (err < 0) goto out; while (1) { ssize_t read; err = vsock_connectible_wait_data(sk, &wait, timeout, &recv_data, target); if (err <= 0) break; err = transport->notify_recv_pre_dequeue(vsk, target, &recv_data); if (err < 0) break; read = transport->stream_dequeue(vsk, msg, len - copied, flags); if (read < 0) { err = read; break; } copied += read; err = transport->notify_recv_post_dequeue(vsk, target, read, !(flags & MSG_PEEK), &recv_data); if (err < 0) goto out; if (read >= target || flags & MSG_PEEK) break; target -= read; } if (sk->sk_err) err = -sk->sk_err; else if (sk->sk_shutdown & RCV_SHUTDOWN) err = 0; if (copied > 0) err = copied; out: return err; } static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags) { const struct vsock_transport *transport; struct vsock_sock *vsk; ssize_t msg_len; long timeout; int err = 0; DEFINE_WAIT(wait); vsk = vsock_sk(sk); transport = vsk->transport; timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0); if (err <= 0) goto out; msg_len = transport->seqpacket_dequeue(vsk, msg, flags); if (msg_len < 0) { err = msg_len; goto out; } if (sk->sk_err) { err = -sk->sk_err; } else if (sk->sk_shutdown & RCV_SHUTDOWN) { err = 0; } else { /* User sets MSG_TRUNC, so return real length of * packet. */ if (flags & MSG_TRUNC) err = msg_len; else err = len - msg_data_left(msg); /* Always set MSG_TRUNC if real length of packet is * bigger than user's buffer. */ if (msg_len > len) msg->msg_flags |= MSG_TRUNC; } out: return err; } int __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk; struct vsock_sock *vsk; const struct vsock_transport *transport; int err; sk = sock->sk; if (unlikely(flags & MSG_ERRQUEUE)) return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR); vsk = vsock_sk(sk); err = 0; lock_sock(sk); transport = vsk->transport; if (!transport || sk->sk_state != TCP_ESTABLISHED) { /* Recvmsg is supposed to return 0 if a peer performs an * orderly shutdown. Differentiate between that case and when a * peer has not connected or a local shutdown occurred with the * SOCK_DONE flag. */ if (sock_flag(sk, SOCK_DONE)) err = 0; else err = -ENOTCONN; goto out; } if (flags & MSG_OOB) { err = -EOPNOTSUPP; goto out; } /* We don't check peer_shutdown flag here since peer may actually shut * down, but there can be data in the queue that a local socket can * receive. */ if (sk->sk_shutdown & RCV_SHUTDOWN) { err = 0; goto out; } /* It is valid on Linux to pass in a zero-length receive buffer. 
This * is not an error. We may as well bail out now. */ if (!len) { err = 0; goto out; } if (sk->sk_type == SOCK_STREAM) err = __vsock_stream_recvmsg(sk, msg, len, flags); else err = __vsock_seqpacket_recvmsg(sk, msg, len, flags); out: release_sock(sk); return err; } int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { #ifdef CONFIG_BPF_SYSCALL struct sock *sk = sock->sk; const struct proto *prot; prot = READ_ONCE(sk->sk_prot); if (prot != &vsock_proto) return prot->recvmsg(sk, msg, len, flags, NULL); #endif return __vsock_connectible_recvmsg(sock, msg, len, flags); } EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg); static int vsock_set_rcvlowat(struct sock *sk, int val) { const struct vsock_transport *transport; struct vsock_sock *vsk; vsk = vsock_sk(sk); if (val > vsk->buffer_size) return -EINVAL; transport = vsk->transport; if (transport && transport->notify_set_rcvlowat) { int err; err = transport->notify_set_rcvlowat(vsk, val); if (err) return err; } WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); return 0; } static const struct proto_ops vsock_stream_ops = { .family = PF_VSOCK, .owner = THIS_MODULE, .release = vsock_release, .bind = vsock_bind, .connect = vsock_connect, .socketpair = sock_no_socketpair, .accept = vsock_accept, .getname = vsock_getname, .poll = vsock_poll, .ioctl = vsock_ioctl, .listen = vsock_listen, .shutdown = vsock_shutdown, .setsockopt = vsock_connectible_setsockopt, .getsockopt = vsock_connectible_getsockopt, .sendmsg = vsock_connectible_sendmsg, .recvmsg = vsock_connectible_recvmsg, .mmap = sock_no_mmap, .set_rcvlowat = vsock_set_rcvlowat, .read_skb = vsock_read_skb, }; static const struct proto_ops vsock_seqpacket_ops = { .family = PF_VSOCK, .owner = THIS_MODULE, .release = vsock_release, .bind = vsock_bind, .connect = vsock_connect, .socketpair = sock_no_socketpair, .accept = vsock_accept, .getname = vsock_getname, .poll = vsock_poll, .ioctl = vsock_ioctl, .listen = vsock_listen, .shutdown = vsock_shutdown, .setsockopt = vsock_connectible_setsockopt, .getsockopt = vsock_connectible_getsockopt, .sendmsg = vsock_connectible_sendmsg, .recvmsg = vsock_connectible_recvmsg, .mmap = sock_no_mmap, .read_skb = vsock_read_skb, }; static int vsock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct vsock_sock *vsk; struct sock *sk; int ret; if (!sock) return -EINVAL; if (protocol && protocol != PF_VSOCK) return -EPROTONOSUPPORT; switch (sock->type) { case SOCK_DGRAM: sock->ops = &vsock_dgram_ops; break; case SOCK_STREAM: sock->ops = &vsock_stream_ops; break; case SOCK_SEQPACKET: sock->ops = &vsock_seqpacket_ops; break; default: return -ESOCKTNOSUPPORT; } sock->state = SS_UNCONNECTED; sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern); if (!sk) return -ENOMEM; vsk = vsock_sk(sk); if (sock->type == SOCK_DGRAM) { ret = vsock_assign_transport(vsk, NULL); if (ret < 0) { sock->sk = NULL; sock_put(sk); return ret; } } /* SOCK_DGRAM doesn't have 'setsockopt' callback set in its * proto_ops, so there is no handler for custom logic. 
*/ if (sock_type_connectible(sock->type)) set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); vsock_insert_unbound(vsk); return 0; } static const struct net_proto_family vsock_family_ops = { .family = AF_VSOCK, .create = vsock_create, .owner = THIS_MODULE, }; static long vsock_dev_do_ioctl(struct file *filp, unsigned int cmd, void __user *ptr) { u32 __user *p = ptr; int retval = 0; u32 cid; switch (cmd) { case IOCTL_VM_SOCKETS_GET_LOCAL_CID: /* To be compatible with the VMCI behavior, we prioritize the * guest CID instead of well-know host CID (VMADDR_CID_HOST). */ cid = vsock_registered_transport_cid(&transport_g2h); if (cid == VMADDR_CID_ANY) cid = vsock_registered_transport_cid(&transport_h2g); if (cid == VMADDR_CID_ANY) cid = vsock_registered_transport_cid(&transport_local); if (put_user(cid, p) != 0) retval = -EFAULT; break; default: retval = -ENOIOCTLCMD; } return retval; } static long vsock_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg); } #ifdef CONFIG_COMPAT static long vsock_dev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg)); } #endif static const struct file_operations vsock_device_ops = { .owner = THIS_MODULE, .unlocked_ioctl = vsock_dev_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = vsock_dev_compat_ioctl, #endif .open = nonseekable_open, }; static struct miscdevice vsock_device = { .name = "vsock", .fops = &vsock_device_ops, }; static int __init vsock_init(void) { int err = 0; vsock_init_tables(); vsock_proto.owner = THIS_MODULE; vsock_device.minor = MISC_DYNAMIC_MINOR; err = misc_register(&vsock_device); if (err) { pr_err("Failed to register misc device\n"); goto err_reset_transport; } err = proto_register(&vsock_proto, 1); /* we want our slab */ if (err) { pr_err("Cannot register vsock protocol\n"); goto err_deregister_misc; } err = sock_register(&vsock_family_ops); if (err) { pr_err("could not register af_vsock (%d) address family: %d\n", AF_VSOCK, err); goto err_unregister_proto; } vsock_bpf_build_proto(); return 0; err_unregister_proto: proto_unregister(&vsock_proto); err_deregister_misc: misc_deregister(&vsock_device); err_reset_transport: return err; } static void __exit vsock_exit(void) { misc_deregister(&vsock_device); sock_unregister(AF_VSOCK); proto_unregister(&vsock_proto); } const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk) { return vsk->transport; } EXPORT_SYMBOL_GPL(vsock_core_get_transport); int vsock_core_register(const struct vsock_transport *t, int features) { const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local; int err = mutex_lock_interruptible(&vsock_register_mutex); if (err) return err; t_h2g = transport_h2g; t_g2h = transport_g2h; t_dgram = transport_dgram; t_local = transport_local; if (features & VSOCK_TRANSPORT_F_H2G) { if (t_h2g) { err = -EBUSY; goto err_busy; } t_h2g = t; } if (features & VSOCK_TRANSPORT_F_G2H) { if (t_g2h) { err = -EBUSY; goto err_busy; } t_g2h = t; } if (features & VSOCK_TRANSPORT_F_DGRAM) { if (t_dgram) { err = -EBUSY; goto err_busy; } t_dgram = t; } if (features & VSOCK_TRANSPORT_F_LOCAL) { if (t_local) { err = -EBUSY; goto err_busy; } t_local = t; } transport_h2g = t_h2g; transport_g2h = t_g2h; transport_dgram = t_dgram; transport_local = t_local; err_busy: mutex_unlock(&vsock_register_mutex); return err; } EXPORT_SYMBOL_GPL(vsock_core_register); void vsock_core_unregister(const struct vsock_transport *t) { 
	mutex_lock(&vsock_register_mutex);
	if (transport_h2g == t)
		transport_h2g = NULL;
	if (transport_g2h == t)
		transport_g2h = NULL;
	if (transport_dgram == t)
		transport_dgram = NULL;
	if (transport_local == t)
		transport_local = NULL;
	mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_unregister);

module_init(vsock_init);
module_exit(vsock_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");
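/*
 * Editor's illustrative sketch (not part of af_vsock.c): a minimal
 * userspace client exercising the paths implemented above. Socket
 * options are set at level AF_VSOCK, matching the "level != AF_VSOCK"
 * check in vsock_connectible_setsockopt(); SO_VM_SOCKETS_CONNECT_TIMEOUT
 * resolves to the _OLD/_NEW variant handled there. The CID and port
 * values below are arbitrary example values.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr;
	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
	unsigned long long buf_size = 256 * 1024;	/* a u64, as the kernel side expects */
	unsigned int cid = VMADDR_CID_ANY;
	int fd, devfd;

	/* Served by vsock_dev_do_ioctl() above. */
	devfd = open("/dev/vsock", O_RDONLY);
	if (devfd >= 0) {
		ioctl(devfd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
		close(devfd);
	}

	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Handled by the SO_VM_SOCKETS_CONNECT_TIMEOUT_* cases above. */
	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT, &tv, sizeof(tv));
	/* Routed to vsock_update_buffer_size() via SO_VM_SOCKETS_BUFFER_SIZE. */
	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, &buf_size,
		   sizeof(buf_size));

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_HOST;	/* example: talk to the host */
	addr.svm_port = 1234;		/* example port */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	close(fd);
	return 0;
}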
/*
 * FUSE: Filesystem in Userspace
 * Copyright (C) 2016 Canonical Ltd. <seth.forshee@canonical.com>
 *
 * This program can be distributed under the terms of the GNU GPL.
 * See the file COPYING.
 */

#include "fuse_i.h"

#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>

static struct posix_acl *__fuse_get_acl(struct fuse_conn *fc,
					struct inode *inode, int type, bool rcu)
{
	int size;
	const char *name;
	void *value = NULL;
	struct posix_acl *acl;

	if (rcu)
		return ERR_PTR(-ECHILD);

	if (fuse_is_bad(inode))
		return ERR_PTR(-EIO);

	if (fc->no_getxattr)
		return NULL;

	if (type == ACL_TYPE_ACCESS)
		name = XATTR_NAME_POSIX_ACL_ACCESS;
	else if (type == ACL_TYPE_DEFAULT)
		name = XATTR_NAME_POSIX_ACL_DEFAULT;
	else
		return ERR_PTR(-EOPNOTSUPP);

	value = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!value)
		return ERR_PTR(-ENOMEM);
	size = fuse_getxattr(inode, name, value, PAGE_SIZE);
	if (size > 0)
		acl = posix_acl_from_xattr(fc->user_ns, value, size);
	else if ((size == 0) || (size == -ENODATA) ||
		 (size == -EOPNOTSUPP && fc->no_getxattr))
		acl = NULL;
	else if (size == -ERANGE)
		acl = ERR_PTR(-E2BIG);
	else
		acl = ERR_PTR(size);

	kfree(value);
	return acl;
}

static inline bool fuse_no_acl(const struct fuse_conn *fc,
			       const struct inode *inode)
{
	/*
	 * Refuse interacting with POSIX ACLs for daemons that
	 * don't support FUSE_POSIX_ACL and are not mounted on
	 * the host to retain backwards compatibility.
	 */
	return !fc->posix_acl && (i_user_ns(inode) != &init_user_ns);
}

struct posix_acl *fuse_get_acl(struct mnt_idmap *idmap,
			       struct dentry *dentry, int type)
{
	struct inode *inode = d_inode(dentry);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fuse_no_acl(fc, inode))
		return ERR_PTR(-EOPNOTSUPP);

	return __fuse_get_acl(fc, inode, type, false);
}

struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * FUSE daemons before FUSE_POSIX_ACL was introduced could get and set
	 * POSIX ACLs without them being used for permission checking by the
	 * vfs. Retain that behavior for backwards compatibility as there are
	 * filesystems that do all permission checking for acls in the daemon
	 * and not in the kernel.
	 */
	if (!fc->posix_acl)
		return NULL;

	return __fuse_get_acl(fc, inode, type, rcu);
}

int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct posix_acl *acl, int type)
{
	struct inode *inode = d_inode(dentry);
	struct fuse_conn *fc = get_fuse_conn(inode);
	const char *name;
	int ret;

	if (fuse_is_bad(inode))
		return -EIO;

	if (fc->no_setxattr || fuse_no_acl(fc, inode))
		return -EOPNOTSUPP;

	if (type == ACL_TYPE_ACCESS)
		name = XATTR_NAME_POSIX_ACL_ACCESS;
	else if (type == ACL_TYPE_DEFAULT)
		name = XATTR_NAME_POSIX_ACL_DEFAULT;
	else
		return -EINVAL;

	if (acl) {
		unsigned int extra_flags = 0;
		/*
		 * Fuse userspace is responsible for updating access
		 * permissions in the inode, if needed.
		 * fuse_setxattr invalidates the inode attributes, which will
		 * force them to be refreshed the next time they are used,
		 * and it also updates i_ctime.
		 */
		size_t size = posix_acl_xattr_size(acl->a_count);
		void *value;

		if (size > PAGE_SIZE)
			return -E2BIG;

		value = kmalloc(size, GFP_KERNEL);
		if (!value)
			return -ENOMEM;

		ret = posix_acl_to_xattr(fc->user_ns, acl, value, size);
		if (ret < 0) {
			kfree(value);
			return ret;
		}

		/*
		 * Fuse daemons without FUSE_POSIX_ACL never changed the passed
		 * through POSIX ACLs. Such daemons don't expect setgid bits to
		 * be stripped.
		 */
		if (fc->posix_acl &&
		    !in_group_or_capable(idmap, inode,
					 i_gid_into_vfsgid(idmap, inode)))
			extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;

		ret = fuse_setxattr(inode, name, value, size, 0, extra_flags);
		kfree(value);
	} else {
		ret = fuse_removexattr(inode, name);
	}

	if (fc->posix_acl) {
		/*
		 * Fuse daemons without FUSE_POSIX_ACL never cached POSIX ACLs
		 * and didn't invalidate attributes. Retain that behavior.
		 */
		forget_all_cached_acls(inode);
		fuse_invalidate_attr(inode);
	}
	return ret;
}
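/*
 * Editor's illustrative sketch (not part of fs/fuse/acl.c): from
 * userspace, the get/set paths above are reachable through the plain
 * xattr interface, since ACL_TYPE_ACCESS maps onto the
 * "system.posix_acl_access" name (XATTR_NAME_POSIX_ACL_ACCESS) that
 * fuse_getxattr()/fuse_setxattr() are invoked with. The mount path
 * below is an arbitrary example.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[4096];
	ssize_t size;

	/* On a FUSE mount this reaches __fuse_get_acl() via the VFS. */
	size = getxattr("/mnt/fuse/file", "system.posix_acl_access",
			buf, sizeof(buf));
	if (size < 0)
		perror("getxattr");	/* ENODATA just means "no ACL set" */
	else
		printf("ACL xattr blob: %zd bytes\n", size);

	return 0;
}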
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * netprio_cgroup.h		Control Group Priority set
 *
 * Authors:	Neil Horman <nhorman@tuxdriver.com>
 */

#ifndef _NETPRIO_CGROUP_H
#define _NETPRIO_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map {
	struct rcu_head rcu;
	u32 priomap_len;
	u32 priomap[];
};

static inline u32 task_netprioidx(struct task_struct *p)
{
	struct cgroup_subsys_state *css;
	u32 idx;

	rcu_read_lock();
	css = task_css(p, net_prio_cgrp_id);
	idx = css->id;
	rcu_read_unlock();
	return idx;
}

static inline void sock_update_netprioidx(struct sock_cgroup_data *skcd)
{
	if (in_interrupt())
		return;

	sock_cgroup_set_prioidx(skcd, task_netprioidx(current));
}

#else /* !CONFIG_CGROUP_NET_PRIO */

static inline u32 task_netprioidx(struct task_struct *p)
{
	return 0;
}

static inline void sock_update_netprioidx(struct sock_cgroup_data *skcd)
{
}

#endif /* CONFIG_CGROUP_NET_PRIO */
#endif /* _NETPRIO_CGROUP_H */
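/*
 * Editor's illustrative sketch: task_netprioidx() only yields an index
 * into struct netprio_map; the per-device priorities themselves are
 * configured from userspace by writing "<ifname> <prio>" into a
 * net_prio cgroup's net_prio.ifpriomap file. The cgroup-v1 mount
 * point, group name and interface name below are assumptions.
 */
#include <stdio.h>

int main(void)
{
	const char *map =
		"/sys/fs/cgroup/net_prio/example_group/net_prio.ifpriomap";
	FILE *f = fopen(map, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Sockets created by tasks in example_group get priority 5 on eth0. */
	fprintf(f, "eth0 5\n");
	fclose(f);
	return 0;
}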
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __FS_CEPH_BUFFER_H
#define __FS_CEPH_BUFFER_H

#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/types.h>
#include <linux/uio.h>

/*
 * a simple reference counted buffer.
 *
 * use kmalloc for smaller sizes, vmalloc for larger sizes.
 */
struct ceph_buffer {
	struct kref kref;
	struct kvec vec;
	size_t alloc_len;
};

extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp);
extern void ceph_buffer_release(struct kref *kref);

static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
{
	kref_get(&b->kref);
	return b;
}

static inline void ceph_buffer_put(struct ceph_buffer *b)
{
	if (b)
		kref_put(&b->kref, ceph_buffer_release);
}

extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);

#endif
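/*
 * Editor's illustrative sketch of the refcounting discipline the
 * helpers above imply (kernel context assumed, function name
 * hypothetical): ceph_buffer_new() hands back a buffer with its kref
 * at 1, each additional holder takes a reference via ceph_buffer_get(),
 * and the final ceph_buffer_put() triggers ceph_buffer_release()
 * through kref_put().
 */
static int example_buffer_refcounting(void)
{
	struct ceph_buffer *b, *other;

	b = ceph_buffer_new(PAGE_SIZE, GFP_NOFS);	/* kref == 1 */
	if (!b)
		return -ENOMEM;

	other = ceph_buffer_get(b);	/* second holder, kref == 2 */

	ceph_buffer_put(b);		/* kref == 1 */
	ceph_buffer_put(other);		/* kref == 0: release frees the buffer */
	return 0;
}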
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
static
DECLARE_FAULT_ATTR(null_init_hctx_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)

/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of bitmap are for special purpose. LOCK means the cache
 * page is being flushed to storage. FREE means the cache page is freed and
 * should be skipped from flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};

#define NULLB_PAGE_LOCK		(MAP_SZ - 1)
#define NULLB_PAGE_FREE		(MAP_SZ - 2)

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

static bool g_virt_boundary;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_poll_queues = 1;
module_param_named(poll_queues, g_poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
/*
 * For more details about fault injection, please refer to
 * Documentation/fault-injection/fault-injection.rst.
 */
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");

static char g_init_hctx_str[80];
module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
#endif

/*
 * Historic queue modes.
 *
 * These days nothing but NULL_Q_MQ is actually supported, but we keep the
 * enum for error reporting.
 */
enum {
	NULL_Q_BIO	= 0,
	NULL_Q_RQ	= 1,
	NULL_Q_MQ	= 2,
};

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int g_max_sectors;
module_param_named(max_sectors, g_max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");

static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool g_shared_tags;
module_param_named(shared_tags, g_shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static bool g_shared_tag_bitmap;
module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_memory_backed;
module_param_named(memory_backed, g_memory_backed, bool, 0444);
MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");

static bool g_discard;
module_param_named(discard, g_discard, bool, 0444);
MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");

static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device.
Default: 0 (none)");

static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");

static unsigned int g_mbps;
module_param_named(mbps, g_mbps, uint, 0444);
MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");

static unsigned long g_zone_capacity;
module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

static unsigned int g_zone_max_open;
module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");

static unsigned int g_zone_max_active;
module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");

static int g_zone_append_max_sectors = INT_MAX;
module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444);
MODULE_PARM_DESC(zone_append_max_sectors,
		 "Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation");

static bool g_zone_full;
module_param_named(zone_full, g_zone_full, bool, S_IRUGO);
MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");

static bool g_rotational;
module_param_named(rotational, g_rotational, bool, S_IRUGO);
MODULE_PARM_DESC(rotational, "Set the rotational feature for the device. Default: false");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static struct nullb *null_find_dev_by_name(const char *name);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ?
container_of(to_config_group(item), struct nullb_device, group) : NULL; } static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", val); } static inline ssize_t nullb_device_ulong_attr_show(unsigned long val, char *page) { return snprintf(page, PAGE_SIZE, "%lu\n", val); } static inline ssize_t nullb_device_bool_attr_show(bool val, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", val); } static ssize_t nullb_device_uint_attr_store(unsigned int *val, const char *page, size_t count) { unsigned int tmp; int result; result = kstrtouint(page, 0, &tmp); if (result < 0) return result; *val = tmp; return count; } static ssize_t nullb_device_ulong_attr_store(unsigned long *val, const char *page, size_t count) { int result; unsigned long tmp; result = kstrtoul(page, 0, &tmp); if (result < 0) return result; *val = tmp; return count; } static ssize_t nullb_device_bool_attr_store(bool *val, const char *page, size_t count) { bool tmp; int result; result = kstrtobool(page, &tmp); if (result < 0) return result; *val = tmp; return count; } /* The following macro should only be used with TYPE = {uint, ulong, bool}. */ #define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \ static ssize_t \ nullb_device_##NAME##_show(struct config_item *item, char *page) \ { \ return nullb_device_##TYPE##_attr_show( \ to_nullb_device(item)->NAME, page); \ } \ static ssize_t \ nullb_device_##NAME##_store(struct config_item *item, const char *page, \ size_t count) \ { \ int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\ struct nullb_device *dev = to_nullb_device(item); \ TYPE new_value = 0; \ int ret; \ \ ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\ if (ret < 0) \ return ret; \ if (apply_fn) \ ret = apply_fn(dev, new_value); \ else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \ ret = -EBUSY; \ if (ret < 0) \ return ret; \ dev->NAME = new_value; \ return count; \ } \ CONFIGFS_ATTR(nullb_device_, NAME); static int nullb_update_nr_hw_queues(struct nullb_device *dev, unsigned int submit_queues, unsigned int poll_queues) { struct blk_mq_tag_set *set; int ret, nr_hw_queues; if (!dev->nullb) return 0; /* * Make sure at least one submit queue exists. */ if (!submit_queues) return -EINVAL; /* * Make sure that null_init_hctx() does not access nullb->queues[] past * the end of that array. */ if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues) return -EINVAL; /* * Keep previous and new queue numbers in nullb_device for reference in * the call back function null_map_queues(). */ dev->prev_submit_queues = dev->submit_queues; dev->prev_poll_queues = dev->poll_queues; dev->submit_queues = submit_queues; dev->poll_queues = poll_queues; set = dev->nullb->tag_set; nr_hw_queues = submit_queues + poll_queues; blk_mq_update_nr_hw_queues(set, nr_hw_queues); ret = set->nr_hw_queues == nr_hw_queues ? 
0 : -ENOMEM; if (ret) { /* on error, revert the queue numbers */ dev->submit_queues = dev->prev_submit_queues; dev->poll_queues = dev->prev_poll_queues; } return ret; } static int nullb_apply_submit_queues(struct nullb_device *dev, unsigned int submit_queues) { int ret; mutex_lock(&lock); ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); mutex_unlock(&lock); return ret; } static int nullb_apply_poll_queues(struct nullb_device *dev, unsigned int poll_queues) { int ret; mutex_lock(&lock); ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); mutex_unlock(&lock); return ret; } NULLB_DEVICE_ATTR(size, ulong, NULL); NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL); NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues); NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues); NULLB_DEVICE_ATTR(home_node, uint, NULL); NULLB_DEVICE_ATTR(queue_mode, uint, NULL); NULLB_DEVICE_ATTR(blocksize, uint, NULL); NULLB_DEVICE_ATTR(max_sectors, uint, NULL); NULLB_DEVICE_ATTR(irqmode, uint, NULL); NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL); NULLB_DEVICE_ATTR(index, uint, NULL); NULLB_DEVICE_ATTR(blocking, bool, NULL); NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL); NULLB_DEVICE_ATTR(memory_backed, bool, NULL); NULLB_DEVICE_ATTR(discard, bool, NULL); NULLB_DEVICE_ATTR(mbps, uint, NULL); NULLB_DEVICE_ATTR(cache_size, ulong, NULL); NULLB_DEVICE_ATTR(zoned, bool, NULL); NULLB_DEVICE_ATTR(zone_size, ulong, NULL); NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL); NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL); NULLB_DEVICE_ATTR(zone_max_open, uint, NULL); NULLB_DEVICE_ATTR(zone_max_active, uint, NULL); NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL); NULLB_DEVICE_ATTR(zone_full, bool, NULL); NULLB_DEVICE_ATTR(virt_boundary, bool, NULL); NULLB_DEVICE_ATTR(no_sched, bool, NULL); NULLB_DEVICE_ATTR(shared_tags, bool, NULL); NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL); NULLB_DEVICE_ATTR(fua, bool, NULL); NULLB_DEVICE_ATTR(rotational, bool, NULL); NULLB_DEVICE_ATTR(badblocks_once, bool, NULL); NULLB_DEVICE_ATTR(badblocks_partial_io, bool, NULL); static ssize_t nullb_device_power_show(struct config_item *item, char *page) { return nullb_device_bool_attr_show(to_nullb_device(item)->power, page); } static ssize_t nullb_device_power_store(struct config_item *item, const char *page, size_t count) { struct nullb_device *dev = to_nullb_device(item); bool newp = false; ssize_t ret; ret = nullb_device_bool_attr_store(&newp, page, count); if (ret < 0) return ret; ret = count; mutex_lock(&lock); if (!dev->power && newp) { if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) goto out; ret = null_add_dev(dev); if (ret) { clear_bit(NULLB_DEV_FL_UP, &dev->flags); goto out; } set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); dev->power = newp; ret = count; } else if (dev->power && !newp) { if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { dev->power = newp; null_del_dev(dev->nullb); } clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); } out: mutex_unlock(&lock); return ret; } CONFIGFS_ATTR(nullb_device_, power); static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page) { struct nullb_device *t_dev = to_nullb_device(item); return badblocks_show(&t_dev->badblocks, page, 0); } static ssize_t nullb_device_badblocks_store(struct config_item *item, const char *page, size_t count) { struct nullb_device *t_dev = to_nullb_device(item); char *orig, *buf, *tmp; u64 start, end; int ret; orig = kstrndup(page, count, GFP_KERNEL); if (!orig) return -ENOMEM; buf = 
strstrip(orig); ret = -EINVAL; if (buf[0] != '+' && buf[0] != '-') goto out; tmp = strchr(&buf[1], '-'); if (!tmp) goto out; *tmp = '\0'; ret = kstrtoull(buf + 1, 0, &start); if (ret) goto out; ret = kstrtoull(tmp + 1, 0, &end); if (ret) goto out; ret = -EINVAL; if (start > end) goto out; /* enable badblocks */ cmpxchg(&t_dev->badblocks.shift, -1, 0); if (buf[0] == '+') { if (badblocks_set(&t_dev->badblocks, start, end - start + 1, 1)) ret = count; } else if (badblocks_clear(&t_dev->badblocks, start, end - start + 1)) { ret = count; } out: kfree(orig); return ret; } CONFIGFS_ATTR(nullb_device_, badblocks); static ssize_t nullb_device_zone_readonly_store(struct config_item *item, const char *page, size_t count) { struct nullb_device *dev = to_nullb_device(item); return zone_cond_store(dev, page, count, BLK_ZONE_COND_READONLY); } CONFIGFS_ATTR_WO(nullb_device_, zone_readonly); static ssize_t nullb_device_zone_offline_store(struct config_item *item, const char *page, size_t count) { struct nullb_device *dev = to_nullb_device(item); return zone_cond_store(dev, page, count, BLK_ZONE_COND_OFFLINE); } CONFIGFS_ATTR_WO(nullb_device_, zone_offline); static struct configfs_attribute *nullb_device_attrs[] = { &nullb_device_attr_badblocks, &nullb_device_attr_badblocks_once, &nullb_device_attr_badblocks_partial_io, &nullb_device_attr_blocking, &nullb_device_attr_blocksize, &nullb_device_attr_cache_size, &nullb_device_attr_completion_nsec, &nullb_device_attr_discard, &nullb_device_attr_fua, &nullb_device_attr_home_node, &nullb_device_attr_hw_queue_depth, &nullb_device_attr_index, &nullb_device_attr_irqmode, &nullb_device_attr_max_sectors, &nullb_device_attr_mbps, &nullb_device_attr_memory_backed, &nullb_device_attr_no_sched, &nullb_device_attr_poll_queues, &nullb_device_attr_power, &nullb_device_attr_queue_mode, &nullb_device_attr_rotational, &nullb_device_attr_shared_tag_bitmap, &nullb_device_attr_shared_tags, &nullb_device_attr_size, &nullb_device_attr_submit_queues, &nullb_device_attr_use_per_node_hctx, &nullb_device_attr_virt_boundary, &nullb_device_attr_zone_append_max_sectors, &nullb_device_attr_zone_capacity, &nullb_device_attr_zone_full, &nullb_device_attr_zone_max_active, &nullb_device_attr_zone_max_open, &nullb_device_attr_zone_nr_conv, &nullb_device_attr_zone_offline, &nullb_device_attr_zone_readonly, &nullb_device_attr_zone_size, &nullb_device_attr_zoned, NULL, }; static void nullb_device_release(struct config_item *item) { struct nullb_device *dev = to_nullb_device(item); null_free_device_storage(dev, false); null_free_dev(dev); } static struct configfs_item_operations nullb_device_ops = { .release = nullb_device_release, }; static const struct config_item_type nullb_device_type = { .ct_item_ops = &nullb_device_ops, .ct_attrs = nullb_device_attrs, .ct_owner = THIS_MODULE, }; #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static void nullb_add_fault_config(struct nullb_device *dev) { fault_config_init(&dev->timeout_config, "timeout_inject"); fault_config_init(&dev->requeue_config, "requeue_inject"); fault_config_init(&dev->init_hctx_fault_config, "init_hctx_fault_inject"); configfs_add_default_group(&dev->timeout_config.group, &dev->group); configfs_add_default_group(&dev->requeue_config.group, &dev->group); configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group); } #else static void nullb_add_fault_config(struct nullb_device *dev) { } #endif static struct config_group *nullb_group_make_group(struct config_group *group, const char *name) { struct nullb_device *dev; if 
(null_find_dev_by_name(name)) return ERR_PTR(-EEXIST); dev = null_alloc_dev(); if (!dev) return ERR_PTR(-ENOMEM); config_group_init_type_name(&dev->group, name, &nullb_device_type); nullb_add_fault_config(dev); return &dev->group; } static void nullb_group_drop_item(struct config_group *group, struct config_item *item) { struct nullb_device *dev = to_nullb_device(item); if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { mutex_lock(&lock); dev->power = false; null_del_dev(dev->nullb); mutex_unlock(&lock); } config_item_put(item); } static ssize_t memb_group_features_show(struct config_item *item, char *page) { struct configfs_attribute **entry; char delimiter = ','; size_t left = PAGE_SIZE; size_t written = 0; int ret; for (entry = &nullb_device_attrs[0]; *entry && left > 0; entry++) { if (!*(entry + 1)) delimiter = '\n'; ret = snprintf(page + written, left, "%s%c", (*entry)->ca_name, delimiter); if (ret >= left) { WARN_ONCE(1, "Too many null_blk features to print\n"); memzero_explicit(page, PAGE_SIZE); return -ENOBUFS; } left -= ret; written += ret; } return written; } CONFIGFS_ATTR_RO(memb_group_, features); static struct configfs_attribute *nullb_group_attrs[] = { &memb_group_attr_features, NULL, }; static struct configfs_group_operations nullb_group_ops = { .make_group = nullb_group_make_group, .drop_item = nullb_group_drop_item, }; static const struct config_item_type nullb_group_type = { .ct_group_ops = &nullb_group_ops, .ct_attrs = nullb_group_attrs, .ct_owner = THIS_MODULE, }; static struct configfs_subsystem nullb_subsys = { .su_group = { .cg_item = { .ci_namebuf = "nullb", .ci_type = &nullb_group_type, }, }, }; static inline int null_cache_active(struct nullb *nullb) { return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); } static struct nullb_device *null_alloc_dev(void) { struct nullb_device *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION dev->timeout_config.attr = null_timeout_attr; dev->requeue_config.attr = null_requeue_attr; dev->init_hctx_fault_config.attr = null_init_hctx_attr; #endif INIT_RADIX_TREE(&dev->data, GFP_ATOMIC); INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC); if (badblocks_init(&dev->badblocks, 0)) { kfree(dev); return NULL; } dev->size = g_gb * 1024; dev->completion_nsec = g_completion_nsec; dev->submit_queues = g_submit_queues; dev->prev_submit_queues = g_submit_queues; dev->poll_queues = g_poll_queues; dev->prev_poll_queues = g_poll_queues; dev->home_node = g_home_node; dev->queue_mode = g_queue_mode; dev->blocksize = g_bs; dev->max_sectors = g_max_sectors; dev->irqmode = g_irqmode; dev->hw_queue_depth = g_hw_queue_depth; dev->blocking = g_blocking; dev->memory_backed = g_memory_backed; dev->discard = g_discard; dev->cache_size = g_cache_size; dev->mbps = g_mbps; dev->use_per_node_hctx = g_use_per_node_hctx; dev->zoned = g_zoned; dev->zone_size = g_zone_size; dev->zone_capacity = g_zone_capacity; dev->zone_nr_conv = g_zone_nr_conv; dev->zone_max_open = g_zone_max_open; dev->zone_max_active = g_zone_max_active; dev->zone_append_max_sectors = g_zone_append_max_sectors; dev->zone_full = g_zone_full; dev->virt_boundary = g_virt_boundary; dev->no_sched = g_no_sched; dev->shared_tags = g_shared_tags; dev->shared_tag_bitmap = g_shared_tag_bitmap; dev->fua = g_fua; dev->rotational = g_rotational; return dev; } static void null_free_dev(struct nullb_device *dev) { if (!dev) return; null_free_zoned_dev(dev); badblocks_exit(&dev->badblocks); kfree(dev); } static enum hrtimer_restart 
null_cmd_timer_expired(struct hrtimer *timer) { struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer); blk_mq_end_request(blk_mq_rq_from_pdu(cmd), cmd->error); return HRTIMER_NORESTART; } static void null_cmd_end_timer(struct nullb_cmd *cmd) { ktime_t kt = cmd->nq->dev->completion_nsec; hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); } static void null_complete_rq(struct request *rq) { struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); blk_mq_end_request(rq, cmd->error); } static struct nullb_page *null_alloc_page(void) { struct nullb_page *t_page; t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO); if (!t_page) return NULL; t_page->page = alloc_pages(GFP_NOIO, 0); if (!t_page->page) { kfree(t_page); return NULL; } memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); return t_page; } static void null_free_page(struct nullb_page *t_page) { __set_bit(NULLB_PAGE_FREE, t_page->bitmap); if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap)) return; __free_page(t_page->page); kfree(t_page); } static bool null_page_empty(struct nullb_page *page) { int size = MAP_SZ - 2; return find_first_bit(page->bitmap, size) == size; } static void null_free_sector(struct nullb *nullb, sector_t sector, bool is_cache) { unsigned int sector_bit; u64 idx; struct nullb_page *t_page, *ret; struct radix_tree_root *root; root = is_cache ? &nullb->dev->cache : &nullb->dev->data; idx = sector >> PAGE_SECTORS_SHIFT; sector_bit = (sector & SECTOR_MASK); t_page = radix_tree_lookup(root, idx); if (t_page) { __clear_bit(sector_bit, t_page->bitmap); if (null_page_empty(t_page)) { ret = radix_tree_delete_item(root, idx, t_page); WARN_ON(ret != t_page); null_free_page(ret); if (is_cache) nullb->dev->curr_cache -= PAGE_SIZE; } } } static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx, struct nullb_page *t_page, bool is_cache) { struct radix_tree_root *root; root = is_cache ? &nullb->dev->cache : &nullb->dev->data; if (radix_tree_insert(root, idx, t_page)) { null_free_page(t_page); t_page = radix_tree_lookup(root, idx); WARN_ON(!t_page || t_page->page->private != idx); } else if (is_cache) nullb->dev->curr_cache += PAGE_SIZE; return t_page; } static void null_free_device_storage(struct nullb_device *dev, bool is_cache) { unsigned long pos = 0; int nr_pages; struct nullb_page *ret, *t_pages[FREE_BATCH]; struct radix_tree_root *root; root = is_cache ? &dev->cache : &dev->data; do { int i; nr_pages = radix_tree_gang_lookup(root, (void **)t_pages, pos, FREE_BATCH); for (i = 0; i < nr_pages; i++) { pos = t_pages[i]->page->private; ret = radix_tree_delete_item(root, pos, t_pages[i]); WARN_ON(ret != t_pages[i]); null_free_page(ret); } pos++; } while (nr_pages == FREE_BATCH); if (is_cache) dev->curr_cache = 0; } static struct nullb_page *__null_lookup_page(struct nullb *nullb, sector_t sector, bool for_write, bool is_cache) { unsigned int sector_bit; u64 idx; struct nullb_page *t_page; struct radix_tree_root *root; idx = sector >> PAGE_SECTORS_SHIFT; sector_bit = (sector & SECTOR_MASK); root = is_cache ? 
&nullb->dev->cache : &nullb->dev->data; t_page = radix_tree_lookup(root, idx); WARN_ON(t_page && t_page->page->private != idx); if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap))) return t_page; return NULL; } static struct nullb_page *null_lookup_page(struct nullb *nullb, sector_t sector, bool for_write, bool ignore_cache) { struct nullb_page *page = NULL; if (!ignore_cache) page = __null_lookup_page(nullb, sector, for_write, true); if (page) return page; return __null_lookup_page(nullb, sector, for_write, false); } static struct nullb_page *null_insert_page(struct nullb *nullb, sector_t sector, bool ignore_cache) __releases(&nullb->lock) __acquires(&nullb->lock) { u64 idx; struct nullb_page *t_page; t_page = null_lookup_page(nullb, sector, true, ignore_cache); if (t_page) return t_page; spin_unlock_irq(&nullb->lock); t_page = null_alloc_page(); if (!t_page) goto out_lock; if (radix_tree_preload(GFP_NOIO)) goto out_freepage; spin_lock_irq(&nullb->lock); idx = sector >> PAGE_SECTORS_SHIFT; t_page->page->private = idx; t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache); radix_tree_preload_end(); return t_page; out_freepage: null_free_page(t_page); out_lock: spin_lock_irq(&nullb->lock); return null_lookup_page(nullb, sector, true, ignore_cache); } static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) { int i; unsigned int offset; u64 idx; struct nullb_page *t_page, *ret; void *dst, *src; idx = c_page->page->private; t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap); if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) { null_free_page(c_page); if (t_page && null_page_empty(t_page)) { ret = radix_tree_delete_item(&nullb->dev->data, idx, t_page); null_free_page(t_page); } return 0; } if (!t_page) return -ENOMEM; src = kmap_local_page(c_page->page); dst = kmap_local_page(t_page->page); for (i = 0; i < PAGE_SECTORS; i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { if (test_bit(i, c_page->bitmap)) { offset = (i << SECTOR_SHIFT); memcpy(dst + offset, src + offset, nullb->dev->blocksize); __set_bit(i, t_page->bitmap); } } kunmap_local(dst); kunmap_local(src); ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); null_free_page(ret); nullb->dev->curr_cache -= PAGE_SIZE; return 0; } static int null_make_cache_space(struct nullb *nullb, unsigned long n) { int i, err, nr_pages; struct nullb_page *c_pages[FREE_BATCH]; unsigned long flushed = 0, one_round; again: if ((nullb->dev->cache_size * 1024 * 1024) > nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) return 0; nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); /* * nullb_flush_cache_page could unlock before using the c_pages. 
To * avoid race, we don't allow page free */ for (i = 0; i < nr_pages; i++) { nullb->cache_flush_pos = c_pages[i]->page->private; /* * We found the page which is being flushed to disk by other * threads */ if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap)) c_pages[i] = NULL; else __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap); } one_round = 0; for (i = 0; i < nr_pages; i++) { if (c_pages[i] == NULL) continue; err = null_flush_cache_page(nullb, c_pages[i]); if (err) return err; one_round++; } flushed += one_round << PAGE_SHIFT; if (n > flushed) { if (nr_pages == 0) nullb->cache_flush_pos = 0; if (one_round == 0) { /* give other threads a chance */ spin_unlock_irq(&nullb->lock); spin_lock_irq(&nullb->lock); } goto again; } return 0; } static int copy_to_nullb(struct nullb *nullb, struct page *source, unsigned int off, sector_t sector, size_t n, bool is_fua) { size_t temp, count = 0; unsigned int offset; struct nullb_page *t_page; while (count < n) { temp = min_t(size_t, nullb->dev->blocksize, n - count); if (null_cache_active(nullb) && !is_fua) null_make_cache_space(nullb, PAGE_SIZE); offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; t_page = null_insert_page(nullb, sector, !null_cache_active(nullb) || is_fua); if (!t_page) return -ENOSPC; memcpy_page(t_page->page, offset, source, off + count, temp); __set_bit(sector & SECTOR_MASK, t_page->bitmap); if (is_fua) null_free_sector(nullb, sector, true); count += temp; sector += temp >> SECTOR_SHIFT; } return 0; } static int copy_from_nullb(struct nullb *nullb, struct page *dest, unsigned int off, sector_t sector, size_t n) { size_t temp, count = 0; unsigned int offset; struct nullb_page *t_page; while (count < n) { temp = min_t(size_t, nullb->dev->blocksize, n - count); offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; t_page = null_lookup_page(nullb, sector, false, !null_cache_active(nullb)); if (t_page) memcpy_page(dest, off + count, t_page->page, offset, temp); else zero_user(dest, off + count, temp); count += temp; sector += temp >> SECTOR_SHIFT; } return 0; } static void nullb_fill_pattern(struct nullb *nullb, struct page *page, unsigned int len, unsigned int off) { memset_page(page, off, 0xff, len); } blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector, sector_t nr_sectors) { struct nullb *nullb = dev->nullb; size_t n = nr_sectors << SECTOR_SHIFT; size_t temp; spin_lock_irq(&nullb->lock); while (n > 0) { temp = min_t(size_t, n, dev->blocksize); null_free_sector(nullb, sector, false); if (null_cache_active(nullb)) null_free_sector(nullb, sector, true); sector += temp >> SECTOR_SHIFT; n -= temp; } spin_unlock_irq(&nullb->lock); return BLK_STS_OK; } static blk_status_t null_handle_flush(struct nullb *nullb) { int err; if (!null_cache_active(nullb)) return 0; spin_lock_irq(&nullb->lock); while (true) { err = null_make_cache_space(nullb, nullb->dev->cache_size * 1024 * 1024); if (err || nullb->dev->curr_cache == 0) break; } WARN_ON(!radix_tree_empty(&nullb->dev->cache)); spin_unlock_irq(&nullb->lock); return errno_to_blk_status(err); } static int null_transfer(struct nullb *nullb, struct page *page, unsigned int len, unsigned int off, bool is_write, sector_t sector, bool is_fua) { struct nullb_device *dev = nullb->dev; unsigned int valid_len = len; int err = 0; if (!is_write) { if (dev->zoned) valid_len = null_zone_valid_read_len(nullb, sector, len); if (valid_len) { err = copy_from_nullb(nullb, page, off, sector, valid_len); off += valid_len; len -= valid_len; } if (len) nullb_fill_pattern(nullb, page, len, off); 
flush_dcache_page(page); } else { flush_dcache_page(page); err = copy_to_nullb(nullb, page, off, sector, len, is_fua); } return err; } /* * Transfer data for the given request. The transfer size is capped by the * nr_sectors argument. */ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd, sector_t nr_sectors) { struct request *rq = blk_mq_rq_from_pdu(cmd); struct nullb *nullb = cmd->nq->dev->nullb; int err = 0; unsigned int len; sector_t sector = blk_rq_pos(rq); unsigned int max_bytes = nr_sectors << SECTOR_SHIFT; unsigned int transferred_bytes = 0; struct req_iterator iter; struct bio_vec bvec; spin_lock_irq(&nullb->lock); rq_for_each_segment(bvec, rq, iter) { len = bvec.bv_len; if (transferred_bytes + len > max_bytes) len = max_bytes - transferred_bytes; err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, op_is_write(req_op(rq)), sector, rq->cmd_flags & REQ_FUA); if (err) break; sector += len >> SECTOR_SHIFT; transferred_bytes += len; if (transferred_bytes >= max_bytes) break; } spin_unlock_irq(&nullb->lock); return errno_to_blk_status(err); } static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd) { struct nullb_device *dev = cmd->nq->dev; struct nullb *nullb = dev->nullb; blk_status_t sts = BLK_STS_OK; struct request *rq = blk_mq_rq_from_pdu(cmd); if (!hrtimer_active(&nullb->bw_timer)) hrtimer_restart(&nullb->bw_timer); if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) { blk_mq_stop_hw_queues(nullb->q); /* race with timer */ if (atomic_long_read(&nullb->cur_bytes) > 0) blk_mq_start_stopped_hw_queues(nullb->q, true); /* requeue request */ sts = BLK_STS_DEV_RESOURCE; } return sts; } /* * Check if the command should fail due to the badblocks. If so, return * BLK_STS_IOERR and set the number of partial I/O sectors to be written or * read through *nr_sectors; this may be less than the requested number of * sectors. * * @cmd: The command to handle. * @sector: The start sector for I/O. * @nr_sectors: Specifies number of sectors to write or read, and returns the * number of sectors to be written or read.
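* * Worked example (derived from the code below): with a 4096-byte blocksize (8 sectors per block), badblocks_partial_io set, a request starting at sector 0 for 16 sectors, and the first bad sector at 10, the bad sector is aligned down to 8 and *nr_sectors becomes 8: the first 8 sectors are still transferred before the command fails with BLK_STS_IOERR.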
*/ blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector, unsigned int *nr_sectors) { struct badblocks *bb = &cmd->nq->dev->badblocks; struct nullb_device *dev = cmd->nq->dev; unsigned int block_sectors = dev->blocksize >> SECTOR_SHIFT; sector_t first_bad, bad_sectors; unsigned int partial_io_sectors = 0; if (!badblocks_check(bb, sector, *nr_sectors, &first_bad, &bad_sectors)) return BLK_STS_OK; if (cmd->nq->dev->badblocks_once) badblocks_clear(bb, first_bad, bad_sectors); if (cmd->nq->dev->badblocks_partial_io) { if (!IS_ALIGNED(first_bad, block_sectors)) first_bad = ALIGN_DOWN(first_bad, block_sectors); if (sector < first_bad) partial_io_sectors = first_bad - sector; } *nr_sectors = partial_io_sectors; return BLK_STS_IOERR; } blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op, sector_t sector, sector_t nr_sectors) { struct nullb_device *dev = cmd->nq->dev; if (op == REQ_OP_DISCARD) return null_handle_discard(dev, sector, nr_sectors); return null_handle_data_transfer(cmd, nr_sectors); } static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd) { struct request *rq = blk_mq_rq_from_pdu(cmd); struct nullb_device *dev = cmd->nq->dev; struct bio *bio; if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) { __rq_for_each_bio(bio, rq) zero_fill_bio(bio); } } static inline void nullb_complete_cmd(struct nullb_cmd *cmd) { struct request *rq = blk_mq_rq_from_pdu(cmd); /* * Since root privileges are required to configure the null_blk * driver, it is fine that this driver does not initialize the * data buffers of read commands. Zero-initialize these buffers * anyway if KMSAN is enabled, to prevent KMSAN from complaining * about null_blk not initializing read data buffers. */ if (IS_ENABLED(CONFIG_KMSAN)) nullb_zero_read_cmd_buffer(cmd); /* Complete the I/O inline, via softirq, or via timer */ switch (cmd->nq->dev->irqmode) { case NULL_IRQ_SOFTIRQ: blk_mq_complete_request(rq); break; case NULL_IRQ_NONE: blk_mq_end_request(rq, cmd->error); break; case NULL_IRQ_TIMER: null_cmd_end_timer(cmd); break; } } blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op, sector_t sector, unsigned int nr_sectors) { struct nullb_device *dev = cmd->nq->dev; blk_status_t badblocks_ret = BLK_STS_OK; blk_status_t ret; if (dev->badblocks.shift != -1) badblocks_ret = null_handle_badblocks(cmd, sector, &nr_sectors); if (dev->memory_backed && nr_sectors) { ret = null_handle_memory_backed(cmd, op, sector, nr_sectors); if (ret != BLK_STS_OK) return ret; } return badblocks_ret; } static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, sector_t nr_sectors, enum req_op op) { struct nullb_device *dev = cmd->nq->dev; struct nullb *nullb = dev->nullb; blk_status_t sts; if (op == REQ_OP_FLUSH) { cmd->error = null_handle_flush(nullb); goto out; } if (dev->zoned) sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors); else sts = null_process_cmd(cmd, op, sector, nr_sectors); /* Do not overwrite errors (e.g.
timeout errors) */ if (cmd->error == BLK_STS_OK) cmd->error = sts; out: nullb_complete_cmd(cmd); } static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer) { struct nullb *nullb = container_of(timer, struct nullb, bw_timer); ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); unsigned int mbps = nullb->dev->mbps; if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) return HRTIMER_NORESTART; atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); blk_mq_start_stopped_hw_queues(nullb->q, true); hrtimer_forward_now(&nullb->bw_timer, timer_interval); return HRTIMER_RESTART; } static void nullb_setup_bwtimer(struct nullb *nullb) { ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); hrtimer_setup(&nullb->bw_timer, nullb_bwtimer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL); atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); } #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static bool should_timeout_request(struct request *rq) { struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); struct nullb_device *dev = cmd->nq->dev; return should_fail(&dev->timeout_config.attr, 1); } static bool should_requeue_request(struct request *rq) { struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); struct nullb_device *dev = cmd->nq->dev; return should_fail(&dev->requeue_config.attr, 1); } static bool should_init_hctx_fail(struct nullb_device *dev) { return should_fail(&dev->init_hctx_fault_config.attr, 1); } #else static bool should_timeout_request(struct request *rq) { return false; } static bool should_requeue_request(struct request *rq) { return false; } static bool should_init_hctx_fail(struct nullb_device *dev) { return false; } #endif static void null_map_queues(struct blk_mq_tag_set *set) { struct nullb *nullb = set->driver_data; int i, qoff; unsigned int submit_queues = g_submit_queues; unsigned int poll_queues = g_poll_queues; if (nullb) { struct nullb_device *dev = nullb->dev; /* * Check nr_hw_queues of the tag set to see whether the expected * number of hardware queues was prepared. If the block layer * failed to prepare them, fall back to the previous numbers of * submit and poll queues to map queues.
*/ if (set->nr_hw_queues == dev->submit_queues + dev->poll_queues) { submit_queues = dev->submit_queues; poll_queues = dev->poll_queues; } else if (set->nr_hw_queues == dev->prev_submit_queues + dev->prev_poll_queues) { submit_queues = dev->prev_submit_queues; poll_queues = dev->prev_poll_queues; } else { pr_warn("tag set has unexpected nr_hw_queues: %d\n", set->nr_hw_queues); WARN_ON_ONCE(true); submit_queues = 1; poll_queues = 0; } } for (i = 0, qoff = 0; i < set->nr_maps; i++) { struct blk_mq_queue_map *map = &set->map[i]; switch (i) { case HCTX_TYPE_DEFAULT: map->nr_queues = submit_queues; break; case HCTX_TYPE_READ: map->nr_queues = 0; continue; case HCTX_TYPE_POLL: map->nr_queues = poll_queues; break; } map->queue_offset = qoff; qoff += map->nr_queues; blk_mq_map_queues(map); } } static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nullb_queue *nq = hctx->driver_data; LIST_HEAD(list); int nr = 0; struct request *rq; spin_lock(&nq->poll_lock); list_splice_init(&nq->poll_list, &list); list_for_each_entry(rq, &list, queuelist) blk_mq_set_request_complete(rq); spin_unlock(&nq->poll_lock); while (!list_empty(&list)) { struct nullb_cmd *cmd; struct request *req; req = list_first_entry(&list, struct request, queuelist); list_del_init(&req->queuelist); cmd = blk_mq_rq_to_pdu(req); cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req), blk_rq_sectors(req)); if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK, blk_mq_end_request_batch)) blk_mq_end_request(req, cmd->error); nr++; } return nr; } static enum blk_eh_timer_return null_timeout_rq(struct request *rq) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); if (hctx->type == HCTX_TYPE_POLL) { struct nullb_queue *nq = hctx->driver_data; spin_lock(&nq->poll_lock); /* The request may have completed meanwhile. */ if (blk_mq_request_completed(rq)) { spin_unlock(&nq->poll_lock); return BLK_EH_DONE; } list_del_init(&rq->queuelist); spin_unlock(&nq->poll_lock); } pr_info("rq %p timed out\n", rq); /* * If the device is marked as blocking (i.e. memory backed or zoned * device), the submission path may be blocked waiting for resources * and cause real timeouts. For these real timeouts, the submission * path will complete the request using blk_mq_complete_request(). * Only fake timeouts need to execute blk_mq_complete_request() here. 
*/ cmd->error = BLK_STS_TIMEOUT; if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL) blk_mq_complete_request(rq); return BLK_EH_DONE; } static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct request *rq = bd->rq; struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); struct nullb_queue *nq = hctx->driver_data; sector_t nr_sectors = blk_rq_sectors(rq); sector_t sector = blk_rq_pos(rq); const bool is_poll = hctx->type == HCTX_TYPE_POLL; might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) { hrtimer_setup(&cmd->timer, null_cmd_timer_expired, CLOCK_MONOTONIC, HRTIMER_MODE_REL); } cmd->error = BLK_STS_OK; cmd->nq = nq; cmd->fake_timeout = should_timeout_request(rq) || blk_should_fake_timeout(rq->q); if (should_requeue_request(rq)) { /* * Alternate between hitting the core BUSY path, and the * driver driven requeue path */ nq->requeue_selection++; if (nq->requeue_selection & 1) return BLK_STS_RESOURCE; blk_mq_requeue_request(rq, true); return BLK_STS_OK; } if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) { blk_status_t sts = null_handle_throttled(cmd); if (sts != BLK_STS_OK) return sts; } blk_mq_start_request(rq); if (is_poll) { spin_lock(&nq->poll_lock); list_add_tail(&rq->queuelist, &nq->poll_list); spin_unlock(&nq->poll_lock); return BLK_STS_OK; } if (cmd->fake_timeout) return BLK_STS_OK; null_handle_cmd(cmd, sector, nr_sectors, req_op(rq)); return BLK_STS_OK; } static void null_queue_rqs(struct rq_list *rqlist) { struct rq_list requeue_list = {}; struct blk_mq_queue_data bd = { }; blk_status_t ret; do { struct request *rq = rq_list_pop(rqlist); bd.rq = rq; ret = null_queue_rq(rq->mq_hctx, &bd); if (ret != BLK_STS_OK) rq_list_add_tail(&requeue_list, rq); } while (!rq_list_empty(rqlist)); *rqlist = requeue_list; } static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) { nq->dev = nullb->dev; INIT_LIST_HEAD(&nq->poll_list); spin_lock_init(&nq->poll_lock); } static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data, unsigned int hctx_idx) { struct nullb *nullb = hctx->queue->queuedata; struct nullb_queue *nq; if (should_init_hctx_fail(nullb->dev)) return -EFAULT; nq = &nullb->queues[hctx_idx]; hctx->driver_data = nq; null_init_queue(nullb, nq); return 0; } static const struct blk_mq_ops null_mq_ops = { .queue_rq = null_queue_rq, .queue_rqs = null_queue_rqs, .complete = null_complete_rq, .timeout = null_timeout_rq, .poll = null_poll, .map_queues = null_map_queues, .init_hctx = null_init_hctx, }; static void null_del_dev(struct nullb *nullb) { struct nullb_device *dev; if (!nullb) return; dev = nullb->dev; ida_free(&nullb_indexes, nullb->index); list_del_init(&nullb->list); del_gendisk(nullb->disk); if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { hrtimer_cancel(&nullb->bw_timer); atomic_long_set(&nullb->cur_bytes, LONG_MAX); blk_mq_start_stopped_hw_queues(nullb->q, true); } put_disk(nullb->disk); if (nullb->tag_set == &nullb->__tag_set) blk_mq_free_tag_set(nullb->tag_set); kfree(nullb->queues); if (null_cache_active(nullb)) null_free_device_storage(nullb->dev, true); kfree(nullb); dev->nullb = NULL; } static void null_config_discard(struct nullb *nullb, struct queue_limits *lim) { if (nullb->dev->discard == false) return; if (!nullb->dev->memory_backed) { nullb->dev->discard = false; pr_info("discard option is ignored without memory backing\n"); return; } if (nullb->dev->zoned) { nullb->dev->discard = false; pr_info("discard option is ignored in zoned 
mode\n"); return; } lim->max_hw_discard_sectors = UINT_MAX >> 9; } static const struct block_device_operations null_ops = { .owner = THIS_MODULE, .report_zones = null_report_zones, }; static int setup_queues(struct nullb *nullb) { int nqueues = nr_cpu_ids; if (g_poll_queues) nqueues += g_poll_queues; nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue), GFP_KERNEL); if (!nullb->queues) return -ENOMEM; return 0; } static int null_init_tag_set(struct blk_mq_tag_set *set, int poll_queues) { set->ops = &null_mq_ops; set->cmd_size = sizeof(struct nullb_cmd); set->timeout = 5 * HZ; set->nr_maps = 1; if (poll_queues) { set->nr_hw_queues += poll_queues; set->nr_maps += 2; } return blk_mq_alloc_tag_set(set); } static int null_init_global_tag_set(void) { int error; if (tag_set.ops) return 0; tag_set.nr_hw_queues = g_submit_queues; tag_set.queue_depth = g_hw_queue_depth; tag_set.numa_node = g_home_node; if (g_no_sched) tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT; if (g_shared_tag_bitmap) tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED; if (g_blocking) tag_set.flags |= BLK_MQ_F_BLOCKING; error = null_init_tag_set(&tag_set, g_poll_queues); if (error) tag_set.ops = NULL; return error; } static int null_setup_tagset(struct nullb *nullb) { if (nullb->dev->shared_tags) { nullb->tag_set = &tag_set; return null_init_global_tag_set(); } nullb->tag_set = &nullb->__tag_set; nullb->tag_set->driver_data = nullb; nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues; nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth; nullb->tag_set->numa_node = nullb->dev->home_node; if (nullb->dev->no_sched) nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT; if (nullb->dev->shared_tag_bitmap) nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; if (nullb->dev->blocking) nullb->tag_set->flags |= BLK_MQ_F_BLOCKING; return null_init_tag_set(nullb->tag_set, nullb->dev->poll_queues); } static int null_validate_conf(struct nullb_device *dev) { if (dev->queue_mode == NULL_Q_RQ) { pr_err("legacy IO path is no longer available\n"); return -EINVAL; } if (dev->queue_mode == NULL_Q_BIO) { pr_err("BIO-based IO path is no longer available, using blk-mq instead.\n"); dev->queue_mode = NULL_Q_MQ; } if (dev->use_per_node_hctx) { if (dev->submit_queues != nr_online_nodes) dev->submit_queues = nr_online_nodes; } else if (dev->submit_queues > nr_cpu_ids) dev->submit_queues = nr_cpu_ids; else if (dev->submit_queues == 0) dev->submit_queues = 1; dev->prev_submit_queues = dev->submit_queues; if (dev->poll_queues > g_poll_queues) dev->poll_queues = g_poll_queues; dev->prev_poll_queues = dev->poll_queues; dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER); /* Do memory allocation, so set blocking */ if (dev->memory_backed) dev->blocking = true; else /* cache is meaningless */ dev->cache_size = 0; dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024, dev->cache_size); dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps); if (dev->zoned && (!dev->zone_size || !is_power_of_2(dev->zone_size))) { pr_err("zone_size must be power-of-two\n"); return -EINVAL; } return 0; } #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static bool __null_setup_fault(struct fault_attr *attr, char *str) { if (!str[0]) return true; if (!setup_fault_attr(attr, str)) return false; attr->verbose = 0; return true; } #endif static bool null_setup_fault(void) { #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION if (!__null_setup_fault(&null_timeout_attr, g_timeout_str)) return false; if (!__null_setup_fault(&null_requeue_attr, g_requeue_str)) return 
false; if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str)) return false; #endif return true; } static int null_add_dev(struct nullb_device *dev) { struct queue_limits lim = { .logical_block_size = dev->blocksize, .physical_block_size = dev->blocksize, .max_hw_sectors = dev->max_sectors, }; struct nullb *nullb; int rv; rv = null_validate_conf(dev); if (rv) return rv; nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); if (!nullb) { rv = -ENOMEM; goto out; } nullb->dev = dev; dev->nullb = nullb; spin_lock_init(&nullb->lock); rv = setup_queues(nullb); if (rv) goto out_free_nullb; rv = null_setup_tagset(nullb); if (rv) goto out_cleanup_queues; if (dev->virt_boundary) lim.virt_boundary_mask = PAGE_SIZE - 1; null_config_discard(nullb, &lim); if (dev->zoned) { rv = null_init_zoned_dev(dev, &lim); if (rv) goto out_cleanup_tags; } if (dev->cache_size > 0) { set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); lim.features |= BLK_FEAT_WRITE_CACHE; if (dev->fua) lim.features |= BLK_FEAT_FUA; } if (dev->rotational) lim.features |= BLK_FEAT_ROTATIONAL; nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb); if (IS_ERR(nullb->disk)) { rv = PTR_ERR(nullb->disk); goto out_cleanup_zone; } nullb->q = nullb->disk->queue; if (dev->mbps) { set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags); nullb_setup_bwtimer(nullb); } nullb->q->queuedata = nullb; rv = ida_alloc(&nullb_indexes, GFP_KERNEL); if (rv < 0) goto out_cleanup_disk; nullb->index = rv; dev->index = rv; if (config_item_name(&dev->group.cg_item)) { /* Use configfs dir name as the device name */ snprintf(nullb->disk_name, sizeof(nullb->disk_name), "%s", config_item_name(&dev->group.cg_item)); } else { sprintf(nullb->disk_name, "nullb%d", nullb->index); } set_capacity(nullb->disk, ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT); nullb->disk->major = null_major; nullb->disk->first_minor = nullb->index; nullb->disk->minors = 1; nullb->disk->fops = &null_ops; nullb->disk->private_data = nullb; strscpy(nullb->disk->disk_name, nullb->disk_name); if (nullb->dev->zoned) { rv = null_register_zoned_dev(nullb); if (rv) goto out_ida_free; } rv = add_disk(nullb->disk); if (rv) goto out_ida_free; list_add_tail(&nullb->list, &nullb_list); pr_info("disk %s created\n", nullb->disk_name); return 0; out_ida_free: ida_free(&nullb_indexes, nullb->index); out_cleanup_disk: put_disk(nullb->disk); out_cleanup_zone: null_free_zoned_dev(dev); out_cleanup_tags: if (nullb->tag_set == &nullb->__tag_set) blk_mq_free_tag_set(nullb->tag_set); out_cleanup_queues: kfree(nullb->queues); out_free_nullb: kfree(nullb); dev->nullb = NULL; out: return rv; } static struct nullb *null_find_dev_by_name(const char *name) { struct nullb *nullb = NULL, *nb; mutex_lock(&lock); list_for_each_entry(nb, &nullb_list, list) { if (strcmp(nb->disk_name, name) == 0) { nullb = nb; break; } } mutex_unlock(&lock); return nullb; } static int null_create_dev(void) { struct nullb_device *dev; int ret; dev = null_alloc_dev(); if (!dev) return -ENOMEM; mutex_lock(&lock); ret = null_add_dev(dev); mutex_unlock(&lock); if (ret) { null_free_dev(dev); return ret; } return 0; } static void null_destroy_dev(struct nullb *nullb) { struct nullb_device *dev = nullb->dev; null_del_dev(nullb); null_free_device_storage(dev, false); null_free_dev(dev); } static int __init null_init(void) { int ret = 0; unsigned int i; struct nullb *nullb; if (g_bs > PAGE_SIZE) { pr_warn("invalid block size\n"); pr_warn("defaults block size to %lu\n", PAGE_SIZE); g_bs = PAGE_SIZE; } if (g_home_node != NUMA_NO_NODE && 
g_home_node >= nr_online_nodes) { pr_err("invalid home_node value\n"); g_home_node = NUMA_NO_NODE; } if (!null_setup_fault()) return -EINVAL; if (g_queue_mode == NULL_Q_RQ) { pr_err("legacy IO path is no longer available\n"); return -EINVAL; } if (g_use_per_node_hctx) { if (g_submit_queues != nr_online_nodes) { pr_warn("submit_queues param is set to %u.\n", nr_online_nodes); g_submit_queues = nr_online_nodes; } } else if (g_submit_queues > nr_cpu_ids) { g_submit_queues = nr_cpu_ids; } else if (g_submit_queues <= 0) { g_submit_queues = 1; } config_group_init(&nullb_subsys.su_group); mutex_init(&nullb_subsys.su_mutex); ret = configfs_register_subsystem(&nullb_subsys); if (ret) return ret; mutex_init(&lock); null_major = register_blkdev(0, "nullb"); if (null_major < 0) { ret = null_major; goto err_conf; } for (i = 0; i < nr_devices; i++) { ret = null_create_dev(); if (ret) goto err_dev; } pr_info("module loaded\n"); return 0; err_dev: while (!list_empty(&nullb_list)) { nullb = list_entry(nullb_list.next, struct nullb, list); null_destroy_dev(nullb); } unregister_blkdev(null_major, "nullb"); err_conf: configfs_unregister_subsystem(&nullb_subsys); return ret; } static void __exit null_exit(void) { struct nullb *nullb; configfs_unregister_subsystem(&nullb_subsys); unregister_blkdev(null_major, "nullb"); mutex_lock(&lock); while (!list_empty(&nullb_list)) { nullb = list_entry(nullb_list.next, struct nullb, list); null_destroy_dev(nullb); } mutex_unlock(&lock); if (tag_set.ops) blk_mq_free_tag_set(&tag_set); mutex_destroy(&lock); } module_init(null_init); module_exit(null_exit); MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>"); MODULE_DESCRIPTION("multi queue aware block test driver"); MODULE_LICENSE("GPL");
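/*
 * Editor's note: a minimal userspace sketch (not part of the driver above) of
 * the sector-to-page arithmetic that null_insert_page()/null_lookup_page()
 * rely on. It assumes the usual null_blk definitions: SECTOR_SHIFT = 9,
 * PAGE_SECTORS_SHIFT = PAGE_SHIFT - SECTOR_SHIFT, and
 * SECTOR_MASK = (1 << PAGE_SECTORS_SHIFT) - 1; the MY_* names below are
 * stand-ins so the example compiles on its own.
 */
#include <stdio.h>
#include <stdint.h>

#define MY_PAGE_SHIFT		12	/* assumed 4 KiB pages */
#define MY_SECTOR_SHIFT		9	/* 512-byte sectors */
#define MY_PAGE_SECTORS_SHIFT	(MY_PAGE_SHIFT - MY_SECTOR_SHIFT)
#define MY_SECTOR_MASK		((1u << MY_PAGE_SECTORS_SHIFT) - 1)

int main(void)
{
	uint64_t sector = 1234;

	/* Radix tree index: which backing page holds this sector. */
	uint64_t idx = sector >> MY_PAGE_SECTORS_SHIFT;
	/* Bitmap bit: which sector slot inside that page is valid. */
	unsigned int bit = sector & MY_SECTOR_MASK;
	/* Byte offset of the sector's data within the page. */
	unsigned int offset = bit << MY_SECTOR_SHIFT;

	/* Prints: sector 1234 -> page 154, bit 2, offset 1024 */
	printf("sector %llu -> page %llu, bit %u, offset %u\n",
	       (unsigned long long)sector, (unsigned long long)idx, bit, offset);
	return 0;
}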
// SPDX-License-Identifier: GPL-2.0-or-later /* * Syncookies implementation for the Linux kernel * * Copyright (C) 1997 Andi Kleen * Based on ideas by D.J.Bernstein and Eric Schenk. */ #include <linux/tcp.h> #include <linux/siphash.h> #include <linux/kernel.h> #include <linux/export.h> #include <net/secure_seq.h> #include <net/tcp.h> #include <net/route.h> static siphash_aligned_key_t syncookie_secret[2]; #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) /* TCP Timestamp: the 6 lowest bits of the timestamp sent in the cookie SYN-ACK * store TCP options: * * MSB LSB * | 31 ... 6 | 5 | 4 | 3 2 1 0 | * | Timestamp | ECN | SACK | WScale | * * When we receive a valid cookie-ACK, we look at the echoed tsval (if * any) to figure out which TCP options we should use for the rebuilt * connection. * * A WScale setting of '0xf' (which is an invalid scaling value) * means that the original SYN did not include the TCP window scaling option. */ #define TS_OPT_WSCALE_MASK 0xf #define TS_OPT_SACK BIT(4) #define TS_OPT_ECN BIT(5) /* There is no TS_OPT_TIMESTAMP: * if the ACK contains the timestamp option, we already know it was * requested/supported by the syn/synack exchange.
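* * As a worked example of the layout above: an echoed options value of 0x37 decodes as wscale 7 (the TS_OPT_WSCALE_MASK nibble), SACK ok (TS_OPT_SACK, bit 4) and ECN ok (TS_OPT_ECN, bit 5).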
*/ #define TSBITS 6 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, u32 count, int c) { net_get_random_once(syncookie_secret, sizeof(syncookie_secret)); return siphash_4u32((__force u32)saddr, (__force u32)daddr, (__force u32)sport << 16 | (__force u32)dport, count, &syncookie_secret[c]); } /* * when syncookies are in effect and tcp timestamps are enabled we encode * tcp options in the lower bits of the timestamp value that will be * sent in the syn-ack. * Since subsequent timestamps use the normal tcp_time_stamp value, we * must make sure that the resulting initial timestamp is <= tcp_time_stamp. */ u64 cookie_init_timestamp(struct request_sock *req, u64 now) { const struct inet_request_sock *ireq = inet_rsk(req); u64 ts, ts_now = tcp_ns_to_ts(false, now); u32 options = 0; options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK; if (ireq->sack_ok) options |= TS_OPT_SACK; if (ireq->ecn_ok) options |= TS_OPT_ECN; ts = (ts_now >> TSBITS) << TSBITS; ts |= options; if (ts > ts_now) ts -= (1UL << TSBITS); if (tcp_rsk(req)->req_usec_ts) return ts * NSEC_PER_USEC; return ts * NSEC_PER_MSEC; } static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, __u32 sseq, __u32 data) { /* * Compute the secure sequence number. * The output should be: * HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24) * + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24). * Where sseq is their sequence number and count increases every * minute by 1. * As an extra hack, we add a small "data" value that encodes the * MSS into the second hash value. */ u32 count = tcp_cookie_time(); return (cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq + (count << COOKIEBITS) + ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data) & COOKIEMASK)); } /* * This retrieves the small "data" value from the syncookie. * If the syncookie is bad, the data returned will be out of * range. This must be checked by the caller. * * The count value used to generate the cookie must be less than * MAX_SYNCOOKIE_AGE minutes in the past. * The return value is (__u32)-1 if this test fails. */ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr, __be16 sport, __be16 dport, __u32 sseq) { u32 diff, count = tcp_cookie_time(); /* Strip away the layers from the cookie */ cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq; /* Cookie is now reduced to (count * 2^24) + (hash % 2^24) */ diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS); if (diff >= MAX_SYNCOOKIE_AGE) return (__u32)-1; return (cookie - cookie_hash(saddr, daddr, sport, dport, count - diff, 1)) & COOKIEMASK; /* Leaving the data behind */ } /* * MSS values are chosen based on the 2011 paper * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson. * Values .. * .. lower than 536 are rare (< 0.2%) * .. between 537 and 1299 account for less than 1.5% of observed values * .. in the 1300-1349 range account for about 15 to 20% of observed mss values * .. exceeding 1460 are very rare (< 0.04%) * * 1460 is the single most frequently announced mss value (30 to 46% depending * on monitor location). Table must be sorted. */ static __u16 const msstab[] = { 536, 1300, 1440, /* 1440, 1452: PPPoE */ 1460, }; /* * Generate a syncookie. mssp points to the mss, which is returned * rounded down to the value encoded in the cookie.
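* For example, with the msstab above an offered MSS of 1452 selects index 2, so *mssp is returned (and later reconstructed from the ACK) as 1440.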
*/ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, u16 *mssp) { int mssind; const __u16 mss = *mssp; for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) if (mss >= msstab[mssind]) break; *mssp = msstab[mssind]; return secure_tcp_syn_cookie(iph->saddr, iph->daddr, th->source, th->dest, ntohl(th->seq), mssind); } EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence); __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mssp) { const struct iphdr *iph = ip_hdr(skb); const struct tcphdr *th = tcp_hdr(skb); return __cookie_v4_init_sequence(iph, th, mssp); } /* * Check if an ACK sequence number is a valid syncookie. * Return the decoded mss if it is, or 0 if not. */ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th) { __u32 cookie = ntohl(th->ack_seq) - 1; __u32 seq = ntohl(th->seq) - 1; __u32 mssind; mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr, th->source, th->dest, seq); return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0; } EXPORT_SYMBOL_GPL(__cookie_v4_check); struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet_connection_sock *icsk = inet_csk(sk); struct sock *child; bool own_req; child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst, NULL, &own_req); if (child) { refcount_set(&req->rsk_refcnt, 1); sock_rps_save_rxhash(child, skb); if (rsk_drop_req(req)) { reqsk_put(req); return child; } if (inet_csk_reqsk_queue_add(sk, req, child)) return child; bh_unlock_sock(child); sock_put(child); } __reqsk_free(req); return NULL; } EXPORT_IPV6_MOD(tcp_get_cookie_sock); /* * when syncookies are in effect and tcp timestamps are enabled we store * additional tcp options in the timestamp. * This extracts these options from the timestamp echo. * * return false if we decode a tcp option that is disabled * on the host. */ bool cookie_timestamp_decode(const struct net *net, struct tcp_options_received *tcp_opt) { /* echoed timestamp, lowest bits contain options */ u32 options = tcp_opt->rcv_tsecr; if (!tcp_opt->saw_tstamp) { tcp_clear_options(tcp_opt); return true; } if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps)) return false; tcp_opt->sack_ok = (options & TS_OPT_SACK) ?
TCP_SACK_SEEN : 0; if (tcp_opt->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack)) return false; if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK) return true; /* no window scaling */ tcp_opt->wscale_ok = 1; tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK; return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0; } EXPORT_IPV6_MOD(cookie_timestamp_decode); static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { struct inet_request_sock *ireq = inet_rsk(req); struct tcp_request_sock *treq = tcp_rsk(req); const struct tcphdr *th = tcp_hdr(skb); req->num_retrans = 0; ireq->ir_num = ntohs(th->dest); ireq->ir_rmt_port = th->source; ireq->ir_iif = inet_request_bound_dev_if(sk, skb); ireq->ir_mark = inet_request_mark(sk, skb); if (IS_ENABLED(CONFIG_SMC)) ireq->smc_ok = 0; treq->snt_synack = 0; treq->snt_tsval_first = 0; treq->tfo_listener = false; treq->txhash = net_tx_rndhash(); treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = ntohl(th->ack_seq) - 1; treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield; treq->req_usec_ts = false; #if IS_ENABLED(CONFIG_MPTCP) treq->is_mptcp = sk_is_mptcp(sk); if (treq->is_mptcp) return mptcp_subflow_init_cookie_req(req, sk, skb); #endif return 0; } #if IS_ENABLED(CONFIG_BPF) struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb) { struct request_sock *req = inet_reqsk(skb->sk); skb->sk = NULL; skb->destructor = NULL; if (cookie_tcp_reqsk_init(sk, skb, req)) { reqsk_free(req); req = NULL; } return req; } EXPORT_IPV6_MOD_GPL(cookie_bpf_check); #endif struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk, struct sk_buff *skb, struct tcp_options_received *tcp_opt, int mss, u32 tsoff) { struct inet_request_sock *ireq; struct tcp_request_sock *treq; struct request_sock *req; if (sk_is_mptcp(sk)) req = mptcp_subflow_reqsk_alloc(ops, sk, false); else req = inet_reqsk_alloc(ops, sk, false); if (!req) return NULL; if (cookie_tcp_reqsk_init(sk, skb, req)) { reqsk_free(req); return NULL; } ireq = inet_rsk(req); treq = tcp_rsk(req); req->mss = mss; req->ts_recent = tcp_opt->saw_tstamp ? tcp_opt->rcv_tsval : 0; ireq->snd_wscale = tcp_opt->snd_wscale; ireq->tstamp_ok = tcp_opt->saw_tstamp; ireq->sack_ok = tcp_opt->sack_ok; ireq->wscale_ok = tcp_opt->wscale_ok; ireq->ecn_ok = !!(tcp_opt->rcv_tsecr & TS_OPT_ECN); treq->ts_off = tsoff; return req; } EXPORT_IPV6_MOD_GPL(cookie_tcp_reqsk_alloc); static struct request_sock *cookie_tcp_check(struct net *net, struct sock *sk, struct sk_buff *skb) { struct tcp_options_received tcp_opt; u32 tsoff = 0; int mss; if (tcp_synq_no_recent_overflow(sk)) goto out; mss = __cookie_v4_check(ip_hdr(skb), tcp_hdr(skb)); if (!mss) { __NET_INC_STATS(net, LINUX_MIB_SYNCOOKIESFAILED); goto out; } __NET_INC_STATS(net, LINUX_MIB_SYNCOOKIESRECV); /* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(net, skb, &tcp_opt, 0, NULL); if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) { tsoff = secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr); tcp_opt.rcv_tsecr -= tsoff; } if (!cookie_timestamp_decode(net, &tcp_opt)) goto out; return cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb, &tcp_opt, mss, tsoff); out: return ERR_PTR(-EINVAL); } /* On input, sk is a listener. * Output is the child socket on success, the listener if the incoming * packet would not create a child, or NULL if memory could not be allocated.
*/ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) { struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt; const struct tcphdr *th = tcp_hdr(skb); struct tcp_sock *tp = tcp_sk(sk); struct inet_request_sock *ireq; struct net *net = sock_net(sk); struct request_sock *req; struct sock *ret = sk; struct flowi4 fl4; struct rtable *rt; __u8 rcv_wscale; int full_space; SKB_DR(reason); if (!READ_ONCE(net->ipv4.sysctl_tcp_syncookies) || !th->ack || th->rst) goto out; if (cookie_bpf_ok(skb)) { req = cookie_bpf_check(sk, skb); } else { req = cookie_tcp_check(net, sk, skb); if (IS_ERR(req)) goto out; } if (!req) { SKB_DR_SET(reason, NO_SOCKET); goto out_drop; } ireq = inet_rsk(req); sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); /* We threw the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) */ RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb)); if (security_inet_conn_request(sk, skb, req)) { SKB_DR_SET(reason, SECURITY_HOOK); goto out_free; } tcp_ao_syncookie(sk, skb, req, AF_INET); /* * We need to look up the route here to get at the correct * window size. Ideally we would make sure that the window size * hasn't changed since we received the original syn, but there is * no easy way to do this. */ flowi4_init_output(&fl4, ireq->ir_iif, ireq->ir_mark, ip_sock_rt_tos(sk), ip_sock_rt_scope(sk), IPPROTO_TCP, inet_sk_flowi_flags(sk), opt->srr ? opt->faddr : ireq->ir_rmt_addr, ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid); security_req_classify_flow(req, flowi4_to_flowi_common(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) { SKB_DR_SET(reason, IP_OUTNOROUTES); goto out_free; } /* Try to redo what tcp_v4_send_synack did. */ req->rsk_window_clamp = READ_ONCE(tp->window_clamp) ? : dst_metric(&rt->dst, RTAX_WINDOW); /* limit the window selection if the user enforces a smaller rx buffer */ full_space = tcp_full_space(sk); if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) req->rsk_window_clamp = full_space; tcp_select_initial_window(sk, full_space, req->mss, &req->rsk_rcv_wnd, &req->rsk_window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(&rt->dst, RTAX_INITRWND)); /* req->syncookie is set true only if the ACK was validated * by a BPF kfunc, in which case rcv_wscale is already configured. */ if (!req->syncookie) ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok &= cookie_ecn_ok(net, &rt->dst); ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst); /* ip_queue_xmit() depends on our flow being set up. * Normal sockets get it right from inet_csk_route_child_sock() */ if (!ret) { SKB_DR_SET(reason, NO_SOCKET); goto out_drop; } inet_sk(ret)->cork.fl.u.ip4 = fl4; out: return ret; out_free: reqsk_free(req); out_drop: sk_skb_reason_drop(sk, skb, reason); return NULL; }
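/*
 * Editor's note: a self-contained userspace sketch (not part of the file
 * above) of the cookie arithmetic used by secure_tcp_syn_cookie() and
 * check_tcp_syn_cookie(). The two cookie_hash() calls are replaced by a
 * stand-in mixer; only the add/mask/shift structure is being illustrated.
 */
#include <stdio.h>
#include <stdint.h>

#define COOKIEBITS 24
#define COOKIEMASK (((uint32_t)1 << COOKIEBITS) - 1)
#define MAX_SYNCOOKIE_AGE 2	/* minutes, as in the kernel */

/* Stand-in for cookie_hash(): any keyed 32-bit mix would do here. */
static uint32_t fake_hash(uint32_t count, int c)
{
	return (count * 2654435761u) ^ (c ? 0xdeadbeef : 0xcafebabe);
}

static uint32_t encode(uint32_t sseq, uint32_t data, uint32_t count)
{
	return fake_hash(0, 0) + sseq + (count << COOKIEBITS) +
	       ((fake_hash(count, 1) + data) & COOKIEMASK);
}

static uint32_t decode(uint32_t cookie, uint32_t sseq, uint32_t count_now)
{
	uint32_t diff;

	cookie -= fake_hash(0, 0) + sseq;	/* strip the outer layer */
	/* Age in "minutes" is recovered from the top 8 bits. */
	diff = (count_now - (cookie >> COOKIEBITS)) & ((uint32_t)-1 >> COOKIEBITS);
	if (diff >= MAX_SYNCOOKIE_AGE)
		return (uint32_t)-1;
	return (cookie - fake_hash(count_now - diff, 1)) & COOKIEMASK;
}

int main(void)
{
	uint32_t sseq = 0x12345678, data = 2 /* msstab index */, count = 1000;
	uint32_t cookie = encode(sseq, data, count);

	/* One minute later the cookie still decodes to the same index: 2 */
	printf("decoded: %u\n", decode(cookie, sseq, count + 1));
	return 0;
}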
// SPDX-License-Identifier: GPL-2.0-only /* This is a module which is used to mark packets for tracing. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_log.h> MODULE_DESCRIPTION("Xtables: packet flow tracing"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_TRACE"); MODULE_ALIAS("ip6t_TRACE"); static int trace_tg_check(const struct xt_tgchk_param *par) { return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG); } static void trace_tg_destroy(const struct xt_tgdtor_param *par) { nf_logger_put(par->family, NF_LOG_TYPE_LOG); } static unsigned int trace_tg(struct sk_buff *skb, const struct xt_action_param *par) { skb->nf_trace = 1; return XT_CONTINUE; } static struct xt_target trace_tg_reg[] __read_mostly = { { .name = "TRACE", .revision = 0, .family = NFPROTO_IPV4, .table = "raw", .target = trace_tg, .checkentry = trace_tg_check, .destroy = trace_tg_destroy, .me = THIS_MODULE, }, #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) { .name = "TRACE", .revision = 0, .family = NFPROTO_IPV6, .table = "raw", .target = trace_tg, .checkentry = trace_tg_check, .destroy = trace_tg_destroy, .me = THIS_MODULE, }, #endif }; static int __init trace_tg_init(void) { return xt_register_targets(trace_tg_reg, ARRAY_SIZE(trace_tg_reg)); } static void __exit trace_tg_exit(void) { xt_unregister_targets(trace_tg_reg, ARRAY_SIZE(trace_tg_reg)); } module_init(trace_tg_init); module_exit(trace_tg_exit); MODULE_SOFTDEP("pre: nf_log_syslog");
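/*
 * Editor's note: a usage sketch, not part of the module above. Since the
 * target registers itself for the "raw" table only, a typical rule (standard
 * iptables userspace assumed) looks like:
 *
 *   iptables -t raw -A PREROUTING -p tcp --dport 80 -j TRACE
 *
 * After that, the logging backend pulled in by the MODULE_SOFTDEP above
 * (nf_log_syslog) emits one log line per table/chain/rule the marked packet
 * traverses, because trace_tg() sets skb->nf_trace.
 */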
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2019 Oracle. All Rights Reserved. * Author: Darrick J. Wong <darrick.wong@oracle.com> */ #ifndef __XFS_PWORK_H__ #define __XFS_PWORK_H__ struct xfs_pwork; struct xfs_mount; typedef int (*xfs_pwork_work_fn)(struct xfs_mount *mp, struct xfs_pwork *pwork); /* * Parallel work coordination structure. */ struct xfs_pwork_ctl { struct workqueue_struct *wq; struct xfs_mount *mp; xfs_pwork_work_fn work_fn; struct wait_queue_head poll_wait; atomic_t nr_work; int error; }; /* * Embed this parallel work control item inside your own work structure, * then queue work with it. */ struct xfs_pwork { struct work_struct work; struct xfs_pwork_ctl *pctl; }; #define XFS_PWORK_SINGLE_THREADED { .pctl = NULL } /* Have we been told to abort? */ static inline bool xfs_pwork_ctl_want_abort( struct xfs_pwork_ctl *pctl) { return pctl && pctl->error; } /* Have we been told to abort? */ static inline bool xfs_pwork_want_abort( struct xfs_pwork *pwork) { return xfs_pwork_ctl_want_abort(pwork->pctl); } int xfs_pwork_init(struct xfs_mount *mp, struct xfs_pwork_ctl *pctl, xfs_pwork_work_fn work_fn, const char *tag); void xfs_pwork_queue(struct xfs_pwork_ctl *pctl, struct xfs_pwork *pwork); int xfs_pwork_destroy(struct xfs_pwork_ctl *pctl); void xfs_pwork_poll(struct xfs_pwork_ctl *pctl); #endif /* __XFS_PWORK_H__ */
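/*
 * Editor's note: a usage sketch against the API declared above, not taken
 * from XFS itself. The struct, function and tag names ("my_chunk",
 * "my_work_fn", "mywork") are hypothetical; the pattern follows the header's
 * own instruction to embed struct xfs_pwork in a caller-defined work item
 * and recover it with container_of().
 */
struct my_chunk {
	struct xfs_pwork	pwork;	/* must be embedded for container_of */
	unsigned int		id;
};

static int my_work_fn(struct xfs_mount *mp, struct xfs_pwork *pwork)
{
	struct my_chunk *chunk = container_of(pwork, struct my_chunk, pwork);

	if (xfs_pwork_want_abort(pwork))
		return 0;	/* another item already recorded an error */
	/* ... process chunk->id against mp ... */
	return 0;
}

static int my_run_all(struct xfs_mount *mp, struct my_chunk *chunks, int n)
{
	struct xfs_pwork_ctl pctl;
	int i, error;

	error = xfs_pwork_init(mp, &pctl, my_work_fn, "mywork");
	if (error)
		return error;
	for (i = 0; i < n; i++)
		xfs_pwork_queue(&pctl, &chunks[i].pwork);
	/* waits for all queued items and returns the first recorded error */
	return xfs_pwork_destroy(&pctl);
}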
// SPDX-License-Identifier: GPL-2.0-or-later /* cx231xx-core.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver */ #include "cx231xx.h" #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include "cx231xx-reg.h" /* #define ENABLE_DEBUG_ISOC_FRAMES */ static unsigned int core_debug; module_param(core_debug, int, 0644); MODULE_PARM_DESC(core_debug, "enable debug messages [core]"); #define cx231xx_coredbg(fmt, arg...) do {\ if (core_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) static unsigned int reg_debug; module_param(reg_debug, int, 0644); MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]"); static int alt = CX231XX_PINOUT; module_param(alt, int, 0644); MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint"); #define cx231xx_isocdbg(fmt, arg...) do {\ if (core_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) /***************************************************************** * Device control list functions * ******************************************************************/ LIST_HEAD(cx231xx_devlist); static DEFINE_MUTEX(cx231xx_devlist_mutex); /* * cx231xx_release_resources() * unregisters the v4l2, i2c and usb devices * called when the device gets disconnected or at module unload */ void cx231xx_remove_from_devlist(struct cx231xx *dev) { if (dev == NULL) return; if (dev->udev == NULL) return; if (atomic_read(&dev->devlist_count) > 0) { mutex_lock(&cx231xx_devlist_mutex); list_del(&dev->devlist); atomic_dec(&dev->devlist_count); mutex_unlock(&cx231xx_devlist_mutex); } } void cx231xx_add_into_devlist(struct cx231xx *dev) { mutex_lock(&cx231xx_devlist_mutex); list_add_tail(&dev->devlist, &cx231xx_devlist); atomic_inc(&dev->devlist_count); mutex_unlock(&cx231xx_devlist_mutex); } static LIST_HEAD(cx231xx_extension_devlist); int cx231xx_register_extension(struct cx231xx_ops *ops) { struct cx231xx *dev = NULL; mutex_lock(&cx231xx_devlist_mutex); list_add_tail(&ops->next, &cx231xx_extension_devlist); list_for_each_entry(dev, &cx231xx_devlist, devlist) { ops->init(dev); dev_info(dev->dev, "%s initialized\n", ops->name); } mutex_unlock(&cx231xx_devlist_mutex); return 0; } EXPORT_SYMBOL(cx231xx_register_extension); void cx231xx_unregister_extension(struct cx231xx_ops *ops) { struct cx231xx *dev = NULL; mutex_lock(&cx231xx_devlist_mutex); list_for_each_entry(dev, &cx231xx_devlist, devlist) { ops->fini(dev); dev_info(dev->dev, "%s removed\n", ops->name); } list_del(&ops->next); mutex_unlock(&cx231xx_devlist_mutex); } EXPORT_SYMBOL(cx231xx_unregister_extension); void
cx231xx_init_extension(struct cx231xx *dev) { struct cx231xx_ops *ops = NULL; mutex_lock(&cx231xx_devlist_mutex); list_for_each_entry(ops, &cx231xx_extension_devlist, next) { if (ops->init) ops->init(dev); } mutex_unlock(&cx231xx_devlist_mutex); } void cx231xx_close_extension(struct cx231xx *dev) { struct cx231xx_ops *ops = NULL; mutex_lock(&cx231xx_devlist_mutex); list_for_each_entry(ops, &cx231xx_extension_devlist, next) { if (ops->fini) ops->fini(dev); } mutex_unlock(&cx231xx_devlist_mutex); } /**************************************************************** * U S B related functions * *****************************************************************/ int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus, struct cx231xx_i2c_xfer_data *req_data) { int status = 0; struct cx231xx *dev = i2c_bus->dev; struct VENDOR_REQUEST_IN ven_req; u8 saddr_len = 0; u8 _i2c_period = 0; u8 _i2c_nostop = 0; u8 _i2c_reserve = 0; if (dev->state & DEV_DISCONNECTED) return -ENODEV; /* Get the I2C period, nostop and reserve parameters */ _i2c_period = i2c_bus->i2c_period; _i2c_nostop = i2c_bus->i2c_nostop; _i2c_reserve = i2c_bus->i2c_reserve; saddr_len = req_data->saddr_len; /* Set wValue */ ven_req.wValue = (req_data->dev_addr << 9 | _i2c_period << 4 | saddr_len << 2 | _i2c_nostop << 1 | I2C_SYNC | _i2c_reserve << 6); /* set channel number */ if (req_data->direction & I2C_M_RD) { /* channel number; for reads, the spec requires channel_num + 4 */ ven_req.bRequest = i2c_bus->nr + 4; } else ven_req.bRequest = i2c_bus->nr; /* channel number, */ /* set index value */ switch (saddr_len) { case 0: ven_req.wIndex = 0; /* need check */ break; case 1: ven_req.wIndex = (req_data->saddr_dat & 0xff); break; case 2: ven_req.wIndex = req_data->saddr_dat; break; } /* set wLength value */ ven_req.wLength = req_data->buf_size; /* set bData value */ ven_req.bData = 0; /* set the direction */ if (req_data->direction) { ven_req.direction = USB_DIR_IN; memset(req_data->p_buffer, 0x00, ven_req.wLength); } else ven_req.direction = USB_DIR_OUT; /* set the buffer for read / write */ ven_req.pBuff = req_data->p_buffer; /* call common vendor command request */ status = cx231xx_send_vendor_cmd(dev, &ven_req); if (status < 0 && !dev->i2c_scan_running) { dev_err(dev->dev, "%s: failed with status -%d\n", __func__, status); } return status; } EXPORT_SYMBOL_GPL(cx231xx_send_usb_command); /* * Sends/receives URB control messages, making sure to use the kmalloc()ed * buffer dev->urb_buf for all operations rather than stack buffers, which * aren't safe to use with USB due to DMA restrictions. * Also implements the debug code for control URBs. */ static int __usb_control_msg(struct cx231xx *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { int rc, i; if (reg_debug) { printk(KERN_DEBUG "%s: (pipe 0x%08x): %s: %02x %02x %02x %02x %02x %02x %02x %02x ", dev->name, pipe, (requesttype & USB_DIR_IN) ? "IN" : "OUT", requesttype, request, value & 0xff, value >> 8, index & 0xff, index >> 8, size & 0xff, size >> 8); if (!(requesttype & USB_DIR_IN)) { printk(KERN_CONT ">>>"); for (i = 0; i < size; i++) printk(KERN_CONT " %02x", ((unsigned char *)data)[i]); } } /* Do the real call to usb_control_msg */ mutex_lock(&dev->ctrl_urb_lock); if (!(requesttype & USB_DIR_IN) && size) memcpy(dev->urb_buf, data, size); rc = usb_control_msg(dev->udev, pipe, request, requesttype, value, index, dev->urb_buf, size, timeout); if ((requesttype & USB_DIR_IN) && size) memcpy(data, dev->urb_buf, size); mutex_unlock(&dev->ctrl_urb_lock); if (reg_debug) { if (unlikely(rc < 0)) { printk(KERN_CONT "FAILED!\n"); return rc; } if ((requesttype & USB_DIR_IN)) { printk(KERN_CONT "<<<"); for (i = 0; i < size; i++) printk(KERN_CONT " %02x", ((unsigned char *)data)[i]); } printk(KERN_CONT "\n"); } return rc; } /* * cx231xx_read_ctrl_reg() * reads data from the usb device specifying bRequest and wValue */ int cx231xx_read_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len) { u8 val = 0; int ret; int pipe = usb_rcvctrlpipe(dev->udev, 0); if (dev->state & DEV_DISCONNECTED) return -ENODEV; if (len > URB_MAX_CTRL_SIZE) return -EINVAL; switch (len) { case 1: val = ENABLE_ONE_BYTE; break; case 2: val = ENABLE_TWE_BYTE; break; case 3: val = ENABLE_THREE_BYTE; break; case 4: val = ENABLE_FOUR_BYTE; break; default: val = 0xFF; /* invalid option */ } if (val == 0xFF) return -EINVAL; ret = __usb_control_msg(dev, pipe, req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, reg, buf, len, HZ); return ret; } int cx231xx_send_vendor_cmd(struct cx231xx *dev, struct VENDOR_REQUEST_IN *ven_req) { int ret; int pipe = 0; int unsend_size = 0; u8 *pdata; if (dev->state & DEV_DISCONNECTED) return -ENODEV; if ((ven_req->wLength > URB_MAX_CTRL_SIZE)) return -EINVAL; if (ven_req->direction) pipe = usb_rcvctrlpipe(dev->udev, 0); else pipe = usb_sndctrlpipe(dev->udev, 0); /* * If the cx23102 reads more than 4 bytes over the i2c bus, the * transfer must be chopped into 4-byte requests */ if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) || (ven_req->bRequest == 0x5) || (ven_req->bRequest == 0x6) || /* Internal Master 3 Bus can send * and receive only 4 bytes per transfer */ (ven_req->bRequest == 0x2))) { unsend_size = 0; pdata = ven_req->pBuff; unsend_size = ven_req->wLength; /* the first chunk */ ven_req->wValue = ven_req->wValue & 0xFFFB; ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x2; ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, pdata, 0x0004, HZ); unsend_size = unsend_size - 4; /* the middle chunks */ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x42; while (unsend_size - 4 > 0) { pdata = pdata + 4; ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, pdata, 0x0004, HZ); unsend_size = unsend_size - 4; } /* the last chunk */ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x40; pdata = pdata + 4; ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, pdata, unsend_size, HZ); } else { ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, ven_req->pBuff, ven_req->wLength, HZ); } return ret; } /* * cx231xx_write_ctrl_reg() * sends data to the usb device, specifying bRequest */
int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len) { u8 val = 0; int ret; int pipe = usb_sndctrlpipe(dev->udev, 0); if (dev->state & DEV_DISCONNECTED) return -ENODEV; if ((len < 1) || (len > URB_MAX_CTRL_SIZE)) return -EINVAL; switch (len) { case 1: val = ENABLE_ONE_BYTE; break; case 2: val = ENABLE_TWE_BYTE; break; case 3: val = ENABLE_THREE_BYTE; break; case 4: val = ENABLE_FOUR_BYTE; break; default: val = 0xFF; /* invalid option */ } if (val == 0xFF) return -EINVAL; if (reg_debug) { int byte; cx231xx_isocdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>", pipe, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, req, 0, val, reg & 0xff, reg >> 8, len & 0xff, len >> 8); for (byte = 0; byte < len; byte++) cx231xx_isocdbg(" %02x", (unsigned char)buf[byte]); cx231xx_isocdbg("\n"); } ret = __usb_control_msg(dev, pipe, req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, reg, buf, len, HZ); return ret; } /**************************************************************** * USB Alternate Setting functions * *****************************************************************/ int cx231xx_set_video_alternate(struct cx231xx *dev) { int errCode, prev_alt = dev->video_mode.alt; unsigned int min_pkt_size = dev->width * 2 + 4; u32 usb_interface_index = 0; /* When image size is bigger than a certain value, the frame size should be increased, otherwise, only green screen will be received. */ if (dev->width * 2 * dev->height > 720 * 240 * 2) min_pkt_size *= 2; if (dev->width > 360) { /* resolutions: 720,704,640 */ dev->video_mode.alt = 3; } else if (dev->width > 180) { /* resolutions: 360,352,320,240 */ dev->video_mode.alt = 2; } else if (dev->width > 0) { /* resolutions: 180,176,160,128,88 */ dev->video_mode.alt = 1; } else { /* Change to alt0 BULK to release USB bandwidth */ dev->video_mode.alt = 0; } if (dev->USE_ISO == 0) dev->video_mode.alt = 0; cx231xx_coredbg("dev->video_mode.alt= %d\n", dev->video_mode.alt); /* Get the correct video interface Index */ usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. video_index + 1; if (dev->video_mode.alt != prev_alt) { cx231xx_coredbg("minimum isoc packet size: %u (alt=%d)\n", min_pkt_size, dev->video_mode.alt); if (dev->video_mode.alt_max_pkt_size != NULL) dev->video_mode.max_pkt_size = dev->video_mode.alt_max_pkt_size[dev->video_mode.alt]; cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n", dev->video_mode.alt, dev->video_mode.max_pkt_size); errCode = usb_set_interface(dev->udev, usb_interface_index, dev->video_mode.alt); if (errCode < 0) { dev_err(dev->dev, "cannot change alt number to %d (error=%i)\n", dev->video_mode.alt, errCode); return errCode; } } return 0; } int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt) { int status = 0; u32 usb_interface_index = 0; u32 max_pkt_size = 0; switch (index) { case INDEX_TS1: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. ts1_index + 1; dev->ts1_mode.alt = alt; if (dev->ts1_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->ts1_mode.max_pkt_size = dev->ts1_mode.alt_max_pkt_size[dev->ts1_mode.alt]; break; case INDEX_TS2: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. ts2_index + 1; break; case INDEX_AUDIO: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. 
audio_index + 1; dev->adev.alt = alt; if (dev->adev.alt_max_pkt_size != NULL) max_pkt_size = dev->adev.max_pkt_size = dev->adev.alt_max_pkt_size[dev->adev.alt]; break; case INDEX_VIDEO: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. video_index + 1; dev->video_mode.alt = alt; if (dev->video_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->video_mode.max_pkt_size = dev->video_mode.alt_max_pkt_size[dev->video_mode. alt]; break; case INDEX_VANC: if (dev->board.no_alt_vanc) return 0; usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. vanc_index + 1; dev->vbi_mode.alt = alt; if (dev->vbi_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->vbi_mode.max_pkt_size = dev->vbi_mode.alt_max_pkt_size[dev->vbi_mode.alt]; break; case INDEX_HANC: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. hanc_index + 1; dev->sliced_cc_mode.alt = alt; if (dev->sliced_cc_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->sliced_cc_mode.max_pkt_size = dev->sliced_cc_mode.alt_max_pkt_size[dev-> sliced_cc_mode. alt]; break; default: break; } if (alt > 0 && max_pkt_size == 0) { dev_err(dev->dev, "can't change interface %d alt no. to %d: Max. Pkt size = 0\n", usb_interface_index, alt); /*To workaround error number=-71 on EP0 for videograbber, need add following codes.*/ if (dev->board.no_alt_vanc) return -1; } cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u,Interface = %d\n", alt, max_pkt_size, usb_interface_index); if (usb_interface_index > 0) { status = usb_set_interface(dev->udev, usb_interface_index, alt); if (status < 0) { dev_err(dev->dev, "can't change interface %d alt no. to %d (err=%i)\n", usb_interface_index, alt, status); return status; } } return status; } EXPORT_SYMBOL_GPL(cx231xx_set_alt_setting); int cx231xx_gpio_set(struct cx231xx *dev, struct cx231xx_reg_seq *gpio) { int rc = 0; if (!gpio) return rc; /* Send GPIO reset sequences specified at board entry */ while (gpio->sleep >= 0) { rc = cx231xx_set_gpio_value(dev, gpio->bit, gpio->val); if (rc < 0) return rc; if (gpio->sleep > 0) msleep(gpio->sleep); gpio++; } return rc; } int cx231xx_demod_reset(struct cx231xx *dev) { u8 status = 0; u8 value[4] = { 0, 0, 0, 0 }; status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], value[1], value[2], value[3]); cx231xx_coredbg("Enter cx231xx_demod_reset()\n"); value[1] = (u8) 0x3; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(10); value[1] = (u8) 0x0; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(10); value[1] = (u8) 0x3; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(10); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], value[1], value[2], value[3]); return status; } EXPORT_SYMBOL_GPL(cx231xx_demod_reset); int is_fw_load(struct cx231xx *dev) { return cx231xx_check_fw(dev); } EXPORT_SYMBOL_GPL(is_fw_load); int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) { int errCode = 0; if (dev->mode == set_mode) return 0; if (set_mode == CX231XX_SUSPEND) { /* Set the chip in power saving mode */ dev->mode = set_mode; } /* Resource is locked */ if (dev->mode != CX231XX_SUSPEND) return -EINVAL; dev->mode = set_mode; if (dev->mode == CX231XX_DIGITAL_MODE)/* Set Digital power mode */ { /* set AGC mode to 
Digital */ switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); break; case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: errCode = cx231xx_set_power_mode(dev, POLARIS_AVMODE_DIGITAL); break; default: break; } } else/* Set Analog Power mode */ { /* set AGC mode to Analog */ switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); break; default: break; } } if (errCode < 0) { dev_err(dev->dev, "Failed to set devmode to %s: error: %i", dev->mode == CX231XX_DIGITAL_MODE ? "digital" : "analog", errCode); return errCode; } return 0; } EXPORT_SYMBOL_GPL(cx231xx_set_mode); int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size) { int errCode = 0; int actlen = -1; int ret = -ENOMEM; u32 *buffer; buffer = kmemdup(firmware, EP5_BUF_SIZE, GFP_KERNEL); if (buffer == NULL) return -ENOMEM; ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5), buffer, EP5_BUF_SIZE, &actlen, EP5_TIMEOUT_MS); if (ret) dev_err(dev->dev, "bulk message failed: %d (%d/%d)", ret, size, actlen); else { errCode = actlen != size ? 
-1 : 0; } kfree(buffer); return errCode; } /***************************************************************** * URB Streaming functions * ******************************************************************/ /* * IRQ callback, called by URB callback */ static void cx231xx_isoc_irq_callback(struct urb *urb) { struct cx231xx_dmaqueue *dma_q = urb->context; struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq); struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode); unsigned long flags; int i; switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ cx231xx_isocdbg("urb completion error %d.\n", urb->status); break; } /* Copy data from URB */ spin_lock_irqsave(&dev->video_mode.slock, flags); dev->video_mode.isoc_ctl.isoc_copy(dev, urb); spin_unlock_irqrestore(&dev->video_mode.slock, flags); /* Reset urb buffers */ for (i = 0; i < urb->number_of_packets; i++) { urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) { cx231xx_isocdbg("urb resubmit failed (error=%i)\n", urb->status); } } /***************************************************************** * URB Streaming functions * ******************************************************************/ /* * IRQ callback, called by URB callback */ static void cx231xx_bulk_irq_callback(struct urb *urb) { struct cx231xx_dmaqueue *dma_q = urb->context; struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq); struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode); unsigned long flags; switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; case -EPIPE: /* stall */ cx231xx_isocdbg("urb completion error - device is stalled.\n"); return; default: /* error */ cx231xx_isocdbg("urb completion error %d.\n", urb->status); break; } /* Copy data from URB */ spin_lock_irqsave(&dev->video_mode.slock, flags); dev->video_mode.bulk_ctl.bulk_copy(dev, urb); spin_unlock_irqrestore(&dev->video_mode.slock, flags); /* Reset urb buffers */ urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) { cx231xx_isocdbg("urb resubmit failed (error=%i)\n", urb->status); } } /* * Stop and Deallocate URBs */ void cx231xx_uninit_isoc(struct cx231xx *dev) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; struct urb *urb; int i; bool broken_pipe = false; cx231xx_isocdbg("cx231xx: called cx231xx_uninit_isoc\n"); dev->video_mode.isoc_ctl.nfields = -1; for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { urb = dev->video_mode.isoc_ctl.urb[i]; if (urb) { if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); if (dev->video_mode.isoc_ctl.transfer_buffer[i]) { usb_free_coherent(dev->udev, urb->transfer_buffer_length, dev->video_mode.isoc_ctl. 
transfer_buffer[i], urb->transfer_dma); } if (urb->status == -EPIPE) { broken_pipe = true; } usb_free_urb(urb); dev->video_mode.isoc_ctl.urb[i] = NULL; } dev->video_mode.isoc_ctl.transfer_buffer[i] = NULL; } if (broken_pipe) { cx231xx_isocdbg("Reset endpoint to recover broken pipe."); usb_reset_endpoint(dev->udev, dev->video_mode.end_point_addr); } kfree(dev->video_mode.isoc_ctl.urb); kfree(dev->video_mode.isoc_ctl.transfer_buffer); kfree(dma_q->p_left_data); dev->video_mode.isoc_ctl.urb = NULL; dev->video_mode.isoc_ctl.transfer_buffer = NULL; dev->video_mode.isoc_ctl.num_bufs = 0; dma_q->p_left_data = NULL; if (dev->mode_tv == 0) cx231xx_capture_start(dev, 0, Raw_Video); else cx231xx_capture_start(dev, 0, TS1_serial_mode); } EXPORT_SYMBOL_GPL(cx231xx_uninit_isoc); /* * Stop and Deallocate URBs */ void cx231xx_uninit_bulk(struct cx231xx *dev) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; struct urb *urb; int i; bool broken_pipe = false; cx231xx_isocdbg("cx231xx: called cx231xx_uninit_bulk\n"); dev->video_mode.bulk_ctl.nfields = -1; for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) { urb = dev->video_mode.bulk_ctl.urb[i]; if (urb) { if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); if (dev->video_mode.bulk_ctl.transfer_buffer[i]) { usb_free_coherent(dev->udev, urb->transfer_buffer_length, dev->video_mode.bulk_ctl. transfer_buffer[i], urb->transfer_dma); } if (urb->status == -EPIPE) { broken_pipe = true; } usb_free_urb(urb); dev->video_mode.bulk_ctl.urb[i] = NULL; } dev->video_mode.bulk_ctl.transfer_buffer[i] = NULL; } if (broken_pipe) { cx231xx_isocdbg("Reset endpoint to recover broken pipe."); usb_reset_endpoint(dev->udev, dev->video_mode.end_point_addr); } kfree(dev->video_mode.bulk_ctl.urb); kfree(dev->video_mode.bulk_ctl.transfer_buffer); kfree(dma_q->p_left_data); dev->video_mode.bulk_ctl.urb = NULL; dev->video_mode.bulk_ctl.transfer_buffer = NULL; dev->video_mode.bulk_ctl.num_bufs = 0; dma_q->p_left_data = NULL; if (dev->mode_tv == 0) cx231xx_capture_start(dev, 0, Raw_Video); else cx231xx_capture_start(dev, 0, TS1_serial_mode); } EXPORT_SYMBOL_GPL(cx231xx_uninit_bulk); /* * Allocate URBs and start IRQ */ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*isoc_copy) (struct cx231xx *dev, struct urb *urb)) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; int i; int sb_size, pipe; struct urb *urb; int j, k; int rc; /* De-allocates all pending stuff */ cx231xx_uninit_isoc(dev); dma_q->p_left_data = kzalloc(EP5_BUF_SIZE, GFP_KERNEL); if (dma_q->p_left_data == NULL) return -ENOMEM; dev->video_mode.isoc_ctl.isoc_copy = isoc_copy; dev->video_mode.isoc_ctl.num_bufs = num_bufs; dma_q->pos = 0; dma_q->is_partial_line = 0; dma_q->last_sav = 0; dma_q->current_field = -1; dma_q->field1_done = 0; dma_q->lines_per_field = dev->height / 2; dma_q->bytes_left_in_line = dev->width << 1; dma_q->lines_completed = 0; dma_q->mpeg_buffer_done = 0; dma_q->left_data_count = 0; dma_q->mpeg_buffer_completed = 0; dma_q->add_ps_package_head = CX231XX_NEED_ADD_PS_PACKAGE_HEAD; dma_q->ps_head[0] = 0x00; dma_q->ps_head[1] = 0x00; dma_q->ps_head[2] = 0x01; dma_q->ps_head[3] = 0xBA; for (i = 0; i < 8; i++) dma_q->partial_buf[i] = 0; dev->video_mode.isoc_ctl.urb = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL); if (!dev->video_mode.isoc_ctl.urb) { dev_err(dev->dev, "cannot alloc memory for usb buffers\n"); kfree(dma_q->p_left_data); return -ENOMEM; } dev->video_mode.isoc_ctl.transfer_buffer = kcalloc(num_bufs, sizeof(void *), 
GFP_KERNEL); if (!dev->video_mode.isoc_ctl.transfer_buffer) { dev_err(dev->dev, "cannot allocate memory for usbtransfer\n"); kfree(dev->video_mode.isoc_ctl.urb); kfree(dma_q->p_left_data); return -ENOMEM; } dev->video_mode.isoc_ctl.max_pkt_size = max_pkt_size; dev->video_mode.isoc_ctl.buf = NULL; sb_size = max_packets * dev->video_mode.isoc_ctl.max_pkt_size; if (dev->mode_tv == 1) dev->video_mode.end_point_addr = 0x81; else dev->video_mode.end_point_addr = 0x84; /* allocate urbs and transfer buffers */ for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { urb = usb_alloc_urb(max_packets, GFP_KERNEL); if (!urb) { cx231xx_uninit_isoc(dev); return -ENOMEM; } dev->video_mode.isoc_ctl.urb[i] = urb; dev->video_mode.isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL, &urb->transfer_dma); if (!dev->video_mode.isoc_ctl.transfer_buffer[i]) { dev_err(dev->dev, "unable to allocate %i bytes for transfer buffer %i\n", sb_size, i); cx231xx_uninit_isoc(dev); return -ENOMEM; } memset(dev->video_mode.isoc_ctl.transfer_buffer[i], 0, sb_size); pipe = usb_rcvisocpipe(dev->udev, dev->video_mode.end_point_addr); usb_fill_int_urb(urb, dev->udev, pipe, dev->video_mode.isoc_ctl.transfer_buffer[i], sb_size, cx231xx_isoc_irq_callback, dma_q, 1); urb->number_of_packets = max_packets; urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; k = 0; for (j = 0; j < max_packets; j++) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = dev->video_mode.isoc_ctl.max_pkt_size; k += dev->video_mode.isoc_ctl.max_pkt_size; } } init_waitqueue_head(&dma_q->wq); /* submit urbs and enables IRQ */ for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { rc = usb_submit_urb(dev->video_mode.isoc_ctl.urb[i], GFP_ATOMIC); if (rc) { dev_err(dev->dev, "submit of urb %i failed (error=%i)\n", i, rc); cx231xx_uninit_isoc(dev); return rc; } } if (dev->mode_tv == 0) cx231xx_capture_start(dev, 1, Raw_Video); else cx231xx_capture_start(dev, 1, TS1_serial_mode); return 0; } EXPORT_SYMBOL_GPL(cx231xx_init_isoc); /* * Allocate URBs and start IRQ */ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*bulk_copy) (struct cx231xx *dev, struct urb *urb)) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; int i; int sb_size, pipe; struct urb *urb; int rc; dev->video_input = dev->video_input > 2 ? 
2 : dev->video_input; cx231xx_coredbg("Setting Video mux to %d\n", dev->video_input); video_mux(dev, dev->video_input); /* De-allocates all pending stuff */ cx231xx_uninit_bulk(dev); dev->video_mode.bulk_ctl.bulk_copy = bulk_copy; dev->video_mode.bulk_ctl.num_bufs = num_bufs; dma_q->pos = 0; dma_q->is_partial_line = 0; dma_q->last_sav = 0; dma_q->current_field = -1; dma_q->field1_done = 0; dma_q->lines_per_field = dev->height / 2; dma_q->bytes_left_in_line = dev->width << 1; dma_q->lines_completed = 0; dma_q->mpeg_buffer_done = 0; dma_q->left_data_count = 0; dma_q->mpeg_buffer_completed = 0; dma_q->ps_head[0] = 0x00; dma_q->ps_head[1] = 0x00; dma_q->ps_head[2] = 0x01; dma_q->ps_head[3] = 0xBA; for (i = 0; i < 8; i++) dma_q->partial_buf[i] = 0; dev->video_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL); if (!dev->video_mode.bulk_ctl.urb) { dev_err(dev->dev, "cannot alloc memory for usb buffers\n"); return -ENOMEM; } dev->video_mode.bulk_ctl.transfer_buffer = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL); if (!dev->video_mode.bulk_ctl.transfer_buffer) { dev_err(dev->dev, "cannot allocate memory for usbtransfer\n"); kfree(dev->video_mode.bulk_ctl.urb); return -ENOMEM; } dev->video_mode.bulk_ctl.max_pkt_size = max_pkt_size; dev->video_mode.bulk_ctl.buf = NULL; sb_size = max_packets * dev->video_mode.bulk_ctl.max_pkt_size; if (dev->mode_tv == 1) dev->video_mode.end_point_addr = 0x81; else dev->video_mode.end_point_addr = 0x84; /* allocate urbs and transfer buffers */ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) { urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { cx231xx_uninit_bulk(dev); return -ENOMEM; } dev->video_mode.bulk_ctl.urb[i] = urb; urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; dev->video_mode.bulk_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL, &urb->transfer_dma); if (!dev->video_mode.bulk_ctl.transfer_buffer[i]) { dev_err(dev->dev, "unable to allocate %i bytes for transfer buffer %i\n", sb_size, i); cx231xx_uninit_bulk(dev); return -ENOMEM; } memset(dev->video_mode.bulk_ctl.transfer_buffer[i], 0, sb_size); pipe = usb_rcvbulkpipe(dev->udev, dev->video_mode.end_point_addr); usb_fill_bulk_urb(urb, dev->udev, pipe, dev->video_mode.bulk_ctl.transfer_buffer[i], sb_size, cx231xx_bulk_irq_callback, dma_q); } /* clear halt */ rc = usb_clear_halt(dev->udev, dev->video_mode.bulk_ctl.urb[0]->pipe); if (rc < 0) { dev_err(dev->dev, "failed to clear USB bulk endpoint stall/halt condition (error=%i)\n", rc); cx231xx_uninit_bulk(dev); return rc; } init_waitqueue_head(&dma_q->wq); /* submit urbs and enables IRQ */ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) { rc = usb_submit_urb(dev->video_mode.bulk_ctl.urb[i], GFP_ATOMIC); if (rc) { dev_err(dev->dev, "submit of urb %i failed (error=%i)\n", i, rc); cx231xx_uninit_bulk(dev); return rc; } } if (dev->mode_tv == 0) cx231xx_capture_start(dev, 1, Raw_Video); else cx231xx_capture_start(dev, 1, TS1_serial_mode); return 0; } EXPORT_SYMBOL_GPL(cx231xx_init_bulk); void cx231xx_stop_TS1(struct cx231xx *dev) { u8 val[4] = { 0, 0, 0, 0 }; val[0] = 0x00; val[1] = 0x03; val[2] = 0x00; val[3] = 0x00; cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS_MODE_REG, val, 4); val[0] = 0x00; val[1] = 0x70; val[2] = 0x04; val[3] = 0x00; cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_CFG_REG, val, 4); } /* EXPORT_SYMBOL_GPL(cx231xx_stop_TS1); */ void cx231xx_start_TS1(struct cx231xx *dev) { u8 val[4] = { 0, 0, 0, 0 }; val[0] = 0x03; val[1] = 0x03; val[2] = 0x00; val[3] = 0x00; cx231xx_write_ctrl_reg(dev, 
VRT_SET_REGISTER, TS_MODE_REG, val, 4); val[0] = 0x04; val[1] = 0xA3; val[2] = 0x3B; val[3] = 0x00; cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_CFG_REG, val, 4); } /* EXPORT_SYMBOL_GPL(cx231xx_start_TS1); */ /***************************************************************** * Device Init/UnInit functions * ******************************************************************/ int cx231xx_dev_init(struct cx231xx *dev) { int errCode = 0; /* Initialize I2C bus */ /* External Master 1 Bus */ dev->i2c_bus[0].nr = 0; dev->i2c_bus[0].dev = dev; dev->i2c_bus[0].i2c_period = I2C_SPEED_100K; /* 100 KHz */ dev->i2c_bus[0].i2c_nostop = 0; dev->i2c_bus[0].i2c_reserve = 0; dev->i2c_bus[0].i2c_rc = -ENODEV; /* External Master 2 Bus */ dev->i2c_bus[1].nr = 1; dev->i2c_bus[1].dev = dev; dev->i2c_bus[1].i2c_period = I2C_SPEED_100K; /* 100 KHz */ dev->i2c_bus[1].i2c_nostop = 0; dev->i2c_bus[1].i2c_reserve = 0; dev->i2c_bus[1].i2c_rc = -ENODEV; /* Internal Master 3 Bus */ dev->i2c_bus[2].nr = 2; dev->i2c_bus[2].dev = dev; dev->i2c_bus[2].i2c_period = I2C_SPEED_100K; /* 100kHz */ dev->i2c_bus[2].i2c_nostop = 0; dev->i2c_bus[2].i2c_reserve = 0; dev->i2c_bus[2].i2c_rc = -ENODEV; /* register I2C buses */ errCode = cx231xx_i2c_register(&dev->i2c_bus[0]); if (errCode < 0) return errCode; errCode = cx231xx_i2c_register(&dev->i2c_bus[1]); if (errCode < 0) return errCode; errCode = cx231xx_i2c_register(&dev->i2c_bus[2]); if (errCode < 0) return errCode; errCode = cx231xx_i2c_mux_create(dev); if (errCode < 0) { dev_err(dev->dev, "%s: Failed to create I2C mux\n", __func__); return errCode; } errCode = cx231xx_i2c_mux_register(dev, 0); if (errCode < 0) return errCode; errCode = cx231xx_i2c_mux_register(dev, 1); if (errCode < 0) return errCode; /* scan the real bus segments in the order of physical port numbers */ cx231xx_do_i2c_scan(dev, I2C_0); cx231xx_do_i2c_scan(dev, I2C_1_MUX_1); cx231xx_do_i2c_scan(dev, I2C_2); cx231xx_do_i2c_scan(dev, I2C_1_MUX_3); /* init hardware */ /* Note : with out calling set power mode function, afe can not be set up correctly */ if (dev->board.external_av) { errCode = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ENXTERNAL_AV); if (errCode < 0) { dev_err(dev->dev, "%s: Failed to set Power - errCode [%d]!\n", __func__, errCode); return errCode; } } else { errCode = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV); if (errCode < 0) { dev_err(dev->dev, "%s: Failed to set Power - errCode [%d]!\n", __func__, errCode); return errCode; } } /* reset the Tuner, if it is a Xceive tuner */ if ((dev->board.tuner_type == TUNER_XC5000) || (dev->board.tuner_type == TUNER_XC2028)) cx231xx_gpio_set(dev, dev->board.tuner_gpio); /* initialize Colibri block */ errCode = cx231xx_afe_init_super_block(dev, 0x23c); if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_afe init super block - errCode [%d]!\n", __func__, errCode); return errCode; } errCode = cx231xx_afe_init_channels(dev); if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_afe init channels - errCode [%d]!\n", __func__, errCode); return errCode; } /* Set DIF in By pass mode */ errCode = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to By pass mode - errCode [%d]!\n", __func__, errCode); return errCode; } /* I2S block related functions */ errCode = cx231xx_i2s_blk_initialize(dev); if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_i2s block initialize - errCode [%d]!\n", __func__, errCode); return errCode; } /* init control pins */ errCode = cx231xx_init_ctrl_pin_status(dev); if (errCode < 
0) { dev_err(dev->dev, "%s: cx231xx_init ctrl pins - errCode [%d]!\n", __func__, errCode); return errCode; } /* set AGC mode to Analog */ switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); break; default: break; } if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_AGC mode to Analog - errCode [%d]!\n", __func__, errCode); return errCode; } /* set all alternate settings to zero initially */ cx231xx_set_alt_setting(dev, INDEX_VIDEO, 0); cx231xx_set_alt_setting(dev, INDEX_VANC, 0); cx231xx_set_alt_setting(dev, INDEX_HANC, 0); if (dev->board.has_dvb) cx231xx_set_alt_setting(dev, INDEX_TS1, 0); errCode = 0; return errCode; } EXPORT_SYMBOL_GPL(cx231xx_dev_init); void cx231xx_dev_uninit(struct cx231xx *dev) { /* Un Initialize I2C bus */ cx231xx_i2c_mux_unregister(dev); cx231xx_i2c_unregister(&dev->i2c_bus[2]); cx231xx_i2c_unregister(&dev->i2c_bus[1]); cx231xx_i2c_unregister(&dev->i2c_bus[0]); } EXPORT_SYMBOL_GPL(cx231xx_dev_uninit); /***************************************************************** * G P I O related functions * ******************************************************************/ int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val, u8 len, u8 request, u8 direction) { int status = 0; struct VENDOR_REQUEST_IN ven_req; /* Set wValue */ ven_req.wValue = (u16) (gpio_bit >> 16 & 0xffff); /* set request */ if (!request) { if (direction) ven_req.bRequest = VRT_GET_GPIO; /* 0x9 gpio */ else ven_req.bRequest = VRT_SET_GPIO; /* 0x8 gpio */ } else { if (direction) ven_req.bRequest = VRT_GET_GPIE; /* 0xb gpie */ else ven_req.bRequest = VRT_SET_GPIE; /* 0xa gpie */ } /* set index value */ ven_req.wIndex = (u16) (gpio_bit & 0xffff); /* set wLength value */ ven_req.wLength = len; /* set bData value */ ven_req.bData = 0; /* set the buffer for read / write */ ven_req.pBuff = gpio_val; /* set the direction */ if (direction) { ven_req.direction = USB_DIR_IN; memset(ven_req.pBuff, 0x00, ven_req.wLength); } else ven_req.direction = USB_DIR_OUT; /* call common vendor command request */ status = cx231xx_send_vendor_cmd(dev, &ven_req); if (status < 0) { dev_err(dev->dev, "%s: failed with status -%d\n", __func__, status); } return status; } EXPORT_SYMBOL_GPL(cx231xx_send_gpio_cmd); /***************************************************************** * C O N T R O L - Register R E A D / W R I T E functions * *****************************************************************/ int cx231xx_mode_register(struct cx231xx *dev, u16 address, u32 mode) { u8 value[4] = { 0x0, 0x0, 0x0, 0x0 }; u32 tmp = 0; int status = 0; status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, address, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((__le32 *) value)); tmp |= mode; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, address, value, 4); return status; } /***************************************************************** * I 2 C Internal C O N T R O L functions * 
*****************************************************************/ int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 *data, u8 data_len, int master) { int status = 0; struct cx231xx_i2c_xfer_data req_data; u8 value[64] = "0"; if (saddr_len == 0) saddr = 0; else if (saddr_len == 1) saddr &= 0xff; /* prepare xfer_data struct */ req_data.dev_addr = dev_addr >> 1; req_data.direction = I2C_M_RD; req_data.saddr_len = saddr_len; req_data.saddr_dat = saddr; req_data.buf_size = data_len; req_data.p_buffer = (u8 *) value; /* usb send command */ if (master == 0) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data); else if (master == 1) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1], &req_data); else if (master == 2) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2], &req_data); if (status >= 0) { /* Copy the data read back to main buffer */ if (data_len == 1) *data = value[0]; else if (data_len == 4) *data = value[0] | value[1] << 8 | value[2] << 16 | value[3] << 24; else if (data_len > 4) *data = value[saddr]; } return status; } int cx231xx_write_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 data, u8 data_len, int master) { int status = 0; u8 value[4] = { 0, 0, 0, 0 }; struct cx231xx_i2c_xfer_data req_data; value[0] = (u8) data; value[1] = (u8) (data >> 8); value[2] = (u8) (data >> 16); value[3] = (u8) (data >> 24); if (saddr_len == 0) saddr = 0; else if (saddr_len == 1) saddr &= 0xff; /* prepare xfer_data struct */ req_data.dev_addr = dev_addr >> 1; req_data.direction = 0; req_data.saddr_len = saddr_len; req_data.saddr_dat = saddr; req_data.buf_size = data_len; req_data.p_buffer = value; /* usb send command */ if (master == 0) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data); else if (master == 1) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1], &req_data); else if (master == 2) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2], &req_data); return status; } int cx231xx_read_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 *data, u8 data_len) { int status = 0; struct cx231xx_i2c_xfer_data req_data; u8 value[4] = { 0, 0, 0, 0 }; if (saddr_len == 0) saddr = 0; else if (saddr_len == 1) saddr &= 0xff; /* prepare xfer_data struct */ req_data.dev_addr = dev_addr >> 1; req_data.direction = I2C_M_RD; req_data.saddr_len = saddr_len; req_data.saddr_dat = saddr; req_data.buf_size = data_len; req_data.p_buffer = (u8 *) value; /* usb send command */ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data); if (status >= 0) { /* Copy the data read back to main buffer */ if (data_len == 1) *data = value[0]; else *data = value[0] | value[1] << 8 | value[2] << 16 | value[3] << 24; } return status; } int cx231xx_write_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 data, u8 data_len) { int status = 0; u8 value[4] = { 0, 0, 0, 0 }; struct cx231xx_i2c_xfer_data req_data; value[0] = (u8) data; value[1] = (u8) (data >> 8); value[2] = (u8) (data >> 16); value[3] = (u8) (data >> 24); if (saddr_len == 0) saddr = 0; else if (saddr_len == 1) saddr &= 0xff; /* prepare xfer_data struct */ req_data.dev_addr = dev_addr >> 1; req_data.direction = 0; req_data.saddr_len = saddr_len; req_data.saddr_dat = saddr; req_data.buf_size = data_len; req_data.p_buffer = value; /* usb send command */ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data); return status; } int cx231xx_reg_mask_write(struct cx231xx *dev, u8 dev_addr, u8 size, u16 
			register_address, u8 bit_start, u8 bit_end, u32 value)
{
	int status = 0;
	u32 tmp;
	u32 mask = 0;
	int i;

	if (bit_start > (size - 1) || bit_end > (size - 1))
		return -1;

	if (size == 8) {
		status = cx231xx_read_i2c_data(dev, dev_addr,
					       register_address, 2, &tmp, 1);
	} else {
		status = cx231xx_read_i2c_data(dev, dev_addr,
					       register_address, 2, &tmp, 4);
	}
	if (status < 0)
		return status;

	mask = 1 << bit_end;
	for (i = bit_end; i > bit_start && i > 0; i--)
		mask = mask + (1 << (i - 1));

	value <<= bit_start;

	if (size == 8) {
		tmp &= ~mask;
		tmp |= value;
		tmp &= 0xff;
		status = cx231xx_write_i2c_data(dev, dev_addr,
						register_address, 2, tmp, 1);
	} else {
		tmp &= ~mask;
		tmp |= value;
		status = cx231xx_write_i2c_data(dev, dev_addr,
						register_address, 2, tmp, 4);
	}

	return status;
}

int cx231xx_read_modify_write_i2c_dword(struct cx231xx *dev, u8 dev_addr,
					u16 saddr, u32 mask, u32 value)
{
	u32 temp;
	int status = 0;

	status = cx231xx_read_i2c_data(dev, dev_addr, saddr, 2, &temp, 4);
	if (status < 0)
		return status;

	temp &= ~mask;
	temp |= value;

	status = cx231xx_write_i2c_data(dev, dev_addr, saddr, 2, temp, 4);
	return status;
}

u32 cx231xx_set_field(u32 field_mask, u32 data)
{
	u32 temp;

	for (temp = field_mask; (temp & 1) == 0; temp >>= 1)
		data <<= 1;

	return data;
}

// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/export.h>

/*
 * fs on-disk file type to dirent file type conversion
 */
static const unsigned char fs_dtype_by_ftype[FT_MAX] = {
	[FT_UNKNOWN]	= DT_UNKNOWN,
	[FT_REG_FILE]	= DT_REG,
	[FT_DIR]	= DT_DIR,
	[FT_CHRDEV]	= DT_CHR,
	[FT_BLKDEV]	= DT_BLK,
	[FT_FIFO]	= DT_FIFO,
	[FT_SOCK]	= DT_SOCK,
	[FT_SYMLINK]	= DT_LNK
};

/**
 * fs_ftype_to_dtype() - fs on-disk file type to dirent type.
 * @filetype: The on-disk file type to convert.
 *
 * This function converts the on-disk file type value (FT_*) to the directory
 * entry type (DT_*).
 *
 * Context: Any context.
 * Return:
 * * DT_UNKNOWN	- Unknown type
 * * DT_FIFO	- FIFO
 * * DT_CHR	- Character device
 * * DT_DIR	- Directory
 * * DT_BLK	- Block device
 * * DT_REG	- Regular file
 * * DT_LNK	- Symbolic link
 * * DT_SOCK	- Local-domain socket
 */
unsigned char fs_ftype_to_dtype(unsigned int filetype)
{
	if (filetype >= FT_MAX)
		return DT_UNKNOWN;

	return fs_dtype_by_ftype[filetype];
}
EXPORT_SYMBOL_GPL(fs_ftype_to_dtype);

/*
 * dirent file type to fs on-disk file type conversion
 * Values not initialized explicitly are FT_UNKNOWN (0).
 */
static const unsigned char fs_ftype_by_dtype[DT_MAX] = {
	[DT_REG]	= FT_REG_FILE,
	[DT_DIR]	= FT_DIR,
	[DT_LNK]	= FT_SYMLINK,
	[DT_CHR]	= FT_CHRDEV,
	[DT_BLK]	= FT_BLKDEV,
	[DT_FIFO]	= FT_FIFO,
	[DT_SOCK]	= FT_SOCK,
};

/**
 * fs_umode_to_ftype() - file mode to on-disk file type.
 * @mode: The file mode to convert.
 *
 * This function converts the file mode value to the on-disk file type (FT_*).
 *
 * Context: Any context.
 * Return:
 * * FT_UNKNOWN	- Unknown type
 * * FT_REG_FILE	- Regular file
 * * FT_DIR	- Directory
 * * FT_CHRDEV	- Character device
 * * FT_BLKDEV	- Block device
 * * FT_FIFO	- FIFO
 * * FT_SOCK	- Local-domain socket
 * * FT_SYMLINK	- Symbolic link
 */
unsigned char fs_umode_to_ftype(umode_t mode)
{
	return fs_ftype_by_dtype[S_DT(mode)];
}
EXPORT_SYMBOL_GPL(fs_umode_to_ftype);

/**
 * fs_umode_to_dtype() - file mode to dirent file type.
 * @mode: The file mode to convert.
 *
 * This function converts the file mode value to the directory
 * entry type (DT_*).
 *
 * Context: Any context.
 * Return:
 * * DT_UNKNOWN	- Unknown type
 * * DT_FIFO	- FIFO
 * * DT_CHR	- Character device
 * * DT_DIR	- Directory
 * * DT_BLK	- Block device
 * * DT_REG	- Regular file
 * * DT_LNK	- Symbolic link
 * * DT_SOCK	- Local-domain socket
 */
unsigned char fs_umode_to_dtype(umode_t mode)
{
	return fs_ftype_to_dtype(fs_umode_to_ftype(mode));
}
EXPORT_SYMBOL_GPL(fs_umode_to_dtype);
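/*
 * Example (a hedged sketch, not part of fs/fs_types.c): how a filesystem's
 * readdir path typically uses the helper above when it holds an in-core
 * inode but no on-disk file type - i_mode is mapped straight to a DT_*
 * value for dir_emit(). The function name is illustrative only.
 */
static inline bool example_dir_emit_inode(struct dir_context *ctx,
					  const struct inode *inode,
					  const char *name, int namelen)
{
	/* S_DT() reduces i_mode to its S_IFMT bits; the DT_* value follows */
	return dir_emit(ctx, name, namelen, inode->i_ino,
			fs_umode_to_dtype(inode->i_mode));
}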
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vimc-common.c Virtual Media Controller Driver
 *
 * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <media/v4l2-ctrls.h>

#include "vimc-common.h"

/*
 * NOTE: non-bayer formats need to come first (necessary for enum_mbus_code
 * in the scaler)
 */
static const struct vimc_pix_map vimc_pix_map_list[] = {
	/* TODO: add all missing formats */

	/* RGB formats */
	{
		.code = {
			MEDIA_BUS_FMT_BGR888_1X24,
			MEDIA_BUS_FMT_BGR888_3X8
		},
		.pixelformat = V4L2_PIX_FMT_BGR24,
		.bpp = 3,
		.bayer = false,
	},
	{
		.code = {
			MEDIA_BUS_FMT_RGB888_1X24,
			MEDIA_BUS_FMT_RGB888_2X12_BE,
			MEDIA_BUS_FMT_RGB888_2X12_LE,
			MEDIA_BUS_FMT_RGB888_3X8,
			MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
			MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
			MEDIA_BUS_FMT_RGB888_1X32_PADHI,
			MEDIA_BUS_FMT_GBR888_1X24
		},
		.pixelformat = V4L2_PIX_FMT_RGB24,
		.bpp = 3,
		.bayer = false,
	},
	{
		.code = { MEDIA_BUS_FMT_ARGB8888_1X32 },
		.pixelformat = V4L2_PIX_FMT_ARGB32,
		.bpp = 4,
		.bayer = false,
	},

	/* Bayer formats */
	{ .code = { MEDIA_BUS_FMT_SBGGR8_1X8 }, .pixelformat = V4L2_PIX_FMT_SBGGR8, .bpp = 1, .bayer = true, },
	{ .code = { MEDIA_BUS_FMT_SGBRG8_1X8 }, .pixelformat = V4L2_PIX_FMT_SGBRG8, .bpp = 1, .bayer = true, },
	{ .code = { MEDIA_BUS_FMT_SGRBG8_1X8 }, .pixelformat = V4L2_PIX_FMT_SGRBG8, .bpp = 1, .bayer = true, },
	{ .code = { MEDIA_BUS_FMT_SRGGB8_1X8 }, .pixelformat = V4L2_PIX_FMT_SRGGB8, .bpp = 1, .bayer = true, },
	{ .code = { MEDIA_BUS_FMT_SBGGR10_1X10 }, .pixelformat = V4L2_PIX_FMT_SBGGR10, .bpp = 2, .bayer = true, },
	{ .code = { MEDIA_BUS_FMT_SGBRG10_1X10 }, .pixelformat = V4L2_PIX_FMT_SGBRG10, .bpp = 2, .bayer = true, },
	{ .code = { MEDIA_BUS_FMT_SGRBG10_1X10 }, .pixelformat = V4L2_PIX_FMT_SGRBG10, .bpp = 2, .bayer = true, },
	{ .code = { MEDIA_BUS_FMT_SRGGB10_1X10 }, .pixelformat = V4L2_PIX_FMT_SRGGB10, .bpp
= 2, .bayer = true, }, /* 10bit raw bayer a-law compressed to 8 bits */ { .code = { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8 }, .pixelformat = V4L2_PIX_FMT_SBGGR10ALAW8, .bpp = 1, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8 }, .pixelformat = V4L2_PIX_FMT_SGBRG10ALAW8, .bpp = 1, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 }, .pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8, .bpp = 1, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8 }, .pixelformat = V4L2_PIX_FMT_SRGGB10ALAW8, .bpp = 1, .bayer = true, }, /* 10bit raw bayer DPCM compressed to 8 bits */ { .code = { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 }, .pixelformat = V4L2_PIX_FMT_SBGGR10DPCM8, .bpp = 1, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 }, .pixelformat = V4L2_PIX_FMT_SGBRG10DPCM8, .bpp = 1, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 }, .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8, .bpp = 1, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 }, .pixelformat = V4L2_PIX_FMT_SRGGB10DPCM8, .bpp = 1, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SBGGR12_1X12 }, .pixelformat = V4L2_PIX_FMT_SBGGR12, .bpp = 2, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SGBRG12_1X12 }, .pixelformat = V4L2_PIX_FMT_SGBRG12, .bpp = 2, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SGRBG12_1X12 }, .pixelformat = V4L2_PIX_FMT_SGRBG12, .bpp = 2, .bayer = true, }, { .code = { MEDIA_BUS_FMT_SRGGB12_1X12 }, .pixelformat = V4L2_PIX_FMT_SRGGB12, .bpp = 2, .bayer = true, }, }; bool vimc_is_source(struct media_entity *ent) { unsigned int i; for (i = 0; i < ent->num_pads; i++) if (ent->pads[i].flags & MEDIA_PAD_FL_SINK) return false; return true; } const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i) { if (i >= ARRAY_SIZE(vimc_pix_map_list)) return NULL; return &vimc_pix_map_list[i]; } u32 vimc_mbus_code_by_index(unsigned int index) { unsigned int i, j; for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) { for (j = 0; j < ARRAY_SIZE(vimc_pix_map_list[i].code); j++) { if (!vimc_pix_map_list[i].code[j]) break; if (!index) return vimc_pix_map_list[i].code[j]; index--; } } return 0; } const struct vimc_pix_map *vimc_pix_map_by_code(u32 code) { unsigned int i, j; for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) { for (j = 0; j < ARRAY_SIZE(vimc_pix_map_list[i].code); j++) { if (vimc_pix_map_list[i].code[j] == code) return &vimc_pix_map_list[i]; } } return NULL; } const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat) { unsigned int i; for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) { if (vimc_pix_map_list[i].pixelformat == pixelformat) return &vimc_pix_map_list[i]; } return NULL; } static int vimc_get_pix_format(struct media_pad *pad, struct v4l2_pix_format *fmt) { if (is_media_entity_v4l2_subdev(pad->entity)) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(pad->entity); struct v4l2_subdev_format sd_fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .pad = pad->index, }; const struct vimc_pix_map *pix_map; int ret; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt); if (ret) return ret; v4l2_fill_pix_format(fmt, &sd_fmt.format); pix_map = vimc_pix_map_by_code(sd_fmt.format.code); fmt->pixelformat = pix_map->pixelformat; } else if (is_media_entity_v4l2_video_device(pad->entity)) { struct video_device *vdev = container_of(pad->entity, struct video_device, entity); struct vimc_ent_device *ved = video_get_drvdata(vdev); if (!ved->vdev_get_format) return -ENOIOCTLCMD; ved->vdev_get_format(ved, fmt); } else { return -EINVAL; } return 0; } int vimc_vdev_link_validate(struct 
media_link *link) { struct v4l2_pix_format source_fmt, sink_fmt; int ret; ret = vimc_get_pix_format(link->source, &source_fmt); if (ret) return ret; ret = vimc_get_pix_format(link->sink, &sink_fmt); if (ret) return ret; pr_info("vimc link validate: " "%s:src:%dx%d (0x%x, %d, %d, %d, %d) " "%s:snk:%dx%d (0x%x, %d, %d, %d, %d)\n", /* src */ link->source->entity->name, source_fmt.width, source_fmt.height, source_fmt.pixelformat, source_fmt.colorspace, source_fmt.quantization, source_fmt.xfer_func, source_fmt.ycbcr_enc, /* sink */ link->sink->entity->name, sink_fmt.width, sink_fmt.height, sink_fmt.pixelformat, sink_fmt.colorspace, sink_fmt.quantization, sink_fmt.xfer_func, sink_fmt.ycbcr_enc); /* The width, height and pixelformat must match. */ if (source_fmt.width != sink_fmt.width || source_fmt.height != sink_fmt.height || source_fmt.pixelformat != sink_fmt.pixelformat) return -EPIPE; /* * The field order must match, or the sink field order must be NONE * to support interlaced hardware connected to bridges that support * progressive formats only. */ if (source_fmt.field != sink_fmt.field && sink_fmt.field != V4L2_FIELD_NONE) return -EPIPE; /* * If colorspace is DEFAULT, then assume all the colorimetry is also * DEFAULT, return 0 to skip comparing the other colorimetry parameters */ if (source_fmt.colorspace == V4L2_COLORSPACE_DEFAULT || sink_fmt.colorspace == V4L2_COLORSPACE_DEFAULT) return 0; /* Colorspace must match. */ if (source_fmt.colorspace != sink_fmt.colorspace) return -EPIPE; /* Colorimetry must match if they are not set to DEFAULT */ if (source_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT && sink_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT && source_fmt.ycbcr_enc != sink_fmt.ycbcr_enc) return -EPIPE; if (source_fmt.quantization != V4L2_QUANTIZATION_DEFAULT && sink_fmt.quantization != V4L2_QUANTIZATION_DEFAULT && source_fmt.quantization != sink_fmt.quantization) return -EPIPE; if (source_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT && sink_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT && source_fmt.xfer_func != sink_fmt.xfer_func) return -EPIPE; return 0; } static const struct media_entity_operations vimc_ent_sd_mops = { .link_validate = v4l2_subdev_link_validate, }; int vimc_ent_sd_register(struct vimc_ent_device *ved, struct v4l2_subdev *sd, struct v4l2_device *v4l2_dev, const char *const name, u32 function, u16 num_pads, struct media_pad *pads, const struct v4l2_subdev_internal_ops *int_ops, const struct v4l2_subdev_ops *sd_ops) { int ret; /* Fill the vimc_ent_device struct */ ved->ent = &sd->entity; /* Initialize the subdev */ v4l2_subdev_init(sd, sd_ops); sd->internal_ops = int_ops; sd->entity.function = function; sd->entity.ops = &vimc_ent_sd_mops; sd->owner = THIS_MODULE; strscpy(sd->name, name, sizeof(sd->name)); v4l2_set_subdevdata(sd, ved); /* Expose this subdev to user space */ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; if (sd->ctrl_handler) sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS; /* Initialize the media entity */ ret = media_entity_pads_init(&sd->entity, num_pads, pads); if (ret) return ret; /* * Finalize the subdev initialization if it supports active states. Use * the control handler lock as the state lock if available. 
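 * (Hedged note: v4l2_subdev_init_finalize() is what allocates that active
 * state, so reusing the control handler's lock as sd->state_lock keeps
 * control changes and format changes serialized against each other.)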
 */
	if (int_ops && int_ops->init_state) {
		if (sd->ctrl_handler)
			sd->state_lock = sd->ctrl_handler->lock;

		ret = v4l2_subdev_init_finalize(sd);
		if (ret) {
			dev_err(v4l2_dev->dev,
				"%s: subdev initialization failed (err=%d)\n",
				name, ret);
			goto err_clean_m_ent;
		}
	}

	/* Register the subdev with the v4l2 and the media framework */
	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret) {
		dev_err(v4l2_dev->dev,
			"%s: subdev register failed (err=%d)\n",
			name, ret);
		goto err_clean_sd;
	}

	return 0;

err_clean_sd:
	v4l2_subdev_cleanup(sd);
err_clean_m_ent:
	media_entity_cleanup(&sd->entity);
	return ret;
}
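/*
 * Example (a hedged sketch, not part of vimc-common.c): how a subdevice's
 * .enum_mbus_code handler can be built on vimc_mbus_code_by_index() above.
 * An out-of-range index yields 0, which is reported as -EINVAL. The
 * function name is illustrative only.
 */
static inline int example_enum_mbus_code(struct v4l2_subdev *sd,
					 struct v4l2_subdev_state *state,
					 struct v4l2_subdev_mbus_code_enum *code)
{
	u32 mbus_code = vimc_mbus_code_by_index(code->index);

	if (!mbus_code)
		return -EINVAL;

	code->code = mbus_code;
	return 0;
}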
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/lib/crc-ccitt.c
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-ccitt.h>

/*
 * This mysterious table is just the CRC of each possible byte. It can be
 * computed using the standard bit-at-a-time methods. The polynomial can
 * be seen in entry 128, 0x8408. This corresponds to x^0 + x^5 + x^12.
 * Add the implicit x^16, and you have the standard CRC-CCITT.
 */
u16 const crc_ccitt_table[256] = {
	0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
	0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
	0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
	0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
	0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
	0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
	0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
	0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
	0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
	0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
	0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
	0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
	0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
	0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
	0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
	0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
	0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
	0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
	0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
	0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
	0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
	0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
	0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
	0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
	0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
	0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
	0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
	0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
	0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
	0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
	0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
	0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
EXPORT_SYMBOL(crc_ccitt_table);

/**
 * crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
 * buffer
 * @crc: previous CRC value
 * @buffer: data pointer
 * @len: number of bytes in the buffer
 */
u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
{
	while (len--)
		crc = crc_ccitt_byte(crc, *buffer++);
	return crc;
}
EXPORT_SYMBOL(crc_ccitt);

MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
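/*
 * Example (a hedged sketch, not part of lib/crc-ccitt.c): the bit-at-a-time
 * reference computation the table above was generated from. For any buffer,
 * crc_ccitt() and this loop produce the same value, which also makes it a
 * handy cross-check of the table (entry 128 is the polynomial, 0x8408).
 */
static inline u16 crc_ccitt_bitwise(u16 crc, u8 const *buffer, size_t len)
{
	while (len--) {
		int i;

		crc ^= *buffer++;	/* fold the byte into the low bits */
		for (i = 0; i < 8; i++)	/* one shift/xor per bit */
			crc = (crc >> 1) ^ ((crc & 1) ? 0x8408 : 0);
	}
	return crc;
}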
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/fs/hpfs/hpfs_fn.h
 *
 * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 * function headers
 */

//#define DBG
//#define DEBUG_LOCKS

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/blkdev.h>
#include <linux/unaligned.h>

#include "hpfs.h"

#define EIOERROR  EIO
#define EFSERROR  EUCLEAN

#define ANODE_ALLOC_FWD	512
#define FNODE_ALLOC_FWD	0
#define ALLOC_FWD_MIN	16
#define ALLOC_FWD_MAX	128
#define ALLOC_M	1
#define FNODE_RD_AHEAD	16
#define ANODE_RD_AHEAD	0
#define DNODE_RD_AHEAD	72
#define COUNT_RD_AHEAD	62

#define FREE_DNODES_ADD	58
#define FREE_DNODES_DEL	29

#define CHKCOND(x,y) if (!(x)) printk y

struct hpfs_inode_info {
	loff_t mmu_private;
	ino_t i_parent_dir;	/* (directories) gives fnode of parent dir */
	unsigned i_dno;		/* (directories) root dnode */
	unsigned i_dpos;	/* (directories) temp for readdir */
	unsigned i_dsubdno;	/* (directories) temp for readdir */
	unsigned i_file_sec;	/* (files) minimalist cache of alloc info */
	unsigned i_disk_sec;	/* (files) minimalist cache of alloc info */
	unsigned i_n_secs;	/* (files) minimalist cache of alloc info */
	unsigned i_ea_size;	/* size of extended attributes */
	unsigned i_ea_mode : 1;	/* file's permission is stored in ea */
	unsigned i_ea_uid : 1;	/* file's uid is stored in ea */
	unsigned i_ea_gid : 1;	/* file's gid is stored in ea */
	unsigned i_dirty : 1;
	loff_t **i_rddir_off;
	struct inode vfs_inode;
};

struct hpfs_sb_info {
	struct mutex hpfs_mutex;	/* global hpfs lock */
	ino_t sb_root;			/* inode number of root dir */
	unsigned sb_fs_size;		/* file system size, sectors */
	unsigned sb_bitmaps;		/* sector number of bitmap list */
	unsigned sb_dirband_start;	/* directory band start sector */
	unsigned sb_dirband_size;	/* directory band size, dnodes */
	unsigned sb_dmap;		/* sector number of dnode bit map */
	unsigned sb_n_free;		/* free blocks for statfs, or -1 */
	unsigned sb_n_free_dnodes;	/* free dnodes for statfs, or -1 */
	kuid_t sb_uid;			/* uid from mount options */
	kgid_t sb_gid;			/* gid from mount options */
	umode_t sb_mode;		/* mode from mount options */
	unsigned sb_eas : 2;		/* eas: 0-ignore, 1-ro, 2-rw */
	unsigned sb_err : 2;		/* on errs: 0-cont, 1-ro, 2-panic */
	unsigned sb_chk : 2;		/* checks: 0-no, 1-normal, 2-strict */
	unsigned sb_lowercase : 1;	/* downcase filenames hackery */
	unsigned sb_was_error : 1;	/* there was an error, set dirty flag */
	unsigned sb_chkdsk : 2;		/* chkdsk: 0-no, 1-on errs, 2-always */
	unsigned char *sb_cp_table;	/* code page tables: */
					/* 128 bytes uppercasing table & */
					/* 128 bytes lowercasing table */
	__le32 *sb_bmp_dir;		/* main bitmap directory */
	unsigned sb_c_bitmap;		/* current bitmap */
	unsigned sb_max_fwd_alloc;	/* max forward allocation */
	int sb_timeshift;
	struct rcu_head rcu;

	unsigned n_hotfixes;
	secno hotfix_from[256];
	secno hotfix_to[256];
};

/* Four 512-byte buffers and the 2k block obtained by concatenating them */
struct quad_buffer_head {
	struct buffer_head *bh[4];
	void *data;
};

/* The b-tree down pointer from a dir entry */
static inline dnode_secno de_down_pointer (struct hpfs_dirent *de)
{
	CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n"));
	return le32_to_cpu(*(__le32 *) ((void *) de + le16_to_cpu(de->length) - 4));
}

/* The first dir entry in a dnode */
static inline struct hpfs_dirent *dnode_first_de (struct dnode *dnode)
{
	return (void *) dnode->dirent;
}

/* The end+1 of the dir entries */
static inline struct hpfs_dirent *dnode_end_de (struct dnode *dnode)
{
	CHKCOND(le32_to_cpu(dnode->first_free)>=0x14 && le32_to_cpu(dnode->first_free)<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %x\n",(unsigned)le32_to_cpu(dnode->first_free)));
	return (void *) dnode + le32_to_cpu(dnode->first_free);
}

/* The dir entry after dir entry de */
static inline struct hpfs_dirent *de_next_de (struct hpfs_dirent *de)
{
	CHKCOND(le16_to_cpu(de->length)>=0x20 && le16_to_cpu(de->length)<0x800,("HPFS: de_next_de: de->length = %x\n",(unsigned)le16_to_cpu(de->length)));
	return (void *) de + le16_to_cpu(de->length);
}

static inline struct extended_attribute *fnode_ea(struct fnode *fnode)
{
	return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s));
}

static inline struct extended_attribute *fnode_end_ea(struct fnode *fnode)
{
	return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s));
}

static unsigned ea_valuelen(struct extended_attribute *ea)
{
	return ea->valuelen_lo + 256 * ea->valuelen_hi;
}

static inline struct extended_attribute *next_ea(struct extended_attribute *ea)
{
	return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea_valuelen(ea));
}

static inline secno ea_sec(struct extended_attribute *ea)
{
	return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 9 + ea->namelen)));
}

static inline secno ea_len(struct extended_attribute *ea)
{
	return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 5 + ea->namelen)));
}

static inline char *ea_data(struct extended_attribute *ea)
{
	return (char *)((char *)ea + 5 + ea->namelen);
}

static inline unsigned de_size(int namelen, secno down_ptr)
{
	return ((0x1f + namelen + 3) & ~3) + (down_ptr ?
static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src)
{
	int a;
	int n;
	if (!dst || !src) return;
	a = dst->down;
	n = dst->not_8x3;
	memcpy((char *)dst + 2, (char *)src + 2, 28);
	dst->down = a;
	dst->not_8x3 = n;
}

static inline unsigned tstbits(__le32 *bmp, unsigned b, unsigned n)
{
	int i;
	if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n;
	if (!((le32_to_cpu(bmp[(b & 0x3fff) >> 5]) >> (b & 0x1f)) & 1)) return 1;
	for (i = 1; i < n; i++)
		if (!((le32_to_cpu(bmp[((b+i) & 0x3fff) >> 5]) >> ((b+i) & 0x1f)) & 1))
			return i + 1;
	return 0;
}

/* alloc.c */

int hpfs_chk_sectors(struct super_block *, secno, int, char *);
secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int);
int hpfs_alloc_if_possible(struct super_block *, secno);
void hpfs_free_sectors(struct super_block *, secno, unsigned);
int hpfs_check_free_dnodes(struct super_block *, int);
void hpfs_free_dnode(struct super_block *, secno);
struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *);
struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **);
struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **);
int hpfs_trim_fs(struct super_block *, u64, u64, u64, unsigned *);

/* anode.c */

secno hpfs_bplus_lookup(struct super_block *, struct inode *, struct bplus_header *, unsigned, struct buffer_head *);
secno hpfs_add_sector_to_btree(struct super_block *, secno, int, unsigned);
void hpfs_remove_btree(struct super_block *, struct bplus_header *);
int hpfs_ea_read(struct super_block *, secno, int, unsigned, unsigned, char *);
int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, const char *);
void hpfs_ea_remove(struct super_block *, secno, int, unsigned);
void hpfs_truncate_btree(struct super_block *, secno, int, unsigned);
void hpfs_remove_fnode(struct super_block *, fnode_secno fno);

/* buffer.c */

secno hpfs_search_hotfix_map(struct super_block *s, secno sec);
unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n);
void hpfs_prefetch_sectors(struct super_block *, unsigned, int);
void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int);
void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **);
void *hpfs_map_4sectors(struct super_block *, unsigned, struct quad_buffer_head *, int);
void *hpfs_get_4sectors(struct super_block *, unsigned, struct quad_buffer_head *);
void hpfs_brelse4(struct quad_buffer_head *);
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *);

/* dentry.c */

extern const struct dentry_operations hpfs_dentry_operations;

/* dir.c */

struct dentry *hpfs_lookup(struct inode *, struct dentry *, unsigned int);
extern const struct file_operations hpfs_dir_ops;

/* dnode.c */

int hpfs_add_pos(struct inode *, loff_t *);
void hpfs_del_pos(struct inode *, loff_t *);
struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, const unsigned char *, unsigned, secno);
int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned, struct hpfs_dirent *);
int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int);
void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *);
dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno);
struct hpfs_dirent *map_pos_dirent(struct inode *, loff_t *, struct quad_buffer_head *);
struct hpfs_dirent *map_dirent(struct inode *, dnode_secno, const unsigned char *, unsigned, dnode_secno *, struct quad_buffer_head *);
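/*
 * Illustrative sketch (not part of the original header): structures that
 * span four consecutive 512-byte sectors (such as dnodes) are read
 * through a quad_buffer_head via the buffer.c helpers declared above.
 * A hypothetical read-only access, assuming 'secno' is a suitably
 * aligned sector number:
 *
 *	struct quad_buffer_head qbh;
 *	void *blk = hpfs_map_4sectors(s, secno, &qbh, 0);
 *	if (blk) {
 *		... use the 2 kB at qbh.data (== blk) ...
 *		hpfs_brelse4(&qbh);
 *	}
 */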
void hpfs_remove_dtree(struct super_block *, dnode_secno);
struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct fnode *, struct quad_buffer_head *);

/* ea.c */

void hpfs_ea_ext_remove(struct super_block *, secno, int, unsigned);
int hpfs_read_ea(struct super_block *, struct fnode *, char *, char *, int);
char *hpfs_get_ea(struct super_block *, struct fnode *, char *, int *);
void hpfs_set_ea(struct inode *, struct fnode *, const char *, const char *, int);

/* file.c */

int hpfs_file_fsync(struct file *, loff_t, loff_t, int);
void hpfs_truncate(struct inode *);
extern const struct file_operations hpfs_file_ops;
extern const struct inode_operations hpfs_file_iops;
extern const struct address_space_operations hpfs_aops;

/* inode.c */

void hpfs_init_inode(struct inode *);
void hpfs_read_inode(struct inode *);
void hpfs_write_inode(struct inode *);
void hpfs_write_inode_nolock(struct inode *);
int hpfs_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
void hpfs_write_if_changed(struct inode *);
void hpfs_evict_inode(struct inode *);

/* map.c */

__le32 *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
__le32 *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
void hpfs_prefetch_bitmap(struct super_block *, unsigned);
unsigned char *hpfs_load_code_page(struct super_block *, secno);
__le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock);
struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *);
dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino);

/* name.c */

unsigned char hpfs_upcase(unsigned char *, unsigned char);
int hpfs_chk_name(const unsigned char *, unsigned *);
unsigned char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int);
int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned, const unsigned char *, unsigned, int);
int hpfs_is_name_long(const unsigned char *, unsigned);
void hpfs_adjust_length(const unsigned char *, unsigned *);

/* namei.c */

extern const struct inode_operations hpfs_dir_iops;
extern const struct address_space_operations hpfs_symlink_aops;

static inline struct hpfs_inode_info *hpfs_i(struct inode *inode)
{
	return container_of(inode, struct hpfs_inode_info, vfs_inode);
}

static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb)
{
	return sb->s_fs_info;
}

/* super.c */

__printf(2, 3)
void hpfs_error(struct super_block *, const char *, ...);
int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
unsigned hpfs_get_free_dnodes(struct super_block *);
long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg);

/*
 * local time (HPFS) to GMT (Unix)
 */

static inline time64_t local_to_gmt(struct super_block *s, time64_t t)
{
	extern struct timezone sys_tz;
	return t + sys_tz.tz_minuteswest * 60 + hpfs_sb(s)->sb_timeshift;
}

static inline time32_t gmt_to_local(struct super_block *s, time64_t t)
{
	extern struct timezone sys_tz;
	return t - sys_tz.tz_minuteswest * 60 - hpfs_sb(s)->sb_timeshift;
}

static inline time32_t local_get_seconds(struct super_block *s)
{
	return gmt_to_local(s, ktime_get_real_seconds());
}
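/*
 * Illustrative sketch (not part of the original header): HPFS stores
 * local time on disk, so the two conversion helpers above must mirror
 * each other. For example, with tz_minuteswest = 300 (UTC-5) and
 * sb_timeshift = 0, local_to_gmt() reads an on-disk stamp t as
 * t + 18000, and gmt_to_local() subtracts the same 18000 on the way
 * back, so a read-modify-write cycle leaves the stamp unchanged:
 *
 *	time32_t on_disk = ...;
 *	time64_t unix_time = local_to_gmt(s, on_disk);
 *	WARN_ON(gmt_to_local(s, unix_time) != on_disk);
 */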
/*
 * Locking:
 *
 * hpfs_lock() locks the whole filesystem. It must be taken
 * on any method called by the VFS.
 *
 * We don't do any per-file locking anymore, it is hard to
 * review and HPFS is not performance-sensitive anyway.
 */
static inline void hpfs_lock(struct super_block *s)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	mutex_lock(&sbi->hpfs_mutex);
}

static inline void hpfs_unlock(struct super_block *s)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	mutex_unlock(&sbi->hpfs_mutex);
}

static inline void hpfs_lock_assert(struct super_block *s)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	WARN_ON(!mutex_is_locked(&sbi->hpfs_mutex));
}
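/*
 * Illustrative sketch (not part of the original header): per the locking
 * comment above, every VFS entry point brackets its work with the global
 * lock, roughly:
 *
 *	static int example_vfs_method(struct inode *inode)
 *	{
 *		int err;
 *		hpfs_lock(inode->i_sb);
 *		err = do_something(inode);	// hypothetical helper
 *		hpfs_unlock(inode->i_sb);
 *		return err;
 *	}
 *
 * hpfs_lock_assert() can then be used inside internal helpers to verify
 * that the caller already holds hpfs_mutex.
 */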
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2014 Douglas Gilbert
 */

static int sg_version_num = 30536;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.36"

/*
 *  D. P. Gilbert (dgilbert@interlog.com), notes:
 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros.  First
 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *        (otherwise the macros compile to empty statements).
* */ #include <linux/module.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/mtio.h> #include <linux/ioctl.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/moduleparam.h> #include <linux/cdev.h> #include <linux/idr.h> #include <linux/seq_file.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/blktrace_api.h> #include <linux/mutex.h> #include <linux/atomic.h> #include <linux/ratelimit.h> #include <linux/uio.h> #include <linux/cred.h> /* for sg_check_file_access() */ #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_tcq.h> #include <scsi/sg.h> #include "scsi_logging.h" #ifdef CONFIG_SCSI_PROC_FS #include <linux/proc_fs.h> static char *sg_version_date = "20140603"; static int sg_proc_init(void); #endif #define SG_ALLOW_DIO_DEF 0 #define SG_MAX_DEVS (1 << MINORBITS) /* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater * than 16 bytes are "variable length" whose length is a multiple of 4 */ #define SG_MAX_CDB_SIZE 252 #define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ) static int sg_big_buff = SG_DEF_RESERVED_SIZE; /* N.B. This variable is readable and writeable via /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer of this size (or less if there is not enough memory) will be reserved for use by this file descriptor. [Deprecated usage: this variable is also readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into the kernel (i.e. it is not a module).] */ static int def_reserved_size = -1; /* picks up init parameter */ static int sg_allow_dio = SG_ALLOW_DIO_DEF; static int scatter_elem_sz = SG_SCATTER_SZ; static int scatter_elem_sz_prev = SG_SCATTER_SZ; #define SG_SECTOR_SZ 512 static int sg_add_device(struct device *); static void sg_remove_device(struct device *); static DEFINE_IDR(sg_index_idr); static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock file descriptor list for device */ static struct class_interface sg_interface = { .add_dev = sg_add_device, .remove_dev = sg_remove_device, }; typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ unsigned bufflen; /* Size of (aggregate) data buffer */ struct page **pages; int page_order; char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ unsigned char cmd_opcode; /* first byte of command */ } Sg_scatter_hold; struct sg_device; /* forward declarations */ struct sg_fd; typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ struct list_head entry; /* list entry */ struct sg_fd *parentfp; /* NULL -> not in use */ Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */ unsigned char sense_b[SCSI_SENSE_BUFFERSIZE]; char res_used; /* 1 -> using reserve buffer, 0 -> not ... 
*/ char orphan; /* 1 -> drop on sight, 0 -> normal */ char sg_io_owned; /* 1 -> packet belongs to SG_IO */ /* done protected by rq_list_lock */ char done; /* 0->before bh, 1->before read, 2->read */ struct request *rq; struct bio *bio; struct execute_work ew; } Sg_request; typedef struct sg_fd { /* holds the state of a file descriptor */ struct list_head sfd_siblings; /* protected by device's sfd_lock */ struct sg_device *parentdp; /* owning device */ wait_queue_head_t read_wait; /* queue read until command done */ rwlock_t rq_list_lock; /* protect access to list in req_arr */ struct mutex f_mutex; /* protect against changes in this fd */ int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ Sg_scatter_hold reserve; /* buffer held for this file descriptor */ struct list_head rq_list; /* head of request list */ struct fasync_struct *async_qp; /* used by asynchronous notification */ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ char mmap_called; /* 0 -> mmap() never called on this fd */ char res_in_use; /* 1 -> 'reserve' array in use */ struct kref f_ref; struct execute_work ew; } Sg_fd; typedef struct sg_device { /* holds the state of each scsi generic device */ struct scsi_device *device; wait_queue_head_t open_wait; /* queue open() when O_EXCL present */ struct mutex open_rel_lock; /* held when in open() or release() */ int sg_tablesize; /* adapter's max scatter-gather table size */ u32 index; /* device index number */ struct list_head sfds; rwlock_t sfd_lock; /* protect access to sfd list */ atomic_t detaching; /* 0->device usable, 1->device detaching */ bool exclude; /* 1->open(O_EXCL) succeeded and is active */ int open_cnt; /* count of opens (perhaps < num(sfds) ) */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ char name[DISK_NAME_LEN]; struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ struct kref d_ref; } Sg_device; /* tasklet or soft irq callback */ static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status); static int sg_start_req(Sg_request *srp, unsigned char *cmd); static int sg_finish_rem_req(Sg_request * srp); static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp); static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp); static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking); static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp); static void sg_build_reserve(Sg_fd * sfp, int req_size); static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); static Sg_fd *sg_add_sfp(Sg_device * sdp); static void sg_remove_sfp(struct kref *); static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy); static Sg_request *sg_add_request(Sg_fd * sfp); static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); static Sg_device *sg_get_dev(int dev); static 
void sg_device_destroy(struct kref *kref); #define SZ_SG_HEADER sizeof(struct sg_header) #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) #define SZ_SG_IOVEC sizeof(sg_iovec_t) #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) #define sg_printk(prefix, sdp, fmt, a...) \ sdev_prefix_printk(prefix, (sdp)->device, (sdp)->name, fmt, ##a) /* * The SCSI interfaces that use read() and write() as an asynchronous variant of * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways * to trigger read() and write() calls from various contexts with elevated * privileges. This can lead to kernel memory corruption (e.g. if these * interfaces are called through splice()) and privilege escalation inside * userspace (e.g. if a process with access to such a device passes a file * descriptor to a SUID binary as stdin/stdout/stderr). * * This function provides protection for the legacy API by restricting the * calling context. */ static int sg_check_file_access(struct file *filp, const char *caller) { if (filp->f_cred != current_real_cred()) { pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", caller, task_tgid_vnr(current), current->comm); return -EPERM; } return 0; } static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = filp->private_data; if (sfp->parentdp->device->type == TYPE_SCANNER) return 0; if (!scsi_cmd_allowed(cmd, filp->f_mode & FMODE_WRITE)) return -EPERM; return 0; } static int open_wait(Sg_device *sdp, int flags) { int retval = 0; if (flags & O_EXCL) { while (sdp->open_cnt > 0) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->open_cnt)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } else { while (sdp->exclude) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->exclude)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } return retval; } /* Returns 0 on success, else a negated errno value */ static int sg_open(struct inode *inode, struct file *filp) { int dev = iminor(inode); int flags = filp->f_flags; struct request_queue *q; struct scsi_device *device; Sg_device *sdp; Sg_fd *sfp; int retval; nonseekable_open(inode, filp); if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) return -EPERM; /* Can't lock it with read only access */ sdp = sg_get_dev(dev); if (IS_ERR(sdp)) return PTR_ERR(sdp); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_open: flags=0x%x\n", flags)); /* This driver's module count bumped by fops_get in <linux/fs.h> */ /* Prevent the device driver from vanishing while we sleep */ device = sdp->device; retval = scsi_device_get(device); if (retval) goto sg_put; /* scsi_block_when_processing_errors() may block so bypass * check if O_NONBLOCK. Permits SCSI commands to be issued * during error recovery. Tread carefully. 
	 */
	if (!((flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(device))) {
		retval = -ENXIO;
		/* we are in error recovery for this device */
		goto sdp_put;
	}

	mutex_lock(&sdp->open_rel_lock);
	if (flags & O_NONBLOCK) {
		if (flags & O_EXCL) {
			if (sdp->open_cnt > 0) {
				retval = -EBUSY;
				goto error_mutex_locked;
			}
		} else {
			if (sdp->exclude) {
				retval = -EBUSY;
				goto error_mutex_locked;
			}
		}
	} else {
		retval = open_wait(sdp, flags);
		if (retval) /* -ERESTARTSYS or -ENODEV */
			goto error_mutex_locked;
	}

	/* N.B. at this point we are holding the open_rel_lock */
	if (flags & O_EXCL)
		sdp->exclude = true;

	if (sdp->open_cnt < 1) {  /* no existing opens */
		sdp->sgdebug = 0;
		q = device->request_queue;
		sdp->sg_tablesize = queue_max_segments(q);
	}
	sfp = sg_add_sfp(sdp);
	if (IS_ERR(sfp)) {
		retval = PTR_ERR(sfp);
		goto out_undo;
	}

	filp->private_data = sfp;
	sdp->open_cnt++;
	mutex_unlock(&sdp->open_rel_lock);

	retval = 0;
sg_put:
	kref_put(&sdp->d_ref, sg_device_destroy);
	return retval;

out_undo:
	if (flags & O_EXCL) {
		sdp->exclude = false;	/* undo if error */
		wake_up_interruptible(&sdp->open_wait);
	}
error_mutex_locked:
	mutex_unlock(&sdp->open_rel_lock);
sdp_put:
	kref_put(&sdp->d_ref, sg_device_destroy);
	scsi_device_put(device);
	return retval;
}

/* Release resources associated with a successful sg_open()
 * Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));

	mutex_lock(&sdp->open_rel_lock);
	sdp->open_cnt--;

	/* possibly many open()s waiting on exclude clearing, start many;
	 * only open(O_EXCL)s wait on 0==open_cnt so only start one */
	if (sdp->exclude) {
		sdp->exclude = false;
		wake_up_interruptible_all(&sdp->open_wait);
	} else if (0 == sdp->open_cnt) {
		wake_up_interruptible(&sdp->open_wait);
	}
	mutex_unlock(&sdp->open_rel_lock);
	kref_put(&sfp->f_ref, sg_remove_sfp);
	return 0;
}

static int get_sg_io_pack_id(int *pack_id, void __user *buf, size_t count)
{
	struct sg_header __user *old_hdr = buf;
	int reply_len;

	if (count >= SZ_SG_HEADER) {
		/* negative reply_len means v3 format, otherwise v1/v2 */
		if (get_user(reply_len, &old_hdr->reply_len))
			return -EFAULT;

		if (reply_len >= 0)
			return get_user(*pack_id, &old_hdr->pack_id);

		if (in_compat_syscall() &&
		    count >= sizeof(struct compat_sg_io_hdr)) {
			struct compat_sg_io_hdr __user *hp = buf;

			return get_user(*pack_id, &hp->pack_id);
		}

		if (count >= sizeof(struct sg_io_hdr)) {
			struct sg_io_hdr __user *hp = buf;

			return get_user(*pack_id, &hp->pack_id);
		}
	}

	/* no valid header was passed, so ignore the pack_id */
	*pack_id = -1;
	return 0;
}

static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int req_pack_id = -1;
	bool busy;
	sg_io_hdr_t *hp;
	struct sg_header *old_hdr;
	int retval;

	/*
	 * This could cause a response to be stranded. Close the associated
	 * file descriptor to free up any resources being held.
*/ retval = sg_check_file_access(filp, __func__); if (retval) return retval; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_read: count=%d\n", (int) count)); if (sfp->force_packid) retval = get_sg_io_pack_id(&req_pack_id, buf, count); if (retval) return retval; srp = sg_get_rq_mark(sfp, req_pack_id, &busy); if (!srp) { /* now wait on packet to arrive */ if (filp->f_flags & O_NONBLOCK) return -EAGAIN; retval = wait_event_interruptible(sfp->read_wait, ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) || (!busy && atomic_read(&sdp->detaching)))); if (!srp) /* signal or detaching */ return retval ? retval : -ENODEV; } if (srp->header.interface_id != '\0') return sg_new_read(sfp, buf, count, srp); hp = &srp->header; old_hdr = kzalloc(SZ_SG_HEADER, GFP_KERNEL); if (!old_hdr) return -ENOMEM; old_hdr->reply_len = (int) hp->timeout; old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ old_hdr->pack_id = hp->pack_id; old_hdr->twelve_byte = ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; old_hdr->target_status = hp->masked_status; old_hdr->host_status = hp->host_status; old_hdr->driver_status = hp->driver_status; if ((CHECK_CONDITION & hp->masked_status) || (srp->sense_b[0] & 0x70) == 0x70) { old_hdr->driver_status = DRIVER_SENSE; memcpy(old_hdr->sense_buffer, srp->sense_b, sizeof (old_hdr->sense_buffer)); } switch (hp->host_status) { /* This setup of 'result' is for backward compatibility and is best ignored by the user who should use target, host + driver status */ case DID_OK: case DID_PASSTHROUGH: case DID_SOFT_ERROR: old_hdr->result = 0; break; case DID_NO_CONNECT: case DID_BUS_BUSY: case DID_TIME_OUT: old_hdr->result = EBUSY; break; case DID_BAD_TARGET: case DID_ABORT: case DID_PARITY: case DID_RESET: case DID_BAD_INTR: old_hdr->result = EIO; break; case DID_ERROR: old_hdr->result = (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 0 : EIO; break; default: old_hdr->result = EIO; break; } /* Now copy the result back to the user buffer. */ if (count >= SZ_SG_HEADER) { if (copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } buf += SZ_SG_HEADER; if (count > old_hdr->reply_len) count = old_hdr->reply_len; if (count > SZ_SG_HEADER) { if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } } } else count = (old_hdr->result == 0) ? 0 : -EIO; sg_finish_rem_req(srp); sg_remove_request(sfp, srp); retval = count; free_old_hdr: kfree(old_hdr); return retval; } static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) { sg_io_hdr_t *hp = &srp->header; int err = 0, err2; int len; if (in_compat_syscall()) { if (count < sizeof(struct compat_sg_io_hdr)) { err = -EINVAL; goto err_out; } } else if (count < SZ_SG_IO_HDR) { err = -EINVAL; goto err_out; } hp->sb_len_wr = 0; if ((hp->mx_sb_len > 0) && hp->sbp) { if ((CHECK_CONDITION & hp->masked_status) || (srp->sense_b[0] & 0x70) == 0x70) { int sb_len = SCSI_SENSE_BUFFERSIZE; sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ len = (len > sb_len) ? 
			    sb_len : len;
			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
				err = -EFAULT;
				goto err_out;
			}
			hp->driver_status = DRIVER_SENSE;
			hp->sb_len_wr = len;
		}
	}
	if (hp->masked_status || hp->host_status || hp->driver_status)
		hp->info |= SG_INFO_CHECK;
	err = put_sg_io_hdr(hp, buf);
err_out:
	err2 = sg_finish_rem_req(srp);
	sg_remove_request(sfp, srp);
	return err ? : err2 ? : count;
}

static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
	int mxsize, cmd_size, k;
	int input_size, blocking;
	unsigned char opcode;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	struct sg_header old_hdr;
	sg_io_hdr_t *hp;
	unsigned char cmnd[SG_MAX_CDB_SIZE];
	int retval;

	retval = sg_check_file_access(filp, __func__);
	if (retval)
		return retval;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
				      "sg_write: count=%d\n", (int) count));
	if (atomic_read(&sdp->detaching))
		return -ENODEV;
	if (!((filp->f_flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device)))
		return -ENXIO;

	if (count < SZ_SG_HEADER)
		return -EIO;
	if (copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
		return -EFAULT;
	blocking = !(filp->f_flags & O_NONBLOCK);
	if (old_hdr.reply_len < 0)
		return sg_new_write(sfp, filp, buf, count,
				    blocking, 0, 0, NULL);
	if (count < (SZ_SG_HEADER + 6))
		return -EIO;	/* The minimum scsi command length is 6 bytes. */

	buf += SZ_SG_HEADER;
	if (get_user(opcode, buf))
		return -EFAULT;

	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
					      "sg_write: queue full\n"));
		return -EDOM;
	}
	mutex_lock(&sfp->f_mutex);
	if (sfp->next_cmd_len > 0) {
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
	} else {
		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
			cmd_size = 12;
	}
	mutex_unlock(&sfp->f_mutex);
	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
	/* Determine buffer size.  */
	input_size = count - cmd_size;
	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
	mxsize -= SZ_SG_HEADER;
	input_size -= SZ_SG_HEADER;
	if (input_size < 0) {
		sg_remove_request(sfp, srp);
		return -EIO;	/* User did not pass enough bytes for this command. */
	}
	hp = &srp->header;
	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
	hp->cmd_len = (unsigned char) cmd_size;
	hp->iovec_count = 0;
	hp->mx_sb_len = 0;
	if (input_size > 0)
		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
	else
		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
	hp->dxfer_len = mxsize;
	if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
	    (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
		hp->dxferp = (char __user *)buf + cmd_size;
	else
		hp->dxferp = NULL;
	hp->sbp = NULL;
	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
	hp->flags = input_size;	/* structure abuse ... */
	hp->pack_id = old_hdr.pack_id;
	hp->usr_ptr = NULL;
	if (copy_from_user(cmnd, buf, cmd_size)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
	 * is a non-zero input_size, so emit a warning.
*/ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { printk_ratelimited(KERN_WARNING "sg_write: data in/out %d/%d bytes " "for SCSI command 0x%x-- guessing " "data in;\n program %s not setting " "count and/or reply_len properly\n", old_hdr.reply_len - (int)SZ_SG_HEADER, input_size, (unsigned int) cmnd[0], current->comm); } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ? k : count; } static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp) { int k; Sg_request *srp; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; int timeout; unsigned long ul_timeout; if (count < SZ_SG_IO_HDR) return -EINVAL; sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_new_write: queue full\n")); return -EDOM; } srp->sg_io_owned = sg_io_owned; hp = &srp->header; if (get_sg_io_hdr(hp, buf)) { sg_remove_request(sfp, srp); return -EFAULT; } if (hp->interface_id != 'S') { sg_remove_request(sfp, srp); return -ENOSYS; } if (hp->flags & SG_FLAG_MMAP_IO) { if (hp->dxfer_len > sfp->reserve.bufflen) { sg_remove_request(sfp, srp); return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ } if (hp->flags & SG_FLAG_DIRECT_IO) { sg_remove_request(sfp, srp); return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ } if (sfp->res_in_use) { sg_remove_request(sfp, srp); return -EBUSY; /* reserve buffer already being used */ } } ul_timeout = msecs_to_jiffies(srp->header.timeout); timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX; if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) { sg_remove_request(sfp, srp); return -EMSGSIZE; } if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; } if (read_only && sg_allow_access(file, cmnd)) { sg_remove_request(sfp, srp); return -EPERM; } k = sg_common_write(sfp, srp, cmnd, timeout, blocking); if (k < 0) return k; if (o_srp) *o_srp = srp; return count; } static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) { int k, at_head; Sg_device *sdp = sfp->parentdp; sg_io_hdr_t *hp = &srp->header; srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ hp->status = 0; hp->masked_status = 0; hp->msg_status = 0; hp->info = 0; hp->host_status = 0; hp->driver_status = 0; hp->resid = 0; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); if (hp->dxfer_len >= SZ_256M) { sg_remove_request(sfp, srp); return -EINVAL; } k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); sg_remove_request(sfp, srp); return k; /* probably out of space --> ENOMEM */ } if (atomic_read(&sdp->detaching)) { if (srp->bio) { blk_mq_free_request(srp->rq); srp->rq = NULL; } sg_finish_rem_req(srp); sg_remove_request(sfp, srp); return -ENODEV; } hp->duration = jiffies_to_msecs(jiffies); if (hp->interface_id != '\0' && /* v3 (or later) interface */ (SG_FLAG_Q_AT_TAIL & hp->flags)) at_head = 0; else at_head = 1; srp->rq->timeout = timeout; kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). 
	 */
	srp->rq->end_io = sg_rq_end_io;
	blk_execute_rq_nowait(srp->rq, at_head);
	return 0;
}

static int srp_done(Sg_fd *sfp, Sg_request *srp)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&sfp->rq_list_lock, flags);
	ret = srp->done;
	read_unlock_irqrestore(&sfp->rq_list_lock, flags);
	return ret;
}

static int max_sectors_bytes(struct request_queue *q)
{
	unsigned int max_sectors = queue_max_sectors(q);

	max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);

	return max_sectors << 9;
}

static void
sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
{
	Sg_request *srp;
	int val;
	unsigned int ms;

	val = 0;
	list_for_each_entry(srp, &sfp->rq_list, entry) {
		if (val >= SG_MAX_QUEUE)
			break;
		rinfo[val].req_state = srp->done + 1;
		rinfo[val].problem =
			srp->header.masked_status &
			srp->header.host_status &
			srp->header.driver_status;
		if (srp->done)
			rinfo[val].duration =
				srp->header.duration;
		else {
			ms = jiffies_to_msecs(jiffies);
			rinfo[val].duration =
				(ms > srp->header.duration) ?
				(ms - srp->header.duration) : 0;
		}
		rinfo[val].orphan = srp->orphan;
		rinfo[val].sg_io_owned = srp->sg_io_owned;
		rinfo[val].pack_id = srp->header.pack_id;
		rinfo[val].usr_ptr = srp->header.usr_ptr;
		val++;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
	char req_state;
	char orphan;
	char sg_io_owned;
	char problem;
	int pack_id;
	compat_uptr_t usr_ptr;
	unsigned int duration;
	int unused;
};

static int put_compat_request_table(struct compat_sg_req_info __user *o,
				    struct sg_req_info *rinfo)
{
	int i;
	for (i = 0; i < SG_MAX_QUEUE; i++) {
		if (copy_to_user(o + i, rinfo + i, offsetof(sg_req_info_t, usr_ptr)) ||
		    put_user((uintptr_t)rinfo[i].usr_ptr, &o[i].usr_ptr) ||
		    put_user(rinfo[i].duration, &o[i].duration) ||
		    put_user(rinfo[i].unused, &o[i].unused))
			return -EFAULT;
	}
	return 0;
}
#endif

static long
sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
		unsigned int cmd_in, void __user *p)
{
	int __user *ip = p;
	int result, val, read_only;
	Sg_request *srp;
	unsigned long iflags;

	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
				      "sg_ioctl: cmd=0x%x\n", (int) cmd_in));
	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

	switch (cmd_in) {
	case SG_IO:
		if (atomic_read(&sdp->detaching))
			return -ENODEV;
		if (!scsi_block_when_processing_errors(sdp->device))
			return -ENXIO;
		result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
				      1, read_only, 1, &srp);
		if (result < 0)
			return result;
		result = wait_event_interruptible(sfp->read_wait,
						  srp_done(sfp, srp));
		write_lock_irq(&sfp->rq_list_lock);
		if (srp->done) {
			srp->done = 2;
			write_unlock_irq(&sfp->rq_list_lock);
			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
			return (result < 0) ? result : 0;
		}
		srp->orphan = 1;
		write_unlock_irq(&sfp->rq_list_lock);
		return result;	/* -ERESTARTSYS because signal hit process */
	case SG_SET_TIMEOUT:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EIO;
		if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
			val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
				    INT_MAX);
		sfp->timeout_user = val;
		sfp->timeout = mult_frac(val, HZ, USER_HZ);

		return 0;
	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
				/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
		/*
		 * N.B. This ioctl never worked properly, but failed to
		 * return an error value. So returning '0' to keep
		 * compatibility with legacy applications.
*/ return 0; case SG_GET_LOW_DMA: return put_user(0, ip); case SG_GET_SCSI_ID: { sg_scsi_id_t v; if (atomic_read(&sdp->detaching)) return -ENODEV; memset(&v, 0, sizeof(v)); v.host_no = sdp->device->host->host_no; v.channel = sdp->device->channel; v.scsi_id = sdp->device->id; v.lun = sdp->device->lun; v.scsi_type = sdp->device->type; v.h_cmd_per_lun = sdp->device->host->cmd_per_lun; v.d_queue_depth = sdp->device->queue_depth; if (copy_to_user(p, &v, sizeof(sg_scsi_id_t))) return -EFAULT; return 0; } case SG_SET_FORCE_PACK_ID: result = get_user(val, ip); if (result) return result; sfp->force_packid = val ? 1 : 0; return 0; case SG_GET_PACK_ID: read_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(srp, &sfp->rq_list, entry) { if ((1 == srp->done) && (!srp->sg_io_owned)) { read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(srp->header.pack_id, ip); } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(-1, ip); case SG_GET_NUM_WAITING: read_lock_irqsave(&sfp->rq_list_lock, iflags); val = 0; list_for_each_entry(srp, &sfp->rq_list, entry) { if ((1 == srp->done) && (!srp->sg_io_owned)) ++val; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(val, ip); case SG_GET_SG_TABLESIZE: return put_user(sdp->sg_tablesize, ip); case SG_SET_RESERVED_SIZE: result = get_user(val, ip); if (result) return result; if (val < 0) return -EINVAL; val = min_t(int, val, max_sectors_bytes(sdp->device->request_queue)); mutex_lock(&sfp->f_mutex); if (val != sfp->reserve.bufflen) { if (sfp->mmap_called || sfp->res_in_use) { mutex_unlock(&sfp->f_mutex); return -EBUSY; } sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); } mutex_unlock(&sfp->f_mutex); return 0; case SG_GET_RESERVED_SIZE: val = min_t(int, sfp->reserve.bufflen, max_sectors_bytes(sdp->device->request_queue)); return put_user(val, ip); case SG_SET_COMMAND_Q: result = get_user(val, ip); if (result) return result; sfp->cmd_q = val ? 1 : 0; return 0; case SG_GET_COMMAND_Q: return put_user((int) sfp->cmd_q, ip); case SG_SET_KEEP_ORPHAN: result = get_user(val, ip); if (result) return result; sfp->keep_orphan = val; return 0; case SG_GET_KEEP_ORPHAN: return put_user((int) sfp->keep_orphan, ip); case SG_NEXT_CMD_LEN: result = get_user(val, ip); if (result) return result; if (val > SG_MAX_CDB_SIZE) return -ENOMEM; sfp->next_cmd_len = (val > 0) ? val : 0; return 0; case SG_GET_VERSION_NUM: return put_user(sg_version_num, ip); case SG_GET_ACCESS_COUNT: /* faked - we don't have a real access count anymore */ val = (sdp->device ? 1 : 0); return put_user(val, ip); case SG_GET_REQUEST_TABLE: { sg_req_info_t *rinfo; rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO, GFP_KERNEL); if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); sg_fill_request_table(sfp, rinfo); read_unlock_irqrestore(&sfp->rq_list_lock, iflags); #ifdef CONFIG_COMPAT if (in_compat_syscall()) result = put_compat_request_table(p, rinfo); else #endif result = copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); result = result ? 
-EFAULT : 0; kfree(rinfo); return result; } case SG_EMULATED_HOST: if (atomic_read(&sdp->detaching)) return -ENODEV; return put_user(sdp->device->host->hostt->emulated, ip); case SCSI_IOCTL_SEND_COMMAND: if (atomic_read(&sdp->detaching)) return -ENODEV; return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, cmd_in, p); case SG_SET_DEBUG: result = get_user(val, ip); if (result) return result; sdp->sgdebug = (char) val; return 0; case BLKSECTGET: return put_user(max_sectors_bytes(sdp->device->request_queue), ip); case BLKTRACESETUP: return blk_trace_setup(sdp->device->request_queue, sdp->name, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), NULL, p); case BLKTRACESTART: return blk_trace_startstop(sdp->device->request_queue, 1); case BLKTRACESTOP: return blk_trace_startstop(sdp->device->request_queue, 0); case BLKTRACETEARDOWN: return blk_trace_remove(sdp->device->request_queue); case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: case SCSI_IOCTL_PROBE_HOST: case SG_GET_TRANSFORM: case SG_SCSI_RESET: if (atomic_read(&sdp->detaching)) return -ENODEV; break; default: if (read_only) return -EPERM; /* don't know so take safe approach */ break; } result = scsi_ioctl_block_when_processing_errors(sdp->device, cmd_in, filp->f_flags & O_NDELAY); if (result) return result; return -ENOIOCTLCMD; } static long sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { void __user *p = (void __user *)arg; Sg_device *sdp; Sg_fd *sfp; int ret; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); if (ret != -ENOIOCTLCMD) return ret; return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, cmd_in, p); } static __poll_t sg_poll(struct file *filp, poll_table * wait) { __poll_t res = 0; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int count = 0; unsigned long iflags; sfp = filp->private_data; if (!sfp) return EPOLLERR; sdp = sfp->parentdp; if (!sdp) return EPOLLERR; poll_wait(filp, &sfp->read_wait, wait); read_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(srp, &sfp->rq_list, entry) { /* if any read waiting, flag it */ if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) res = EPOLLIN | EPOLLRDNORM; ++count; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (atomic_read(&sdp->detaching)) res |= EPOLLHUP; else if (!sfp->cmd_q) { if (0 == count) res |= EPOLLOUT | EPOLLWRNORM; } else if (count < SG_MAX_QUEUE) res |= EPOLLOUT | EPOLLWRNORM; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_poll: res=0x%x\n", (__force u32) res)); return res; } static int sg_fasync(int fd, struct file *filp, int mode) { Sg_device *sdp; Sg_fd *sfp; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_fasync: mode=%d\n", mode)); return fasync_helper(fd, filp, mode, &sfp->async_qp); } static vm_fault_t sg_vma_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; Sg_fd *sfp; unsigned long offset, len, sa; Sg_scatter_hold *rsv_schp; int k, length; if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) return VM_FAULT_SIGBUS; rsv_schp = &sfp->reserve; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= rsv_schp->bufflen) return VM_FAULT_SIGBUS; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, "sg_vma_fault: offset=%lu, scatg=%d\n", offset, rsv_schp->k_use_sg)); sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = 
vma->vm_end - sa; len = (len < length) ? len : length; if (offset < len) { struct page *page = nth_page(rsv_schp->pages[k], offset >> PAGE_SHIFT); get_page(page); /* increment page count */ vmf->page = page; return 0; /* success */ } sa += len; offset -= len; } return VM_FAULT_SIGBUS; } static const struct vm_operations_struct sg_mmap_vm_ops = { .fault = sg_vma_fault, }; static int sg_mmap(struct file *filp, struct vm_area_struct *vma) { Sg_fd *sfp; unsigned long req_sz, len, sa; Sg_scatter_hold *rsv_schp; int k, length; int ret = 0; if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) return -ENXIO; req_sz = vma->vm_end - vma->vm_start; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, "sg_mmap starting, vm_start=%p, len=%d\n", (void *) vma->vm_start, (int) req_sz)); if (vma->vm_pgoff) return -EINVAL; /* want no offset */ rsv_schp = &sfp->reserve; mutex_lock(&sfp->f_mutex); if (req_sz > rsv_schp->bufflen) { ret = -ENOMEM; /* cannot map more than reserved buffer */ goto out; } sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = vma->vm_end - sa; len = (len < length) ? len : length; sa += len; } sfp->mmap_called = 1; vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); vma->vm_private_data = sfp; vma->vm_ops = &sg_mmap_vm_ops; out: mutex_unlock(&sfp->f_mutex); return ret; } static void sg_rq_end_io_usercontext(struct work_struct *work) { struct sg_request *srp = container_of(work, struct sg_request, ew.work); struct sg_fd *sfp = srp->parentfp; sg_finish_rem_req(srp); sg_remove_request(sfp, srp); kref_put(&sfp->f_ref, sg_remove_sfp); } /* * This function is a "bottom half" handler that is called by the mid * level when a command is completed (or has failed). */ static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); struct sg_request *srp = rq->end_io_data; Sg_device *sdp; Sg_fd *sfp; unsigned long iflags; unsigned int ms; char *sense; int result, resid, done = 1; if (WARN_ON(srp->done != 0)) return RQ_END_IO_NONE; sfp = srp->parentfp; if (WARN_ON(sfp == NULL)) return RQ_END_IO_NONE; sdp = sfp->parentdp; if (unlikely(atomic_read(&sdp->detaching))) pr_info("%s: device detaching\n", __func__); sense = scmd->sense_buffer; result = scmd->result; resid = scmd->resid_len; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_cmd_done: pack_id=%d, res=0x%x\n", srp->header.pack_id, result)); srp->header.resid = resid; ms = jiffies_to_msecs(jiffies); srp->header.duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; if (0 != result) { struct scsi_sense_hdr sshdr; srp->header.status = 0xff & result; srp->header.masked_status = sg_status_byte(result); srp->header.msg_status = COMMAND_COMPLETE; srp->header.host_status = host_byte(result); srp->header.driver_status = driver_byte(result); if ((sdp->sgdebug > 0) && ((CHECK_CONDITION == srp->header.masked_status) || (COMMAND_TERMINATED == srp->header.masked_status))) __scsi_print_sense(sdp->device, __func__, sense, SCSI_SENSE_BUFFERSIZE); /* Following if statement is a patch supplied by Eric Youngdale */ if (driver_byte(result) != 0 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) && !scsi_sense_is_deferred(&sshdr) && sshdr.sense_key == UNIT_ATTENTION && sdp->device->removable) { /* Detected possible disc change. 
Set the bit - this */ /* may be used if there are filesystems using this device */ sdp->device->changed = 1; } } if (scmd->sense_len) memcpy(srp->sense_b, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); /* Rely on write phase to clean out srp status values, so no "else" */ /* * Free the request as soon as it is complete so that its resources * can be reused without waiting for userspace to read() the * result. But keep the associated bio (if any) around until * blk_rq_unmap_user() can be called from user context. */ srp->rq = NULL; blk_mq_free_request(rq); write_lock_irqsave(&sfp->rq_list_lock, iflags); if (unlikely(srp->orphan)) { if (sfp->keep_orphan) srp->sg_io_owned = 0; else done = 0; } srp->done = done; write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (likely(done)) { /* Now wake up any sg_read() that is waiting for this * packet. */ wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); kref_put(&sfp->f_ref, sg_remove_sfp); } else { INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); schedule_work(&srp->ew.work); } return RQ_END_IO_NONE; } static const struct file_operations sg_fops = { .owner = THIS_MODULE, .read = sg_read, .write = sg_write, .poll = sg_poll, .unlocked_ioctl = sg_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = sg_open, .mmap = sg_mmap, .release = sg_release, .fasync = sg_fasync, }; static const struct class sg_sysfs_class = { .name = "scsi_generic" }; static int sg_sysfs_valid = 0; static Sg_device * sg_alloc(struct scsi_device *scsidp) { struct request_queue *q = scsidp->request_queue; Sg_device *sdp; unsigned long iflags; int error; u32 k; sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); if (!sdp) { sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device " "failure\n", __func__); return ERR_PTR(-ENOMEM); } idr_preload(GFP_KERNEL); write_lock_irqsave(&sg_index_lock, iflags); error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT); if (error < 0) { if (error == -ENOSPC) { sdev_printk(KERN_WARNING, scsidp, "Unable to attach sg device type=%d, minor number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); error = -ENODEV; } else { sdev_printk(KERN_WARNING, scsidp, "%s: idr " "allocation Sg_device failure: %d\n", __func__, error); } goto out_unlock; } k = error; SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp, "sg_alloc: dev=%d \n", k)); sprintf(sdp->name, "sg%d", k); sdp->device = scsidp; mutex_init(&sdp->open_rel_lock); INIT_LIST_HEAD(&sdp->sfds); init_waitqueue_head(&sdp->open_wait); atomic_set(&sdp->detaching, 0); rwlock_init(&sdp->sfd_lock); sdp->sg_tablesize = queue_max_segments(q); sdp->index = k; kref_init(&sdp->d_ref); error = 0; out_unlock: write_unlock_irqrestore(&sg_index_lock, iflags); idr_preload_end(); if (error) { kfree(sdp); return ERR_PTR(error); } return sdp; } static int sg_add_device(struct device *cl_dev) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); Sg_device *sdp = NULL; struct cdev * cdev = NULL; int error; unsigned long iflags; if (!blk_get_queue(scsidp->request_queue)) { pr_warn("%s: get scsi_device queue failed\n", __func__); return -ENODEV; } error = -ENOMEM; cdev = cdev_alloc(); if (!cdev) { pr_warn("%s: cdev_alloc failed\n", __func__); goto out; } cdev->owner = THIS_MODULE; cdev->ops = &sg_fops; sdp = sg_alloc(scsidp); if (IS_ERR(sdp)) { pr_warn("%s: sg_alloc failed\n", __func__); error = PTR_ERR(sdp); goto out; } error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); if (error) goto cdev_add_err; sdp->cdev = cdev; if (sg_sysfs_valid) { struct device *sg_class_member; 
sg_class_member = device_create(&sg_sysfs_class, cl_dev->parent, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), sdp, "%s", sdp->name); if (IS_ERR(sg_class_member)) { pr_err("%s: device_create failed\n", __func__); error = PTR_ERR(sg_class_member); goto cdev_add_err; } error = sysfs_create_link(&scsidp->sdev_gendev.kobj, &sg_class_member->kobj, "generic"); if (error) pr_err("%s: unable to make symlink 'generic' back " "to sg%d\n", __func__, sdp->index); } else pr_warn("%s: sg_sys Invalid\n", __func__); sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d " "type %d\n", sdp->index, scsidp->type); dev_set_drvdata(cl_dev, sdp); return 0; cdev_add_err: write_lock_irqsave(&sg_index_lock, iflags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, iflags); kfree(sdp); out: if (cdev) cdev_del(cdev); blk_put_queue(scsidp->request_queue); return error; } static void sg_device_destroy(struct kref *kref) { struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); struct request_queue *q = sdp->device->request_queue; unsigned long flags; /* CAUTION! Note that the device can still be found via idr_find() * even though the refcount is 0. Therefore, do idr_remove() BEFORE * any other cleanup. */ blk_trace_remove(q); blk_put_queue(q); write_lock_irqsave(&sg_index_lock, flags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, flags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_device_destroy\n")); kfree(sdp); } static void sg_remove_device(struct device *cl_dev) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); Sg_device *sdp = dev_get_drvdata(cl_dev); unsigned long iflags; Sg_fd *sfp; int val; if (!sdp) return; /* want sdp->detaching non-zero as soon as possible */ val = atomic_inc_return(&sdp->detaching); if (val > 1) return; /* only want to do following once per device */ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "%s\n", __func__)); read_lock_irqsave(&sdp->sfd_lock, iflags); list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { wake_up_interruptible_all(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); } wake_up_interruptible_all(&sdp->open_wait); read_unlock_irqrestore(&sdp->sfd_lock, iflags); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); device_destroy(&sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); cdev_del(sdp->cdev); sdp->cdev = NULL; kref_put(&sdp->d_ref, sg_device_destroy); } module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO | S_IWUSR); module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); MODULE_AUTHOR("Douglas Gilbert"); MODULE_DESCRIPTION("SCSI generic (sg) driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(SG_VERSION_STR); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element " "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))"); MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> static const struct ctl_table sg_sysctls[] = { { .procname = "sg-big-buff", .data = &sg_big_buff, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, }; static struct ctl_table_header *hdr; static void register_sg_sysctls(void) { if (!hdr) hdr = register_sysctl("kernel", sg_sysctls); } static void unregister_sg_sysctls(void) { unregister_sysctl_table(hdr); } #else #define 
register_sg_sysctls() do { } while (0) #define unregister_sg_sysctls() do { } while (0) #endif /* CONFIG_SYSCTL */ static int __init init_sg(void) { int rc; if (scatter_elem_sz < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = scatter_elem_sz; } if (def_reserved_size >= 0) sg_big_buff = def_reserved_size; else def_reserved_size = sg_big_buff; rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS, "sg"); if (rc) return rc; rc = class_register(&sg_sysfs_class); if (rc) goto err_out; sg_sysfs_valid = 1; rc = scsi_register_interface(&sg_interface); if (0 == rc) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_init(); #endif /* CONFIG_SCSI_PROC_FS */ register_sg_sysctls(); return 0; } class_unregister(&sg_sysfs_class); err_out: unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); return rc; } static void __exit exit_sg(void) { unregister_sg_sysctls(); #ifdef CONFIG_SCSI_PROC_FS remove_proc_subtree("scsi/sg", NULL); #endif /* CONFIG_SCSI_PROC_FS */ scsi_unregister_interface(&sg_interface); class_unregister(&sg_sysfs_class); sg_sysfs_valid = 0; unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); idr_destroy(&sg_index_idr); } static int sg_start_req(Sg_request *srp, unsigned char *cmd) { int res; struct request *rq; Sg_fd *sfp = srp->parentfp; sg_io_hdr_t *hp = &srp->header; int dxfer_len = (int) hp->dxfer_len; int dxfer_dir = hp->dxfer_direction; unsigned int iov_count = hp->iovec_count; Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; struct request_queue *q = sfp->parentdp->device->request_queue; struct rq_map_data *md, map_data; int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? ITER_SOURCE : ITER_DEST; struct scsi_cmnd *scmd; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_start_req: dxfer_len=%d\n", dxfer_len)); /* * NOTE * * With scsi-mq enabled, there are a fixed number of preallocated * requests equal in number to shost->can_queue. If all of the * preallocated requests are already in use, then scsi_alloc_request() * will sleep until an active command completes, freeing up a request. * Although waiting in an asynchronous interface is less than ideal, we * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might * not expect an EWOULDBLOCK from this condition. */ rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); if (IS_ERR(rq)) return PTR_ERR(rq); scmd = blk_mq_rq_to_pdu(rq); if (hp->cmd_len > sizeof(scmd->cmnd)) { blk_mq_free_request(rq); return -EINVAL; } memcpy(scmd->cmnd, cmd, hp->cmd_len); scmd->cmd_len = hp->cmd_len; srp->rq = rq; rq->end_io_data = srp; scmd->allowed = SG_DEFAULT_RETRIES; if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) return 0; if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) md = NULL; else md = &map_data; if (md) { mutex_lock(&sfp->f_mutex); if (dxfer_len <= rsv_schp->bufflen && !sfp->res_in_use) { sfp->res_in_use = 1; sg_link_reserve(sfp, srp, dxfer_len); } else if (hp->flags & SG_FLAG_MMAP_IO) { res = -EBUSY; /* sfp->res_in_use == 1 */ if (dxfer_len > rsv_schp->bufflen) res = -ENOMEM; mutex_unlock(&sfp->f_mutex); return res; } else { res = sg_build_indirect(req_schp, sfp, dxfer_len); if (res) { mutex_unlock(&sfp->f_mutex); return res; } } mutex_unlock(&sfp->f_mutex); md->pages = req_schp->pages; md->page_order = req_schp->page_order; md->nr_entries = req_schp->k_use_sg; md->offset = 0; md->null_mapped = hp->dxferp ? 0 : 1; if (dxfer_dir == SG_DXFER_TO_FROM_DEV) md->from_user = 1; else md->from_user = 0; } res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len, GFP_ATOMIC, iov_count, iov_count, 1, rw); if (!res) { srp->bio = rq->bio; if (!md) { req_schp->dio_in_use = 1; hp->info |= SG_INFO_DIRECT_IO; } } return res; } static int sg_finish_rem_req(Sg_request *srp) { int ret = 0; Sg_fd *sfp = srp->parentfp; Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); if (srp->bio) ret = blk_rq_unmap_user(srp->bio); if (srp->rq) blk_mq_free_request(srp->rq); if (srp->res_used) sg_unlink_reserve(sfp, srp); else sg_remove_scat(sfp, req_schp); return ret; } static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) { int sg_bufflen = tablesize * sizeof(struct page *); gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; schp->pages = kzalloc(sg_bufflen, gfp_flags); if (!schp->pages) return -ENOMEM; schp->sglist_len = sg_bufflen; return tablesize; /* number of scat_gath elements allocated */ } static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) { int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems; int sg_tablesize = sfp->parentdp->sg_tablesize; int blk_size = buff_size, order; gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO; if (blk_size < 0) return -EFAULT; if (0 == blk_size) ++blk_size; /* don't know why */ /* round request up to next highest SG_SECTOR_SZ byte boundary */ blk_size = ALIGN(blk_size, SG_SECTOR_SZ); SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: buff_size=%d, blk_size=%d\n", buff_size, blk_size)); /* N.B. ret_sz carried into this block ... */ mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); if (mx_sc_elems < 0) return mx_sc_elems; /* most likely -ENOMEM */ num = scatter_elem_sz; if (unlikely(num != scatter_elem_sz_prev)) { if (num < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = PAGE_SIZE; } else scatter_elem_sz_prev = num; } order = get_order(num); retry: ret_sz = 1 << (PAGE_SHIFT + order); for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems; k++, rem_sz -= ret_sz) { num = (rem_sz > scatter_elem_sz_prev) ? 
scatter_elem_sz_prev : rem_sz; schp->pages[k] = alloc_pages(gfp_mask, order); if (!schp->pages[k]) goto out; if (num == scatter_elem_sz_prev) { if (unlikely(ret_sz > scatter_elem_sz_prev)) { scatter_elem_sz = ret_sz; scatter_elem_sz_prev = ret_sz; } } SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n", k, num, ret_sz)); } /* end of for loop */ schp->page_order = order; schp->k_use_sg = k; SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz)); schp->bufflen = blk_size; if (rem_sz > 0) /* must have failed */ return -ENOMEM; return 0; out: for (i = 0; i < k; i++) __free_pages(schp->pages[i], order); if (--order >= 0) goto retry; return -ENOMEM; } static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp) { SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); if (schp->pages && schp->sglist_len > 0) { if (!schp->dio_in_use) { int k; for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k=%d, pg=0x%p\n", k, schp->pages[k])); __free_pages(schp->pages[k], schp->page_order); } kfree(schp->pages); } } memset(schp, 0, sizeof (*schp)); } static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) { Sg_scatter_hold *schp = &srp->data; int k, num; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, "sg_read_oxfer: num_read_xfer=%d\n", num_read_xfer)); if ((!outp) || (num_read_xfer <= 0)) return 0; num = 1 << (PAGE_SHIFT + schp->page_order); for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { if (num > num_read_xfer) { if (copy_to_user(outp, page_address(schp->pages[k]), num_read_xfer)) return -EFAULT; break; } else { if (copy_to_user(outp, page_address(schp->pages[k]), num)) return -EFAULT; num_read_xfer -= num; if (num_read_xfer <= 0) break; outp += num; } } return 0; } static void sg_build_reserve(Sg_fd * sfp, int req_size) { Sg_scatter_hold *schp = &sfp->reserve; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_reserve: req_size=%d\n", req_size)); do { if (req_size < PAGE_SIZE) req_size = PAGE_SIZE; if (0 == sg_build_indirect(schp, sfp, req_size)) return; else sg_remove_scat(sfp, schp); req_size >>= 1; /* divide by 2 */ } while (req_size > (PAGE_SIZE / 2)); } static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) { Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; int k, num, rem; srp->res_used = 1; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: size=%d\n", size)); rem = size; num = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg; k++) { if (rem <= num) { req_schp->k_use_sg = k + 1; req_schp->sglist_len = rsv_schp->sglist_len; req_schp->pages = rsv_schp->pages; req_schp->bufflen = size; req_schp->page_order = rsv_schp->page_order; break; } else rem -= num; } if (k >= rsv_schp->k_use_sg) SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: BAD size\n")); } static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) { Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, "sg_unlink_reserve: req->k_use_sg=%d\n", (int) req_schp->k_use_sg)); req_schp->k_use_sg = 0; req_schp->bufflen = 0; req_schp->pages = NULL; req_schp->page_order = 0; req_schp->sglist_len = 0; srp->res_used = 0; /* Called without mutex lock to avoid deadlock */ 
sfp->res_in_use = 0; } static Sg_request * sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy) { Sg_request *resp; unsigned long iflags; *busy = false; write_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(resp, &sfp->rq_list, entry) { /* look for requests that are not SG_IO owned */ if ((!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { switch (resp->done) { case 0: /* request active */ *busy = true; break; case 1: /* request done; response ready to return */ resp->done = 2; /* guard against other readers */ write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return resp; case 2: /* response already being returned */ break; } } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return NULL; } /* always adds to end of list */ static Sg_request * sg_add_request(Sg_fd * sfp) { int k; unsigned long iflags; Sg_request *rp = sfp->req_arr; write_lock_irqsave(&sfp->rq_list_lock, iflags); if (!list_empty(&sfp->rq_list)) { if (!sfp->cmd_q) goto out_unlock; for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { if (!rp->parentfp) break; } if (k >= SG_MAX_QUEUE) goto out_unlock; } memset(rp, 0, sizeof (Sg_request)); rp->parentfp = sfp; rp->header.duration = jiffies_to_msecs(jiffies); list_add_tail(&rp->entry, &sfp->rq_list); write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return rp; out_unlock: write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return NULL; } /* Return of 1 for found; 0 for not found */ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp) { unsigned long iflags; int res = 0; if (!sfp || !srp || list_empty(&sfp->rq_list)) return res; write_lock_irqsave(&sfp->rq_list_lock, iflags); if (!list_empty(&srp->entry)) { list_del(&srp->entry); srp->parentfp = NULL; res = 1; } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); /* * If the device is detaching, wakeup any readers in case we just * removed the last response, which would leave nothing for them to * return other than -ENODEV. 
*/ if (unlikely(atomic_read(&sfp->parentdp->detaching))) wake_up_interruptible_all(&sfp->read_wait); return res; } static Sg_fd * sg_add_sfp(Sg_device * sdp) { Sg_fd *sfp; unsigned long iflags; int bufflen; sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); if (!sfp) return ERR_PTR(-ENOMEM); init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); INIT_LIST_HEAD(&sfp->rq_list); kref_init(&sfp->f_ref); mutex_init(&sfp->f_mutex); sfp->timeout = SG_DEFAULT_TIMEOUT; sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; sfp->force_packid = SG_DEF_FORCE_PACK_ID; sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; write_lock_irqsave(&sdp->sfd_lock, iflags); if (atomic_read(&sdp->detaching)) { write_unlock_irqrestore(&sdp->sfd_lock, iflags); kfree(sfp); return ERR_PTR(-ENODEV); } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); write_unlock_irqrestore(&sdp->sfd_lock, iflags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: sfp=0x%p\n", sfp)); if (unlikely(sg_big_buff != def_reserved_size)) sg_big_buff = def_reserved_size; bufflen = min_t(int, sg_big_buff, max_sectors_bytes(sdp->device->request_queue)); sg_build_reserve(sfp, bufflen); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: bufflen=%d, k_use_sg=%d\n", sfp->reserve.bufflen, sfp->reserve.k_use_sg)); kref_get(&sdp->d_ref); __module_get(THIS_MODULE); return sfp; } static void sg_remove_sfp_usercontext(struct work_struct *work) { struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); struct sg_device *sdp = sfp->parentdp; struct scsi_device *device = sdp->device; Sg_request *srp; unsigned long iflags; /* Cleanup any responses which were never read(). */ write_lock_irqsave(&sfp->rq_list_lock, iflags); while (!list_empty(&sfp->rq_list)) { srp = list_first_entry(&sfp->rq_list, Sg_request, entry); sg_finish_rem_req(srp); list_del(&srp->entry); srp->parentfp = NULL; } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (sfp->reserve.bufflen > 0) { SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); sg_remove_scat(sfp, &sfp->reserve); } SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, "sg_remove_sfp: sfp=0x%p\n", sfp)); kfree(sfp); kref_put(&sdp->d_ref, sg_device_destroy); scsi_device_put(device); module_put(THIS_MODULE); } static void sg_remove_sfp(struct kref *kref) { struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); struct sg_device *sdp = sfp->parentdp; unsigned long iflags; write_lock_irqsave(&sdp->sfd_lock, iflags); list_del(&sfp->sfd_siblings); write_unlock_irqrestore(&sdp->sfd_lock, iflags); INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); schedule_work(&sfp->ew.work); } #ifdef CONFIG_SCSI_PROC_FS static int sg_idr_max_id(int id, void *p, void *data) { int *k = data; if (*k < id) *k = id; return 0; } static int sg_last_dev(void) { int k = -1; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); idr_for_each(&sg_index_idr, sg_idr_max_id, &k); read_unlock_irqrestore(&sg_index_lock, iflags); return k + 1; /* origin 1 */ } #endif /* must be called with sg_index_lock held */ static Sg_device *sg_lookup_dev(int dev) { return idr_find(&sg_index_idr, dev); } static Sg_device * sg_get_dev(int dev) { struct sg_device *sdp; unsigned long flags; read_lock_irqsave(&sg_index_lock, flags); sdp = sg_lookup_dev(dev); if (!sdp) sdp = ERR_PTR(-ENXIO); else if (atomic_read(&sdp->detaching)) { /* If sdp->detaching, then the refcount may already be 0, in * which case 
it would be a bug to do kref_get(). */ sdp = ERR_PTR(-ENODEV); } else kref_get(&sdp->d_ref); read_unlock_irqrestore(&sg_index_lock, flags); return sdp; } #ifdef CONFIG_SCSI_PROC_FS static int sg_proc_seq_show_int(struct seq_file *s, void *v); static int sg_proc_single_open_adio(struct inode *inode, struct file *file); static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct proc_ops adio_proc_ops = { .proc_open = sg_proc_single_open_adio, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_write = sg_proc_write_adio, .proc_release = single_release, }; static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct proc_ops dressz_proc_ops = { .proc_open = sg_proc_single_open_dressz, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_write = sg_proc_write_dressz, .proc_release = single_release, }; static int sg_proc_seq_show_version(struct seq_file *s, void *v); static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); static int sg_proc_seq_show_dev(struct seq_file *s, void *v); static void * dev_seq_start(struct seq_file *s, loff_t *pos); static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); static void dev_seq_stop(struct seq_file *s, void *v); static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_dev, }; static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); static const struct seq_operations devstrs_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_devstrs, }; static int sg_proc_seq_show_debug(struct seq_file *s, void *v); static const struct seq_operations debug_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_debug, }; static int sg_proc_init(void) { struct proc_dir_entry *p; p = proc_mkdir("scsi/sg", NULL); if (!p) return 1; proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_proc_ops); proc_create_seq("debug", S_IRUGO, p, &debug_seq_ops); proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_proc_ops); proc_create_single("device_hdr", S_IRUGO, p, sg_proc_seq_show_devhdr); proc_create_seq("devices", S_IRUGO, p, &dev_seq_ops); proc_create_seq("device_strs", S_IRUGO, p, &devstrs_seq_ops); proc_create_single("version", S_IRUGO, p, sg_proc_seq_show_version); return 0; } static int sg_proc_seq_show_int(struct seq_file *s, void *v) { seq_printf(s, "%d\n", *((int *)s->private)); return 0; } static int sg_proc_single_open_adio(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); } static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { int err; unsigned long num; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; err = kstrtoul_from_user(buffer, count, 0, &num); if (err) return err; sg_allow_dio = num ? 
1 : 0; return count; } static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_int, &sg_big_buff); } static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { int err; unsigned long k = ULONG_MAX; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; err = kstrtoul_from_user(buffer, count, 0, &k); if (err) return err; if (k <= 1048576) { /* limit "big buff" to 1 MB */ sg_big_buff = k; return count; } return -ERANGE; } static int sg_proc_seq_show_version(struct seq_file *s, void *v) { seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR, sg_version_date); return 0; } static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) { seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n"); return 0; } struct sg_proc_deviter { loff_t index; size_t max; }; static void * dev_seq_start(struct seq_file *s, loff_t *pos) { struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL); s->private = it; if (! it) return NULL; it->index = *pos; it->max = sg_last_dev(); if (it->index >= it->max) return NULL; return it; } static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct sg_proc_deviter * it = s->private; *pos = ++it->index; return (it->index < it->max) ? it : NULL; } static void dev_seq_stop(struct seq_file *s, void *v) { kfree(s->private); } static int sg_proc_seq_show_dev(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; struct scsi_device *scsidp; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; if ((NULL == sdp) || (NULL == sdp->device) || (atomic_read(&sdp->detaching))) seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); else { scsidp = sdp->device; seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, (int) scsidp->type, 1, (int) scsidp->queue_depth, (int) scsi_device_busy(scsidp), (int) scsi_device_online(scsidp)); } read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; struct scsi_device *scsidp; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; scsidp = sdp ? sdp->device : NULL; if (sdp && scsidp && (!atomic_read(&sdp->detaching))) seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", scsidp->vendor, scsidp->model, scsidp->rev); else seq_puts(s, "<no active device>\n"); read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } /* must be called while holding sg_index_lock */ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) { int k, new_interface, blen, usg; Sg_request *srp; Sg_fd *fp; const sg_io_hdr_t *hp; const char * cp; unsigned int ms; k = 0; list_for_each_entry(fp, &sdp->sfds, sfd_siblings) { k++; read_lock(&fp->rq_list_lock); /* irqs already disabled */ seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " "(res)sgat=%d low_dma=%d\n", k, jiffies_to_msecs(fp->timeout), fp->reserve.bufflen, (int) fp->reserve.k_use_sg, 0); seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n", (int) fp->cmd_q, (int) fp->force_packid, (int) fp->keep_orphan); list_for_each_entry(srp, &fp->rq_list, entry) { hp = &srp->header; new_interface = (hp->interface_id == '\0') ? 
0 : 1; if (srp->res_used) { if (new_interface && (SG_FLAG_MMAP_IO & hp->flags)) cp = " mmap>> "; else cp = " rb>> "; } else { if (SG_INFO_DIRECT_IO_MASK & hp->info) cp = " dio>> "; else cp = " "; } seq_puts(s, cp); blen = srp->data.bufflen; usg = srp->data.k_use_sg; seq_puts(s, srp->done ? ((1 == srp->done) ? "rcv:" : "fin:") : "act:"); seq_printf(s, " id=%d blen=%d", srp->header.pack_id, blen); if (srp->done) seq_printf(s, " dur=%d", hp->duration); else { ms = jiffies_to_msecs(jiffies); seq_printf(s, " t_o/elap=%d/%d", (new_interface ? hp->timeout : jiffies_to_msecs(fp->timeout)), (ms > hp->duration ? ms - hp->duration : 0)); } seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, (int) srp->data.cmd_opcode); } if (list_empty(&fp->rq_list)) seq_puts(s, " No requests active\n"); read_unlock(&fp->rq_list_lock); } } static int sg_proc_seq_show_debug(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; unsigned long iflags; if (it && (0 == it->index)) seq_printf(s, "max_active_device=%d def_reserved_size=%d\n", (int)it->max, sg_big_buff); read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; if (NULL == sdp) goto skip; read_lock(&sdp->sfd_lock); if (!list_empty(&sdp->sfds)) { seq_printf(s, " >>> device=%s ", sdp->name); if (atomic_read(&sdp->detaching)) seq_puts(s, "detaching pending close "); else if (sdp->device) { struct scsi_device *scsidp = sdp->device; seq_printf(s, "%d:%d:%d:%llu em=%d", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, scsidp->host->hostt->emulated); } seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n", sdp->sg_tablesize, sdp->exclude, sdp->open_cnt); sg_proc_debug_helper(s, sdp); } read_unlock(&sdp->sfd_lock); skip: read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } #endif /* CONFIG_SCSI_PROC_FS */ module_init(init_sg); module_exit(exit_sg); |
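/*
 * Editor's illustration (not part of sg.c): a minimal userspace sketch of
 * the asynchronous path serviced above -- write() queues a request through
 * sg_write(), sg_poll() reports readiness once sg_rq_end_io() marks it
 * done, and read() collects the result via sg_read().  The node /dev/sg0,
 * the INQUIRY command and the 96-byte transfer size are assumptions made
 * for the example.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char buf[96], sense[32];
	struct sg_io_hdr hdr;
	struct pollfd pfd;
	int fd = open("/dev/sg0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* sg v3 interface */
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */
	hdr.pack_id = 1;

	/* Queue the command without waiting for it (sg_write()). */
	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	/* Wait until sg_poll() flags the response as ready. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 5000) <= 0)
		return 1;

	/* Fetch the completed request (sg_read()). */
	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;
	printf("pack_id=%d status=0x%x\n", hdr.pack_id, hdr.status);

	close(fd);
	return 0;
}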
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. */ #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/exportfs.h> #include <linux/gfs2_ondisk.h> #include <linux/crc32.h> #include "gfs2.h" #include "incore.h" #include "dir.h" #include "glock.h" #include "glops.h" #include "inode.h" #include "super.h" #include "rgrp.h" #include "util.h" #define GFS2_SMALL_FH_SIZE 4 #define GFS2_LARGE_FH_SIZE 8 #define GFS2_OLD_FH_SIZE 10 static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len, struct inode *parent) { __be32 *fh = (__force __be32 *)p; struct super_block *sb = inode->i_sb; struct gfs2_inode *ip = GFS2_I(inode); if (parent && (*len < GFS2_LARGE_FH_SIZE)) { *len = GFS2_LARGE_FH_SIZE; return FILEID_INVALID; } else if (*len < GFS2_SMALL_FH_SIZE) { *len = GFS2_SMALL_FH_SIZE; return FILEID_INVALID; } fh[0] = cpu_to_be32(ip->i_no_formal_ino >> 32); fh[1] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF); fh[2] = cpu_to_be32(ip->i_no_addr >> 32); fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF); *len = GFS2_SMALL_FH_SIZE; if (!parent || inode == d_inode(sb->s_root)) return *len; ip = GFS2_I(parent); fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32); fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF); fh[6] = cpu_to_be32(ip->i_no_addr >> 32); fh[7] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF); *len = GFS2_LARGE_FH_SIZE; return *len; } struct get_name_filldir { struct dir_context ctx; struct gfs2_inum_host inum; char *name; }; static bool get_name_filldir(struct dir_context *ctx, const char *name, int length, loff_t offset, u64 inum, unsigned int type) { struct get_name_filldir *gnfd = container_of(ctx, struct get_name_filldir, ctx); if (inum != gnfd->inum.no_addr) return true; memcpy(gnfd->name, name, length); gnfd->name[length] = 0; return false; } static int gfs2_get_name(struct dentry *parent, char *name, struct dentry *child) { struct inode *dir = d_inode(parent); struct inode *inode = d_inode(child); struct gfs2_inode *dip, *ip; struct get_name_filldir gnfd = { .ctx.actor = get_name_filldir, .name = name }; struct gfs2_holder gh; int error; struct file_ra_state f_ra = { .start = 0 }; if (!dir) return -EINVAL; if (!S_ISDIR(dir->i_mode) || !inode) return -EINVAL; dip = GFS2_I(dir); ip = GFS2_I(inode); *name = 0; gnfd.inum.no_addr = ip->i_no_addr; gnfd.inum.no_formal_ino = ip->i_no_formal_ino; error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh); if (error) return error; error = gfs2_dir_read(dir, &gnfd.ctx, &f_ra); gfs2_glock_dq_uninit(&gh); if (!error && !*name) error = -ENOENT; return error; } static struct dentry *gfs2_get_parent(struct dentry *child) { return d_obtain_alias(gfs2_lookupi(d_inode(child),
&gfs2_qdotdot, 1)); } static struct dentry *gfs2_get_dentry(struct super_block *sb, struct gfs2_inum_host *inum) { struct gfs2_sbd *sdp = sb->s_fs_info; struct inode *inode; if (!inum->no_formal_ino) return ERR_PTR(-ESTALE); inode = gfs2_lookup_by_inum(sdp, inum->no_addr, inum->no_formal_ino, GFS2_BLKST_DINODE); return d_obtain_alias(inode); } static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct gfs2_inum_host this; __be32 *fh = (__force __be32 *)fid->raw; switch (fh_type) { case GFS2_SMALL_FH_SIZE: case GFS2_LARGE_FH_SIZE: case GFS2_OLD_FH_SIZE: if (fh_len < GFS2_SMALL_FH_SIZE) return NULL; this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32; this.no_formal_ino |= be32_to_cpu(fh[1]); this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32; this.no_addr |= be32_to_cpu(fh[3]); return gfs2_get_dentry(sb, &this); default: return NULL; } } static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct gfs2_inum_host parent; __be32 *fh = (__force __be32 *)fid->raw; switch (fh_type) { case GFS2_LARGE_FH_SIZE: case GFS2_OLD_FH_SIZE: if (fh_len < GFS2_LARGE_FH_SIZE) return NULL; parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32; parent.no_formal_ino |= be32_to_cpu(fh[5]); parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32; parent.no_addr |= be32_to_cpu(fh[7]); return gfs2_get_dentry(sb, &parent); default: return NULL; } } const struct export_operations gfs2_export_ops = { .encode_fh = gfs2_encode_fh, .fh_to_dentry = gfs2_fh_to_dentry, .fh_to_parent = gfs2_fh_to_parent, .get_name = gfs2_get_name, .get_parent = gfs2_get_parent, }; |
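/*
 * Editor's illustration (not part of the gfs2 sources): the export ops
 * above are what back the generic file-handle syscalls on a gfs2 mount.
 * name_to_handle_at() ends up in gfs2_encode_fh(), and open_by_handle_at()
 * ends up in gfs2_fh_to_dentry().  The /mnt/gfs2 paths are assumptions,
 * and open_by_handle_at() needs CAP_DAC_READ_SEARCH.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct file_handle *fh;
	int mount_id, mnt_fd, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	/* Kernel side: gfs2_encode_fh() fills fid->raw. */
	if (name_to_handle_at(AT_FDCWD, "/mnt/gfs2/file", fh, &mount_id, 0)) {
		perror("name_to_handle_at");
		return 1;
	}
	/* For gfs2, handle_type is GFS2_SMALL_FH_SIZE or GFS2_LARGE_FH_SIZE. */
	printf("type=%d bytes=%u\n", fh->handle_type, fh->handle_bytes);

	/* Kernel side: gfs2_fh_to_dentry() decodes the same handle. */
	mnt_fd = open("/mnt/gfs2", O_RDONLY | O_DIRECTORY);
	fd = open_by_handle_at(mnt_fd, fh, O_RDONLY);
	if (fd >= 0)
		close(fd);
	if (mnt_fd >= 0)
		close(mnt_fd);
	free(fh);
	return 0;
}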
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/filesystems.c * * Copyright (C) 1991, 1992 Linus Torvalds * * table of configured filesystems */ #include <linux/syscalls.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/fs_parser.h> /* * Handling of filesystem drivers list. * Rules: * Inclusion to/removals from/scanning of list are protected by spinlock. * During the unload module must call unregister_filesystem(). * We can access the fields of list element if: * 1) spinlock is held or * 2) we hold the reference to the module. * The latter can be guaranteed by call of try_module_get(); if it * returned 0 we must skip the element, otherwise we got the reference. * Once the reference is obtained we can drop the spinlock. */ static struct file_system_type *file_systems; static DEFINE_RWLOCK(file_systems_lock); /* WARNING: This can be used only if we _already_ own a reference */ struct file_system_type *get_filesystem(struct file_system_type *fs) { __module_get(fs->owner); return fs; } void put_filesystem(struct file_system_type *fs) { module_put(fs->owner); } static struct file_system_type **find_filesystem(const char *name, unsigned len) { struct file_system_type **p; for (p = &file_systems; *p; p = &(*p)->next) if (strncmp((*p)->name, name, len) == 0 && !(*p)->name[len]) break; return p; } /** * register_filesystem - register a new filesystem * @fs: the file system structure * * Adds the file system passed to the list of file systems the kernel * is aware of for mount and other syscalls. Returns 0 on success, * or a negative errno code on an error. * * The &struct file_system_type that is passed is linked into the kernel * structures and must not be freed until the file system has been * unregistered.
*/ int register_filesystem(struct file_system_type * fs) { int res = 0; struct file_system_type ** p; if (fs->parameters && !fs_validate_description(fs->name, fs->parameters)) return -EINVAL; BUG_ON(strchr(fs->name, '.')); if (fs->next) return -EBUSY; write_lock(&file_systems_lock); p = find_filesystem(fs->name, strlen(fs->name)); if (*p) res = -EBUSY; else *p = fs; write_unlock(&file_systems_lock); return res; } EXPORT_SYMBOL(register_filesystem); /** * unregister_filesystem - unregister a file system * @fs: filesystem to unregister * * Remove a file system that was previously successfully registered * with the kernel. An error is returned if the file system is not found. * Zero is returned on a success. * * Once this function has returned the &struct file_system_type structure * may be freed or reused. */ int unregister_filesystem(struct file_system_type * fs) { struct file_system_type ** tmp; write_lock(&file_systems_lock); tmp = &file_systems; while (*tmp) { if (fs == *tmp) { *tmp = fs->next; fs->next = NULL; write_unlock(&file_systems_lock); synchronize_rcu(); return 0; } tmp = &(*tmp)->next; } write_unlock(&file_systems_lock); return -EINVAL; } EXPORT_SYMBOL(unregister_filesystem); #ifdef CONFIG_SYSFS_SYSCALL static int fs_index(const char __user * __name) { struct file_system_type * tmp; struct filename *name; int err, index; name = getname(__name); err = PTR_ERR(name); if (IS_ERR(name)) return err; err = -EINVAL; read_lock(&file_systems_lock); for (tmp=file_systems, index=0 ; tmp ; tmp=tmp->next, index++) { if (strcmp(tmp->name, name->name) == 0) { err = index; break; } } read_unlock(&file_systems_lock); putname(name); return err; } static int fs_name(unsigned int index, char __user * buf) { struct file_system_type * tmp; int len, res = -EINVAL; read_lock(&file_systems_lock); for (tmp = file_systems; tmp; tmp = tmp->next, index--) { if (index == 0) { if (try_module_get(tmp->owner)) res = 0; break; } } read_unlock(&file_systems_lock); if (res) return res; /* OK, we got the reference, so we can safely block */ len = strlen(tmp->name) + 1; res = copy_to_user(buf, tmp->name, len) ? -EFAULT : 0; put_filesystem(tmp); return res; } static int fs_maxindex(void) { struct file_system_type * tmp; int index; read_lock(&file_systems_lock); for (tmp = file_systems, index = 0 ; tmp ; tmp = tmp->next, index++) ; read_unlock(&file_systems_lock); return index; } /* * Whee.. Weird sysv syscall. */ SYSCALL_DEFINE3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2) { int retval = -EINVAL; switch (option) { case 1: retval = fs_index((const char __user *) arg1); break; case 2: retval = fs_name(arg1, (char __user *) arg2); break; case 3: retval = fs_maxindex(); break; } return retval; } #endif int __init list_bdev_fs_names(char *buf, size_t size) { struct file_system_type *p; size_t len; int count = 0; read_lock(&file_systems_lock); for (p = file_systems; p; p = p->next) { if (!(p->fs_flags & FS_REQUIRES_DEV)) continue; len = strlen(p->name) + 1; if (len > size) { pr_warn("%s: truncating file system list\n", __func__); break; } memcpy(buf, p->name, len); buf += len; size -= len; count++; } read_unlock(&file_systems_lock); return count; } #ifdef CONFIG_PROC_FS static int filesystems_proc_show(struct seq_file *m, void *v) { struct file_system_type * tmp; read_lock(&file_systems_lock); tmp = file_systems; while (tmp) { seq_printf(m, "%s\t%s\n", (tmp->fs_flags & FS_REQUIRES_DEV) ? 
"" : "nodev", tmp->name); tmp = tmp->next; } read_unlock(&file_systems_lock); return 0; } static int __init proc_filesystems_init(void) { proc_create_single("filesystems", 0, NULL, filesystems_proc_show); return 0; } module_init(proc_filesystems_init); #endif static struct file_system_type *__get_fs_type(const char *name, int len) { struct file_system_type *fs; read_lock(&file_systems_lock); fs = *(find_filesystem(name, len)); if (fs && !try_module_get(fs->owner)) fs = NULL; read_unlock(&file_systems_lock); return fs; } struct file_system_type *get_fs_type(const char *name) { struct file_system_type *fs; const char *dot = strchr(name, '.'); int len = dot ? dot - name : strlen(name); fs = __get_fs_type(name, len); if (!fs && (request_module("fs-%.*s", len, name) == 0)) { fs = __get_fs_type(name, len); if (!fs) pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n", len, name); } if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { put_filesystem(fs); fs = NULL; } return fs; } EXPORT_SYMBOL(get_fs_type); |
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_ELF_H #define _ASM_X86_ELF_H /* * ELF register definitions.. */ #include <linux/thread_info.h> #include <asm/ia32.h> #include <asm/ptrace.h> #include <asm/user.h> #include <asm/auxvec.h> #include <asm/fsgsbase.h> typedef unsigned long elf_greg_t; #define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; #ifdef __i386__ #define R_386_NONE 0 #define R_386_32 1 #define R_386_PC32 2 #define R_386_GOT32 3 #define R_386_PLT32 4 #define R_386_COPY 5 #define R_386_GLOB_DAT 6 #define R_386_JMP_SLOT 7 #define R_386_RELATIVE 8 #define R_386_GOTOFF 9 #define R_386_GOTPC 10 #define R_386_NUM 11 /* * These are used to set parameters in the core dumps. */ #define ELF_CLASS ELFCLASS32 #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_386 #else /* x86-64 relocation types */ #define R_X86_64_NONE 0 /* No reloc */ #define R_X86_64_64 1 /* Direct 64 bit */ #define R_X86_64_PC32 2 /* PC relative 32 bit signed */ #define R_X86_64_GOT32 3 /* 32 bit GOT entry */ #define R_X86_64_PLT32 4 /* 32 bit PLT address */ #define R_X86_64_COPY 5 /* Copy symbol at runtime */ #define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ #define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ #define R_X86_64_RELATIVE 8 /* Adjust by program base */ #define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative offset to GOT */ #define R_X86_64_GOTPCRELX 41 #define R_X86_64_REX_GOTPCRELX 42 #define R_X86_64_32 10 /* Direct 32 bit zero extended */ #define R_X86_64_32S 11 /* Direct 32 bit sign extended */ #define R_X86_64_16 12 /* Direct 16 bit zero extended */ #define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ #define R_X86_64_8 14 /* Direct 8 bit sign extended */ #define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ #define R_X86_64_PC64 24 /* Place relative 64-bit signed */ /* * These are used to set parameters in the core dumps.
*/ #define ELF_CLASS ELFCLASS64 #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_X86_64 #endif #include <asm/vdso.h> extern unsigned int vdso64_enabled; extern unsigned int vdso32_enabled; /* * This is used to ensure we don't load something for the wrong architecture. */ #define elf_check_arch_ia32(x) \ (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) #include <asm/processor.h> #ifdef CONFIG_X86_32 #include <asm/desc.h> #define elf_check_arch(x) elf_check_arch_ia32(x) /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx contains a pointer to a function which might be registered using `atexit'. This provides a mean for the dynamic linker to call DT_FINI functions for shared libraries that have been loaded before the code runs. A value of 0 tells we have no such handler. We might as well make sure everything else is cleared too (except for %esp), just to make things more deterministic. */ #define ELF_PLAT_INIT(_r, load_addr) \ do { \ _r->bx = 0; _r->cx = 0; _r->dx = 0; \ _r->si = 0; _r->di = 0; _r->bp = 0; \ _r->ax = 0; \ } while (0) /* * regs is struct pt_regs, pr_reg is elf_gregset_t (which is * now struct_user_regs, they are different) */ #define ELF_CORE_COPY_REGS(pr_reg, regs) \ do { \ pr_reg[0] = regs->bx; \ pr_reg[1] = regs->cx; \ pr_reg[2] = regs->dx; \ pr_reg[3] = regs->si; \ pr_reg[4] = regs->di; \ pr_reg[5] = regs->bp; \ pr_reg[6] = regs->ax; \ pr_reg[7] = regs->ds; \ pr_reg[8] = regs->es; \ pr_reg[9] = regs->fs; \ savesegment(gs, pr_reg[10]); \ pr_reg[11] = regs->orig_ax; \ pr_reg[12] = regs->ip; \ pr_reg[13] = regs->cs; \ pr_reg[14] = regs->flags; \ pr_reg[15] = regs->sp; \ pr_reg[16] = regs->ss; \ } while (0); #define ELF_PLATFORM (utsname()->machine) #define set_personality_64bit() do { } while (0) #else /* CONFIG_X86_32 */ /* * This is used to ensure we don't load something for the wrong architecture. */ #define elf_check_arch(x) \ ((x)->e_machine == EM_X86_64) #define compat_elf_check_arch(x) \ ((elf_check_arch_ia32(x) && ia32_enabled_verbose()) || \ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64)) static inline void elf_common_init(struct thread_struct *t, struct pt_regs *regs, const u16 ds) { /* ax gets execve's return value. */ /*regs->ax = */ regs->bx = regs->cx = regs->dx = 0; regs->si = regs->di = regs->bp = 0; regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0; regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; t->fsbase = t->gsbase = 0; t->fsindex = t->gsindex = 0; t->ds = t->es = ds; } #define ELF_PLAT_INIT(_r, load_addr) \ elf_common_init(¤t->thread, _r, 0) #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ elf_common_init(¤t->thread, regs, __USER_DS) void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32); #define COMPAT_START_THREAD(ex, regs, new_ip, new_sp) \ compat_start_thread(regs, new_ip, new_sp, ex->e_machine == EM_X86_64) void set_personality_ia32(bool); #define COMPAT_SET_PERSONALITY(ex) \ set_personality_ia32((ex).e_machine == EM_X86_64) #define COMPAT_ELF_PLATFORM ("i686") /* * regs is struct pt_regs, pr_reg is elf_gregset_t (which is * now struct_user_regs, they are different). Assumes current is the process * getting dumped. 
*/ #define ELF_CORE_COPY_REGS(pr_reg, regs) \ do { \ unsigned v; \ (pr_reg)[0] = (regs)->r15; \ (pr_reg)[1] = (regs)->r14; \ (pr_reg)[2] = (regs)->r13; \ (pr_reg)[3] = (regs)->r12; \ (pr_reg)[4] = (regs)->bp; \ (pr_reg)[5] = (regs)->bx; \ (pr_reg)[6] = (regs)->r11; \ (pr_reg)[7] = (regs)->r10; \ (pr_reg)[8] = (regs)->r9; \ (pr_reg)[9] = (regs)->r8; \ (pr_reg)[10] = (regs)->ax; \ (pr_reg)[11] = (regs)->cx; \ (pr_reg)[12] = (regs)->dx; \ (pr_reg)[13] = (regs)->si; \ (pr_reg)[14] = (regs)->di; \ (pr_reg)[15] = (regs)->orig_ax; \ (pr_reg)[16] = (regs)->ip; \ (pr_reg)[17] = (regs)->cs; \ (pr_reg)[18] = (regs)->flags; \ (pr_reg)[19] = (regs)->sp; \ (pr_reg)[20] = (regs)->ss; \ (pr_reg)[21] = x86_fsbase_read_cpu(); \ (pr_reg)[22] = x86_gsbase_read_cpu_inactive(); \ asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \ asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \ asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \ asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \ } while (0); /* I'm not sure if we can use '-' here */ #define ELF_PLATFORM ("x86_64") extern void set_personality_64bit(void); extern int force_personality32; #endif /* !CONFIG_X86_32 */ #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 /* * This is the base location for PIE (ET_DYN with INTERP) loads. On * 64-bit, this is above 4GB to leave the entire 32-bit address * space open for things that want to use the area for 32-bit pointers. */ #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \ (DEFAULT_MAP_WINDOW / 3 * 2)) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, but it's not easy, and we've already done it here. */ #define ELF_HWCAP (boot_cpu_data.x86_capability[CPUID_1_EDX]) extern u32 elf_hwcap2; /* * HWCAP2 supplies mask with kernel enabled CPU features, so that * the application can discover that it can safely use them. * The bits are defined in uapi/asm/hwcap2.h. */ #define ELF_HWCAP2 (elf_hwcap2) /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in intent than poking at uname or /proc/cpuinfo. For the moment, we have only optimizations for the Intel generations, but that could change... */ #define SET_PERSONALITY(ex) set_personality_64bit() /* * An executable for which elf_read_implies_exec() returns TRUE will * have the READ_IMPLIES_EXEC personality flag set automatically. * * The decision process for determining the results are: * * CPU: | lacks NX* | has NX, ia32 | has NX, x86_64 | * ELF: | | | | * ---------------------|------------|------------------|----------------| * missing PT_GNU_STACK | exec-all | exec-all | exec-none | * PT_GNU_STACK == RWX | exec-stack | exec-stack | exec-stack | * PT_GNU_STACK == RW | exec-none | exec-none | exec-none | * * exec-all : all PROT_READ user mappings are executable, except when * backed by files on a noexec-filesystem. * exec-none : only PROT_EXEC user mappings are executable. * exec-stack: only the stack and PROT_EXEC user mappings are executable. 
* * *this column has no architectural effect: NX markings are ignored by * hardware, but may have behavioral effects when "wants X" collides with * "cannot be X" constraints in memory permission flags, as in * https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com * */ #define elf_read_implies_exec(ex, executable_stack) \ (mmap_is_ia32() && executable_stack == EXSTACK_DEFAULT) struct task_struct; #define ARCH_DLINFO_IA32 \ do { \ if (VDSO_CURRENT_BASE) { \ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ } \ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size()); \ } while (0) /* * True on X86_32 or when emulating IA32 on X86_64 */ static inline int mmap_is_ia32(void) { return IS_ENABLED(CONFIG_X86_32) || (IS_ENABLED(CONFIG_COMPAT) && test_thread_flag(TIF_ADDR32)); } extern unsigned long task_size_32bit(void); extern unsigned long task_size_64bit(int full_addr_space); extern unsigned long get_mmap_base(int is_legacy); extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); extern unsigned long get_sigframe_size(void); #ifdef CONFIG_X86_32 #define __STACK_RND_MASK(is32bit) (0x7ff) #define STACK_RND_MASK (0x7ff) #define ARCH_DLINFO ARCH_DLINFO_IA32 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ #else /* CONFIG_X86_32 */ /* 1GB for 64bit, 8MB for 32bit */ #define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff) #define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32()) #define ARCH_DLINFO \ do { \ if (vdso64_enabled) \ NEW_AUX_ENT(AT_SYSINFO_EHDR, \ (unsigned long __force)current->mm->context.vdso); \ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size()); \ } while (0) /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */ #define ARCH_DLINFO_X32 \ do { \ if (vdso64_enabled) \ NEW_AUX_ENT(AT_SYSINFO_EHDR, \ (unsigned long __force)current->mm->context.vdso); \ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size()); \ } while (0) #define AT_SYSINFO 32 #define COMPAT_ARCH_DLINFO \ if (exec->e_machine == EM_X86_64) \ ARCH_DLINFO_X32; \ else if (IS_ENABLED(CONFIG_IA32_EMULATION)) \ ARCH_DLINFO_IA32 #define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) #endif /* !CONFIG_X86_32 */ #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) #define VDSO_ENTRY \ ((unsigned long)current->mm->context.vdso + \ vdso_image_32.sym___kernel_vsyscall) struct linux_binprm; #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 extern int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp); extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp, bool x32); #define COMPAT_ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \ compat_arch_setup_additional_pages(bprm, interpreter, \ (ex->e_machine == EM_X86_64)) extern bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs); /* Do not change the values. See get_align_mask() */ enum align_flags { ALIGN_VA_32 = BIT(0), ALIGN_VA_64 = BIT(1), }; struct va_alignment { int flags; unsigned long mask; unsigned long bits; } ____cacheline_aligned; extern struct va_alignment va_align; #endif /* _ASM_X86_ELF_H */ |
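/*
 * Editor's illustration (not part of the header): evaluating the 64-bit
 * constants above in userspace.  DEFAULT_MAP_WINDOW is assumed to be the
 * 4-level-paging value, (1UL << 47) - PAGE_SIZE.  Note that 0x3fffff in
 * __STACK_RND_MASK spans 16 GiB of 4 KiB pages, so the "1GB for 64bit"
 * comment above appears stale.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page = 4096;			/* ELF_EXEC_PAGESIZE */
	unsigned long map_window = (1UL << 47) - page;	/* DEFAULT_MAP_WINDOW */
	unsigned long mask64 = 0x3fffffUL;		/* __STACK_RND_MASK(0) */
	unsigned long mask32 = 0x7ffUL;			/* __STACK_RND_MASK(1) */

	/* Base for native 64-bit PIE loads, per ELF_ET_DYN_BASE above. */
	printf("ELF_ET_DYN_BASE = %#lx\n", map_window / 3 * 2);

	/* The mask randomizes a page count, so the span is (mask + 1) pages. */
	printf("64-bit stack randomization span = %lu MiB\n",
	       (mask64 + 1) * page >> 20);
	printf("32-bit stack randomization span = %lu MiB\n",
	       (mask32 + 1) * page >> 20);
	return 0;
}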
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 1999/2000 by Henning Zabel <henning@uni-paderborn.de>
 */

/*
 * USB kernel driver for the Mustek MDC800 digital camera
 * (c) 1999/2000 Henning Zabel <henning@uni-paderborn.de>
 *
 *
 * The driver brings the USB functions of the MDC800 to Linux. To use
 * the camera, speak its USB protocol through the kernel device node.
 * The driver uses a misc device node. Create it with:
 * mknod /dev/mustek c 180 32
 *
 * The driver supports only one camera.
 *
 * Fix: mdc800 used sleep_on and slept with io_lock held.
 * Converted sleep_on to waitqueues with schedule_timeout and made io_lock
 * a semaphore instead of a spinlock.
 * by Oliver Neukum <oliver@neukum.name>
 * (02/12/2001)
 *
 * Identify version on module load.
 * (08/04/2001) gb
 *
 * version 0.7.5
 * Fixed potential SMP races with spinlocks.
 * Thanks to Oliver Neukum <oliver@neukum.name>, who
 * noticed the race conditions.
 * (30/10/2000)
 *
 * Fixed: set urb->dev before submitting the urb.
 * by Greg KH <greg@kroah.com>
 * (13/10/2000)
 *
 * version 0.7.3
 * Bugfix: the mdc800->state field got set to READY after the
 * disconnect function had set it to NOT_CONNECTED. This made the
 * driver behave as if the camera were still connected and caused
 * hangs.
 *
 * version 0.7.1
 * MOD_INC and MOD_DEC are changed in usb_probe to prevent load/unload
 * problems when compiled as a module.
 * (04/04/2000)
 *
 * The mdc800 driver is assigned USB minors 32-47. The registration
 * was updated to use these values.
 * (26/03/2000)
 *
 * The init and exit module functions were updated.
 * (01/03/2000)
 *
 * version 0.7.0
 * Rewrite of the driver: the driver now uses URBs. The old code
 * has been removed.
 *
 * version 0.6.0
 * Rewrite of this driver: the emulation of the RS-232 protocol
 * has been removed from the driver. A special executeCommand function
 * for this driver is included in gphoto.
 * The driver supports two kinds of communication with bulk endpoints:
 * either via dev->bus->ops->bulk... or via a callback function.
 * (09/11/1999)
 *
 * version 0.5.0:
 * First version that gets a version number. Most of the needed
 * functions work.
* (20/10/1999) */ #include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/random.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/usb.h> #include <linux/fs.h> /* * Version Information */ #define DRIVER_VERSION "v0.7.5 (30/10/2000)" #define DRIVER_AUTHOR "Henning Zabel <henning@uni-paderborn.de>" #define DRIVER_DESC "USB Driver for Mustek MDC800 Digital Camera" /* Vendor and Product Information */ #define MDC800_VENDOR_ID 0x055f #define MDC800_PRODUCT_ID 0xa800 /* Timeouts (msec) */ #define TO_DOWNLOAD_GET_READY 1500 #define TO_DOWNLOAD_GET_BUSY 1500 #define TO_WRITE_GET_READY 1000 #define TO_DEFAULT_COMMAND 5000 #define TO_READ_FROM_IRQ TO_DEFAULT_COMMAND #define TO_GET_READY TO_DEFAULT_COMMAND /* Minor Number of the device (create with mknod /dev/mustek c 180 32) */ #define MDC800_DEVICE_MINOR_BASE 32 /************************************************************************** Data and structs ***************************************************************************/ typedef enum { NOT_CONNECTED, READY, WORKING, DOWNLOAD } mdc800_state; /* Data for the driver */ struct mdc800_data { struct usb_device * dev; // Device Data mdc800_state state; unsigned int endpoint [4]; struct urb * irq_urb; wait_queue_head_t irq_wait; int irq_woken; char* irq_urb_buffer; int camera_busy; // is camera busy ? int camera_request_ready; // Status to synchronize with irq char camera_response [8]; // last Bytes send after busy struct urb * write_urb; char* write_urb_buffer; wait_queue_head_t write_wait; int written; struct urb * download_urb; char* download_urb_buffer; wait_queue_head_t download_wait; int downloaded; int download_left; // Bytes left to download ? /* Device Data */ char out [64]; // Answer Buffer int out_ptr; // Index to the first not readen byte int out_count; // Bytes in the buffer int open; // Camera device open ? 
struct mutex io_lock; // IO -lock char in [8]; // Command Input Buffer int in_count; int pic_index; // Cache for the Imagesize (-1 for nothing cached ) int pic_len; int minor; }; /* Specification of the Endpoints */ static struct usb_endpoint_descriptor mdc800_ed [4] = { { .bLength = 0, .bDescriptorType = 0, .bEndpointAddress = 0x01, .bmAttributes = 0x02, .wMaxPacketSize = cpu_to_le16(8), .bInterval = 0, .bRefresh = 0, .bSynchAddress = 0, }, { .bLength = 0, .bDescriptorType = 0, .bEndpointAddress = 0x82, .bmAttributes = 0x03, .wMaxPacketSize = cpu_to_le16(8), .bInterval = 0, .bRefresh = 0, .bSynchAddress = 0, }, { .bLength = 0, .bDescriptorType = 0, .bEndpointAddress = 0x03, .bmAttributes = 0x02, .wMaxPacketSize = cpu_to_le16(64), .bInterval = 0, .bRefresh = 0, .bSynchAddress = 0, }, { .bLength = 0, .bDescriptorType = 0, .bEndpointAddress = 0x84, .bmAttributes = 0x02, .wMaxPacketSize = cpu_to_le16(64), .bInterval = 0, .bRefresh = 0, .bSynchAddress = 0, }, }; /* The Variable used by the driver */ static struct mdc800_data* mdc800; /*************************************************************************** The USB Part of the driver ****************************************************************************/ static int mdc800_endpoint_equals (struct usb_endpoint_descriptor *a,struct usb_endpoint_descriptor *b) { return ( ( a->bEndpointAddress == b->bEndpointAddress ) && ( a->bmAttributes == b->bmAttributes ) && ( a->wMaxPacketSize == b->wMaxPacketSize ) ); } /* * Checks whether the camera responds busy */ static int mdc800_isBusy (char* ch) { int i=0; while (i<8) { if (ch [i] != (char)0x99) return 0; i++; } return 1; } /* * Checks whether the Camera is ready */ static int mdc800_isReady (char *ch) { int i=0; while (i<8) { if (ch [i] != (char)0xbb) return 0; i++; } return 1; } /* * USB IRQ Handler for InputLine */ static void mdc800_usb_irq (struct urb *urb) { int data_received=0, wake_up; unsigned char* b=urb->transfer_buffer; struct mdc800_data* mdc800=urb->context; struct device *dev = &mdc800->dev->dev; int status = urb->status; if (status >= 0) { if (mdc800_isBusy (b)) { if (!mdc800->camera_busy) { mdc800->camera_busy=1; dev_dbg(dev, "gets busy\n"); } } else { if (mdc800->camera_busy && mdc800_isReady (b)) { mdc800->camera_busy=0; dev_dbg(dev, "gets ready\n"); } } if (!(mdc800_isBusy (b) || mdc800_isReady (b))) { /* Store Data in camera_answer field */ dev_dbg(dev, "%i %i %i %i %i %i %i %i \n",b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]); memcpy (mdc800->camera_response,b,8); data_received=1; } } wake_up= ( mdc800->camera_request_ready > 0 ) && ( ((mdc800->camera_request_ready == 1) && (!mdc800->camera_busy)) || ((mdc800->camera_request_ready == 2) && data_received) || ((mdc800->camera_request_ready == 3) && (mdc800->camera_busy)) || (status < 0) ); if (wake_up) { mdc800->camera_request_ready=0; mdc800->irq_woken=1; wake_up (&mdc800->irq_wait); } } /* * Waits a while until the irq responds that camera is ready * * mode : 0: Wait for camera gets ready * 1: Wait for receiving data * 2: Wait for camera gets busy * * msec: Time to wait */ static int mdc800_usb_waitForIRQ (int mode, int msec) { mdc800->camera_request_ready=1+mode; wait_event_timeout(mdc800->irq_wait, mdc800->irq_woken, msecs_to_jiffies(msec)); mdc800->irq_woken = 0; if (mdc800->camera_request_ready>0) { mdc800->camera_request_ready=0; dev_err(&mdc800->dev->dev, "timeout waiting for camera.\n"); return -1; } if (mdc800->state == NOT_CONNECTED) { printk(KERN_WARNING "mdc800: Camera gets disconnected " "during waiting for 
irq.\n"); mdc800->camera_request_ready=0; return -2; } return 0; } /* * The write_urb callback function */ static void mdc800_usb_write_notify (struct urb *urb) { struct mdc800_data* mdc800=urb->context; int status = urb->status; if (status != 0) dev_err(&mdc800->dev->dev, "writing command fails (status=%i)\n", status); else mdc800->state=READY; mdc800->written = 1; wake_up (&mdc800->write_wait); } /* * The download_urb callback function */ static void mdc800_usb_download_notify (struct urb *urb) { struct mdc800_data* mdc800=urb->context; int status = urb->status; if (status == 0) { /* Fill output buffer with these data */ memcpy (mdc800->out, urb->transfer_buffer, 64); mdc800->out_count=64; mdc800->out_ptr=0; mdc800->download_left-=64; if (mdc800->download_left == 0) { mdc800->state=READY; } } else { dev_err(&mdc800->dev->dev, "request bytes fails (status:%i)\n", status); } mdc800->downloaded = 1; wake_up (&mdc800->download_wait); } /*************************************************************************** Probing for the Camera ***************************************************************************/ static struct usb_driver mdc800_usb_driver; static const struct file_operations mdc800_device_ops; static struct usb_class_driver mdc800_class = { .name = "mdc800%d", .fops = &mdc800_device_ops, .minor_base = MDC800_DEVICE_MINOR_BASE, }; /* * Callback to search the Mustek MDC800 on the USB Bus */ static int mdc800_usb_probe (struct usb_interface *intf, const struct usb_device_id *id) { int i,j; struct usb_host_interface *intf_desc; struct usb_device *dev = interface_to_usbdev (intf); int irq_interval=0; int retval; dev_dbg(&intf->dev, "(%s) called.\n", __func__); if (mdc800->dev != NULL) { dev_warn(&intf->dev, "only one Mustek MDC800 is supported.\n"); return -ENODEV; } if (dev->descriptor.bNumConfigurations != 1) { dev_err(&intf->dev, "probe fails -> wrong Number of Configuration\n"); return -ENODEV; } intf_desc = intf->cur_altsetting; if ( ( intf_desc->desc.bInterfaceClass != 0xff ) || ( intf_desc->desc.bInterfaceSubClass != 0 ) || ( intf_desc->desc.bInterfaceProtocol != 0 ) || ( intf_desc->desc.bNumEndpoints != 4) ) { dev_err(&intf->dev, "probe fails -> wrong Interface\n"); return -ENODEV; } /* Check the Endpoints */ for (i=0; i<4; i++) { mdc800->endpoint[i]=-1; for (j=0; j<4; j++) { if (mdc800_endpoint_equals (&intf_desc->endpoint [j].desc,&mdc800_ed [i])) { mdc800->endpoint[i]=intf_desc->endpoint [j].desc.bEndpointAddress ; if (i==1) { irq_interval=intf_desc->endpoint [j].desc.bInterval; } } } if (mdc800->endpoint[i] == -1) { dev_err(&intf->dev, "probe fails -> Wrong Endpoints.\n"); return -ENODEV; } } dev_info(&intf->dev, "Found Mustek MDC800 on USB.\n"); mutex_lock(&mdc800->io_lock); retval = usb_register_dev(intf, &mdc800_class); if (retval) { dev_err(&intf->dev, "Not able to get a minor for this device.\n"); mutex_unlock(&mdc800->io_lock); return -ENODEV; } mdc800->dev=dev; mdc800->open=0; /* Setup URB Structs */ usb_fill_int_urb ( mdc800->irq_urb, mdc800->dev, usb_rcvintpipe (mdc800->dev,mdc800->endpoint [1]), mdc800->irq_urb_buffer, 8, mdc800_usb_irq, mdc800, irq_interval ); usb_fill_bulk_urb ( mdc800->write_urb, mdc800->dev, usb_sndbulkpipe (mdc800->dev, mdc800->endpoint[0]), mdc800->write_urb_buffer, 8, mdc800_usb_write_notify, mdc800 ); usb_fill_bulk_urb ( mdc800->download_urb, mdc800->dev, usb_rcvbulkpipe (mdc800->dev, mdc800->endpoint [3]), mdc800->download_urb_buffer, 64, mdc800_usb_download_notify, mdc800 ); mdc800->state=READY; mutex_unlock(&mdc800->io_lock); 
usb_set_intfdata(intf, mdc800); return 0; } /* * Disconnect USB device (maybe the MDC800) */ static void mdc800_usb_disconnect (struct usb_interface *intf) { struct mdc800_data* mdc800 = usb_get_intfdata(intf); dev_dbg(&intf->dev, "(%s) called\n", __func__); if (mdc800) { if (mdc800->state == NOT_CONNECTED) return; usb_deregister_dev(intf, &mdc800_class); /* must be under lock to make sure no URB is submitted after usb_kill_urb() */ mutex_lock(&mdc800->io_lock); mdc800->state=NOT_CONNECTED; usb_kill_urb(mdc800->irq_urb); usb_kill_urb(mdc800->write_urb); usb_kill_urb(mdc800->download_urb); mutex_unlock(&mdc800->io_lock); mdc800->dev = NULL; usb_set_intfdata(intf, NULL); } dev_info(&intf->dev, "Mustek MDC800 disconnected from USB.\n"); } /*************************************************************************** The Misc device Part (file_operations) ****************************************************************************/ /* * This Function calc the Answersize for a command. */ static int mdc800_getAnswerSize (char command) { switch ((unsigned char) command) { case 0x2a: case 0x49: case 0x51: case 0x0d: case 0x20: case 0x07: case 0x01: case 0x25: case 0x00: return 8; case 0x05: case 0x3e: return mdc800->pic_len; case 0x09: return 4096; default: return 0; } } /* * Init the device: (1) alloc mem (2) Increase MOD Count .. */ static int mdc800_device_open (struct inode* inode, struct file *file) { int retval=0; int errn=0; mutex_lock(&mdc800->io_lock); if (mdc800->state == NOT_CONNECTED) { errn=-EBUSY; goto error_out; } if (mdc800->open) { errn=-EBUSY; goto error_out; } mdc800->in_count=0; mdc800->out_count=0; mdc800->out_ptr=0; mdc800->pic_index=0; mdc800->pic_len=-1; mdc800->download_left=0; mdc800->camera_busy=0; mdc800->camera_request_ready=0; mdc800->irq_urb->dev = mdc800->dev; retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL); if (retval) { dev_err(&mdc800->dev->dev, "request USB irq fails (submit_retval=%i).\n", retval); errn = -EIO; goto error_out; } mdc800->open=1; dev_dbg(&mdc800->dev->dev, "Mustek MDC800 device opened.\n"); error_out: mutex_unlock(&mdc800->io_lock); return errn; } /* * Close the Camera and release Memory */ static int mdc800_device_release (struct inode* inode, struct file *file) { int retval=0; mutex_lock(&mdc800->io_lock); if (mdc800->open && (mdc800->state != NOT_CONNECTED)) { usb_kill_urb(mdc800->irq_urb); usb_kill_urb(mdc800->write_urb); usb_kill_urb(mdc800->download_urb); mdc800->open=0; } else { retval=-EIO; } mutex_unlock(&mdc800->io_lock); return retval; } /* * The Device read callback Function */ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t len, loff_t *pos) { size_t left=len, sts=len; /* single transfer size */ char __user *ptr = buf; int retval; mutex_lock(&mdc800->io_lock); if (mdc800->state == NOT_CONNECTED) { mutex_unlock(&mdc800->io_lock); return -EBUSY; } if (mdc800->state == WORKING) { printk(KERN_WARNING "mdc800: Illegal State \"working\"" "reached during read ?!\n"); mutex_unlock(&mdc800->io_lock); return -EBUSY; } if (!mdc800->open) { mutex_unlock(&mdc800->io_lock); return -EBUSY; } while (left) { if (signal_pending (current)) { mutex_unlock(&mdc800->io_lock); return -EINTR; } sts=left > (mdc800->out_count-mdc800->out_ptr)?mdc800->out_count-mdc800->out_ptr:left; if (sts <= 0) { /* Too less Data in buffer */ if (mdc800->state == DOWNLOAD) { mdc800->out_count=0; mdc800->out_ptr=0; /* Download -> Request new bytes */ mdc800->download_urb->dev = mdc800->dev; retval = usb_submit_urb (mdc800->download_urb, 
GFP_KERNEL); if (retval) { dev_err(&mdc800->dev->dev, "Can't submit download urb " "(retval=%i)\n", retval); mutex_unlock(&mdc800->io_lock); return len-left; } wait_event_timeout(mdc800->download_wait, mdc800->downloaded, msecs_to_jiffies(TO_DOWNLOAD_GET_READY)); mdc800->downloaded = 0; if (mdc800->download_urb->status != 0) { dev_err(&mdc800->dev->dev, "request download-bytes fails " "(status=%i)\n", mdc800->download_urb->status); mutex_unlock(&mdc800->io_lock); return len-left; } } else { /* No more bytes -> that's an error*/ mutex_unlock(&mdc800->io_lock); return -EIO; } } else { /* Copy Bytes */ if (copy_to_user(ptr, &mdc800->out [mdc800->out_ptr], sts)) { mutex_unlock(&mdc800->io_lock); return -EFAULT; } ptr+=sts; left-=sts; mdc800->out_ptr+=sts; } } mutex_unlock(&mdc800->io_lock); return len-left; } /* * The Device write callback Function * If a 8Byte Command is received, it will be send to the camera. * After this the driver initiates the request for the answer or * just waits until the camera becomes ready. */ static ssize_t mdc800_device_write (struct file *file, const char __user *buf, size_t len, loff_t *pos) { size_t i=0; int retval; mutex_lock(&mdc800->io_lock); if (mdc800->state != READY) { mutex_unlock(&mdc800->io_lock); return -EBUSY; } if (!mdc800->open ) { mutex_unlock(&mdc800->io_lock); return -EBUSY; } while (i<len) { unsigned char c; if (signal_pending (current)) { mutex_unlock(&mdc800->io_lock); return -EINTR; } if(get_user(c, buf+i)) { mutex_unlock(&mdc800->io_lock); return -EFAULT; } /* check for command start */ if (c == 0x55) { mdc800->in_count=0; mdc800->out_count=0; mdc800->out_ptr=0; mdc800->download_left=0; } /* save command byte */ if (mdc800->in_count < 8) { mdc800->in[mdc800->in_count] = c; mdc800->in_count++; } else { mutex_unlock(&mdc800->io_lock); return -EIO; } /* Command Buffer full ? 
-> send it to camera */ if (mdc800->in_count == 8) { int answersize; if (mdc800_usb_waitForIRQ (0,TO_GET_READY)) { dev_err(&mdc800->dev->dev, "Camera didn't get ready.\n"); mutex_unlock(&mdc800->io_lock); return -EIO; } answersize=mdc800_getAnswerSize (mdc800->in[1]); mdc800->state=WORKING; memcpy (mdc800->write_urb->transfer_buffer, mdc800->in,8); mdc800->write_urb->dev = mdc800->dev; retval = usb_submit_urb (mdc800->write_urb, GFP_KERNEL); if (retval) { dev_err(&mdc800->dev->dev, "submitting write urb fails " "(retval=%i)\n", retval); mutex_unlock(&mdc800->io_lock); return -EIO; } wait_event_timeout(mdc800->write_wait, mdc800->written, msecs_to_jiffies(TO_WRITE_GET_READY)); mdc800->written = 0; if (mdc800->state == WORKING) { usb_kill_urb(mdc800->write_urb); mutex_unlock(&mdc800->io_lock); return -EIO; } switch ((unsigned char) mdc800->in[1]) { case 0x05: /* Download Image */ case 0x3e: /* Take shot in Fine Mode (WCam Mode) */ if (mdc800->pic_len < 0) { dev_err(&mdc800->dev->dev, "call 0x07 before " "0x05,0x3e\n"); mdc800->state=READY; mutex_unlock(&mdc800->io_lock); return -EIO; } mdc800->pic_len=-1; fallthrough; case 0x09: /* Download Thumbnail */ mdc800->download_left=answersize+64; mdc800->state=DOWNLOAD; mdc800_usb_waitForIRQ (0,TO_DOWNLOAD_GET_BUSY); break; default: if (answersize) { if (mdc800_usb_waitForIRQ (1,TO_READ_FROM_IRQ)) { dev_err(&mdc800->dev->dev, "requesting answer from irq fails\n"); mutex_unlock(&mdc800->io_lock); return -EIO; } /* Write dummy data, (this is ugly but part of the USB Protocol */ /* if you use endpoint 1 as bulk and not as irq) */ memcpy (mdc800->out, mdc800->camera_response,8); /* This is the interpreted answer */ memcpy (&mdc800->out[8], mdc800->camera_response,8); mdc800->out_ptr=0; mdc800->out_count=16; /* Cache the Imagesize, if command was getImageSize */ if (mdc800->in [1] == (char) 0x07) { mdc800->pic_len=(int) 65536*(unsigned char) mdc800->camera_response[0]+256*(unsigned char) mdc800->camera_response[1]+(unsigned char) mdc800->camera_response[2]; dev_dbg(&mdc800->dev->dev, "cached imagesize = %i\n", mdc800->pic_len); } } else { if (mdc800_usb_waitForIRQ (0,TO_DEFAULT_COMMAND)) { dev_err(&mdc800->dev->dev, "Command Timeout.\n"); mutex_unlock(&mdc800->io_lock); return -EIO; } } mdc800->state=READY; break; } } i++; } mutex_unlock(&mdc800->io_lock); return i; } /*************************************************************************** Init and Cleanup this driver (Structs and types) ****************************************************************************/ /* File Operations of this drivers */ static const struct file_operations mdc800_device_ops = { .owner = THIS_MODULE, .read = mdc800_device_read, .write = mdc800_device_write, .open = mdc800_device_open, .release = mdc800_device_release, .llseek = noop_llseek, }; static const struct usb_device_id mdc800_table[] = { { USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, mdc800_table); /* * USB Driver Struct for this device */ static struct usb_driver mdc800_usb_driver = { .name = "mdc800", .probe = mdc800_usb_probe, .disconnect = mdc800_usb_disconnect, .id_table = mdc800_table }; /************************************************************************ Init and Cleanup this driver (Main Functions) *************************************************************************/ static int __init usb_mdc800_init (void) { int retval = -ENODEV; /* Allocate Memory */ mdc800=kzalloc (sizeof (struct mdc800_data), GFP_KERNEL); if (!mdc800) goto 
cleanup_on_fail;

	mdc800->dev = NULL;
	mdc800->state=NOT_CONNECTED;

	mutex_init (&mdc800->io_lock);

	init_waitqueue_head (&mdc800->irq_wait);
	init_waitqueue_head (&mdc800->write_wait);
	init_waitqueue_head (&mdc800->download_wait);

	mdc800->irq_woken = 0;
	mdc800->downloaded = 0;
	mdc800->written = 0;

	mdc800->irq_urb_buffer=kmalloc (8, GFP_KERNEL);
	if (!mdc800->irq_urb_buffer)
		goto cleanup_on_fail;
	mdc800->write_urb_buffer=kmalloc (8, GFP_KERNEL);
	if (!mdc800->write_urb_buffer)
		goto cleanup_on_fail;
	mdc800->download_urb_buffer=kmalloc (64, GFP_KERNEL);
	if (!mdc800->download_urb_buffer)
		goto cleanup_on_fail;

	mdc800->irq_urb=usb_alloc_urb (0, GFP_KERNEL);
	if (!mdc800->irq_urb)
		goto cleanup_on_fail;
	mdc800->download_urb=usb_alloc_urb (0, GFP_KERNEL);
	if (!mdc800->download_urb)
		goto cleanup_on_fail;
	mdc800->write_urb=usb_alloc_urb (0, GFP_KERNEL);
	if (!mdc800->write_urb)
		goto cleanup_on_fail;

	/* Register the driver */
	retval = usb_register(&mdc800_usb_driver);
	if (retval)
		goto cleanup_on_fail;

	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
	       DRIVER_DESC "\n");

	return 0;

	/* Clean driver up, when something fails */
cleanup_on_fail:

	if (mdc800 != NULL) {
		printk(KERN_ERR "mdc800: can't alloc memory!\n");

		kfree(mdc800->download_urb_buffer);
		kfree(mdc800->write_urb_buffer);
		kfree(mdc800->irq_urb_buffer);

		usb_free_urb(mdc800->write_urb);
		usb_free_urb(mdc800->download_urb);
		usb_free_urb(mdc800->irq_urb);

		kfree (mdc800);
	}
	mdc800 = NULL;
	return retval;
}

static void __exit usb_mdc800_cleanup (void)
{
	usb_deregister (&mdc800_usb_driver);

	usb_free_urb (mdc800->irq_urb);
	usb_free_urb (mdc800->download_urb);
	usb_free_urb (mdc800->write_urb);
	kfree (mdc800->irq_urb_buffer);
	kfree (mdc800->write_urb_buffer);
	kfree (mdc800->download_urb_buffer);

	kfree (mdc800);
	mdc800 = NULL;
}

module_init (usb_mdc800_init);
module_exit (usb_mdc800_cleanup);

MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL");
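/*
 * Hypothetical user-space sketch (not part of the driver above): issuing
 * one command through the device node and reading back the answer, the
 * way mdc800_device_write()/mdc800_device_read() expect. Commands are
 * 8 bytes and start with 0x55; 0x07 is used here because the write path
 * above answers it with 8 bytes and caches the image size from them.
 * The node path assumes the mknod line from the header comment.
 */
#if 0 /* example only */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char cmd[8] = { 0x55, 0x07, 0, 0, 0, 0, 0, 0 };
	unsigned char ans[8];
	int fd = open("/dev/mustek", O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, cmd, sizeof(cmd)) == (ssize_t)sizeof(cmd) &&
	    read(fd, ans, sizeof(ans)) == (ssize_t)sizeof(ans))
		/* same decoding the driver uses to cache pic_len */
		printf("image size: %d\n",
		       65536 * ans[0] + 256 * ans[1] + ans[2]);
	close(fd);
	return 0;
}
#endif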
// SPDX-License-Identifier: GPL-2.0
/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * mon_main.c: Main file, module initialization and exit, registrations, etc.
 *
 * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/mutex.h>

#include "usb_mon.h"

static void mon_stop(struct mon_bus *mbus);
static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
static void mon_bus_drop(struct kref *r);
static void mon_bus_init(struct usb_bus *ubus);

DEFINE_MUTEX(mon_lock);

struct mon_bus mon_bus0;	/* Pseudo bus meaning "all buses" */
static LIST_HEAD(mon_buses);	/* All buses we know: struct mon_bus */

/*
 * Link a reader into the bus.
 *
 * This must be called with mon_lock taken because of mbus->ref.
 */
void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r)
{
	unsigned long flags;
	struct list_head *p;

	spin_lock_irqsave(&mbus->lock, flags);
	if (mbus->nreaders == 0) {
		if (mbus == &mon_bus0) {
			list_for_each (p, &mon_buses) {
				struct mon_bus *m1;
				m1 = list_entry(p, struct mon_bus, bus_link);
				m1->u_bus->monitored = 1;
			}
		} else {
			mbus->u_bus->monitored = 1;
		}
	}
	mbus->nreaders++;
	list_add_tail(&r->r_link, &mbus->r_list);
	spin_unlock_irqrestore(&mbus->lock, flags);

	kref_get(&mbus->ref);
}

/*
 * Unlink reader from the bus.
 *
 * This is called with mon_lock taken, so we can decrement mbus->ref.
*/ void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r) { unsigned long flags; spin_lock_irqsave(&mbus->lock, flags); list_del(&r->r_link); --mbus->nreaders; if (mbus->nreaders == 0) mon_stop(mbus); spin_unlock_irqrestore(&mbus->lock, flags); kref_put(&mbus->ref, mon_bus_drop); } /* */ static void mon_bus_submit(struct mon_bus *mbus, struct urb *urb) { unsigned long flags; struct mon_reader *r; spin_lock_irqsave(&mbus->lock, flags); mbus->cnt_events++; list_for_each_entry(r, &mbus->r_list, r_link) r->rnf_submit(r->r_data, urb); spin_unlock_irqrestore(&mbus->lock, flags); } static void mon_submit(struct usb_bus *ubus, struct urb *urb) { struct mon_bus *mbus; mbus = ubus->mon_bus; if (mbus != NULL) mon_bus_submit(mbus, urb); mon_bus_submit(&mon_bus0, urb); } /* */ static void mon_bus_submit_error(struct mon_bus *mbus, struct urb *urb, int error) { unsigned long flags; struct mon_reader *r; spin_lock_irqsave(&mbus->lock, flags); mbus->cnt_events++; list_for_each_entry(r, &mbus->r_list, r_link) r->rnf_error(r->r_data, urb, error); spin_unlock_irqrestore(&mbus->lock, flags); } static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error) { struct mon_bus *mbus; mbus = ubus->mon_bus; if (mbus != NULL) mon_bus_submit_error(mbus, urb, error); mon_bus_submit_error(&mon_bus0, urb, error); } /* */ static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb, int status) { unsigned long flags; struct mon_reader *r; spin_lock_irqsave(&mbus->lock, flags); mbus->cnt_events++; list_for_each_entry(r, &mbus->r_list, r_link) r->rnf_complete(r->r_data, urb, status); spin_unlock_irqrestore(&mbus->lock, flags); } static void mon_complete(struct usb_bus *ubus, struct urb *urb, int status) { struct mon_bus *mbus; mbus = ubus->mon_bus; if (mbus != NULL) mon_bus_complete(mbus, urb, status); mon_bus_complete(&mon_bus0, urb, status); } /* int (*unlink_urb) (struct urb *urb, int status); */ /* * Stop monitoring. */ static void mon_stop(struct mon_bus *mbus) { struct usb_bus *ubus; if (mbus == &mon_bus0) { list_for_each_entry(mbus, &mon_buses, bus_link) { /* * We do not change nreaders here, so rely on mon_lock. */ if (mbus->nreaders == 0 && (ubus = mbus->u_bus) != NULL) ubus->monitored = 0; } } else { /* * A stop can be called for a dissolved mon_bus in case of * a reader staying across an rmmod foo_hcd, so test ->u_bus. */ if (mon_bus0.nreaders == 0 && (ubus = mbus->u_bus) != NULL) { ubus->monitored = 0; mb(); } } } /* * Add a USB bus (usually by a modprobe foo-hcd) * * This does not return an error code because the core cannot care less * if monitoring is not established. */ static void mon_bus_add(struct usb_bus *ubus) { mon_bus_init(ubus); mutex_lock(&mon_lock); if (mon_bus0.nreaders != 0) ubus->monitored = 1; mutex_unlock(&mon_lock); } /* * Remove a USB bus (either from rmmod foo-hcd or from a hot-remove event). 
*/ static void mon_bus_remove(struct usb_bus *ubus) { struct mon_bus *mbus = ubus->mon_bus; mutex_lock(&mon_lock); list_del(&mbus->bus_link); if (mbus->text_inited) mon_text_del(mbus); if (mbus->bin_inited) mon_bin_del(mbus); mon_dissolve(mbus, ubus); kref_put(&mbus->ref, mon_bus_drop); mutex_unlock(&mon_lock); } static int mon_notify(struct notifier_block *self, unsigned long action, void *dev) { switch (action) { case USB_BUS_ADD: mon_bus_add(dev); break; case USB_BUS_REMOVE: mon_bus_remove(dev); } return NOTIFY_OK; } static struct notifier_block mon_nb = { .notifier_call = mon_notify, }; /* * Ops */ static const struct usb_mon_operations mon_ops_0 = { .urb_submit = mon_submit, .urb_submit_error = mon_submit_error, .urb_complete = mon_complete, }; /* * Tear usb_bus and mon_bus apart. */ static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus) { if (ubus->monitored) { ubus->monitored = 0; mb(); } ubus->mon_bus = NULL; mbus->u_bus = NULL; mb(); /* We want synchronize_irq() here, but that needs an argument. */ } /* */ static void mon_bus_drop(struct kref *r) { struct mon_bus *mbus = container_of(r, struct mon_bus, ref); kfree(mbus); } /* * Initialize a bus for us: * - allocate mon_bus * - refcount USB bus struct * - link */ static void mon_bus_init(struct usb_bus *ubus) { struct mon_bus *mbus; mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL); if (mbus == NULL) goto err_alloc; kref_init(&mbus->ref); spin_lock_init(&mbus->lock); INIT_LIST_HEAD(&mbus->r_list); /* * We don't need to take a reference to ubus, because we receive * a notification if the bus is about to be removed. */ mbus->u_bus = ubus; ubus->mon_bus = mbus; mbus->text_inited = mon_text_add(mbus, ubus); mbus->bin_inited = mon_bin_add(mbus, ubus); mutex_lock(&mon_lock); list_add_tail(&mbus->bus_link, &mon_buses); mutex_unlock(&mon_lock); return; err_alloc: return; } static void mon_bus0_init(void) { struct mon_bus *mbus = &mon_bus0; kref_init(&mbus->ref); spin_lock_init(&mbus->lock); INIT_LIST_HEAD(&mbus->r_list); mbus->text_inited = mon_text_add(mbus, NULL); mbus->bin_inited = mon_bin_add(mbus, NULL); } /* * Search a USB bus by number. Notice that USB bus numbers start from one, * which we may later use to identify "all" with zero. * * This function must be called with mon_lock held. * * This is obviously inefficient and may be revised in the future. 
 */
struct mon_bus *mon_bus_lookup(unsigned int num)
{
	struct mon_bus *mbus;

	if (num == 0) {
		return &mon_bus0;
	}
	list_for_each_entry(mbus, &mon_buses, bus_link) {
		if (mbus->u_bus->busnum == num) {
			return mbus;
		}
	}
	return NULL;
}

static int __init mon_init(void)
{
	struct usb_bus *ubus;
	int rc, id;

	if ((rc = mon_text_init()) != 0)
		goto err_text;
	if ((rc = mon_bin_init()) != 0)
		goto err_bin;
	mon_bus0_init();

	if (usb_mon_register(&mon_ops_0) != 0) {
		printk(KERN_NOTICE TAG ": unable to register with the core\n");
		rc = -ENODEV;
		goto err_reg;
	}
	// MOD_INC_USE_COUNT(which_module?);

	mutex_lock(&usb_bus_idr_lock);
	idr_for_each_entry(&usb_bus_idr, ubus, id)
		mon_bus_init(ubus);
	usb_register_notify(&mon_nb);
	mutex_unlock(&usb_bus_idr_lock);
	return 0;

err_reg:
	mon_bin_exit();
err_bin:
	mon_text_exit();
err_text:
	return rc;
}

static void __exit mon_exit(void)
{
	struct mon_bus *mbus;
	struct list_head *p;

	usb_unregister_notify(&mon_nb);
	usb_mon_deregister();

	mutex_lock(&mon_lock);
	while (!list_empty(&mon_buses)) {
		p = mon_buses.next;
		mbus = list_entry(p, struct mon_bus, bus_link);
		list_del(p);

		if (mbus->text_inited)
			mon_text_del(mbus);
		if (mbus->bin_inited)
			mon_bin_del(mbus);

		/*
		 * This never happens, because the open/close paths in
		 * file level maintain module use counters and so rmmod fails
		 * before reaching here. However, better be safe...
		 */
		if (mbus->nreaders) {
			printk(KERN_ERR TAG
			    ": Outstanding opens (%d) on usb%d, leaking...\n",
			    mbus->nreaders, mbus->u_bus->busnum);
			kref_get(&mbus->ref); /* Force leak */
		}

		mon_dissolve(mbus, mbus->u_bus);
		kref_put(&mbus->ref, mon_bus_drop);
	}

	mbus = &mon_bus0;
	if (mbus->text_inited)
		mon_text_del(mbus);
	if (mbus->bin_inited)
		mon_bin_del(mbus);

	mutex_unlock(&mon_lock);

	mon_text_exit();
	mon_bin_exit();
}

module_init(mon_init);
module_exit(mon_exit);
MODULE_DESCRIPTION("USB Monitor");
MODULE_LICENSE("GPL");
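/*
 * Sketch (not part of usbmon): the shape of a minimal reader, matching
 * the callback signatures invoked by mon_bus_submit(),
 * mon_bus_submit_error() and mon_bus_complete() above. Registration
 * must happen under mon_lock, as the comment on mon_reader_add()
 * requires. The my_* names and the event counter are illustrative
 * assumptions, not existing usbmon code.
 */
#if 0 /* example only */
static atomic_t my_events = ATOMIC_INIT(0);

static void my_submit(void *data, struct urb *urb)
{
	atomic_inc((atomic_t *)data);	/* count submissions */
}

static void my_error(void *data, struct urb *urb, int error)
{
	atomic_inc((atomic_t *)data);	/* count submission errors */
}

static void my_complete(void *data, struct urb *urb, int status)
{
	atomic_inc((atomic_t *)data);	/* count completions */
}

static struct mon_reader my_reader = {
	.r_data = &my_events,
	.rnf_submit = my_submit,
	.rnf_error = my_error,
	.rnf_complete = my_complete,
};

static void my_attach_all_buses(void)
{
	mutex_lock(&mon_lock);
	mon_reader_add(&mon_bus0, &my_reader);	/* bus 0 = "all buses" */
	mutex_unlock(&mon_lock);
}
#endif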
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/netrom.h>

static void nr_heartbeat_expiry(struct timer_list *);
static void nr_t1timer_expiry(struct timer_list *);
static void nr_t2timer_expiry(struct timer_list *);
static void nr_t4timer_expiry(struct timer_list *);
static void nr_idletimer_expiry(struct timer_list *);

void nr_init_timers(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);

	timer_setup(&nr->t1timer, nr_t1timer_expiry, 0);
	timer_setup(&nr->t2timer, nr_t2timer_expiry, 0);
	timer_setup(&nr->t4timer, nr_t4timer_expiry, 0);
	timer_setup(&nr->idletimer, nr_idletimer_expiry, 0);

	/* initialized by sock_init_data */
	sk->sk_timer.function = nr_heartbeat_expiry;
}

void nr_start_t1timer(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);

	sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
}

void nr_start_t2timer(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);

	sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
}

void nr_start_t4timer(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);

	sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
}

void nr_start_idletimer(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);

	if (nr->idle > 0)
		sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
}

void nr_start_heartbeat(struct sock *sk)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
}

void nr_stop_t1timer(struct sock *sk)
{
	sk_stop_timer(sk, &nr_sk(sk)->t1timer);
}

void nr_stop_t2timer(struct sock *sk)
{
	sk_stop_timer(sk, &nr_sk(sk)->t2timer);
}

void nr_stop_t4timer(struct sock *sk)
{
	sk_stop_timer(sk, &nr_sk(sk)->t4timer);
}

void nr_stop_idletimer(struct sock *sk)
{
	sk_stop_timer(sk, &nr_sk(sk)->idletimer);
}

void nr_stop_heartbeat(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

int nr_t1timer_running(struct sock *sk)
{
	return timer_pending(&nr_sk(sk)->t1timer);
}

static void nr_heartbeat_expiry(struct timer_list *t)
{
	struct sock *sk = timer_container_of(sk, t, sk_timer);
	struct nr_sock *nr = nr_sk(sk);

	bh_lock_sock(sk);
	switch (nr->state) {
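	/*
	 * This handler runs every five seconds (armed by
	 * nr_start_heartbeat() above) and rearms itself at the bottom
	 * unless the socket is destroyed in NR_STATE_0 below.
	 */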
	case NR_STATE_0:
		/* Magic here: If we listen() and a new link dies before it
		   is accepted() it isn't 'dead' so doesn't get removed. */
		if (sock_flag(sk, SOCK_DESTROY) ||
		    (sk->sk_state == TCP_LISTEN &&
		     sock_flag(sk, SOCK_DEAD))) {
			if (sk->sk_state == TCP_LISTEN)
				sock_hold(sk);
			bh_unlock_sock(sk);
			nr_destroy_socket(sk);
			goto out;
		}
		break;

	case NR_STATE_3:
		/*
		 * Check for the state of the receive buffer.
		 */
		if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
		    (nr->condition & NR_COND_OWN_RX_BUSY)) {
			nr->condition &= ~NR_COND_OWN_RX_BUSY;
			nr->condition &= ~NR_COND_ACK_PENDING;
			nr->vl = nr->vr;
			nr_write_internal(sk, NR_INFOACK);
			break;
		}
		break;
	}

	nr_start_heartbeat(sk);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
}

static void nr_t2timer_expiry(struct timer_list *t)
{
	struct nr_sock *nr = timer_container_of(nr, t, t2timer);
	struct sock *sk = &nr->sock;

	bh_lock_sock(sk);
	if (nr->condition & NR_COND_ACK_PENDING) {
		nr->condition &= ~NR_COND_ACK_PENDING;
		nr_enquiry_response(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void nr_t4timer_expiry(struct timer_list *t)
{
	struct nr_sock *nr = timer_container_of(nr, t, t4timer);
	struct sock *sk = &nr->sock;

	bh_lock_sock(sk);
	nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY;
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void nr_idletimer_expiry(struct timer_list *t)
{
	struct nr_sock *nr = timer_container_of(nr, t, idletimer);
	struct sock *sk = &nr->sock;

	bh_lock_sock(sk);

	nr_clear_queues(sk);

	nr->n2count = 0;
	nr_write_internal(sk, NR_DISCREQ);
	nr->state = NR_STATE_2;

	nr_start_t1timer(sk);
	nr_stop_t2timer(sk);
	nr_stop_t4timer(sk);

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = 0;
	sk->sk_shutdown |= SEND_SHUTDOWN;

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void nr_t1timer_expiry(struct timer_list *t)
{
	struct nr_sock *nr = timer_container_of(nr, t, t1timer);
	struct sock *sk = &nr->sock;

	bh_lock_sock(sk);
	switch (nr->state) {
	case NR_STATE_1:
		if (nr->n2count == nr->n2) {
			nr_disconnect(sk, ETIMEDOUT);
			goto out;
		} else {
			nr->n2count++;
			nr_write_internal(sk, NR_CONNREQ);
		}
		break;

	case NR_STATE_2:
		if (nr->n2count == nr->n2) {
			nr_disconnect(sk, ETIMEDOUT);
			goto out;
		} else {
			nr->n2count++;
			nr_write_internal(sk, NR_DISCREQ);
		}
		break;

	case NR_STATE_3:
		if (nr->n2count == nr->n2) {
			nr_disconnect(sk, ETIMEDOUT);
			goto out;
		} else {
			nr->n2count++;
			nr_requeue_frames(sk);
		}
		break;
	}

	nr_start_t1timer(sk);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
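/*
 * Sketch (illustrative, not NET/ROM code): the socket-timer pattern the
 * file above follows. sk_reset_timer() takes a reference on the sock
 * when it (re)arms the timer; the expiry handler drops it with
 * sock_put() after doing its work under bh_lock_sock(). struct
 * my_proto_sock and the my_* names are hypothetical.
 */
#if 0 /* example only */
struct my_proto_sock {
	struct sock sock;		/* must come first, as in struct nr_sock */
	struct timer_list t1timer;
	unsigned long t1;		/* timeout in jiffies */
};

static void my_t1timer_expiry(struct timer_list *t)
{
	struct my_proto_sock *mp = timer_container_of(mp, t, t1timer);
	struct sock *sk = &mp->sock;

	bh_lock_sock(sk);		/* serialize with the socket owner */
	/* ... protocol retransmission work goes here ... */
	bh_unlock_sock(sk);
	sock_put(sk);			/* drop the ref sk_reset_timer() took */
}

static void my_start_t1timer(struct sock *sk)
{
	struct my_proto_sock *mp = (struct my_proto_sock *)sk;

	sk_reset_timer(sk, &mp->t1timer, jiffies + mp->t1);
}
#endif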
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> */ #include <linux/device.h> #include <net/genetlink.h> #include <net/sock.h> #include "devl_internal.h" struct devlink_info_req { struct sk_buff *msg; void (*version_cb)(const char *version_name, enum devlink_info_version_type version_type, void *version_cb_priv); void *version_cb_priv; }; struct devlink_reload_combination { enum devlink_reload_action action; enum devlink_reload_limit limit; }; static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = { { /* can't reinitialize driver with no down time */ .action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT, .limit = DEVLINK_RELOAD_LIMIT_NO_RESET, }, }; static bool devlink_reload_combination_is_invalid(enum devlink_reload_action action, enum devlink_reload_limit limit) { int i; for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) if (devlink_reload_invalid_combinations[i].action == action && devlink_reload_invalid_combinations[i].limit == limit) return true; return false; } static bool devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action) { return test_bit(action, &devlink->ops->reload_actions); } static bool devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit) { return test_bit(limit, &devlink->ops->reload_limits); } static int devlink_reload_stat_put(struct sk_buff *msg, enum devlink_reload_limit limit, u32 value) { struct nlattr *reload_stats_entry; reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY); if (!reload_stats_entry) return -EMSGSIZE; if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) || nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value)) goto nla_put_failure; nla_nest_end(msg, reload_stats_entry); return 0; nla_put_failure: nla_nest_cancel(msg, reload_stats_entry); return -EMSGSIZE; } static int devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote) { struct nlattr *reload_stats_attr, *act_info, *act_stats; int i, j, stat_idx; u32 value; if (!is_remote) reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS); else reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS); if (!reload_stats_attr) return -EMSGSIZE; for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) { if ((!is_remote && !devlink_reload_action_is_supported(devlink, i)) || i == DEVLINK_RELOAD_ACTION_UNSPEC) continue; act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO); if (!act_info) goto nla_put_failure; if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i)) goto action_info_nest_cancel; act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS); if (!act_stats) goto action_info_nest_cancel; for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) { /* Remote stats are shown even if not locally supported. * Stats of actions with unspecified limit are shown * though drivers don't need to register unspecified * limit. 
*/ if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC && !devlink_reload_limit_is_supported(devlink, j)) || devlink_reload_combination_is_invalid(i, j)) continue; stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i; if (!is_remote) value = devlink->stats.reload_stats[stat_idx]; else value = devlink->stats.remote_reload_stats[stat_idx]; if (devlink_reload_stat_put(msg, j, value)) goto action_stats_nest_cancel; } nla_nest_end(msg, act_stats); nla_nest_end(msg, act_info); } nla_nest_end(msg, reload_stats_attr); return 0; action_stats_nest_cancel: nla_nest_cancel(msg, act_stats); action_info_nest_cancel: nla_nest_cancel(msg, act_info); nla_put_failure: nla_nest_cancel(msg, reload_stats_attr); return -EMSGSIZE; } static int devlink_nl_nested_fill(struct sk_buff *msg, struct devlink *devlink) { unsigned long rel_index; void *unused; int err; xa_for_each(&devlink->nested_rels, rel_index, unused) { err = devlink_rel_devlink_handle_put(msg, devlink, rel_index, DEVLINK_ATTR_NESTED_DEVLINK, NULL); if (err) return err; } return 0; } static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink, enum devlink_command cmd, u32 portid, u32 seq, int flags) { struct nlattr *dev_stats; void *hdr; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed)) goto nla_put_failure; dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS); if (!dev_stats) goto nla_put_failure; if (devlink_reload_stats_put(msg, devlink, false)) goto dev_stats_nest_cancel; if (devlink_reload_stats_put(msg, devlink, true)) goto dev_stats_nest_cancel; nla_nest_end(msg, dev_stats); if (devlink_nl_nested_fill(msg, devlink)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; dev_stats_nest_cancel: nla_nest_cancel(msg, dev_stats); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static void devlink_notify(struct devlink *devlink, enum devlink_command cmd) { struct sk_buff *msg; int err; WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL); WARN_ON(!devl_is_registered(devlink)); if (!devlink_nl_notify_need(devlink)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0); if (err) { nlmsg_free(msg); return; } devlink_nl_notify_send(devlink, msg); } int devlink_nl_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct sk_buff *msg; int err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW, info->snd_portid, info->snd_seq, 0); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int devlink_nl_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { return devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags); } int devlink_nl_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { return devlink_nl_dumpit(msg, cb, devlink_nl_get_dump_one); } static void devlink_rel_notify_cb(struct devlink *devlink, u32 obj_index) { devlink_notify(devlink, DEVLINK_CMD_NEW); } static void devlink_rel_cleanup_cb(struct devlink *devlink, u32 obj_index, u32 rel_index) { xa_erase(&devlink->nested_rels, rel_index); } int devl_nested_devlink_set(struct devlink *devlink, struct devlink *nested_devlink) { u32 rel_index; int err; err = 
devlink_rel_nested_in_add(&rel_index, devlink->index, 0, devlink_rel_notify_cb, devlink_rel_cleanup_cb, nested_devlink); if (err) return err; return xa_insert(&devlink->nested_rels, rel_index, xa_mk_value(0), GFP_KERNEL); } EXPORT_SYMBOL_GPL(devl_nested_devlink_set); void devlink_notify_register(struct devlink *devlink) { devlink_notify(devlink, DEVLINK_CMD_NEW); devlink_linecards_notify_register(devlink); devlink_ports_notify_register(devlink); devlink_trap_policers_notify_register(devlink); devlink_trap_groups_notify_register(devlink); devlink_traps_notify_register(devlink); devlink_rates_notify_register(devlink); devlink_regions_notify_register(devlink); devlink_params_notify_register(devlink); } void devlink_notify_unregister(struct devlink *devlink) { devlink_params_notify_unregister(devlink); devlink_regions_notify_unregister(devlink); devlink_rates_notify_unregister(devlink); devlink_traps_notify_unregister(devlink); devlink_trap_groups_notify_unregister(devlink); devlink_trap_policers_notify_unregister(devlink); devlink_ports_notify_unregister(devlink); devlink_linecards_notify_unregister(devlink); devlink_notify(devlink, DEVLINK_CMD_DEL); } static void devlink_reload_failed_set(struct devlink *devlink, bool reload_failed) { if (devlink->reload_failed == reload_failed) return; devlink->reload_failed = reload_failed; devlink_notify(devlink, DEVLINK_CMD_NEW); } bool devlink_is_reload_failed(const struct devlink *devlink) { return devlink->reload_failed; } EXPORT_SYMBOL_GPL(devlink_is_reload_failed); static void __devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats, enum devlink_reload_limit limit, u32 actions_performed) { unsigned long actions = actions_performed; int stat_idx; int action; for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) { stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action; reload_stats[stat_idx]++; } devlink_notify(devlink, DEVLINK_CMD_NEW); } static void devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit, u32 actions_performed) { __devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit, actions_performed); } /** * devlink_remote_reload_actions_performed - Update devlink on reload actions * performed which are not a direct result of devlink reload call. * * This should be called by a driver after performing reload actions in case it was not * a result of devlink reload call. For example fw_activate was performed as a result * of devlink reload triggered fw_activate on another host. * The motivation for this function is to keep data on reload actions performed on this * function whether it was done due to direct devlink reload call or not. 
* * @devlink: devlink * @limit: reload limit * @actions_performed: bitmask of actions performed */ void devlink_remote_reload_actions_performed(struct devlink *devlink, enum devlink_reload_limit limit, u32 actions_performed) { if (WARN_ON(!actions_performed || actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) || actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) || limit > DEVLINK_RELOAD_LIMIT_MAX)) return; __devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit, actions_performed); } EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed); static struct net *devlink_netns_get(struct sk_buff *skb, struct genl_info *info) { struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID]; struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD]; struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID]; struct net *net; if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) { NL_SET_ERR_MSG(info->extack, "multiple netns identifying attributes specified"); return ERR_PTR(-EINVAL); } if (netns_pid_attr) { net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr)); } else if (netns_fd_attr) { net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr)); } else if (netns_id_attr) { net = get_net_ns_by_id(sock_net(skb->sk), nla_get_u32(netns_id_attr)); if (!net) net = ERR_PTR(-EINVAL); } else { WARN_ON(1); net = ERR_PTR(-EINVAL); } if (IS_ERR(net)) { NL_SET_ERR_MSG(info->extack, "Unknown network namespace"); return ERR_PTR(-EINVAL); } if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { put_net(net); return ERR_PTR(-EPERM); } return net; } static void devlink_reload_netns_change(struct devlink *devlink, struct net *curr_net, struct net *dest_net) { /* Userspace needs to be notified about devlink objects * removed from original and entering new network namespace. * The rest of the devlink objects are re-created during * reload process so the notifications are generated separatelly. */ devlink_notify_unregister(devlink); write_pnet(&devlink->_net, dest_net); devlink_notify_register(devlink); devlink_rel_nested_in_notify(devlink); } static void devlink_reload_reinit_sanity_check(struct devlink *devlink) { WARN_ON(!list_empty(&devlink->trap_policer_list)); WARN_ON(!list_empty(&devlink->trap_group_list)); WARN_ON(!list_empty(&devlink->trap_list)); WARN_ON(!list_empty(&devlink->dpipe_table_list)); WARN_ON(!list_empty(&devlink->sb_list)); WARN_ON(!list_empty(&devlink->rate_list)); WARN_ON(!list_empty(&devlink->linecard_list)); WARN_ON(!xa_empty(&devlink->ports)); } int devlink_reload(struct devlink *devlink, struct net *dest_net, enum devlink_reload_action action, enum devlink_reload_limit limit, u32 *actions_performed, struct netlink_ext_ack *extack) { u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE]; struct net *curr_net; int err; /* Make sure the reload operations are invoked with the device lock * held to allow drivers to trigger functionality that expects it * (e.g., PCI reset) and to close possible races between these * operations and probe/remove. 
*/ device_lock_assert(devlink->dev); memcpy(remote_reload_stats, devlink->stats.remote_reload_stats, sizeof(remote_reload_stats)); err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack); if (err) return err; curr_net = devlink_net(devlink); if (dest_net && !net_eq(dest_net, curr_net)) devlink_reload_netns_change(devlink, curr_net, dest_net); if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT) { devlink_params_driverinit_load_new(devlink); devlink_reload_reinit_sanity_check(devlink); } err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack); devlink_reload_failed_set(devlink, !!err); if (err) return err; WARN_ON(!(*actions_performed & BIT(action))); /* Catch drivers updating the remote reload stats from within a devlink reload */ WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats, sizeof(remote_reload_stats))); devlink_reload_stats_update(devlink, limit, *actions_performed); return 0; } static int devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed, enum devlink_command cmd, struct genl_info *info) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd); if (!hdr) goto free_msg; if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed, actions_performed)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int devlink_nl_reload_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; enum devlink_reload_action action; enum devlink_reload_limit limit; struct net *dest_net = NULL; u32 actions_performed; int err; err = devlink_resources_validate(devlink, NULL, info); if (err) { NL_SET_ERR_MSG(info->extack, "resources size validation failed"); return err; } action = nla_get_u8_default(info->attrs[DEVLINK_ATTR_RELOAD_ACTION], DEVLINK_RELOAD_ACTION_DRIVER_REINIT); if (!devlink_reload_action_is_supported(devlink, action)) { NL_SET_ERR_MSG(info->extack, "Requested reload action is not supported by the driver"); return -EOPNOTSUPP; } limit = DEVLINK_RELOAD_LIMIT_UNSPEC; if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) { struct nla_bitfield32 limits; u32 limits_selected; limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]); limits_selected = limits.value & limits.selector; if (!limits_selected) { NL_SET_ERR_MSG(info->extack, "Invalid limit selected"); return -EINVAL; } for (limit = 0; limit <= DEVLINK_RELOAD_LIMIT_MAX; limit++) if (limits_selected & BIT(limit)) break; /* UAPI enables multiselection, but currently it is not used */ if (limits_selected != BIT(limit)) { NL_SET_ERR_MSG(info->extack, "Multiselection of limit is not supported"); return -EOPNOTSUPP; } if (!devlink_reload_limit_is_supported(devlink, limit)) { NL_SET_ERR_MSG(info->extack, "Requested limit is not supported by the driver"); return -EOPNOTSUPP; } if (devlink_reload_combination_is_invalid(action, limit)) { NL_SET_ERR_MSG(info->extack, "Requested limit is invalid for this action"); return -EINVAL; } } if (info->attrs[DEVLINK_ATTR_NETNS_PID] || info->attrs[DEVLINK_ATTR_NETNS_FD] || info->attrs[DEVLINK_ATTR_NETNS_ID]) { dest_net = devlink_netns_get(skb, info); if (IS_ERR(dest_net)) return PTR_ERR(dest_net); if (!net_eq(dest_net, devlink_net(devlink)) &&
action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT) { NL_SET_ERR_MSG_MOD(info->extack, "Changing namespace is only supported for reinit action"); return -EOPNOTSUPP; } } err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack); if (dest_net) put_net(dest_net); if (err) return err; /* For backward compatibility generate reply only if attributes used by user */ if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) return 0; return devlink_nl_reload_actions_performed_snd(devlink, actions_performed, DEVLINK_CMD_RELOAD, info); } bool devlink_reload_actions_valid(const struct devlink_ops *ops) { const struct devlink_reload_combination *comb; int i; if (!devlink_reload_supported(ops)) { if (WARN_ON(ops->reload_actions)) return false; return true; } if (WARN_ON(!ops->reload_actions || ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) || ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX))) return false; if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) || ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX))) return false; for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) { comb = &devlink_reload_invalid_combinations[i]; if (ops->reload_actions == BIT(comb->action) && ops->reload_limits == BIT(comb->limit)) return false; } return true; } static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, enum devlink_command cmd, u32 portid, u32 seq, int flags) { const struct devlink_ops *ops = devlink->ops; enum devlink_eswitch_encap_mode encap_mode; u8 inline_mode; void *hdr; int err = 0; u16 mode; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; err = devlink_nl_put_handle(msg, devlink); if (err) goto nla_put_failure; if (ops->eswitch_mode_get) { err = ops->eswitch_mode_get(devlink, &mode); if (err) goto nla_put_failure; err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode); if (err) goto nla_put_failure; } if (ops->eswitch_inline_mode_get) { err = ops->eswitch_inline_mode_get(devlink, &inline_mode); if (err) goto nla_put_failure; err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE, inline_mode); if (err) goto nla_put_failure; } if (ops->eswitch_encap_mode_get) { err = ops->eswitch_encap_mode_get(devlink, &encap_mode); if (err) goto nla_put_failure; err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode); if (err) goto nla_put_failure; } genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return err; } int devlink_nl_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct sk_buff *msg; int err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET, info->snd_portid, info->snd_seq, 0); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } int devlink_nl_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; const struct devlink_ops *ops = devlink->ops; enum devlink_eswitch_encap_mode encap_mode; u8 inline_mode; int err = 0; u16 mode; if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) { if (!ops->eswitch_mode_set) return -EOPNOTSUPP; mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]); err = devlink_rate_nodes_check(devlink, mode, info->extack); if (err) return err; err = ops->eswitch_mode_set(devlink, mode, info->extack); if (err) return err; } if 
(info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) { if (!ops->eswitch_inline_mode_set) return -EOPNOTSUPP; inline_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]); err = ops->eswitch_inline_mode_set(devlink, inline_mode, info->extack); if (err) return err; } if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) { if (!ops->eswitch_encap_mode_set) return -EOPNOTSUPP; encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]); err = ops->eswitch_encap_mode_set(devlink, encap_mode, info->extack); if (err) return err; } return 0; } int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn) { if (!req->msg) return 0; return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn); } EXPORT_SYMBOL_GPL(devlink_info_serial_number_put); int devlink_info_board_serial_number_put(struct devlink_info_req *req, const char *bsn) { if (!req->msg) return 0; return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, bsn); } EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put); static int devlink_info_version_put(struct devlink_info_req *req, int attr, const char *version_name, const char *version_value, enum devlink_info_version_type version_type) { struct nlattr *nest; int err; if (req->version_cb) req->version_cb(version_name, version_type, req->version_cb_priv); if (!req->msg || !*version_value) return 0; nest = nla_nest_start_noflag(req->msg, attr); if (!nest) return -EMSGSIZE; err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME, version_name); if (err) goto nla_put_failure; err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE, version_value); if (err) goto nla_put_failure; nla_nest_end(req->msg, nest); return 0; nla_put_failure: nla_nest_cancel(req->msg, nest); return err; } int devlink_info_version_fixed_put(struct devlink_info_req *req, const char *version_name, const char *version_value) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED, version_name, version_value, DEVLINK_INFO_VERSION_TYPE_NONE); } EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put); int devlink_info_version_stored_put(struct devlink_info_req *req, const char *version_name, const char *version_value) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED, version_name, version_value, DEVLINK_INFO_VERSION_TYPE_NONE); } EXPORT_SYMBOL_GPL(devlink_info_version_stored_put); int devlink_info_version_stored_put_ext(struct devlink_info_req *req, const char *version_name, const char *version_value, enum devlink_info_version_type version_type) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED, version_name, version_value, version_type); } EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext); int devlink_info_version_running_put(struct devlink_info_req *req, const char *version_name, const char *version_value) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING, version_name, version_value, DEVLINK_INFO_VERSION_TYPE_NONE); } EXPORT_SYMBOL_GPL(devlink_info_version_running_put); int devlink_info_version_running_put_ext(struct devlink_info_req *req, const char *version_name, const char *version_value, enum devlink_info_version_type version_type) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING, version_name, version_value, version_type); } EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext); static int devlink_nl_driver_info_get(struct device_driver *drv, struct devlink_info_req *req) { if (!drv) return 0; if (drv->name[0]) return nla_put_string(req->msg, 
DEVLINK_ATTR_INFO_DRIVER_NAME, drv->name); return 0; } static int devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink, enum devlink_command cmd, u32 portid, u32 seq, int flags, struct netlink_ext_ack *extack) { struct device *dev = devlink_to_dev(devlink); struct devlink_info_req req = {}; void *hdr; int err; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; err = -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto err_cancel_msg; req.msg = msg; if (devlink->ops->info_get) { err = devlink->ops->info_get(devlink, &req, extack); if (err) goto err_cancel_msg; } err = devlink_nl_driver_info_get(dev->driver, &req); if (err) goto err_cancel_msg; genlmsg_end(msg, hdr); return 0; err_cancel_msg: genlmsg_cancel(msg, hdr); return err; } int devlink_nl_info_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct sk_buff *msg; int err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET, info->snd_portid, info->snd_seq, 0, info->extack); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int devlink_nl_info_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { int err; err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags, cb->extack); if (err == -EOPNOTSUPP) err = 0; return err; } int devlink_nl_info_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { return devlink_nl_dumpit(msg, cb, devlink_nl_info_get_dump_one); } static int devlink_nl_flash_update_fill(struct sk_buff *msg, struct devlink *devlink, enum devlink_command cmd, struct devlink_flash_notify *params) { void *hdr; hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd); if (!hdr) return -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS) goto out; if (params->status_msg && nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG, params->status_msg)) goto nla_put_failure; if (params->component && nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT, params->component)) goto nla_put_failure; if (devlink_nl_put_u64(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE, params->done)) goto nla_put_failure; if (devlink_nl_put_u64(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL, params->total)) goto nla_put_failure; if (devlink_nl_put_u64(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT, params->timeout)) goto nla_put_failure; out: genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static void __devlink_flash_update_notify(struct devlink *devlink, enum devlink_command cmd, struct devlink_flash_notify *params) { struct sk_buff *msg; int err; WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE && cmd != DEVLINK_CMD_FLASH_UPDATE_END && cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS); if (!devl_is_registered(devlink) || !devlink_nl_notify_need(devlink)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; err = devlink_nl_flash_update_fill(msg, devlink, cmd, params); if (err) goto out_free_msg; devlink_nl_notify_send(devlink, msg); return; out_free_msg: nlmsg_free(msg); } static void devlink_flash_update_begin_notify(struct devlink *devlink) { struct devlink_flash_notify params = {}; __devlink_flash_update_notify(devlink, DEVLINK_CMD_FLASH_UPDATE, &params); } static void
devlink_flash_update_end_notify(struct devlink *devlink) { struct devlink_flash_notify params = {}; __devlink_flash_update_notify(devlink, DEVLINK_CMD_FLASH_UPDATE_END, &params); } void devlink_flash_update_status_notify(struct devlink *devlink, const char *status_msg, const char *component, unsigned long done, unsigned long total) { struct devlink_flash_notify params = { .status_msg = status_msg, .component = component, .done = done, .total = total, }; __devlink_flash_update_notify(devlink, DEVLINK_CMD_FLASH_UPDATE_STATUS, &params); } EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify); void devlink_flash_update_timeout_notify(struct devlink *devlink, const char *status_msg, const char *component, unsigned long timeout) { struct devlink_flash_notify params = { .status_msg = status_msg, .component = component, .timeout = timeout, }; __devlink_flash_update_notify(devlink, DEVLINK_CMD_FLASH_UPDATE_STATUS, &params); } EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify); struct devlink_flash_component_lookup_ctx { const char *lookup_name; bool lookup_name_found; }; static void devlink_flash_component_lookup_cb(const char *version_name, enum devlink_info_version_type version_type, void *version_cb_priv) { struct devlink_flash_component_lookup_ctx *lookup_ctx = version_cb_priv; if (version_type != DEVLINK_INFO_VERSION_TYPE_COMPONENT || lookup_ctx->lookup_name_found) return; lookup_ctx->lookup_name_found = !strcmp(lookup_ctx->lookup_name, version_name); } static int devlink_flash_component_get(struct devlink *devlink, struct nlattr *nla_component, const char **p_component, struct netlink_ext_ack *extack) { struct devlink_flash_component_lookup_ctx lookup_ctx = {}; struct devlink_info_req req = {}; const char *component; int ret; if (!nla_component) return 0; component = nla_data(nla_component); if (!devlink->ops->info_get) { NL_SET_ERR_MSG_ATTR(extack, nla_component, "component update is not supported by this device"); return -EOPNOTSUPP; } lookup_ctx.lookup_name = component; req.version_cb = devlink_flash_component_lookup_cb; req.version_cb_priv = &lookup_ctx; ret = devlink->ops->info_get(devlink, &req, NULL); if (ret) return ret; if (!lookup_ctx.lookup_name_found) { NL_SET_ERR_MSG_ATTR(extack, nla_component, "selected component is not supported by this device"); return -EINVAL; } *p_component = component; return 0; } int devlink_nl_flash_update_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *nla_overwrite_mask, *nla_file_name; struct devlink_flash_update_params params = {}; struct devlink *devlink = info->user_ptr[0]; const char *file_name; u32 supported_params; int ret; if (!devlink->ops->flash_update) return -EOPNOTSUPP; if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME)) return -EINVAL; ret = devlink_flash_component_get(devlink, info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT], &params.component, info->extack); if (ret) return ret; supported_params = devlink->ops->supported_flash_update_params; nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK]; if (nla_overwrite_mask) { struct nla_bitfield32 sections; if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK)) { NL_SET_ERR_MSG_ATTR(info->extack, nla_overwrite_mask, "overwrite settings are not supported by this device"); return -EOPNOTSUPP; } sections = nla_get_bitfield32(nla_overwrite_mask); params.overwrite_mask = sections.value & sections.selector; } nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME]; file_name = nla_data(nla_file_name); ret =
request_firmware(&params.fw, file_name, devlink->dev); if (ret) { NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name, "failed to locate the requested firmware file"); return ret; } devlink_flash_update_begin_notify(devlink); ret = devlink->ops->flash_update(devlink, &params, info->extack); devlink_flash_update_end_notify(devlink); release_firmware(params.fw); return ret; } static void __devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len) { struct devlink_info_req req = {}; const struct nlattr *nlattr; struct sk_buff *msg; int rem, err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; req.msg = msg; err = devlink->ops->info_get(devlink, &req, NULL); if (err) goto free_msg; nla_for_each_attr_type(nlattr, DEVLINK_ATTR_INFO_VERSION_RUNNING, (void *)msg->data, msg->len, rem) { const struct nlattr *kv; int rem_kv; nla_for_each_nested_type(kv, DEVLINK_ATTR_INFO_VERSION_VALUE, nlattr, rem_kv) { strlcat(buf, nla_data(kv), len); strlcat(buf, " ", len); } } free_msg: nlmsg_consume(msg); } void devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len) { if (!devlink->ops->info_get) return; devl_lock(devlink); if (devl_is_registered(devlink)) __devlink_compat_running_version(devlink, buf, len); devl_unlock(devlink); } int devlink_compat_flash_update(struct devlink *devlink, const char *file_name) { struct devlink_flash_update_params params = {}; int ret; devl_lock(devlink); if (!devl_is_registered(devlink)) { ret = -ENODEV; goto out_unlock; } if (!devlink->ops->flash_update) { ret = -EOPNOTSUPP; goto out_unlock; } ret = request_firmware(&params.fw, file_name, devlink->dev); if (ret) goto out_unlock; devlink_flash_update_begin_notify(devlink); ret = devlink->ops->flash_update(devlink, &params, NULL); devlink_flash_update_end_notify(devlink); release_firmware(params.fw); out_unlock: devl_unlock(devlink); return ret; } static int devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink, u32 portid, u32 seq, int flags, struct netlink_ext_ack *extack) { struct nlattr *selftests; void *hdr; int err; int i; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, DEVLINK_CMD_SELFTESTS_GET); if (!hdr) return -EMSGSIZE; err = -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto err_cancel_msg; selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS); if (!selftests) goto err_cancel_msg; for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1; i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) { if (devlink->ops->selftest_check(devlink, i, extack)) { err = nla_put_flag(msg, i); if (err) goto err_cancel_msg; } } nla_nest_end(msg, selftests); genlmsg_end(msg, hdr); return 0; err_cancel_msg: genlmsg_cancel(msg, hdr); return err; } int devlink_nl_selftests_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct sk_buff *msg; int err; if (!devlink->ops->selftest_check) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid, info->snd_seq, 0, info->extack); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int devlink_nl_selftests_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { if (!devlink->ops->selftest_check) return 0; return devlink_nl_selftests_fill(msg, devlink, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags, cb->extack); } int devlink_nl_selftests_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return
devlink_nl_dumpit(skb, cb, devlink_nl_selftests_get_dump_one); } static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id, enum devlink_selftest_status test_status) { struct nlattr *result_attr; result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT); if (!result_attr) return -EMSGSIZE; if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) || nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS, test_status)) goto nla_put_failure; nla_nest_end(skb, result_attr); return 0; nla_put_failure: nla_nest_cancel(skb, result_attr); return -EMSGSIZE; } static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = { [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG }, }; int devlink_nl_selftests_run_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1]; struct devlink *devlink = info->user_ptr[0]; struct nlattr *attrs, *selftests; struct sk_buff *msg; void *hdr; int err; int i; if (!devlink->ops->selftest_run || !devlink->ops->selftest_check) return -EOPNOTSUPP; if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SELFTESTS)) return -EINVAL; attrs = info->attrs[DEVLINK_ATTR_SELFTESTS]; err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs, devlink_selftest_nl_policy, info->extack); if (err < 0) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = -EMSGSIZE; hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN); if (!hdr) goto free_msg; if (devlink_nl_put_handle(msg, devlink)) goto genlmsg_cancel; selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS); if (!selftests) goto genlmsg_cancel; for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1; i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) { enum devlink_selftest_status test_status; if (nla_get_flag(tb[i])) { if (!devlink->ops->selftest_check(devlink, i, info->extack)) { if (devlink_selftest_result_put(msg, i, DEVLINK_SELFTEST_STATUS_SKIP)) goto selftests_nest_cancel; continue; } test_status = devlink->ops->selftest_run(devlink, i, info->extack); if (devlink_selftest_result_put(msg, i, test_status)) goto selftests_nest_cancel; } } nla_nest_end(msg, selftests); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); selftests_nest_cancel: nla_nest_cancel(msg, selftests); genlmsg_cancel: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return err; }
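/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a driver might use devlink_remote_reload_actions_performed() above to
 * record a firmware activation that happened without a local devlink reload,
 * e.g. when fw_activate was triggered from another host. The "example_"
 * function name and the surrounding event-handling context are hypothetical;
 * only the devlink call itself is the API shown above.
 */
static void example_drv_handle_remote_fw_activation(struct devlink *devlink)
{
	/* No limit was applied and the only action performed was fw_activate. */
	devlink_remote_reload_actions_performed(devlink,
						DEVLINK_RELOAD_LIMIT_UNSPEC,
						BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
}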
// SPDX-License-Identifier: GPL-2.0-only /* * Power Management Quality of Service (PM QoS) support base. * * Copyright (C) 2020 Intel Corporation * * Authors: * Mark Gross <mgross@linux.intel.com> * Rafael J. Wysocki <rafael.j.wysocki@intel.com> * * Provided here is an interface for specifying PM QoS dependencies. It allows * entities depending on QoS constraints to register their requests which are * aggregated as appropriate to produce effective constraints (target values) * that can be monitored by entities needing to respect them, either by polling * or through a built-in notification mechanism. * * In addition to the basic functionality, more specific interfaces for managing * global CPU latency QoS requests and frequency QoS requests are provided.
*/ /*#define DEBUG*/ #include <linux/pm_qos.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/string.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/export.h> #include <trace/events/power.h> /* * locking rule: all changes to constraints or notifiers lists * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock * held, taken with _irqsave. One lock to rule them all */ static DEFINE_SPINLOCK(pm_qos_lock); /** * pm_qos_read_value - Return the current effective constraint value. * @c: List of PM QoS constraint requests. */ s32 pm_qos_read_value(struct pm_qos_constraints *c) { return READ_ONCE(c->target_value); } static int pm_qos_get_value(struct pm_qos_constraints *c) { if (plist_head_empty(&c->list)) return c->no_constraint_value; switch (c->type) { case PM_QOS_MIN: return plist_first(&c->list)->prio; case PM_QOS_MAX: return plist_last(&c->list)->prio; default: WARN(1, "Unknown PM QoS type in %s\n", __func__); return PM_QOS_DEFAULT_VALUE; } } static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value) { WRITE_ONCE(c->target_value, value); } /** * pm_qos_update_target - Update a list of PM QoS constraint requests. * @c: List of PM QoS requests. * @node: Target list entry. * @action: Action to carry out (add, update or remove). * @value: New request value for the target list entry. * * Update the given list of PM QoS constraint requests, @c, by carrying an * @action involving the @node list entry and @value on it. * * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list, store * @value in it and add it to the list again), and PM_QOS_REMOVE_REQ (remove * @node from the list, ignore @value). * * Return: 1 if the aggregate constraint value has changed, 0 otherwise. */ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, enum pm_qos_req_action action, int value) { int prev_value, curr_value, new_value; unsigned long flags; spin_lock_irqsave(&pm_qos_lock, flags); prev_value = pm_qos_get_value(c); if (value == PM_QOS_DEFAULT_VALUE) new_value = c->default_value; else new_value = value; switch (action) { case PM_QOS_REMOVE_REQ: plist_del(node, &c->list); break; case PM_QOS_UPDATE_REQ: /* * To change the list, atomically remove, reinit with new value * and add, then see if the aggregate has changed. */ plist_del(node, &c->list); fallthrough; case PM_QOS_ADD_REQ: plist_node_init(node, new_value); plist_add(node, &c->list); break; default: /* no action */ ; } curr_value = pm_qos_get_value(c); pm_qos_set_value(c, curr_value); spin_unlock_irqrestore(&pm_qos_lock, flags); trace_pm_qos_update_target(action, prev_value, curr_value); if (prev_value == curr_value) return 0; if (c->notifiers) blocking_notifier_call_chain(c->notifiers, curr_value, NULL); return 1; } /** * pm_qos_flags_remove_req - Remove device PM QoS flags request. * @pqf: Device PM QoS flags set to remove the request from. * @req: Request to remove from the set. 
*/ static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req) { s32 val = 0; list_del(&req->node); list_for_each_entry(req, &pqf->list, node) val |= req->flags; pqf->effective_flags = val; } /** * pm_qos_update_flags - Update a set of PM QoS flags. * @pqf: Set of PM QoS flags to update. * @req: Request to add to the set, to modify, or to remove from the set. * @action: Action to take on the set. * @val: Value of the request to add or modify. * * Return: 1 if the aggregate constraint value has changed, 0 otherwise. */ bool pm_qos_update_flags(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req, enum pm_qos_req_action action, s32 val) { unsigned long irqflags; s32 prev_value, curr_value; spin_lock_irqsave(&pm_qos_lock, irqflags); prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; switch (action) { case PM_QOS_REMOVE_REQ: pm_qos_flags_remove_req(pqf, req); break; case PM_QOS_UPDATE_REQ: pm_qos_flags_remove_req(pqf, req); fallthrough; case PM_QOS_ADD_REQ: req->flags = val; INIT_LIST_HEAD(&req->node); list_add_tail(&req->node, &pqf->list); pqf->effective_flags |= val; break; default: /* no action */ ; } curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; spin_unlock_irqrestore(&pm_qos_lock, irqflags); trace_pm_qos_update_flags(action, prev_value, curr_value); return prev_value != curr_value; } #ifdef CONFIG_CPU_IDLE /* Definitions related to the CPU latency QoS. */ static struct pm_qos_constraints cpu_latency_constraints = { .list = PLIST_HEAD_INIT(cpu_latency_constraints.list), .target_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE, .default_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE, .no_constraint_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE, .type = PM_QOS_MIN, }; static inline bool cpu_latency_qos_value_invalid(s32 value) { return value < 0 && value != PM_QOS_DEFAULT_VALUE; } /** * cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit. */ s32 cpu_latency_qos_limit(void) { return pm_qos_read_value(&cpu_latency_constraints); } /** * cpu_latency_qos_request_active - Check the given PM QoS request. * @req: PM QoS request to check. * * Return: 'true' if @req has been added to the CPU latency QoS list, 'false' * otherwise. */ bool cpu_latency_qos_request_active(struct pm_qos_request *req) { return req->qos == &cpu_latency_constraints; } EXPORT_SYMBOL_GPL(cpu_latency_qos_request_active); static void cpu_latency_qos_apply(struct pm_qos_request *req, enum pm_qos_req_action action, s32 value) { int ret = pm_qos_update_target(req->qos, &req->node, action, value); if (ret > 0) wake_up_all_idle_cpus(); } /** * cpu_latency_qos_add_request - Add new CPU latency QoS request. * @req: Pointer to a preallocated handle. * @value: Requested constraint value. * * Use @value to initialize the request handle pointed to by @req, insert it as * a new entry to the CPU latency QoS list and recompute the effective QoS * constraint for that list. * * Callers need to save the handle for later use in updates and removal of the * QoS request represented by it. 
*/ void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value) { if (!req || cpu_latency_qos_value_invalid(value)) return; if (cpu_latency_qos_request_active(req)) { WARN(1, KERN_ERR "%s called for already added request\n", __func__); return; } trace_pm_qos_add_request(value); req->qos = &cpu_latency_constraints; cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value); } EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request); /** * cpu_latency_qos_update_request - Modify existing CPU latency QoS request. * @req : QoS request to update. * @new_value: New requested constraint value. * * Use @new_value to update the QoS request represented by @req in the CPU * latency QoS list along with updating the effective constraint value for that * list. */ void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value) { if (!req || cpu_latency_qos_value_invalid(new_value)) return; if (!cpu_latency_qos_request_active(req)) { WARN(1, KERN_ERR "%s called for unknown object\n", __func__); return; } trace_pm_qos_update_request(new_value); if (new_value == req->node.prio) return; cpu_latency_qos_apply(req, PM_QOS_UPDATE_REQ, new_value); } EXPORT_SYMBOL_GPL(cpu_latency_qos_update_request); /** * cpu_latency_qos_remove_request - Remove existing CPU latency QoS request. * @req: QoS request to remove. * * Remove the CPU latency QoS request represented by @req from the CPU latency * QoS list along with updating the effective constraint value for that list. */ void cpu_latency_qos_remove_request(struct pm_qos_request *req) { if (!req) return; if (!cpu_latency_qos_request_active(req)) { WARN(1, KERN_ERR "%s called for unknown object\n", __func__); return; } trace_pm_qos_remove_request(PM_QOS_DEFAULT_VALUE); cpu_latency_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); memset(req, 0, sizeof(*req)); } EXPORT_SYMBOL_GPL(cpu_latency_qos_remove_request); /* User space interface to the CPU latency QoS via misc device. 
*/ static int cpu_latency_qos_open(struct inode *inode, struct file *filp) { struct pm_qos_request *req; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE); filp->private_data = req; return 0; } static int cpu_latency_qos_release(struct inode *inode, struct file *filp) { struct pm_qos_request *req = filp->private_data; filp->private_data = NULL; cpu_latency_qos_remove_request(req); kfree(req); return 0; } static ssize_t cpu_latency_qos_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { struct pm_qos_request *req = filp->private_data; unsigned long flags; s32 value; if (!req || !cpu_latency_qos_request_active(req)) return -EINVAL; spin_lock_irqsave(&pm_qos_lock, flags); value = pm_qos_get_value(&cpu_latency_constraints); spin_unlock_irqrestore(&pm_qos_lock, flags); return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32)); } static ssize_t cpu_latency_qos_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { s32 value; if (count == sizeof(s32)) { if (copy_from_user(&value, buf, sizeof(s32))) return -EFAULT; } else { int ret; ret = kstrtos32_from_user(buf, count, 16, &value); if (ret) return ret; } cpu_latency_qos_update_request(filp->private_data, value); return count; } static const struct file_operations cpu_latency_qos_fops = { .write = cpu_latency_qos_write, .read = cpu_latency_qos_read, .open = cpu_latency_qos_open, .release = cpu_latency_qos_release, .llseek = noop_llseek, }; static struct miscdevice cpu_latency_qos_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "cpu_dma_latency", .fops = &cpu_latency_qos_fops, }; static int __init cpu_latency_qos_init(void) { int ret; ret = misc_register(&cpu_latency_qos_miscdev); if (ret < 0) pr_err("%s: %s setup failed\n", __func__, cpu_latency_qos_miscdev.name); return ret; } late_initcall(cpu_latency_qos_init); #endif /* CONFIG_CPU_IDLE */ /* Definitions related to the frequency QoS below. */ static inline bool freq_qos_value_invalid(s32 value) { return value < 0 && value != PM_QOS_DEFAULT_VALUE; } /** * freq_constraints_init - Initialize frequency QoS constraints. * @qos: Frequency QoS constraints to initialize. */ void freq_constraints_init(struct freq_constraints *qos) { struct pm_qos_constraints *c; c = &qos->min_freq; plist_head_init(&c->list); c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE; c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE; c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE; c->type = PM_QOS_MAX; c->notifiers = &qos->min_freq_notifiers; BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers); c = &qos->max_freq; plist_head_init(&c->list); c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE; c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE; c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE; c->type = PM_QOS_MIN; c->notifiers = &qos->max_freq_notifiers; BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers); } /** * freq_qos_read_value - Get frequency QoS constraint for a given list. * @qos: Constraints to evaluate. * @type: QoS request type. */ s32 freq_qos_read_value(struct freq_constraints *qos, enum freq_qos_req_type type) { s32 ret; switch (type) { case FREQ_QOS_MIN: ret = IS_ERR_OR_NULL(qos) ? FREQ_QOS_MIN_DEFAULT_VALUE : pm_qos_read_value(&qos->min_freq); break; case FREQ_QOS_MAX: ret = IS_ERR_OR_NULL(qos) ? FREQ_QOS_MAX_DEFAULT_VALUE : pm_qos_read_value(&qos->max_freq); break; default: WARN_ON(1); ret = 0; } return ret; } /** * freq_qos_apply - Add/modify/remove frequency QoS request. * @req: Constraint request to apply. 
* @action: Action to perform (add/update/remove). * @value: Value to assign to the QoS request. * * This is only meant to be called from inside pm_qos, not drivers. */ int freq_qos_apply(struct freq_qos_request *req, enum pm_qos_req_action action, s32 value) { int ret; switch(req->type) { case FREQ_QOS_MIN: ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode, action, value); break; case FREQ_QOS_MAX: ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode, action, value); break; default: ret = -EINVAL; } return ret; } /** * freq_qos_add_request - Insert new frequency QoS request into a given list. * @qos: Constraints to update. * @req: Preallocated request object. * @type: Request type. * @value: Request value. * * Insert a new entry into the @qos list of requests, recompute the effective * QoS constraint value for that list and initialize the @req object. The * caller needs to save that object for later use in updates and removal. * * Return 1 if the effective constraint value has changed, 0 if the effective * constraint value has not changed, or a negative error code on failures. */ int freq_qos_add_request(struct freq_constraints *qos, struct freq_qos_request *req, enum freq_qos_req_type type, s32 value) { int ret; if (IS_ERR_OR_NULL(qos) || !req || freq_qos_value_invalid(value)) return -EINVAL; if (WARN(freq_qos_request_active(req), "%s() called for active request\n", __func__)) return -EINVAL; req->qos = qos; req->type = type; ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value); if (ret < 0) { req->qos = NULL; req->type = 0; } return ret; } EXPORT_SYMBOL_GPL(freq_qos_add_request); /** * freq_qos_update_request - Modify existing frequency QoS request. * @req: Request to modify. * @new_value: New request value. * * Update an existing frequency QoS request along with the effective constraint * value for the list of requests it belongs to. * * Return 1 if the effective constraint value has changed, 0 if the effective * constraint value has not changed, or a negative error code on failures. */ int freq_qos_update_request(struct freq_qos_request *req, s32 new_value) { if (!req || freq_qos_value_invalid(new_value)) return -EINVAL; if (WARN(!freq_qos_request_active(req), "%s() called for unknown object\n", __func__)) return -EINVAL; if (req->pnode.prio == new_value) return 0; return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value); } EXPORT_SYMBOL_GPL(freq_qos_update_request); /** * freq_qos_remove_request - Remove frequency QoS request from its list. * @req: Request to remove. * * Remove the given frequency QoS request from the list of constraints it * belongs to and recompute the effective constraint value for that list. * * Return 1 if the effective constraint value has changed, 0 if the effective * constraint value has not changed, or a negative error code on failures. */ int freq_qos_remove_request(struct freq_qos_request *req) { int ret; if (!req) return -EINVAL; if (WARN(!freq_qos_request_active(req), "%s() called for unknown object\n", __func__)) return -EINVAL; ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); req->qos = NULL; req->type = 0; return ret; } EXPORT_SYMBOL_GPL(freq_qos_remove_request); /** * freq_qos_add_notifier - Add frequency QoS change notifier. * @qos: List of requests to add the notifier to. * @type: Request type. * @notifier: Notifier block to add. 
*/ int freq_qos_add_notifier(struct freq_constraints *qos, enum freq_qos_req_type type, struct notifier_block *notifier) { int ret; if (IS_ERR_OR_NULL(qos) || !notifier) return -EINVAL; switch (type) { case FREQ_QOS_MIN: ret = blocking_notifier_chain_register(qos->min_freq.notifiers, notifier); break; case FREQ_QOS_MAX: ret = blocking_notifier_chain_register(qos->max_freq.notifiers, notifier); break; default: WARN_ON(1); ret = -EINVAL; } return ret; } EXPORT_SYMBOL_GPL(freq_qos_add_notifier); /** * freq_qos_remove_notifier - Remove frequency QoS change notifier. * @qos: List of requests to remove the notifier from. * @type: Request type. * @notifier: Notifier block to remove. */ int freq_qos_remove_notifier(struct freq_constraints *qos, enum freq_qos_req_type type, struct notifier_block *notifier) { int ret; if (IS_ERR_OR_NULL(qos) || !notifier) return -EINVAL; switch (type) { case FREQ_QOS_MIN: ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers, notifier); break; case FREQ_QOS_MAX: ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers, notifier); break; default: WARN_ON(1); ret = -EINVAL; } return ret; } EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
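/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * typical driver-side use of the CPU latency QoS interface defined above
 * (available when CONFIG_CPU_IDLE is enabled). The request object must stay
 * allocated for as long as it is registered; the 20 us bound and the
 * "example_" names are made up for illustration.
 */
static struct pm_qos_request example_latency_req;

static void example_enter_low_latency_mode(void)
{
	/* Ask the system to keep CPU wakeup latency at or below 20 us. */
	cpu_latency_qos_add_request(&example_latency_req, 20);
}

static void example_leave_low_latency_mode(void)
{
	/* Drop the request; the effective constraint is recomputed. */
	cpu_latency_qos_remove_request(&example_latency_req);
}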
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. */ #ifndef RXE_LOC_H #define RXE_LOC_H /* rxe_av.c */ void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av); int rxe_av_chk_attr(struct rxe_qp *qp, struct rdma_ah_attr *attr); int rxe_ah_chk_attr(struct rxe_ah *ah, struct rdma_ah_attr *attr); void rxe_av_from_attr(u8 port_num, struct rxe_av *av, struct rdma_ah_attr *attr); void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr); void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr); struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp); /* rxe_cq.c */ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector); int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector, struct ib_udata *udata, struct rxe_create_cq_resp __user *uresp); int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct rxe_resize_cq_resp __user *uresp, struct ib_udata *udata); int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited); void rxe_cq_cleanup(struct rxe_pool_elem *elem); /* rxe_mcast.c */ struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid); int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid); int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid); void rxe_cleanup_mcg(struct kref *kref); /* rxe_mmap.c */ struct rxe_mmap_info { struct list_head pending_mmaps; struct ib_ucontext *context; struct kref ref; void *obj; struct mminfo info; }; void rxe_mmap_release(struct kref *ref); struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size, struct ib_udata *udata, void *obj); int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); /* rxe_mr.c */ u8 rxe_get_next_key(u32 last_key); void rxe_mr_init(int access, struct rxe_mr *mr); void rxe_mr_init_dma(int access, struct rxe_mr *mr); int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, int access, struct rxe_mr *mr); int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr); int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length); int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, unsigned int length, enum rxe_mr_copy_dir dir); int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma, void *addr, int length, enum rxe_mr_copy_dir dir); int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, u64 compare, u64 swap_add, u64
*orig_val); enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value); struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key, enum rxe_mr_lookup_type type); int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length); int advance_dma_data(struct rxe_dma_info *dma, unsigned int length); int rxe_invalidate_mr(struct rxe_qp *qp, u32 key); int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe); void rxe_mr_cleanup(struct rxe_pool_elem *elem); /* defined in rxe_mr.c; used in rxe_mr.c and rxe_odp.c */ extern spinlock_t atomic_ops_lock; /* rxe_mw.c */ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata); int rxe_dealloc_mw(struct ib_mw *ibmw); int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe); int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey); struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey); void rxe_mw_cleanup(struct rxe_pool_elem *elem); /* rxe_net.c */ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, int paylen, struct rxe_pkt_info *pkt); int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt, struct sk_buff *skb); int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct sk_buff *skb); const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num); /* rxe_qp.c */ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init); int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct ib_qp_init_attr *init, struct rxe_create_qp_resp __user *uresp, struct ib_pd *ibpd, struct ib_udata *udata); int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init); int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_attr *attr, int mask); int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, struct ib_udata *udata); int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask); void rxe_qp_error(struct rxe_qp *qp); int rxe_qp_chk_destroy(struct rxe_qp *qp); void rxe_qp_cleanup(struct rxe_pool_elem *elem); static inline int qp_num(struct rxe_qp *qp) { return qp->ibqp.qp_num; } static inline enum ib_qp_type qp_type(struct rxe_qp *qp) { return qp->ibqp.qp_type; } static inline enum ib_qp_state qp_state(struct rxe_qp *qp) { return qp->attr.qp_state; } static inline int qp_mtu(struct rxe_qp *qp) { if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) return qp->attr.path_mtu; else return IB_MTU_4096; } static inline bool is_odp_mr(struct rxe_mr *mr) { return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem && mr->umem->is_odp; } void free_rd_atomic_resource(struct resp_res *res); static inline void rxe_advance_resp_resource(struct rxe_qp *qp) { qp->resp.res_head++; if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic)) qp->resp.res_head = 0; } void retransmit_timer(struct timer_list *t); void rnr_nak_timer(struct timer_list *t); /* rxe_srq.c */ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init); int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_init_attr *init, struct ib_udata *udata, struct rxe_create_srq_resp __user *uresp); int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask); int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata); void rxe_srq_cleanup(struct rxe_pool_elem *elem); void rxe_dealloc(struct ib_device 
*ib_dev); int rxe_completer(struct rxe_qp *qp); int rxe_requester(struct rxe_qp *qp); int rxe_sender(struct rxe_qp *qp); int rxe_receiver(struct rxe_qp *qp); /* rxe_icrc.c */ int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt); void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt); void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb); void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb); static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp) { return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type]; } /* rxe_odp.c */ extern const struct mmu_interval_notifier_ops rxe_mn_ops; #if defined CONFIG_INFINIBAND_ON_DEMAND_PAGING int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, int access_flags, struct rxe_mr *mr); int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, enum rxe_mr_copy_dir dir); enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, u64 compare, u64 swap_add, u64 *orig_val); int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length); enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value); #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ static inline int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, int access_flags, struct rxe_mr *mr) { return -EOPNOTSUPP; } static inline int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, enum rxe_mr_copy_dir dir) { return -EOPNOTSUPP; } static inline enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, u64 compare, u64 swap_add, u64 *orig_val) { return RESPST_ERR_UNSUPPORTED_OPCODE; } static inline int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length) { return -EOPNOTSUPP; } static inline enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) { return RESPST_ERR_UNSUPPORTED_OPCODE; } #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ #endif /* RXE_LOC_H */
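/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the #else stubs above let callers choose the ODP path at run time without
 * any #ifdefs of their own, because is_odp_mr() folds to false when
 * CONFIG_INFINIBAND_ON_DEMAND_PAGING is disabled. The helper below is
 * hypothetical and would have to live inside the include guard;
 * rxe_mr_copy() and rxe_odp_mr_copy() are the real entry points declared
 * in this file.
 */
static inline int example_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
				  int length, enum rxe_mr_copy_dir dir)
{
	if (is_odp_mr(mr))
		return rxe_odp_mr_copy(mr, iova, addr, length, dir);
	return rxe_mr_copy(mr, iova, addr, length, dir);
}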
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/isofs/joliet.c * * (C) 1996 Gordon Chaffee * * Joliet: Microsoft's Unicode extensions to iso9660 */ #include <linux/types.h> #include <linux/nls.h> #include "isofs.h" /* * Convert Unicode 16 to UTF-8 or ASCII. */ static int uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls) { __be16 *ip, ch; unsigned char *op; ip = uni; op = ascii; while ((ch = get_unaligned(ip)) && len) { int llen; llen = nls->uni2char(be16_to_cpu(ch), op, NLS_MAX_CHARSET_SIZE); if (llen > 0) op += llen; else *op++ = '?'; ip++; len--; } *op = 0; return (op - ascii); } int get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode) { struct nls_table *nls; unsigned char len = 0; nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset; if (!nls) { len = utf16s_to_utf8s((const wchar_t *) de->name, de->name_len[0] >> 1, UTF16_BIG_ENDIAN, outname, PAGE_SIZE); } else { len = uni16_to_x8(outname, (__be16 *) de->name, de->name_len[0] >> 1, nls); } if ((len > 2) && (outname[len-2] == ';') && (outname[len-1] == '1')) len -= 2; /* * Windows doesn't like periods at the end of a name, * so neither do we */ while (len >= 2 && (outname[len-1] == '.')) len--; return len; }
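/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the tail of get_joliet_filename() above, factored into a helper to show
 * the two clean-ups it applies - dropping the ISO 9660 ";1" version suffix
 * and then any trailing periods. The helper name is made up for
 * illustration.
 */
static unsigned char example_trim_joliet_name(unsigned char *outname,
					      unsigned char len)
{
	/* Strip the ";1" file-version suffix mandated by ISO 9660. */
	if (len > 2 && outname[len - 2] == ';' && outname[len - 1] == '1')
		len -= 2;
	/* Windows dislikes trailing periods, so drop them as well. */
	while (len >= 2 && outname[len - 1] == '.')
		len--;
	return len;
}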
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
 * Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
 * Copyright (C) 2004, 05 Thomas Osterried DL9SAU <thomas@x-berg.in-berlin.de>
 */
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/major.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/jiffies.h>
#include <linux/refcount.h>

#include <net/ax25.h>

#define AX_MTU		236

/* some arch define END as assembly function ending, just undef it */
#undef	END
/* SLIP/KISS protocol characters. */
#define END		0300	/* indicates end of frame	*/
#define ESC		0333	/* indicates byte stuffing	*/
#define ESC_END		0334	/* ESC ESC_END means END 'data'	*/
#define ESC_ESC		0335	/* ESC ESC_ESC means ESC 'data'	*/

struct mkiss {
	struct tty_struct	*tty;	/* ptr to TTY structure		*/
	struct net_device	*dev;	/* easy for intr handling	*/

	/* These are pointers to the malloc()ed frame buffers. */
	spinlock_t		buflock;	/* lock for rbuff and xbuff */
	unsigned char		*rbuff;		/* receiver buffer	*/
	int			rcount;		/* received chars counter */
	unsigned char		*xbuff;		/* transmitter buffer	*/
	unsigned char		*xhead;		/* pointer to next byte to XMIT */
	int			xleft;		/* bytes left in XMIT queue */

	/* Detailed SLIP statistics. */
	int			mtu;		/* Our mtu (to spot changes!) */
	int			buffsize;	/* Max buffers sizes	*/

	unsigned long		flags;		/* Flag values / mode etc */
						/* long req'd: used by set_bit --RR */
#define AXF_INUSE	0		/* Channel in use		*/
#define AXF_ESCAPE	1		/* ESC received			*/
#define AXF_ERROR	2		/* Parity, etc. error		*/
#define AXF_KEEPTEST	3		/* Keepalive test flag		*/
#define AXF_OUTWAIT	4		/* an output packet is waiting	*/

	int			mode;
	int			crcmode;	/* MW: for FlexNet, SMACK etc. */
	int			crcauto;	/* CRC auto mode */

#define CRC_MODE_NONE		0
#define CRC_MODE_FLEX		1
#define CRC_MODE_SMACK		2
#define CRC_MODE_FLEX_TEST	3
#define CRC_MODE_SMACK_TEST	4

	refcount_t		refcnt;
	struct completion	dead;
};

/*---------------------------------------------------------------------------*/

static const unsigned short crc_flex_table[] = {
	0x0f87, 0x1e0e, 0x2c95, 0x3d1c, 0x49a3, 0x582a, 0x6ab1, 0x7b38,
	0x83cf, 0x9246, 0xa0dd, 0xb154, 0xc5eb, 0xd462, 0xe6f9, 0xf770,
	0x1f06, 0x0e8f, 0x3c14, 0x2d9d, 0x5922, 0x48ab, 0x7a30, 0x6bb9,
	0x934e, 0x82c7, 0xb05c, 0xa1d5, 0xd56a, 0xc4e3, 0xf678, 0xe7f1,
	0x2e85, 0x3f0c, 0x0d97, 0x1c1e, 0x68a1, 0x7928, 0x4bb3, 0x5a3a,
	0xa2cd, 0xb344, 0x81df, 0x9056, 0xe4e9, 0xf560, 0xc7fb, 0xd672,
	0x3e04, 0x2f8d, 0x1d16, 0x0c9f, 0x7820, 0x69a9, 0x5b32, 0x4abb,
	0xb24c, 0xa3c5, 0x915e, 0x80d7, 0xf468, 0xe5e1, 0xd77a, 0xc6f3,
	0x4d83, 0x5c0a, 0x6e91, 0x7f18, 0x0ba7, 0x1a2e, 0x28b5, 0x393c,
	0xc1cb, 0xd042, 0xe2d9, 0xf350, 0x87ef, 0x9666, 0xa4fd, 0xb574,
	0x5d02, 0x4c8b, 0x7e10, 0x6f99, 0x1b26, 0x0aaf, 0x3834, 0x29bd,
	0xd14a, 0xc0c3, 0xf258, 0xe3d1, 0x976e, 0x86e7, 0xb47c, 0xa5f5,
	0x6c81, 0x7d08, 0x4f93, 0x5e1a, 0x2aa5, 0x3b2c, 0x09b7, 0x183e,
	0xe0c9, 0xf140, 0xc3db, 0xd252, 0xa6ed, 0xb764, 0x85ff, 0x9476,
	0x7c00, 0x6d89, 0x5f12, 0x4e9b, 0x3a24, 0x2bad, 0x1936, 0x08bf,
	0xf048, 0xe1c1, 0xd35a, 0xc2d3, 0xb66c, 0xa7e5, 0x957e, 0x84f7,
	0x8b8f, 0x9a06, 0xa89d, 0xb914, 0xcdab, 0xdc22, 0xeeb9, 0xff30,
	0x07c7, 0x164e, 0x24d5, 0x355c, 0x41e3, 0x506a, 0x62f1, 0x7378,
	0x9b0e, 0x8a87, 0xb81c, 0xa995, 0xdd2a, 0xcca3, 0xfe38, 0xefb1,
	0x1746, 0x06cf, 0x3454, 0x25dd, 0x5162, 0x40eb, 0x7270, 0x63f9,
	0xaa8d, 0xbb04, 0x899f, 0x9816, 0xeca9, 0xfd20, 0xcfbb, 0xde32,
	0x26c5, 0x374c, 0x05d7, 0x145e, 0x60e1, 0x7168, 0x43f3, 0x527a,
	0xba0c, 0xab85, 0x991e, 0x8897, 0xfc28, 0xeda1, 0xdf3a, 0xceb3,
	0x3644, 0x27cd, 0x1556, 0x04df, 0x7060, 0x61e9, 0x5372, 0x42fb,
	0xc98b, 0xd802, 0xea99, 0xfb10, 0x8faf, 0x9e26, 0xacbd, 0xbd34,
	0x45c3, 0x544a, 0x66d1, 0x7758, 0x03e7, 0x126e, 0x20f5, 0x317c,
	0xd90a, 0xc883, 0xfa18, 0xeb91, 0x9f2e, 0x8ea7, 0xbc3c, 0xadb5,
	0x5542, 0x44cb, 0x7650, 0x67d9, 0x1366, 0x02ef, 0x3074, 0x21fd,
	0xe889, 0xf900, 0xcb9b, 0xda12, 0xaead, 0xbf24, 0x8dbf, 0x9c36,
	0x64c1, 0x7548, 0x47d3, 0x565a, 0x22e5, 0x336c, 0x01f7, 0x107e,
	0xf808, 0xe981, 0xdb1a, 0xca93, 0xbe2c, 0xafa5, 0x9d3e, 0x8cb7,
	0x7440, 0x65c9, 0x5752, 0x46db, 0x3264, 0x23ed, 0x1176, 0x00ff
};

static unsigned short calc_crc_flex(unsigned char *cp, int size)
{
	unsigned short crc = 0xffff;

	while (size--)
		crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];

	return crc;
}

static int check_crc_flex(unsigned char *cp, int size)
{
	unsigned short crc = 0xffff;

	if (size < 3)
		return -1;

	while (size--)
		crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];

	if ((crc & 0xffff) != 0x7070)
		return -1;

	return 0;
}

static int check_crc_16(unsigned char *cp, int size)
{
	unsigned short crc = 0x0000;

	if (size < 3)
		return -1;

	crc = crc16(0, cp, size);

	if (crc != 0x0000)
		return -1;

	return 0;
}

/*
 * Standard encapsulation
 */

static int kiss_esc(unsigned char *s, unsigned char *d, int len)
{
	unsigned char *ptr = d;
	unsigned char c;

	/*
	 * Send an initial END character to flush out any data that may have
	 * accumulated in the receiver due to line noise.
	 */

	*ptr++ = END;

	while (len-- > 0) {
		switch (c = *s++) {
		case END:
			*ptr++ = ESC;
			*ptr++ = ESC_END;
			break;
		case ESC:
			*ptr++ = ESC;
			*ptr++ = ESC_ESC;
			break;
		default:
			*ptr++ = c;
			break;
		}
	}

	*ptr++ = END;

	return ptr - d;
}

/*
 * MW:
 * OK, it's ugly, but tell me a better solution without copying the
 * packet to a temporary buffer :-)
 */
static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc,
			int len)
{
	unsigned char *ptr = d;
	unsigned char c = 0;

	*ptr++ = END;
	while (len > 0) {
		if (len > 2)
			c = *s++;
		else if (len > 1)
			c = crc >> 8;
		else
			c = crc & 0xff;

		len--;

		switch (c) {
		case END:
			*ptr++ = ESC;
			*ptr++ = ESC_END;
			break;
		case ESC:
			*ptr++ = ESC;
			*ptr++ = ESC_ESC;
			break;
		default:
			*ptr++ = c;
			break;
		}
	}
	*ptr++ = END;

	return ptr - d;
}

/* Send one completely decapsulated AX.25 packet to the AX.25 layer. */
static void ax_bump(struct mkiss *ax)
{
	struct sk_buff *skb;
	int count;

	spin_lock_bh(&ax->buflock);
	if (ax->rbuff[0] > 0x0f) {
		if (ax->rbuff[0] & 0x80) {
			if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
				ax->dev->stats.rx_errors++;
				spin_unlock_bh(&ax->buflock);

				return;
			}
			if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
				printk(KERN_INFO
				       "mkiss: %s: Switching to crc-smack\n",
				       ax->dev->name);
				ax->crcmode = CRC_MODE_SMACK;
			}
			ax->rcount -= 2;
			*ax->rbuff &= ~0x80;
		} else if (ax->rbuff[0] & 0x20) {
			if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
				ax->dev->stats.rx_errors++;
				spin_unlock_bh(&ax->buflock);
				return;
			}
			if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
				printk(KERN_INFO
				       "mkiss: %s: Switching to crc-flexnet\n",
				       ax->dev->name);
				ax->crcmode = CRC_MODE_FLEX;
			}
			ax->rcount -= 2;

			/*
			 * dl9sau bugfix: the trailing two bytes of the FlexNet
			 * CRC will not be passed to the kernel, so we have to
			 * correct the kissparm signature, because it indicates
			 * a CRC but there is none.
			 */
			*ax->rbuff &= ~0x20;
		}
	}

	count = ax->rcount;

	if ((skb = dev_alloc_skb(count)) == NULL) {
		printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
		       ax->dev->name);
		ax->dev->stats.rx_dropped++;
		spin_unlock_bh(&ax->buflock);
		return;
	}

	skb_put_data(skb, ax->rbuff, count);
	skb->protocol = ax25_type_trans(skb, ax->dev);
	netif_rx(skb);
	ax->dev->stats.rx_packets++;
	ax->dev->stats.rx_bytes += count;
	spin_unlock_bh(&ax->buflock);
}

static void kiss_unesc(struct mkiss *ax, unsigned char s)
{
	switch (s) {
	case END:
		/* drop keeptest bit = VSV */
		if (test_bit(AXF_KEEPTEST, &ax->flags))
			clear_bit(AXF_KEEPTEST, &ax->flags);

		if (!test_and_clear_bit(AXF_ERROR, &ax->flags) && (ax->rcount > 2))
			ax_bump(ax);

		clear_bit(AXF_ESCAPE, &ax->flags);
		ax->rcount = 0;
		return;

	case ESC:
		set_bit(AXF_ESCAPE, &ax->flags);
		return;
	case ESC_ESC:
		if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
			s = ESC;
		break;
	case ESC_END:
		if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
			s = END;
		break;
	}

	spin_lock_bh(&ax->buflock);
	if (!test_bit(AXF_ERROR, &ax->flags)) {
		if (ax->rcount < ax->buffsize) {
			ax->rbuff[ax->rcount++] = s;
			spin_unlock_bh(&ax->buflock);
			return;
		}

		ax->dev->stats.rx_over_errors++;
		set_bit(AXF_ERROR, &ax->flags);
	}
	spin_unlock_bh(&ax->buflock);
}

static int ax_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr_ax25 *sa = addr;

	netif_tx_lock_bh(dev);
	netif_addr_lock(dev);
	__dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
	netif_addr_unlock(dev);
	netif_tx_unlock_bh(dev);

	return 0;
}

/*---------------------------------------------------------------------------*/

static void ax_changedmtu(struct mkiss *ax)
{
	struct net_device *dev = ax->dev;
	unsigned char *xbuff, *rbuff, *oxbuff, *orbuff;
	int len;

	len = dev->mtu * 2;

	/*
	 * Allow for arrival of larger UDP packets, even if we say not to.
	 * This also fixes a bug in which SunOS sends 512-byte packets even
	 * with an MSS of 128.
	 */
	if (len < 576 * 2)
		len = 576 * 2;

	xbuff = kmalloc(len + 4, GFP_ATOMIC);
	rbuff = kmalloc(len + 4, GFP_ATOMIC);

	if (xbuff == NULL || rbuff == NULL) {
		printk(KERN_ERR "mkiss: %s: unable to grow ax25 buffers, "
		       "MTU change cancelled.\n", ax->dev->name);
		dev->mtu = ax->mtu;
		kfree(xbuff);
		kfree(rbuff);
		return;
	}

	spin_lock_bh(&ax->buflock);

	oxbuff    = ax->xbuff;
	ax->xbuff = xbuff;
	orbuff    = ax->rbuff;
	ax->rbuff = rbuff;

	if (ax->xleft) {
		if (ax->xleft <= len) {
			memcpy(ax->xbuff, ax->xhead, ax->xleft);
		} else {
			ax->xleft = 0;
			dev->stats.tx_dropped++;
		}
	}

	ax->xhead = ax->xbuff;

	if (ax->rcount) {
		if (ax->rcount <= len) {
			memcpy(ax->rbuff, orbuff, ax->rcount);
		} else {
			ax->rcount = 0;
			dev->stats.rx_over_errors++;
			set_bit(AXF_ERROR, &ax->flags);
		}
	}

	ax->mtu      = dev->mtu + 73;
	ax->buffsize = len;

	spin_unlock_bh(&ax->buflock);

	kfree(oxbuff);
	kfree(orbuff);
}

/* Encapsulate one AX.25 packet and stuff into a TTY queue. */
static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
{
	struct mkiss *ax = netdev_priv(dev);
	unsigned char *p;
	int actual, count;

	if (ax->mtu != ax->dev->mtu + 73)	/* Someone has been ifconfigging */
		ax_changedmtu(ax);

	if (len > ax->mtu) {		/* Sigh, shouldn't occur BUT ... */
		printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n",
		       ax->dev->name);
		dev->stats.tx_dropped++;
		netif_start_queue(dev);
		return;
	}

	p = icp;

	spin_lock_bh(&ax->buflock);
	if ((*p & 0x0f) != 0) {
		/* Configuration Command (kissparms(1)).
		 * Protocol spec says: never append CRC.
		 * This fixes a very old bug in the linux
		 * kiss driver. -- dl9sau
		 */
		switch (*p & 0xff) {
		case 0x85:
			/* command from userspace especially for us,
			 * not for delivery to the tnc */
			if (len > 1) {
				int cmd = (p[1] & 0xff);

				switch (cmd) {
				case 3:
					ax->crcmode = CRC_MODE_SMACK;
					break;
				case 2:
					ax->crcmode = CRC_MODE_FLEX;
					break;
				case 1:
					ax->crcmode = CRC_MODE_NONE;
					break;
				case 0:
				default:
					ax->crcmode = CRC_MODE_SMACK_TEST;
					cmd = 0;
				}
				ax->crcauto = (cmd ? 0 : 1);
				printk(KERN_INFO "mkiss: %s: crc mode set to %d\n",
				       ax->dev->name, cmd);
			}
			spin_unlock_bh(&ax->buflock);
			netif_start_queue(dev);

			return;
		default:
			count = kiss_esc(p, ax->xbuff, len);
		}
	} else {
		unsigned short crc;

		switch (ax->crcmode) {
		case CRC_MODE_SMACK_TEST:
			ax->crcmode = CRC_MODE_FLEX_TEST;
			printk(KERN_INFO "mkiss: %s: Trying crc-smack\n",
			       ax->dev->name);
			fallthrough;
		case CRC_MODE_SMACK:
			*p |= 0x80;
			crc = swab16(crc16(0, p, len));
			count = kiss_esc_crc(p, ax->xbuff, crc, len + 2);
			break;
		case CRC_MODE_FLEX_TEST:
			ax->crcmode = CRC_MODE_NONE;
			printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n",
			       ax->dev->name);
			fallthrough;
		case CRC_MODE_FLEX:
			*p |= 0x20;
			crc = calc_crc_flex(p, len);
			count = kiss_esc_crc(p, ax->xbuff, crc, len + 2);
			break;
		default:
			count = kiss_esc(p, ax->xbuff, len);
		}
	}
	spin_unlock_bh(&ax->buflock);

	set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
	actual = ax->tty->ops->write(ax->tty, ax->xbuff, count);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += actual;

	netif_trans_update(ax->dev);
	ax->xleft = count - actual;
	ax->xhead = ax->xbuff + actual;
}

/* Encapsulate an AX.25 packet and kick it into a TTY queue. */
static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mkiss *ax = netdev_priv(dev);

	if (skb->protocol == htons(ETH_P_IP))
		return ax25_ip_xmit(skb);

	if (!netif_running(dev)) {
		printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (netif_queue_stopped(dev)) {
		/*
		 * Maybe we should check the transmitter timeout here?
		 * 14 Oct 1994 Dmitry Gorodchanin.
		 */
		if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
			/* 20 sec timeout not reached */
			return NETDEV_TX_BUSY;
		}

		printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n",
		       dev->name,
		       (tty_chars_in_buffer(ax->tty) || ax->xleft) ?
		       "bad line quality" : "driver error");

		ax->xleft = 0;
		clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
		netif_start_queue(dev);
	}

	/* We were not busy, so we are now... :-) */
	netif_stop_queue(dev);
	ax_encaps(dev, skb->data, skb->len);
	kfree_skb(skb);

	return NETDEV_TX_OK;
}

static int ax_open_dev(struct net_device *dev)
{
	struct mkiss *ax = netdev_priv(dev);

	if (ax->tty == NULL)
		return -ENODEV;

	return 0;
}

/* Open the low-level part of the AX25 channel. Easy! */
static int ax_open(struct net_device *dev)
{
	struct mkiss *ax = netdev_priv(dev);
	unsigned long len;

	if (ax->tty == NULL)
		return -ENODEV;

	/*
	 * Allocate the frame buffers:
	 *
	 * rbuff	Receive buffer.
	 * xbuff	Transmit buffer.
	 */
	len = dev->mtu * 2;

	/*
	 * Allow for arrival of larger UDP packets, even if we say not to.
	 * This also fixes a bug in which SunOS sends 512-byte packets even
	 * with an MSS of 128.
	 */
	if (len < 576 * 2)
		len = 576 * 2;

	if ((ax->rbuff = kmalloc(len + 4, GFP_KERNEL)) == NULL)
		goto norbuff;

	if ((ax->xbuff = kmalloc(len + 4, GFP_KERNEL)) == NULL)
		goto noxbuff;

	ax->mtu	     = dev->mtu + 73;
	ax->buffsize = len;
	ax->rcount   = 0;
	ax->xleft    = 0;

	ax->flags   &= (1 << AXF_INUSE);	/* Clear ESCAPE & ERROR flags */

	spin_lock_init(&ax->buflock);

	return 0;

noxbuff:
	kfree(ax->rbuff);

norbuff:
	return -ENOMEM;
}

/* Close the low-level part of the AX25 channel. Easy! */
static int ax_close(struct net_device *dev)
{
	struct mkiss *ax = netdev_priv(dev);

	if (ax->tty)
		clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);

	netif_stop_queue(dev);

	return 0;
}

static const struct net_device_ops ax_netdev_ops = {
	.ndo_open            = ax_open_dev,
	.ndo_stop            = ax_close,
	.ndo_start_xmit	     = ax_xmit,
	.ndo_set_mac_address = ax_set_mac_address,
};

static void ax_setup(struct net_device *dev)
{
	/* Finish setting up the DEVICE info. */
	dev->mtu             = AX_MTU;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->addr_len        = AX25_ADDR_LEN;
	dev->type            = ARPHRD_AX25;
	dev->tx_queue_len    = 10;
	dev->header_ops      = &ax25_header_ops;
	dev->netdev_ops	     = &ax_netdev_ops;

	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	dev_addr_set(dev, (u8 *)&ax25_defaddr);

	dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}

/*
 * We have a potential race on dereferencing tty->disc_data, because the tty
 * layer provides no locking at all - thus one cpu could be running
 * mkiss_receive_buf while another calls mkiss_close, which zeroes
 * tty->disc_data and frees the memory that mkiss_receive_buf is using. The
 * best way to fix this is to use a rwlock in the tty struct, but for now we
 * use a single global rwlock for all ttys in the mkiss line discipline.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct mkiss *mkiss_get(struct tty_struct *tty)
{
	struct mkiss *ax;

	read_lock(&disc_data_lock);
	ax = tty->disc_data;
	if (ax)
		refcount_inc(&ax->refcnt);
	read_unlock(&disc_data_lock);

	return ax;
}

static void mkiss_put(struct mkiss *ax)
{
	if (refcount_dec_and_test(&ax->refcnt))
		complete(&ax->dead);
}

static int crc_force = 0;	/* Can be overridden with insmod */

static int mkiss_open(struct tty_struct *tty)
{
	struct net_device *dev;
	struct mkiss *ax;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	dev = alloc_netdev(sizeof(struct mkiss), "ax%d", NET_NAME_UNKNOWN,
			   ax_setup);
	if (!dev) {
		err = -ENOMEM;
		goto out;
	}

	ax = netdev_priv(dev);
	ax->dev = dev;

	spin_lock_init(&ax->buflock);
	refcount_set(&ax->refcnt, 1);
	init_completion(&ax->dead);

	ax->tty = tty;
	tty->disc_data = ax;
	tty->receive_room = 65535;

	tty_driver_flush_buffer(tty);

	/* Restore default settings */
	dev->type = ARPHRD_AX25;

	/* Perform the low-level AX25 initialization. */
	err = ax_open(ax->dev);
	if (err)
		goto out_free_netdev;

	err = register_netdev(dev);
	if (err)
		goto out_free_buffers;

	/* after register_netdev(), because otherwise printk smashes the kernel */
	switch (crc_force) {
	case 3:
		ax->crcmode = CRC_MODE_SMACK;
		printk(KERN_INFO "mkiss: %s: crc mode smack forced.\n",
		       ax->dev->name);
		break;
	case 2:
		ax->crcmode = CRC_MODE_FLEX;
		printk(KERN_INFO "mkiss: %s: crc mode flexnet forced.\n",
		       ax->dev->name);
		break;
	case 1:
		ax->crcmode = CRC_MODE_NONE;
		printk(KERN_INFO "mkiss: %s: crc mode disabled.\n",
		       ax->dev->name);
		break;
	case 0:
	default:
		crc_force = 0;
		printk(KERN_INFO "mkiss: %s: crc mode is auto.\n",
		       ax->dev->name);
		ax->crcmode = CRC_MODE_SMACK_TEST;
	}
	ax->crcauto = (crc_force ? 0 : 1);

	netif_start_queue(dev);

	/* Done.  We have linked the TTY line to a channel. */
	return 0;

out_free_buffers:
	kfree(ax->rbuff);
	kfree(ax->xbuff);

out_free_netdev:
	free_netdev(dev);

out:
	return err;
}

static void mkiss_close(struct tty_struct *tty)
{
	struct mkiss *ax;

	write_lock_irq(&disc_data_lock);
	ax = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);

	if (!ax)
		return;

	/*
	 * We have now ensured that nobody can start using ax from now on, but
	 * we have to wait for all existing users to finish.
	 */
	if (!refcount_dec_and_test(&ax->refcnt))
		wait_for_completion(&ax->dead);
	/*
	 * Halt the transmit queue so that a new transmit cannot scribble
	 * on our buffers
	 */
	netif_stop_queue(ax->dev);

	unregister_netdev(ax->dev);

	/* Free all AX25 frame buffers after unreg. */
	kfree(ax->rbuff);
	kfree(ax->xbuff);

	ax->tty = NULL;

	free_netdev(ax->dev);
}

/* Perform I/O control on an active ax25 channel. */
static int mkiss_ioctl(struct tty_struct *tty, unsigned int cmd,
		       unsigned long arg)
{
	struct mkiss *ax = mkiss_get(tty);
	struct net_device *dev;
	unsigned int tmp, err;

	/* First make sure we're connected. */
	if (ax == NULL)
		return -ENXIO;
	dev = ax->dev;

	switch (cmd) {
	case SIOCGIFNAME:
		err = copy_to_user((void __user *) arg, ax->dev->name,
				   strlen(ax->dev->name) + 1) ? -EFAULT : 0;
		break;
	case SIOCGIFENCAP:
		err = put_user(4, (int __user *) arg);
		break;
	case SIOCSIFENCAP:
		if (get_user(tmp, (int __user *) arg)) {
			err = -EFAULT;
			break;
		}

		ax->mode = tmp;
		dev->addr_len        = AX25_ADDR_LEN;
		dev->hard_header_len = AX25_KISS_HEADER_LEN +
				       AX25_MAX_HEADER_LEN + 3;
		dev->type            = ARPHRD_AX25;

		err = 0;
		break;
	case SIOCSIFHWADDR: {
		char addr[AX25_ADDR_LEN];

		if (copy_from_user(&addr, (void __user *) arg,
				   AX25_ADDR_LEN)) {
			err = -EFAULT;
			break;
		}

		netif_tx_lock_bh(dev);
		__dev_addr_set(dev, addr, AX25_ADDR_LEN);
		netif_tx_unlock_bh(dev);

		err = 0;
		break;
	}
	default:
		err = -ENOIOCTLCMD;
	}

	mkiss_put(ax);

	return err;
}

/*
 * Handle the 'receiver data ready' interrupt.
 * This function is called by the 'tty_io' module in the kernel when
 * a block of data has been received, which can now be decapsulated
 * and sent on to the AX.25 layer for further processing.
 */
static void mkiss_receive_buf(struct tty_struct *tty, const u8 *cp,
			      const u8 *fp, size_t count)
{
	struct mkiss *ax = mkiss_get(tty);

	if (!ax)
		return;

	/*
	 * Argh! MTU change time! This costs us the packet part received
	 * at the change.
	 */
	if (ax->mtu != ax->dev->mtu + 73)
		ax_changedmtu(ax);

	/* Read the characters out of the buffer */
	while (count--) {
		if (fp != NULL && *fp++) {
			if (!test_and_set_bit(AXF_ERROR, &ax->flags))
				ax->dev->stats.rx_errors++;
			cp++;
			continue;
		}

		kiss_unesc(ax, *cp++);
	}

	mkiss_put(ax);
	tty_unthrottle(tty);
}

/*
 * Called by the driver when there's room for more data.  If we have
 * more packets to send, we send them here.
 */
static void mkiss_write_wakeup(struct tty_struct *tty)
{
	struct mkiss *ax = mkiss_get(tty);
	int actual;

	if (!ax)
		return;

	if (ax->xleft <= 0) {
		/* The serial buffer is almost free, so we can start
		 * transmission of another packet */
		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
		netif_wake_queue(ax->dev);
		goto out;
	}

	actual = tty->ops->write(tty, ax->xhead, ax->xleft);
	ax->xleft -= actual;
	ax->xhead += actual;

out:
	mkiss_put(ax);
}

static struct tty_ldisc_ops ax_ldisc = {
	.owner		= THIS_MODULE,
	.num		= N_AX25,
	.name		= "mkiss",
	.open		= mkiss_open,
	.close		= mkiss_close,
	.ioctl		= mkiss_ioctl,
	.receive_buf	= mkiss_receive_buf,
	.write_wakeup	= mkiss_write_wakeup
};

static const char banner[] __initconst = KERN_INFO \
	"mkiss: AX.25 Multikiss, Hans Alblas PE1AYX\n";
static const char msg_regfail[] __initconst = KERN_ERR \
	"mkiss: can't register line discipline (err = %d)\n";

static int __init mkiss_init_driver(void)
{
	int status;

	printk(banner);

	status = tty_register_ldisc(&ax_ldisc);
	if (status != 0)
		printk(msg_regfail, status);

	return status;
}

static void __exit mkiss_exit_driver(void)
{
	tty_unregister_ldisc(&ax_ldisc);
}

MODULE_AUTHOR("Ralf Baechle DL5RB <ralf@linux-mips.org>");
MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs");
module_param(crc_force, int, 0);
MODULE_PARM_DESC(crc_force, "crc [0 = auto | 1 = none | 2 = flexnet | 3 = smack]");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_AX25);

module_init(mkiss_init_driver);
module_exit(mkiss_exit_driver);
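/*
 * Illustration: the END/ESC byte stuffing performed by kiss_esc() above,
 * restated as a standalone userspace program using the same SLIP/KISS
 * protocol characters. The function name stuff() is hypothetical.
 */
#include <stdio.h>

#define END	0300
#define ESC	0333
#define ESC_END	0334
#define ESC_ESC	0335

static int stuff(const unsigned char *s, unsigned char *d, int len)
{
	unsigned char *p = d;

	*p++ = END;			/* flush any line noise first */
	while (len-- > 0) {
		unsigned char c = *s++;

		if (c == END) {		/* END in data -> ESC ESC_END */
			*p++ = ESC;
			*p++ = ESC_END;
		} else if (c == ESC) {	/* ESC in data -> ESC ESC_ESC */
			*p++ = ESC;
			*p++ = ESC_ESC;
		} else {
			*p++ = c;
		}
	}
	*p++ = END;			/* close the frame */
	return p - d;
}

int main(void)
{
	unsigned char in[] = { 0x00, END, 'A', ESC, 'B' };
	unsigned char out[16];
	int n = stuff(in, out, sizeof(in)), i;

	for (i = 0; i < n; i++)		/* prints: 300 000 333 334 101 333 335 102 300 */
		printf("%03o ", out[i]);
	printf("\n");
	return 0;
}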
// SPDX-License-Identifier: GPL-2.0+
/*
 * Belkin USB Serial Adapter Driver
 *
 *  Copyright (C) 2000 William Greathouse (wgreathouse@smva.com)
 *  Copyright (C) 2000-2001 Greg Kroah-Hartman (greg@kroah.com)
 *  Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
 *
 *  This program is largely derived from work by the linux-usb group
 *  and associated source files.  Please see the usb/serial files for
 *  individual credits and copyrights.
 *
 * See Documentation/usb/usb-serial.rst for more information on using this
 * driver
 *
 * TODO:
 * -- Add true modem control line query capability.  Currently we track the
 *    states reported by the interrupt and the states we request.
 * -- Add support for flush commands
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "belkin_sa.h"

#define DRIVER_AUTHOR "William Greathouse <wgreathouse@smva.com>"
#define DRIVER_DESC "USB Belkin Serial converter driver"

/* function prototypes for a Belkin USB Serial Adapter F5U103 */
static int  belkin_sa_port_probe(struct usb_serial_port *port);
static void belkin_sa_port_remove(struct usb_serial_port *port);
static int  belkin_sa_open(struct tty_struct *tty,
			   struct usb_serial_port *port);
static void belkin_sa_close(struct usb_serial_port *port);
static void belkin_sa_read_int_callback(struct urb *urb);
static void belkin_sa_process_read_urb(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
				  struct usb_serial_port *port,
				  const struct ktermios *old_termios);
static int  belkin_sa_break_ctl(struct tty_struct *tty, int break_state);
static int  belkin_sa_tiocmget(struct tty_struct *tty);
static int  belkin_sa_tiocmset(struct tty_struct *tty,
			       unsigned int set, unsigned int clear);

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
	{ USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
	{ USB_DEVICE(PERACOM_VID, PERACOM_PID) },
	{ USB_DEVICE(GOHUBS_VID, GOHUBS_PID) },
	{ USB_DEVICE(GOHUBS_VID, HANDYLINK_PID) },
	{ USB_DEVICE(BELKIN_DOCKSTATION_VID, BELKIN_DOCKSTATION_PID) },
	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);

/* All of the device info needed for the serial converters */
static struct usb_serial_driver belkin_device = {
	.driver = {
		.name =		"belkin",
	},
	.description =		"Belkin / Peracom / GoHubs USB Serial Adapter",
	.id_table =		id_table,
	.num_ports =		1,
	.open =			belkin_sa_open,
	.close =		belkin_sa_close,
	.read_int_callback =	belkin_sa_read_int_callback,
	.process_read_urb =	belkin_sa_process_read_urb,
	.set_termios =		belkin_sa_set_termios,
	.break_ctl =		belkin_sa_break_ctl,
	.tiocmget =		belkin_sa_tiocmget,
	.tiocmset =		belkin_sa_tiocmset,
	.port_probe =		belkin_sa_port_probe,
	.port_remove =		belkin_sa_port_remove,
};

static struct usb_serial_driver * const serial_drivers[] = {
	&belkin_device, NULL
};

struct belkin_sa_private {
	spinlock_t		lock;
	unsigned long		control_state;
	unsigned char		last_lsr;
	unsigned char		last_msr;
	int			bad_flow_control;
};

/*
 * ***************************************************************************
 * Belkin USB Serial Adapter F5U103 specific driver functions
 * ***************************************************************************
 */

#define WDR_TIMEOUT 5000	/* default urb timeout */

/* assumes that struct usb_serial *serial is available */
#define BSA_USB_CMD(c, v) usb_control_msg(serial->dev, \
					  usb_sndctrlpipe(serial->dev, 0), \
					  (c), BELKIN_SA_SET_REQUEST_TYPE, \
					  (v), 0, NULL, 0, WDR_TIMEOUT)

static int belkin_sa_port_probe(struct usb_serial_port *port)
{
	struct usb_device *dev = port->serial->dev;
	struct belkin_sa_private *priv;

	priv = kmalloc(sizeof(struct belkin_sa_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	priv->control_state = 0;
	priv->last_lsr = 0;
	priv->last_msr = 0;
	/* see comments at top of file */
	priv->bad_flow_control =
		(le16_to_cpu(dev->descriptor.bcdDevice) <= 0x0206) ? 1 : 0;
	dev_info(&dev->dev, "bcdDevice: %04x, bfc: %d\n",
		 le16_to_cpu(dev->descriptor.bcdDevice),
		 priv->bad_flow_control);

	usb_set_serial_port_data(port, priv);

	return 0;
}

static void belkin_sa_port_remove(struct usb_serial_port *port)
{
	struct belkin_sa_private *priv;

	priv = usb_get_serial_port_data(port);
	kfree(priv);
}

static int belkin_sa_open(struct tty_struct *tty,
			  struct usb_serial_port *port)
{
	int retval;

	retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
	if (retval) {
		dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
		return retval;
	}

	retval = usb_serial_generic_open(tty, port);
	if (retval)
		usb_kill_urb(port->interrupt_in_urb);

	return retval;
}

static void belkin_sa_close(struct usb_serial_port *port)
{
	usb_serial_generic_close(port);
	usb_kill_urb(port->interrupt_in_urb);
}

static void belkin_sa_read_int_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct belkin_sa_private *priv;
	unsigned char *data = urb->transfer_buffer;
	int retval;
	int status = urb->status;
	unsigned long flags;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
			__func__, status);
		goto exit;
	}

	usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);

	/* Handle known interrupt data */
	/* ignore data[0] and data[1] */

	priv = usb_get_serial_port_data(port);
	spin_lock_irqsave(&priv->lock, flags);
	priv->last_msr = data[BELKIN_SA_MSR_INDEX];

	/* Record Control Line states */
	if (priv->last_msr & BELKIN_SA_MSR_DSR)
		priv->control_state |= TIOCM_DSR;
	else
		priv->control_state &= ~TIOCM_DSR;

	if (priv->last_msr & BELKIN_SA_MSR_CTS)
		priv->control_state |= TIOCM_CTS;
	else
		priv->control_state &= ~TIOCM_CTS;

	if (priv->last_msr & BELKIN_SA_MSR_RI)
		priv->control_state |= TIOCM_RI;
	else
		priv->control_state &= ~TIOCM_RI;

	if (priv->last_msr & BELKIN_SA_MSR_CD)
		priv->control_state |= TIOCM_CD;
	else
		priv->control_state &= ~TIOCM_CD;

	priv->last_lsr = data[BELKIN_SA_LSR_INDEX];
	spin_unlock_irqrestore(&priv->lock, flags);
exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&port->dev, "%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
}

static void belkin_sa_process_read_urb(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct belkin_sa_private *priv = usb_get_serial_port_data(port);
	unsigned char *data = urb->transfer_buffer;
	unsigned long flags;
	unsigned char status;
	char tty_flag;

	/* Update line status */
	tty_flag = TTY_NORMAL;

	spin_lock_irqsave(&priv->lock, flags);
	status = priv->last_lsr;
	priv->last_lsr &= ~BELKIN_SA_LSR_ERR;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!urb->actual_length)
		return;

	if (status & BELKIN_SA_LSR_ERR) {
		/* Break takes precedence over parity, which takes precedence
		 * over framing errors. */
		if (status & BELKIN_SA_LSR_BI)
			tty_flag = TTY_BREAK;
		else if (status & BELKIN_SA_LSR_PE)
			tty_flag = TTY_PARITY;
		else if (status & BELKIN_SA_LSR_FE)
			tty_flag = TTY_FRAME;
		dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);

		/* Overrun is special, not associated with a char. */
		if (status & BELKIN_SA_LSR_OE)
			tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
	}
	tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag,
					  urb->actual_length);
	tty_flip_buffer_push(&port->port);
}

static void belkin_sa_set_termios(struct tty_struct *tty,
				  struct usb_serial_port *port,
				  const struct ktermios *old_termios)
{
	struct usb_serial *serial = port->serial;
	struct belkin_sa_private *priv = usb_get_serial_port_data(port);
	unsigned int iflag;
	unsigned int cflag;
	unsigned int old_iflag = 0;
	unsigned int old_cflag = 0;
	__u16 urb_value = 0;	/* Will hold the new flags */
	unsigned long flags;
	unsigned long control_state;
	int bad_flow_control;
	speed_t baud;
	struct ktermios *termios = &tty->termios;

	iflag = termios->c_iflag;
	cflag = termios->c_cflag;

	termios->c_cflag &= ~CMSPAR;

	/* get a local copy of the current port settings */
	spin_lock_irqsave(&priv->lock, flags);
	control_state = priv->control_state;
	bad_flow_control = priv->bad_flow_control;
	spin_unlock_irqrestore(&priv->lock, flags);

	old_iflag = old_termios->c_iflag;
	old_cflag = old_termios->c_cflag;

	/* Set the baud rate */
	if ((cflag & CBAUD) != (old_cflag & CBAUD)) {
		/* reassert DTR and (maybe) RTS on transition from B0 */
		if ((old_cflag & CBAUD) == B0) {
			control_state |= (TIOCM_DTR|TIOCM_RTS);
			if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 1) < 0)
				dev_err(&port->dev, "Set DTR error\n");
			/* don't set RTS if using hardware flow control */
			if (!(old_cflag & CRTSCTS))
				if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST,
						1) < 0)
					dev_err(&port->dev, "Set RTS error\n");
		}
	}

	baud = tty_get_baud_rate(tty);
	if (baud) {
		urb_value = BELKIN_SA_BAUD(baud);
		/* Clip to maximum speed */
		if (urb_value == 0)
			urb_value = 1;
		/* Turn it back into a resulting real baud rate */
		baud = BELKIN_SA_BAUD(urb_value);

		/* Report the actual baud rate back to the caller */
		tty_encode_baud_rate(tty, baud, baud);
		if (BSA_USB_CMD(BELKIN_SA_SET_BAUDRATE_REQUEST, urb_value) < 0)
			dev_err(&port->dev, "Set baudrate error\n");
	} else {
		/* Disable flow control */
		if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST,
				BELKIN_SA_FLOW_NONE) < 0)
			dev_err(&port->dev, "Disable flowcontrol error\n");
		/* Drop RTS and DTR */
		control_state &= ~(TIOCM_DTR | TIOCM_RTS);
		if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 0) < 0)
			dev_err(&port->dev, "DTR LOW error\n");
		if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, 0) < 0)
			dev_err(&port->dev, "RTS LOW error\n");
	}

	/* set the parity */
	if ((cflag ^ old_cflag) & (PARENB | PARODD)) {
		if (cflag & PARENB)
			urb_value = (cflag & PARODD) ? BELKIN_SA_PARITY_ODD
						     : BELKIN_SA_PARITY_EVEN;
		else
			urb_value = BELKIN_SA_PARITY_NONE;
		if (BSA_USB_CMD(BELKIN_SA_SET_PARITY_REQUEST, urb_value) < 0)
			dev_err(&port->dev, "Set parity error\n");
	}

	/* set the number of data bits */
	if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
		urb_value = BELKIN_SA_DATA_BITS(tty_get_char_size(cflag));
		if (BSA_USB_CMD(BELKIN_SA_SET_DATA_BITS_REQUEST,
				urb_value) < 0)
			dev_err(&port->dev, "Set data bits error\n");
	}

	/* set the number of stop bits */
	if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) {
		urb_value = (cflag & CSTOPB) ? BELKIN_SA_STOP_BITS(2)
					     : BELKIN_SA_STOP_BITS(1);
		if (BSA_USB_CMD(BELKIN_SA_SET_STOP_BITS_REQUEST,
				urb_value) < 0)
			dev_err(&port->dev, "Set stop bits error\n");
	}

	/* Set flow control */
	if (((iflag ^ old_iflag) & (IXOFF | IXON)) ||
	    ((cflag ^ old_cflag) & CRTSCTS)) {
		urb_value = 0;
		if ((iflag & IXOFF) || (iflag & IXON))
			urb_value |= (BELKIN_SA_FLOW_OXON | BELKIN_SA_FLOW_IXON);
		else
			urb_value &= ~(BELKIN_SA_FLOW_OXON | BELKIN_SA_FLOW_IXON);

		if (cflag & CRTSCTS)
			urb_value |= (BELKIN_SA_FLOW_OCTS | BELKIN_SA_FLOW_IRTS);
		else
			urb_value &= ~(BELKIN_SA_FLOW_OCTS | BELKIN_SA_FLOW_IRTS);

		if (bad_flow_control)
			urb_value &= ~(BELKIN_SA_FLOW_IRTS);

		if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST, urb_value) < 0)
			dev_err(&port->dev, "Set flow control error\n");
	}

	/* save off the modified port settings */
	spin_lock_irqsave(&priv->lock, flags);
	priv->control_state = control_state;
	spin_unlock_irqrestore(&priv->lock, flags);
}

static int belkin_sa_break_ctl(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_serial *serial = port->serial;
	int ret;

	ret = BSA_USB_CMD(BELKIN_SA_SET_BREAK_REQUEST, break_state ? 1 : 0);
	if (ret < 0) {
		dev_err(&port->dev, "Set break_ctl %d\n", break_state);
		return ret;
	}

	return 0;
}

static int belkin_sa_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct belkin_sa_private *priv = usb_get_serial_port_data(port);
	unsigned long control_state;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	control_state = priv->control_state;
	spin_unlock_irqrestore(&priv->lock, flags);

	return control_state;
}

static int belkin_sa_tiocmset(struct tty_struct *tty,
			      unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_serial *serial = port->serial;
	struct belkin_sa_private *priv = usb_get_serial_port_data(port);
	unsigned long control_state;
	unsigned long flags;
	int retval;
	int rts = 0;
	int dtr = 0;

	spin_lock_irqsave(&priv->lock, flags);
	control_state = priv->control_state;

	if (set & TIOCM_RTS) {
		control_state |= TIOCM_RTS;
		rts = 1;
	}
	if (set & TIOCM_DTR) {
		control_state |= TIOCM_DTR;
		dtr = 1;
	}
	if (clear & TIOCM_RTS) {
		control_state &= ~TIOCM_RTS;
		rts = 0;
	}
	if (clear & TIOCM_DTR) {
		control_state &= ~TIOCM_DTR;
		dtr = 0;
	}

	priv->control_state = control_state;
	spin_unlock_irqrestore(&priv->lock, flags);

	retval = BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, rts);
	if (retval < 0) {
		dev_err(&port->dev, "Set RTS error %d\n", retval);
		goto exit;
	}

	retval = BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, dtr);
	if (retval < 0) {
		dev_err(&port->dev, "Set DTR error %d\n", retval);
		goto exit;
	}
exit:
	return retval;
}

module_usb_serial_driver(serial_drivers, id_table);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
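/*
 * Illustration: how the baud-rate quantization in belkin_sa_set_termios()
 * behaves. This standalone sketch assumes the common definition
 * BELKIN_SA_BAUD(b) == 230400 / (b) from belkin_sa.h; that definition is an
 * assumption here, so check the header before relying on it.
 */
#include <stdio.h>

#define BELKIN_SA_BAUD(b)	(230400 / (b))	/* assumed divisor macro */

int main(void)
{
	unsigned int requested[] = { 57600, 115200, 230400, 460800 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int divisor = BELKIN_SA_BAUD(requested[i]);

		if (divisor == 0)	/* clip to the maximum speed */
			divisor = 1;
		/* turn the divisor back into the rate actually used */
		printf("%u -> divisor %u -> actual %u\n",
		       requested[i], divisor, BELKIN_SA_BAUD(divisor));
	}
	return 0;	/* 460800 is clipped down to an actual 230400 */
}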
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Thomas Weißschuh <linux@weissschuh.net>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/usb.h>

#define DRIVER_NAME	"powerz"
#define POWERZ_EP_CMD_OUT	0x01
#define POWERZ_EP_DATA_IN	0x81

struct powerz_sensor_data {
	u8 _unknown_1[8];
	__le32 V_bus;
	__le32 I_bus;
	__le32 V_bus_avg;
	__le32 I_bus_avg;
	u8 _unknown_2[8];
	u8 temp[2];
	__le16 V_cc1;
	__le16 V_cc2;
	__le16 V_dp;
	__le16 V_dm;
	__le16 V_dd;
	u8 _unknown_3[4];
} __packed;

struct powerz_priv {
	char transfer_buffer[64];	/* first member to satisfy DMA alignment */
	struct mutex mutex;
	struct completion completion;
	struct urb *urb;
	int status;
};

static const struct hwmon_channel_info *const powerz_info[] = {
	HWMON_CHANNEL_INFO(in,
			   HWMON_I_INPUT | HWMON_I_LABEL | HWMON_I_AVERAGE,
			   HWMON_I_INPUT | HWMON_I_LABEL,
			   HWMON_I_INPUT | HWMON_I_LABEL,
			   HWMON_I_INPUT | HWMON_I_LABEL,
			   HWMON_I_INPUT | HWMON_I_LABEL,
			   HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(curr,
			   HWMON_C_INPUT | HWMON_C_LABEL | HWMON_C_AVERAGE),
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL),
	NULL
};

static int powerz_read_string(struct device *dev,
			      enum hwmon_sensor_types type, u32 attr,
			      int channel, const char **str)
{
	if (type == hwmon_curr && attr == hwmon_curr_label) {
		*str = "IBUS";
	} else if (type == hwmon_in && attr == hwmon_in_label) {
		if (channel == 0)
			*str = "VBUS";
		else if (channel == 1)
			*str = "VCC1";
		else if (channel == 2)
			*str = "VCC2";
		else if (channel == 3)
			*str = "VDP";
		else if (channel == 4)
			*str = "VDM";
		else if (channel == 5)
			*str = "VDD";
		else
			return -EOPNOTSUPP;
	} else if (type == hwmon_temp && attr == hwmon_temp_label) {
		*str = "TEMP";
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}

static void powerz_usb_data_complete(struct urb *urb)
{
	struct powerz_priv *priv = urb->context;

	complete(&priv->completion);
}

static void powerz_usb_cmd_complete(struct urb *urb)
{
	struct powerz_priv *priv = urb->context;

	usb_fill_bulk_urb(urb, urb->dev,
			  usb_rcvbulkpipe(urb->dev, POWERZ_EP_DATA_IN),
			  priv->transfer_buffer, sizeof(priv->transfer_buffer),
			  powerz_usb_data_complete, priv);

	priv->status = usb_submit_urb(urb, GFP_ATOMIC);
	if (priv->status)
		complete(&priv->completion);
}

static int powerz_read_data(struct usb_device *udev, struct powerz_priv *priv)
{
	int ret;

	priv->status = -ETIMEDOUT;
	reinit_completion(&priv->completion);

	priv->transfer_buffer[0] = 0x0c;
	priv->transfer_buffer[1] = 0x00;
	priv->transfer_buffer[2] = 0x02;
	priv->transfer_buffer[3] = 0x00;

	usb_fill_bulk_urb(priv->urb, udev,
			  usb_sndbulkpipe(udev, POWERZ_EP_CMD_OUT),
			  priv->transfer_buffer, 4, powerz_usb_cmd_complete,
			  priv);

	ret = usb_submit_urb(priv->urb, GFP_KERNEL);
	if (ret)
		return ret;

	if (!wait_for_completion_interruptible_timeout(&priv->completion,
						       msecs_to_jiffies(5))) {
		usb_kill_urb(priv->urb);
		return -EIO;
	}

	if (priv->urb->actual_length < sizeof(struct powerz_sensor_data))
		return -EIO;

	return priv->status;
}

static int powerz_read(struct device *dev, enum hwmon_sensor_types type,
		       u32 attr, int channel, long *val)
{
	struct usb_interface *intf = to_usb_interface(dev->parent);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct powerz_priv *priv = usb_get_intfdata(intf);
	struct powerz_sensor_data *data;
	int ret;

	if (!priv)
		return -EIO;	/* disconnected */

	mutex_lock(&priv->mutex);
	ret = powerz_read_data(udev, priv);
	if (ret)
		goto out;

	data = (struct powerz_sensor_data *)priv->transfer_buffer;

	if (type == hwmon_curr) {
		if (attr == hwmon_curr_input)
			*val = ((s32)le32_to_cpu(data->I_bus)) / 1000;
		else if (attr == hwmon_curr_average)
			*val = ((s32)le32_to_cpu(data->I_bus_avg)) / 1000;
		else
			ret = -EOPNOTSUPP;
	} else if (type == hwmon_in) {
		if (attr == hwmon_in_input) {
			if (channel == 0)
				*val = le32_to_cpu(data->V_bus) / 1000;
			else if (channel == 1)
				*val = le16_to_cpu(data->V_cc1) / 10;
			else if (channel == 2)
				*val = le16_to_cpu(data->V_cc2) / 10;
			else if (channel == 3)
				*val = le16_to_cpu(data->V_dp) / 10;
			else if (channel == 4)
				*val = le16_to_cpu(data->V_dm) / 10;
			else if (channel == 5)
				*val = le16_to_cpu(data->V_dd) / 10;
			else
				ret = -EOPNOTSUPP;
		} else if (attr == hwmon_in_average && channel == 0) {
			*val = le32_to_cpu(data->V_bus_avg) / 1000;
		} else {
			ret = -EOPNOTSUPP;
		}
	} else if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = data->temp[1] * 2000 + data->temp[0] * 1000 / 128;
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	mutex_unlock(&priv->mutex);
	return ret;
}

static const struct hwmon_ops powerz_hwmon_ops = {
	.visible = 0444,
	.read = powerz_read,
	.read_string = powerz_read_string,
};

static const struct hwmon_chip_info powerz_chip_info = {
	.ops = &powerz_hwmon_ops,
	.info = powerz_info,
};

static int powerz_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct powerz_priv *priv;
	struct device *hwmon_dev;
	struct device *parent;

	parent = &intf->dev;

	priv = devm_kzalloc(parent, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!priv->urb)
		return -ENOMEM;

	mutex_init(&priv->mutex);
	init_completion(&priv->completion);

	hwmon_dev = devm_hwmon_device_register_with_info(parent, DRIVER_NAME,
							 priv,
							 &powerz_chip_info,
							 NULL);
	if (IS_ERR(hwmon_dev)) {
		usb_free_urb(priv->urb);
		return PTR_ERR(hwmon_dev);
	}

	usb_set_intfdata(intf, priv);

	return 0;
}

static void powerz_disconnect(struct usb_interface *intf)
{
	struct powerz_priv *priv = usb_get_intfdata(intf);

	mutex_lock(&priv->mutex);
	usb_kill_urb(priv->urb);
	usb_free_urb(priv->urb);
	mutex_unlock(&priv->mutex);
}

static const struct usb_device_id powerz_id_table[] = {
	{ USB_DEVICE_INTERFACE_NUMBER(0x5FC9, 0x0061, 0x00) },	/* ChargerLAB POWER-Z KM002C */
	{ USB_DEVICE_INTERFACE_NUMBER(0x5FC9, 0x0063, 0x00) },	/* ChargerLAB POWER-Z KM003C */
	{ }
};
MODULE_DEVICE_TABLE(usb, powerz_id_table);

static struct usb_driver powerz_driver = {
	.name = DRIVER_NAME,
	.id_table = powerz_id_table,
	.probe = powerz_probe,
	.disconnect = powerz_disconnect,
};
module_usb_driver(powerz_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Weißschuh <linux@weissschuh.net>");
MODULE_DESCRIPTION("ChargerLAB POWER-Z USB-C tester");
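/*
 * Illustration: the hwmon_temp_input conversion used in powerz_read() above,
 * restated as a standalone sketch. temp[1] contributes 2 degrees C per count
 * and temp[0] adds 1/128 degree C per count; the sample byte values below
 * are made up for demonstration.
 */
#include <stdio.h>

static long powerz_temp_millic(unsigned char lo, unsigned char hi)
{
	/* millidegrees Celsius, as the hwmon core expects */
	return hi * 2000 + lo * 1000 / 128;
}

int main(void)
{
	/* hypothetical raw reading: temp[0] = 64, temp[1] = 12 */
	long t = powerz_temp_millic(64, 12);

	/* 12 * 2000 + 64 * 1000 / 128 = 24500 -> 24.500 degrees C */
	printf("%ld.%03ld C\n", t / 1000, t % 1000);
	return 0;
}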
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLER__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
#include <asm/shared/msr.h>

#include <linux/types.h>
#include <linux/percpu.h>

struct msr_info {
	u32			msr_no;
	struct msr		reg;
	struct msr __percpu	*msrs;
	int			err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

#ifdef CONFIG_TRACEPOINTS
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(u32 msr, u64 val, int failed);
extern void do_trace_read_msr(u32 msr, u64 val, int failed);
extern void do_trace_rdpmc(u32 msr, u64 val, int failed);
#else
static inline void do_trace_write_msr(u32 msr, u64 val, int failed) {}
static inline void do_trace_read_msr(u32 msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(u32 msr, u64 val, int failed) {}
#endif

/*
 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static __always_inline u64 __rdmsr(u32 msr)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static __always_inline void __wrmsrq(u32 msr, u64 val)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32))
		     : "memory");
}

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

static __always_inline u64 native_rdmsrq(u32 msr)
{
	return __rdmsr(msr);
}

#define native_wrmsr(msr, low, high)			\
	__wrmsrq((msr), (u64)(high) << 32 | (low))

#define native_wrmsrq(msr, val)				\
	__wrmsrq((msr), (val))

static inline u64 native_read_msr(u32 msr)
{
	u64 val;

	val = __rdmsr(msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}

static inline int native_read_msr_safe(u32 msr, u64 *p)
{
	int err;
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
		     : [err] "=r" (err), EAX_EDX_RET(val, low, high)
		     : "c" (msr));
	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err);

	*p = EAX_EDX_VAL(val, low, high);

	return err;
}

/* Can be uninlined because referenced by paravirt */
static inline void notrace native_write_msr(u32 msr, u64 val)
{
	native_wrmsrq(msr, val);

	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, val, 0);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace native_write_msr_safe(u32 msr, u64 val)
{
	int err;

	asm volatile("1: wrmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
		     : [err] "=a" (err)
		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val >> 32))
		     : "memory");
	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, val, err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

static inline u64 native_read_pmc(int counter)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), this allows gcc to optimize better
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(u32 msr, u32 low, u32 high)
{
	native_write_msr(msr, (u64)high << 32 | low);
}

#define rdmsrq(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrq(u32 msr, u64 val)
{
	native_write_msr(msr, val);
}

/* wrmsr with exception handling */
static inline int wrmsrq_safe(u32 msr, u64 val)
{
	return native_write_msr_safe(msr, val);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	u64 __val;						\
	int __err = native_read_msr_safe((msr), &__val);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrq_safe(u32 msr, u64 *p)
{
	return native_read_msr_safe(msr, p);
}

static __always_inline u64 rdpmc(int counter)
{
	return native_read_pmc(counter);
}

#endif	/* !CONFIG_PARAVIRT_XXL */

/* Instruction opcode for WRMSRNS supported in binutils >= 2.40 */
#define ASM_WRMSRNS _ASM_BYTES(0x0f,0x01,0xc6)

/* Non-serializing WRMSR, when available. Falls back to a serializing WRMSR. */
static __always_inline void wrmsrns(u32 msr, u64 val)
{
	/*
	 * WRMSR is 2 bytes.  WRMSRNS is 3 bytes.  Pad WRMSR with a redundant
	 * DS prefix to avoid a trailing NOP.
	 */
	asm volatile("1: " ALTERNATIVE("ds wrmsr", ASM_WRMSRNS, X86_FEATURE_WRMSRNS)
		     "2: " _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)));
}

/*
 * Dual u32 version of wrmsrq_safe():
 */
static inline int wrmsr_safe(u32 msr, u32 low, u32 high)
{
	return wrmsrq_safe(msr, (u64)high << 32 | low);
}

struct msr __percpu *msrs_alloc(void);
void msrs_free(struct msr __percpu *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrq(msr_no, *q);
	return 0;
}
static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrq(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr __percpu *msrs)
{
	rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr __percpu *msrs)
{
	wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrq_safe(msr_no, q);
}
static inline int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrq_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */

/* Compatibility wrappers: */
#define rdmsrl(msr, val)	rdmsrq(msr, val)
#define wrmsrl(msr, val)	wrmsrq(msr, val)
#define rdmsrl_on_cpu(cpu, msr, q)	rdmsrq_on_cpu(cpu, msr, q)

#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_MSR_H */
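/*
 * Illustration: composing the exception-safe wrappers above into a
 * read-modify-write. MSR_DEMO_CTL is a made-up register number used only
 * for this sketch; real code would use an index from msr-index.h, or
 * simply the msr_set_bit() helper declared above.
 */
#include <asm/msr.h>

#define MSR_DEMO_CTL	0x00000123	/* hypothetical MSR index */

static int demo_set_ctl_bit0(void)
{
	u64 val;

	/*
	 * The _safe variants return non-zero instead of faulting if the
	 * MSR does not exist on this CPU.
	 */
	if (rdmsrq_safe(MSR_DEMO_CTL, &val))
		return -ENODEV;

	val |= 1ULL << 0;

	return wrmsrq_safe(MSR_DEMO_CTL, val);
}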
| 12 35 22 22 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CEPH_DECODE_H #define __CEPH_DECODE_H #include <linux/err.h> #include <linux/bug.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/unaligned.h> #include <linux/ceph/types.h> /* * in all cases, * void **p pointer to position pointer * void *end pointer to end of buffer (last byte + 1) */ static inline u64 ceph_decode_64(void **p) { u64 v = get_unaligned_le64(*p); *p += sizeof(u64); return v; } static inline u32 ceph_decode_32(void **p) { u32 v = get_unaligned_le32(*p); *p += sizeof(u32); return v; } static inline u16 ceph_decode_16(void **p) { u16 v = get_unaligned_le16(*p); *p += sizeof(u16); return v; } static inline u8 ceph_decode_8(void **p) { u8 v = *(u8 *)*p; (*p)++; return v; } static inline void ceph_decode_copy(void **p, void *pv, size_t n) { memcpy(pv, *p, n); *p += n; } /* * bounds check input. */ static inline bool ceph_has_room(void **p, void *end, size_t n) { return end >= *p && n <= end - *p; } #define ceph_decode_need(p, end, n, bad) \ do { \ if (!likely(ceph_has_room(p, end, n))) \ goto bad; \ } while (0) #define ceph_decode_64_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u64), bad); \ v = ceph_decode_64(p); \ } while (0) #define ceph_decode_32_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u32), bad); \ v = ceph_decode_32(p); \ } while (0) #define ceph_decode_16_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u16), bad); \ v = ceph_decode_16(p); \ } while (0) #define ceph_decode_8_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u8), bad); \ v = ceph_decode_8(p); \ } while (0) #define ceph_decode_copy_safe(p, end, pv, n, bad) \ do { \ ceph_decode_need(p, end, n, bad); \ ceph_decode_copy(p, pv, n); \ } while (0) /* * Allocate a buffer big enough to hold the wire-encoded string, and * decode the string into it. The resulting string will always be * terminated with '\0'. If successful, *p will be advanced * past the decoded data. 
 * Also, if lenp is not a null pointer, the
 * length (not including the terminating '\0') will be recorded in
 * *lenp.  Note that a zero-length string is a valid return value.
 *
 * Returns a pointer to the newly-allocated string buffer, or a
 * pointer-coded errno if an error occurs.  Neither *p nor *lenp
 * will have been updated if an error is returned.
 *
 * There are two possible failures:
 *   - converting the string would require accessing memory at or
 *     beyond the "end" pointer provided (-ERANGE)
 *   - memory could not be allocated for the result (-ENOMEM)
 */
static inline char *ceph_extract_encoded_string(void **p, void *end,
						size_t *lenp, gfp_t gfp)
{
	u32 len;
	void *sp = *p;
	char *buf;

	ceph_decode_32_safe(&sp, end, len, bad);
	if (!ceph_has_room(&sp, end, len))
		goto bad;

	buf = kmalloc(len + 1, gfp);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (len)
		memcpy(buf, sp, len);
	buf[len] = '\0';

	*p = (char *) *p + sizeof (u32) + len;
	if (lenp)
		*lenp = (size_t) len;

	return buf;

bad:
	return ERR_PTR(-ERANGE);
}

/*
 * skip helpers
 */
#define ceph_decode_skip_n(p, end, n, bad)			\
	do {							\
		ceph_decode_need(p, end, n, bad);		\
		*p += n;					\
	} while (0)

#define ceph_decode_skip_64(p, end, bad)			\
	ceph_decode_skip_n(p, end, sizeof(u64), bad)

#define ceph_decode_skip_32(p, end, bad)			\
	ceph_decode_skip_n(p, end, sizeof(u32), bad)

#define ceph_decode_skip_16(p, end, bad)			\
	ceph_decode_skip_n(p, end, sizeof(u16), bad)

#define ceph_decode_skip_8(p, end, bad)				\
	ceph_decode_skip_n(p, end, sizeof(u8), bad)

#define ceph_decode_skip_string(p, end, bad)			\
	do {							\
		u32 len;					\
								\
		ceph_decode_32_safe(p, end, len, bad);		\
		ceph_decode_skip_n(p, end, len, bad);		\
	} while (0)

#define ceph_decode_skip_set(p, end, type, bad)			\
	do {							\
		u32 len;					\
								\
		ceph_decode_32_safe(p, end, len, bad);		\
		while (len--)					\
			ceph_decode_skip_##type(p, end, bad);	\
	} while (0)

#define ceph_decode_skip_map(p, end, ktype, vtype, bad)		\
	do {							\
		u32 len;					\
								\
		ceph_decode_32_safe(p, end, len, bad);		\
		while (len--) {					\
			ceph_decode_skip_##ktype(p, end, bad);	\
			ceph_decode_skip_##vtype(p, end, bad);	\
		}						\
	} while (0)

#define ceph_decode_skip_map_of_map(p, end, ktype1, ktype2, vtype2, bad) \
	do {							\
		u32 len;					\
								\
		ceph_decode_32_safe(p, end, len, bad);		\
		while (len--) {					\
			ceph_decode_skip_##ktype1(p, end, bad);	\
			ceph_decode_skip_map(p, end, ktype2, vtype2, bad); \
		}						\
	} while (0)

/*
 * struct ceph_timespec <-> struct timespec64
 */
static inline void ceph_decode_timespec64(struct timespec64 *ts,
					  const struct ceph_timespec *tv)
{
	/*
	 * This will still overflow in year 2106.  We could extend
	 * the protocol to steal two more bits from tv_nsec to
	 * add three more 136 year epochs after that the way ext4
	 * does if necessary.
	 */
	ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec);
	ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec);
}
static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
					  const struct timespec64 *ts)
{
	tv->tv_sec = cpu_to_le32((u32)ts->tv_sec);
	tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
}

/*
 * sockaddr_storage <-> ceph_sockaddr
 */
#define CEPH_ENTITY_ADDR_TYPE_NONE	0
#define CEPH_ENTITY_ADDR_TYPE_LEGACY	__cpu_to_le32(1)
#define CEPH_ENTITY_ADDR_TYPE_MSGR2	__cpu_to_le32(2)
#define CEPH_ENTITY_ADDR_TYPE_ANY	__cpu_to_le32(3)

static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a)
{
	__be16 ss_family = htons(a->in_addr.ss_family);

	a->in_addr.ss_family = *(__u16 *)&ss_family;

	/* Banner addresses require TYPE_NONE */
	a->type = CEPH_ENTITY_ADDR_TYPE_NONE;
}
static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a)
{
	__be16 ss_family = *(__be16 *)&a->in_addr.ss_family;

	a->in_addr.ss_family = ntohs(ss_family);

	WARN_ON(a->in_addr.ss_family == 512);
	a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
}

extern int ceph_decode_entity_addr(void **p, void *end,
				   struct ceph_entity_addr *addr);
int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
			       struct ceph_entity_addr *addr);
int ceph_entity_addr_encoding_len(const struct ceph_entity_addr *addr);
void ceph_encode_entity_addr(void **p, const struct ceph_entity_addr *addr);

/*
 * encoders
 */
static inline void ceph_encode_64(void **p, u64 v)
{
	put_unaligned_le64(v, (__le64 *)*p);
	*p += sizeof(u64);
}
static inline void ceph_encode_32(void **p, u32 v)
{
	put_unaligned_le32(v, (__le32 *)*p);
	*p += sizeof(u32);
}
static inline void ceph_encode_16(void **p, u16 v)
{
	put_unaligned_le16(v, (__le16 *)*p);
	*p += sizeof(u16);
}
static inline void ceph_encode_8(void **p, u8 v)
{
	*(u8 *)*p = v;
	(*p)++;
}
static inline void ceph_encode_copy(void **p, const void *s, int len)
{
	memcpy(*p, s, len);
	*p += len;
}

/*
 * filepath, string encoders
 */
static inline void ceph_encode_filepath(void **p, void *end,
					u64 ino, const char *path)
{
	u32 len = path ? strlen(path) : 0;
	BUG_ON(*p + 1 + sizeof(ino) + sizeof(len) + len > end);
	ceph_encode_8(p, 1);
	ceph_encode_64(p, ino);
	ceph_encode_32(p, len);
	if (len)
		memcpy(*p, path, len);
	*p += len;
}

static inline void ceph_encode_string(void **p, void *end,
				      const char *s, u32 len)
{
	BUG_ON(*p + sizeof(len) + len > end);
	ceph_encode_32(p, len);
	if (len)
		memcpy(*p, s, len);
	*p += len;
}

/*
 * version and length starting block encoders/decoders
 */

/* current code version (u8) + compat code version (u8) + len of struct (u32) */
#define CEPH_ENCODING_START_BLK_LEN	6

/**
 * ceph_start_encoding - start encoding block
 * @struct_v: current (code) version of the encoding
 * @struct_compat: oldest code version that can decode it
 * @struct_len: length of struct encoding
 */
static inline void ceph_start_encoding(void **p, u8 struct_v, u8 struct_compat,
				       u32 struct_len)
{
	ceph_encode_8(p, struct_v);
	ceph_encode_8(p, struct_compat);
	ceph_encode_32(p, struct_len);
}

/**
 * ceph_start_decoding - start decoding block
 * @v: current version of the encoding that the code supports
 * @name: name of the struct (free-form)
 * @struct_v: out param for the encoding version
 * @struct_len: out param for the length of struct encoding
 *
 * Validates the length of struct encoding, so unsafe ceph_decode_*
 * variants can be used for decoding.
 */
static inline int ceph_start_decoding(void **p, void *end, u8 v,
				      const char *name, u8 *struct_v,
				      u32 *struct_len)
{
	u8 struct_compat;

	ceph_decode_need(p, end, CEPH_ENCODING_START_BLK_LEN, bad);
	*struct_v = ceph_decode_8(p);
	struct_compat = ceph_decode_8(p);
	if (v < struct_compat) {
		pr_warn("got struct_v %d struct_compat %d > %d of %s\n",
			*struct_v, struct_compat, v, name);
		return -EINVAL;
	}
	*struct_len = ceph_decode_32(p);
	ceph_decode_need(p, end, *struct_len, bad);
	return 0;

bad:
	return -ERANGE;
}

#define ceph_encode_need(p, end, n, bad)			\
	do {							\
		if (!likely(ceph_has_room(p, end, n)))		\
			goto bad;				\
	} while (0)

#define ceph_encode_64_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u64), bad);	\
		ceph_encode_64(p, v);				\
	} while (0)
#define ceph_encode_32_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u32), bad);	\
		ceph_encode_32(p, v);				\
	} while (0)
#define ceph_encode_16_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u16), bad);	\
		ceph_encode_16(p, v);				\
	} while (0)
#define ceph_encode_8_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u8), bad);	\
		ceph_encode_8(p, v);				\
	} while (0)

#define ceph_encode_copy_safe(p, end, pv, n, bad)		\
	do {							\
		ceph_encode_need(p, end, n, bad);		\
		ceph_encode_copy(p, pv, n);			\
	} while (0)
#define ceph_encode_string_safe(p, end, s, n, bad)		\
	do {							\
		ceph_encode_need(p, end, n, bad);		\
		ceph_encode_string(p, end, s, n);		\
	} while (0)

#endif
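/*
 * Illustrative sketch (editor's addition, not part of this header): the
 * canonical caller pattern for the *_safe decode macros above.  The wire
 * layout decoded here -- a u32 element count followed by that many u64
 * values -- is invented for the example; only the macro usage is real.
 */
static inline int demo_decode_u64_array(void **p, void *end, u64 *out, u32 max)
{
	u32 count, i;

	ceph_decode_32_safe(p, end, count, bad);  /* jumps to bad on truncation */
	if (count > max)
		goto bad;
	for (i = 0; i < count; i++)
		ceph_decode_64_safe(p, end, out[i], bad);
	return 0;

bad:
	return -ERANGE;		/* buffer too short or count implausible */
}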
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * Description: Data Center Bridging netlink interface
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>

/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet.  Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 *   framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 *   can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 *   control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]        = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]        = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]  = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]         = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]  = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes.
 */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]  = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]    = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]   = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]       = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]            = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]            = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE]      = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE]        = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN]            = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS]      = {.len = sizeof(struct ieee_qcn_stats)},
	[DCB_ATTR_DCB_BUFFER]          = {.len = sizeof(struct dcbnl_buffer)},
	[DCB_ATTR_DCB_APP_TRUST_TABLE] = {.type = NLA_NESTED},
};

/* DCB feature configuration nested attributes.
*/ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = { [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8}, }; static LIST_HEAD(dcb_app_list); static LIST_HEAD(dcb_rewr_list); static DEFINE_SPINLOCK(dcb_lock); static enum ieee_attrs_app dcbnl_app_attr_type_get(u8 selector) { switch (selector) { case IEEE_8021QAZ_APP_SEL_ETHERTYPE: case IEEE_8021QAZ_APP_SEL_STREAM: case IEEE_8021QAZ_APP_SEL_DGRAM: case IEEE_8021QAZ_APP_SEL_ANY: case IEEE_8021QAZ_APP_SEL_DSCP: return DCB_ATTR_IEEE_APP; case DCB_APP_SEL_PCP: return DCB_ATTR_DCB_APP; default: return DCB_ATTR_IEEE_APP_UNSPEC; } } static bool dcbnl_app_attr_type_validate(enum ieee_attrs_app type) { switch (type) { case DCB_ATTR_IEEE_APP: case DCB_ATTR_DCB_APP: return true; default: return false; } } static bool dcbnl_app_selector_validate(enum ieee_attrs_app type, u8 selector) { return dcbnl_app_attr_type_get(selector) == type; } static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq, u32 flags, struct nlmsghdr **nlhp) { struct sk_buff *skb; struct dcbmsg *dcb; struct nlmsghdr *nlh; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return NULL; nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags); BUG_ON(!nlh); dcb = nlmsg_data(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = cmd; dcb->dcb_pad = 0; if (nlhp) *nlhp = nlh; return skb; } static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ if (!netdev->dcbnl_ops->getstate) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_STATE, netdev->dcbnl_ops->getstate(netdev)); } static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_PFC_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->getpfccfg) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG); if (!nest) return -EMSGSIZE; if (data[DCB_PFC_UP_ATTR_ALL]) getall = 1; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (!getall && !data[i]) continue; netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } nla_nest_end(skb, nest); return 0; } static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 perm_addr[MAX_ADDR_LEN]; if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); } static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_CAP]) return -EINVAL; if (!netdev->dcbnl_ops->getcap) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], dcbnl_cap_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP); if (!nest) return 
-EMSGSIZE; if (data[DCB_CAP_ATTR_ALL]) getall = 1; for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) { if (!getall && !data[i]) continue; if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) { ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } } nla_nest_end(skb, nest); return 0; } static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_NUMTCS]) return -EINVAL; if (!netdev->dcbnl_ops->getnumtcs) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS); if (!nest) return -EMSGSIZE; if (data[DCB_NUMTCS_ATTR_ALL]) getall = 1; for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (!getall && !data[i]) continue; ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value); if (!ret) { ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } else return -EINVAL; } nla_nest_end(skb, nest); return 0; } static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1]; int ret; u8 value; int i; if (!tb[DCB_ATTR_NUMTCS]) return -EINVAL; if (!netdev->dcbnl_ops->setnumtcs) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest, NULL); if (ret) return ret; for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value); if (ret) break; } return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret); } static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { if (!netdev->dcbnl_ops->getpfcstate) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_PFC_STATE, netdev->dcbnl_ops->getpfcstate(netdev)); } static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!tb[DCB_ATTR_PFC_STATE]) return -EINVAL; if (!netdev->dcbnl_ops->setpfcstate) return -EOPNOTSUPP; value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); netdev->dcbnl_ops->setpfcstate(netdev, value); return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0); } static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *app_nest; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; u16 id; u8 up, idtype; int ret; if (!tb[DCB_ATTR_APP]) return -EINVAL; ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest, NULL); if (ret) return ret; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID])) return -EINVAL; /* either by eth type or by socket number */ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) return -EINVAL; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); if (netdev->dcbnl_ops->getapp) { ret = netdev->dcbnl_ops->getapp(netdev, idtype, id); if (ret < 0) return ret; else up = ret; } else { struct dcb_app app = { .selector = idtype, .protocol = id, }; up = dcb_getapp(netdev, &app); } app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP); 
if (!app_nest) return -EMSGSIZE; ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype); if (ret) goto out_cancel; ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id); if (ret) goto out_cancel; ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up); if (ret) goto out_cancel; nla_nest_end(skb, app_nest); return 0; out_cancel: nla_nest_cancel(skb, app_nest); return ret; } static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { int ret; u16 id; u8 up, idtype; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; if (!tb[DCB_ATTR_APP]) return -EINVAL; ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest, NULL); if (ret) return ret; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID]) || (!app_tb[DCB_APP_ATTR_PRIORITY])) return -EINVAL; /* either by eth type or by socket number */ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) return -EINVAL; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); if (netdev->dcbnl_ops->setapp) { ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up); if (ret < 0) return ret; } else { struct dcb_app app; app.selector = idtype; app.protocol = id; app.priority = up; ret = dcb_setapp(netdev, &app); } ret = nla_put_u8(skb, DCB_ATTR_APP, ret); dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0); return ret; } static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, struct nlattr **tb, struct sk_buff *skb, int dir) { struct nlattr *pg_nest, *param_nest, *data; struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; u8 prio, pgid, tc_pct, up_map; int ret; int getall = 0; int i; if (!tb[DCB_ATTR_PG_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->getpgtccfgtx || !netdev->dcbnl_ops->getpgtccfgrx || !netdev->dcbnl_ops->getpgbwgcfgtx || !netdev->dcbnl_ops->getpgbwgcfgrx) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest, NULL); if (ret) return ret; pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG); if (!pg_nest) return -EMSGSIZE; if (pg_tb[DCB_PG_ATTR_TC_ALL]) getall = 1; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!getall && !pg_tb[i]) continue; if (pg_tb[DCB_PG_ATTR_TC_ALL]) data = pg_tb[DCB_PG_ATTR_TC_ALL]; else data = pg_tb[i]; ret = nla_parse_nested_deprecated(param_tb, DCB_TC_ATTR_PARAM_MAX, data, dcbnl_tc_param_nest, NULL); if (ret) goto err_pg; param_nest = nla_nest_start_noflag(skb, i); if (!param_nest) goto err_pg; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } else { /* Tx */ netdev->dcbnl_ops->getpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } if (param_tb[DCB_TC_ATTR_PARAM_PGID] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); if (ret) goto err_param; } if 
(param_tb[DCB_TC_ATTR_PARAM_BW_PCT] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); if (ret) goto err_param; } nla_nest_end(skb, param_nest); } if (pg_tb[DCB_PG_ATTR_BW_ID_ALL]) getall = 1; else getall = 0; for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!getall && !pg_tb[i]) continue; tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } else { /* Tx */ netdev->dcbnl_ops->getpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } ret = nla_put_u8(skb, i, tc_pct); if (ret) goto err_pg; } nla_nest_end(skb, pg_nest); return 0; err_param: nla_nest_cancel(skb, param_nest); err_pg: nla_nest_cancel(skb, pg_nest); return -EMSGSIZE; } static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0); } static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1); } static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!tb[DCB_ATTR_STATE]) return -EINVAL; if (!netdev->dcbnl_ops->setstate) return -EOPNOTSUPP; value = nla_get_u8(tb[DCB_ATTR_STATE]); return nla_put_u8(skb, DCB_ATTR_STATE, netdev->dcbnl_ops->setstate(netdev, value)); } static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1]; int i; int ret; u8 value; if (!tb[DCB_ATTR_PFC_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->setpfccfg) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL); if (ret) return ret; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); netdev->dcbnl_ops->setpfccfg(netdev, data[i]->nla_type - DCB_PFC_UP_ATTR_0, value); } return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0); } static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { int ret; if (!tb[DCB_ATTR_SET_ALL]) return -EINVAL; if (!netdev->dcbnl_ops->setall) return -EOPNOTSUPP; ret = nla_put_u8(skb, DCB_ATTR_SET_ALL, netdev->dcbnl_ops->setall(netdev)); dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0); return ret; } static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb, int dir) { struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; int ret; int i; u8 pgid; u8 up_map; u8 prio; u8 tc_pct; if (!tb[DCB_ATTR_PG_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->setpgtccfgtx || !netdev->dcbnl_ops->setpgtccfgrx || !netdev->dcbnl_ops->setpgbwgcfgtx || !netdev->dcbnl_ops->setpgbwgcfgrx) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest, NULL); if (ret) return ret; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!pg_tb[i]) continue; ret = nla_parse_nested_deprecated(param_tb, DCB_TC_ATTR_PARAM_MAX, pg_tb[i], dcbnl_tc_param_nest, NULL); if (ret) return ret; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if 
(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]) prio = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]); if (param_tb[DCB_TC_ATTR_PARAM_PGID]) pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]); if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT]) tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]); if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]) up_map = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } else { /* Tx */ netdev->dcbnl_ops->setpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!pg_tb[i]) continue; tc_pct = nla_get_u8(pg_tb[i]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } else { /* Tx */ netdev->dcbnl_ops->setpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } } return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0); } static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0); } static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1); } static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *bcn_nest; struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1]; u8 value_byte; u32 value_integer; int ret; bool getall = false; int i; if (!tb[DCB_ATTR_BCN]) return -EINVAL; if (!netdev->dcbnl_ops->getbcnrp || !netdev->dcbnl_ops->getbcncfg) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_bcn_nest, NULL); if (ret) return ret; bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN); if (!bcn_nest) return -EMSGSIZE; if (bcn_tb[DCB_BCN_ATTR_ALL]) getall = true; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0, &value_byte); ret = nla_put_u8(skb, i, value_byte); if (ret) goto err_bcn; } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcncfg(netdev, i, &value_integer); ret = nla_put_u32(skb, i, value_integer); if (ret) goto err_bcn; } nla_nest_end(skb, bcn_nest); return 0; err_bcn: nla_nest_cancel(skb, bcn_nest); return ret; } static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_BCN_ATTR_MAX + 1]; int i; int ret; u8 value_byte; u32 value_int; if (!tb[DCB_ATTR_BCN]) return -EINVAL; if (!netdev->dcbnl_ops->setbcncfg || !netdev->dcbnl_ops->setbcnrp) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_bcn_nest, NULL); if (ret) return ret; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (data[i] == NULL) continue; value_byte = nla_get_u8(data[i]); netdev->dcbnl_ops->setbcnrp(netdev, data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte); } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (data[i] == NULL) continue; value_int = nla_get_u32(data[i]); netdev->dcbnl_ops->setbcncfg(netdev, i, value_int); } return nla_put_u8(skb, DCB_ATTR_BCN, 0); } static int dcbnl_build_peer_app(struct net_device 
				 *netdev, struct sk_buff *skb,
				 int app_nested_type, int app_info_type,
				 int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;

	/*
	 * retrieve the peer app configuration from the driver. If the driver
	 * handlers fail exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc_array(app_count, sizeof(struct dcb_app),
				      GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/*
		 * build the message, from here on the only possible failure
		 * is due to the skb size
		 */
		err = -EMSGSIZE;

		app = nla_nest_start_noflag(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
				    &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}

static int dcbnl_getapptrust(struct net_device *netdev, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	enum ieee_attrs_app type;
	struct nlattr *apptrust;
	int nselectors, err, i;
	u8 *selectors;

	selectors = kzalloc(IEEE_8021QAZ_APP_SEL_MAX + 1, GFP_KERNEL);
	if (!selectors)
		return -ENOMEM;

	err = ops->dcbnl_getapptrust(netdev, selectors, &nselectors);
	if (err) {
		err = 0;
		goto out;
	}

	apptrust = nla_nest_start(skb, DCB_ATTR_DCB_APP_TRUST_TABLE);
	if (!apptrust) {
		err = -EMSGSIZE;
		goto out;
	}

	for (i = 0; i < nselectors; i++) {
		type = dcbnl_app_attr_type_get(selectors[i]);
		err = nla_put_u8(skb, type, selectors[i]);
		if (err) {
			nla_nest_cancel(skb, apptrust);
			goto out;
		}
	}
	nla_nest_end(skb, apptrust);

out:
	kfree(selectors);
	return err;
}

/* Set or delete APP table or rewrite table entries. The APP struct is
 * validated and the appropriate callback function is called.
 */
static int dcbnl_app_table_setdel(struct nlattr *attr,
				  struct net_device *netdev,
				  int (*setdel)(struct net_device *dev,
						struct dcb_app *app))
{
	struct dcb_app *app_data;
	enum ieee_attrs_app type;
	struct nlattr *attr_itr;
	int rem, err;

	nla_for_each_nested(attr_itr, attr, rem) {
		type = nla_type(attr_itr);

		if (!dcbnl_app_attr_type_validate(type))
			continue;

		if (nla_len(attr_itr) < sizeof(struct dcb_app))
			return -ERANGE;

		app_data = nla_data(attr_itr);

		if (!dcbnl_app_selector_validate(type, app_data->selector))
			return -EINVAL;

		err = setdel(netdev, app_data);
		if (err)
			return err;
	}

	return 0;
}

/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands.
*/ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee, *app, *rewr; struct dcb_app_type *itr; int dcbx; int err; if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) return -EMSGSIZE; ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE); if (!ieee) return -EMSGSIZE; if (ops->ieee_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) return -EMSGSIZE; } if (ops->ieee_getmaxrate) { struct ieee_maxrate maxrate; memset(&maxrate, 0, sizeof(maxrate)); err = ops->ieee_getmaxrate(netdev, &maxrate); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, sizeof(maxrate), &maxrate); if (err) return -EMSGSIZE; } } if (ops->ieee_getqcn) { struct ieee_qcn qcn; memset(&qcn, 0, sizeof(qcn)); err = ops->ieee_getqcn(netdev, &qcn); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_QCN, sizeof(qcn), &qcn); if (err) return -EMSGSIZE; } } if (ops->ieee_getqcnstats) { struct ieee_qcn_stats qcn_stats; memset(&qcn_stats, 0, sizeof(qcn_stats)); err = ops->ieee_getqcnstats(netdev, &qcn_stats); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS, sizeof(qcn_stats), &qcn_stats); if (err) return -EMSGSIZE; } } if (ops->ieee_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) return -EMSGSIZE; } if (ops->dcbnl_getbuffer) { struct dcbnl_buffer buffer; memset(&buffer, 0, sizeof(buffer)); err = ops->dcbnl_getbuffer(netdev, &buffer); if (!err && nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer)) return -EMSGSIZE; } app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE); if (!app) return -EMSGSIZE; spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { enum ieee_attrs_app type = dcbnl_app_attr_type_get(itr->app.selector); err = nla_put(skb, type, sizeof(itr->app), &itr->app); if (err) { spin_unlock_bh(&dcb_lock); return -EMSGSIZE; } } } if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock_bh(&dcb_lock); nla_nest_end(skb, app); rewr = nla_nest_start(skb, DCB_ATTR_DCB_REWR_TABLE); if (!rewr) return -EMSGSIZE; spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_rewr_list, list) { if (itr->ifindex == netdev->ifindex) { enum ieee_attrs_app type = dcbnl_app_attr_type_get(itr->app.selector); err = nla_put(skb, type, sizeof(itr->app), &itr->app); if (err) { spin_unlock_bh(&dcb_lock); nla_nest_cancel(skb, rewr); return -EMSGSIZE; } } } spin_unlock_bh(&dcb_lock); nla_nest_end(skb, rewr); if (ops->dcbnl_getapptrust) { err = dcbnl_getapptrust(netdev, skb); if (err) return err; } /* get peer info if available */ if (ops->ieee_peer_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_peer_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) return -EMSGSIZE; } if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) return -EMSGSIZE; } if (ops->peer_getappinfo && ops->peer_getapptable) { err = dcbnl_build_peer_app(netdev, skb, DCB_ATTR_IEEE_PEER_APP, DCB_ATTR_IEEE_APP_UNSPEC, DCB_ATTR_IEEE_APP); if (err) return -EMSGSIZE; } nla_nest_end(skb, ieee); if (dcbx >= 0) { err = nla_put_u8(skb, DCB_ATTR_DCBX, 
dcbx); if (err) return -EMSGSIZE; } return 0; } static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, int dir) { u8 pgid, up_map, prio, tc_pct; const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG; struct nlattr *pg = nla_nest_start_noflag(skb, i); if (!pg) return -EMSGSIZE; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { struct nlattr *tc_nest = nla_nest_start_noflag(skb, i); if (!tc_nest) return -EMSGSIZE; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); else ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct)) return -EMSGSIZE; nla_nest_end(skb, tc_nest); } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); else ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); if (nla_put_u8(skb, i, tc_pct)) return -EMSGSIZE; } nla_nest_end(skb, pg); return 0; } static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) { struct nlattr *cee, *app; struct dcb_app_type *itr; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; int dcbx, i, err = -EMSGSIZE; u8 value; if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) goto nla_put_failure; cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE); if (!cee) goto nla_put_failure; /* local pg */ if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) { err = dcbnl_cee_pg_fill(skb, netdev, 1); if (err) goto nla_put_failure; } if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) { err = dcbnl_cee_pg_fill(skb, netdev, 0); if (err) goto nla_put_failure; } /* local pfc */ if (ops->getpfccfg) { struct nlattr *pfc_nest = nla_nest_start_noflag(skb, DCB_ATTR_CEE_PFC); if (!pfc_nest) goto nla_put_failure; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); if (nla_put_u8(skb, i, value)) goto nla_put_failure; } nla_nest_end(skb, pfc_nest); } /* local app */ spin_lock_bh(&dcb_lock); app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE); if (!app) goto dcb_unlock; list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { struct nlattr *app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP); if (!app_nest) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, itr->app.selector); if (err) goto dcb_unlock; err = nla_put_u16(skb, DCB_APP_ATTR_ID, itr->app.protocol); if (err) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, itr->app.priority); if (err) goto dcb_unlock; nla_nest_end(skb, app_nest); } } nla_nest_end(skb, app); if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock_bh(&dcb_lock); /* features flags */ if (ops->getfeatcfg) { struct nlattr *feat = nla_nest_start_noflag(skb, DCB_ATTR_CEE_FEAT); if (!feat) goto nla_put_failure; for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) if (!ops->getfeatcfg(netdev, i, &value) && nla_put_u8(skb, i, value)) goto nla_put_failure; nla_nest_end(skb, feat); } /* peer info if available */ if (ops->cee_peer_getpg) { struct cee_pg pg; memset(&pg, 0, 
		       sizeof(pg));
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}

	return 0;

dcb_unlock:
	spin_unlock_bh(&dcb_lock);
nla_put_failure:
	err = -EMSGSIZE;
	return err;
}

static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 portid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
	if (!skb)
		return -ENOMEM;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_free(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}

int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);

int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);
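/*
 * Illustrative sketch (editor's addition, not part of dcbnl.c): the driver
 * side of the interface above.  A device exposes DCB by pointing
 * netdev->dcbnl_ops at a struct dcbnl_rtnl_ops; handlers such as
 * dcbnl_getstate()/dcbnl_setstate() then call back into it.  The "demo_"
 * names and the trivial single-variable state handling are hypothetical.
 */
static u8 demo_dcb_state;

static u8 demo_getstate(struct net_device *dev)
{
	return demo_dcb_state;	/* reported to userspace via DCB_ATTR_STATE */
}

static u8 demo_setstate(struct net_device *dev, u8 state)
{
	demo_dcb_state = !!state;
	return 0;		/* echoed back by dcbnl_setstate(); 0 = success */
}

static const struct dcbnl_rtnl_ops demo_dcbnl_ops = {
	.getstate	= demo_getstate,
	.setstate	= demo_setstate,
};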